Merge tag 'v4.9.233' into khadas-vims-4.9.y
author	Nick Xie <nick@khadas.com>	Sat, 7 Nov 2020 02:26:54 +0000 (10:26 +0800)
committer	Nick Xie <nick@khadas.com>	Sat, 7 Nov 2020 02:26:54 +0000 (10:26 +0800)
This is the 4.9.233 stable release

19 files changed:
Makefile
drivers/android/binder.c
drivers/gpio/gpiolib-of.c
drivers/pci/pcie/aspm.c
drivers/pci/quirks.c
fs/ext4/inode.c
fs/f2fs/dir.c
fs/xattr.c
include/asm-generic/vmlinux.lds.h
include/linux/mmzone.h
include/net/addrconf.h
include/net/sock.h
init/main.c
kernel/cgroup.c
kernel/trace/ftrace.c
mm/mmap.c
mm/page_alloc.c
net/core/sock.c
net/socket.c

diff --cc Makefile
Simple merge

diff --cc drivers/android/binder.c
@@@ -574,46 -334,10 +574,47 @@@ enum 
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
-       BINDER_LOOPER_STATE_POLL        = 0x20,
+       BINDER_LOOPER_STATE_NEED_RETURN = 0x20,
+       BINDER_LOOPER_STATE_POLL        = 0x40,
  };
  
 +/**
 + * struct binder_thread - binder thread bookkeeping
 + * @proc:                 binder process for this thread
 + *                        (invariant after initialization)
 + * @rb_node:              element for proc->threads rbtree
 + *                        (protected by @proc->inner_lock)
 + * @waiting_thread_node:  element for @proc->waiting_threads list
 + *                        (protected by @proc->inner_lock)
 + * @pid:                  PID for this thread
 + *                        (invariant after initialization)
 + * @looper:               bitmap of looping state
 + *                        (only accessed by this thread)
 + * @looper_need_return:   looping thread needs to exit driver
 + *                        (no lock needed)
 + * @transaction_stack:    stack of in-progress transactions for this thread
 + *                        (protected by @proc->inner_lock)
 + * @todo:                 list of work to do for this thread
 + *                        (protected by @proc->inner_lock)
 + * @process_todo:         whether work in @todo should be processed
 + *                        (protected by @proc->inner_lock)
 + * @return_error:         transaction errors reported by this thread
 + *                        (only accessed by this thread)
 + * @reply_error:          transaction errors reported by target thread
 + *                        (protected by @proc->inner_lock)
 + * @wait:                 wait queue for thread work
 + * @stats:                per-thread statistics
 + *                        (atomics, no lock needed)
 + * @tmp_ref:              temporary reference to indicate thread is in use
 + *                        (atomic since @proc->inner_lock cannot
 + *                        always be acquired)
 + * @is_dead:              thread is dead and awaiting free
 + *                        when outstanding transactions are cleaned up
 + *                        (protected by @proc->inner_lock)
 + * @task:                 struct task_struct for this thread
 + *
 + * Bookkeeping structure for binder threads.
 + */
  struct binder_thread {
        struct binder_proc *proc;
        struct rb_node rb_node;
        return retval;
  }
  
 -static inline void binder_lock(const char *tag)
 +static bool binder_has_work_ilocked(struct binder_thread *thread,
 +                                  bool do_proc_work)
  {
 -      trace_binder_lock(tag);
 -      mutex_lock(&binder_main_lock);
 -      trace_binder_locked(tag);
 +      return thread->process_todo ||
 +              thread->looper_need_return ||
 +              (do_proc_work &&
 +               !binder_worklist_empty_ilocked(&thread->proc->todo));
  }
  
 -static inline void binder_unlock(const char *tag)
 +static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
  {
 -      trace_binder_unlock(tag);
 -      mutex_unlock(&binder_main_lock);
 -}
 +      bool has_work;
  
 -static void binder_set_nice(long nice)
 -{
 -      long min_nice;
 +      binder_inner_proc_lock(thread->proc);
 +      has_work = binder_has_work_ilocked(thread, do_proc_work);
 +      binder_inner_proc_unlock(thread->proc);
  
 -      if (can_nice(current, nice)) {
 -              set_user_nice(current, nice);
 -              return;
 -      }
 -      min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
 -      binder_debug(BINDER_DEBUG_PRIORITY_CAP,
 -                   "%d: nice value %ld not allowed use %ld instead\n",
 -                    current->pid, nice, min_nice);
 -      set_user_nice(current, min_nice);
 -      if (min_nice <= MAX_NICE)
 -              return;
 -      binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
 +      return has_work;
  }
  
 -static size_t binder_buffer_size(struct binder_proc *proc,
 -                               struct binder_buffer *buffer)
 +static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
  {
 -      if (list_is_last(&buffer->entry, &proc->buffers))
 -              return proc->buffer + proc->buffer_size - (void *)buffer->data;
 -      return (size_t)list_entry(buffer->entry.next,
 -                        struct binder_buffer, entry) - (size_t)buffer->data;
 +      return !thread->transaction_stack &&
 +              binder_worklist_empty_ilocked(&thread->todo) &&
 +              (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
 +                                 BINDER_LOOPER_STATE_REGISTERED));
  }
  
 -static void binder_insert_free_buffer(struct binder_proc *proc,
 -                                    struct binder_buffer *new_buffer)
 +static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
 +                                             bool sync)
  {
 -      struct rb_node **p = &proc->free_buffers.rb_node;
 -      struct rb_node *parent = NULL;
 -      struct binder_buffer *buffer;
 -      size_t buffer_size;
 -      size_t new_buffer_size;
 +      struct rb_node *n;
 +      struct binder_thread *thread;
 +
 +      for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
 +              thread = rb_entry(n, struct binder_thread, rb_node);
 +              if (thread->looper & BINDER_LOOPER_STATE_POLL &&
 +                  binder_available_for_proc_work_ilocked(thread)) {
 +                      if (sync)
 +                              wake_up_interruptible_sync(&thread->wait);
 +                      else
 +                              wake_up_interruptible(&thread->wait);
 +              }
 +      }
 +}
  
 -      BUG_ON(!new_buffer->free);
 +/**
 + * binder_select_thread_ilocked() - selects a thread for doing proc work.
 + * @proc:     process to select a thread from
 + *
 + * Note that calling this function moves the thread off the waiting_threads
 + * list, so it can only be woken up by the caller of this function, or a
 + * signal. Therefore, callers *should* always wake up the thread this function
 + * returns.
 + *
 + * Return:    If there's a thread currently waiting for process work,
 + *            returns that thread. Otherwise returns NULL.
 + */
 +static struct binder_thread *
 +binder_select_thread_ilocked(struct binder_proc *proc)
 +{
 +      struct binder_thread *thread;
  
 -      new_buffer_size = binder_buffer_size(proc, new_buffer);
 +      assert_spin_locked(&proc->inner_lock);
 +      thread = list_first_entry_or_null(&proc->waiting_threads,
 +                                        struct binder_thread,
 +                                        waiting_thread_node);
  
-       if (thread)
-               list_del_init(&thread->waiting_thread_node);
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: add free buffer, size %zd, at %pK\n",
+                     proc->pid, new_buffer_size, new_buffer);
  
 -      while (*p) {
 -              parent = *p;
 -              buffer = rb_entry(parent, struct binder_buffer, rb_node);
 -              BUG_ON(!buffer->free);
 +      return thread;
 +}
  
 -              buffer_size = binder_buffer_size(proc, buffer);
 +/**
 + * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 + * @proc:     process to wake up a thread in
 + * @thread:   specific thread to wake-up (may be NULL)
 + * @sync:     whether to do a synchronous wake-up
 + *
 + * This function wakes up a thread in the @proc process.
 + * The caller may provide a specific thread to wake-up in
 + * the @thread parameter. If @thread is NULL, this function
 + * will wake up threads that have called poll().
 + *
 + * Note that for this function to work as expected, callers
 + * should first call binder_select_thread() to find a thread
 + * to handle the work (if they don't have a thread already),
 + * and pass the result into the @thread parameter.
 + */
 +static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
 +                                       struct binder_thread *thread,
 +                                       bool sync)
 +{
 +      assert_spin_locked(&proc->inner_lock);
  
 -              if (new_buffer_size < buffer_size)
 -                      p = &parent->rb_left;
 +      if (thread) {
 +              if (sync)
 +                      wake_up_interruptible_sync(&thread->wait);
                else
 -                      p = &parent->rb_right;
 +                      wake_up_interruptible(&thread->wait);
 +              return;
        }
 -      rb_link_node(&new_buffer->rb_node, parent, p);
 -      rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
 +
 +      /* Didn't find a thread waiting for proc work; this can happen
 +       * in two scenarios:
 +       * 1. All threads are busy handling transactions
 +       *    In that case, one of those threads should call back into
 +       *    the kernel driver soon and pick up this work.
 +       * 2. Threads are using the (e)poll interface, in which case
 +       *    they may be blocked on the waitqueue without having been
 +       *    added to waiting_threads. For this case, we just iterate
 +       *    over all threads not handling transaction work, and
 +       *    wake them all up. We wake all because we don't know whether
 +       *    a thread that called into (e)poll is handling non-binder
 +       *    work currently.
 +       */
 +      binder_wakeup_poll_threads_ilocked(proc, sync);
  }
  
 -static void binder_insert_allocated_buffer(struct binder_proc *proc,
 -                                         struct binder_buffer *new_buffer)
 +static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
  {
 -      struct rb_node **p = &proc->allocated_buffers.rb_node;
 -      struct rb_node *parent = NULL;
 -      struct binder_buffer *buffer;
 -
 -      BUG_ON(new_buffer->free);
 +      struct binder_thread *thread = binder_select_thread_ilocked(proc);
  
 -      while (*p) {
 -              parent = *p;
 -              buffer = rb_entry(parent, struct binder_buffer, rb_node);
 -              BUG_ON(buffer->free);
 +      binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
 +}
  
 -              if (new_buffer < buffer)
 -                      p = &parent->rb_left;
 -              else if (new_buffer > buffer)
 -                      p = &parent->rb_right;
 -              else
 -                      BUG();
 -      }
 -      rb_link_node(&new_buffer->rb_node, parent, p);
 -      rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
 +static bool is_rt_policy(int policy)
 +{
 +      return policy == SCHED_FIFO || policy == SCHED_RR;
  }
  
 -static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
 -                                                uintptr_t user_ptr)
 +static bool is_fair_policy(int policy)
  {
 -      struct rb_node *n = proc->allocated_buffers.rb_node;
 -      struct binder_buffer *buffer;
 -      struct binder_buffer *kern_ptr;
 +      return policy == SCHED_NORMAL || policy == SCHED_BATCH;
 +}
  
 -      kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
 -              - offsetof(struct binder_buffer, data));
 +static bool binder_supported_policy(int policy)
 +{
 +      return is_fair_policy(policy) || is_rt_policy(policy);
 +}
  
 -      while (n) {
 -              buffer = rb_entry(n, struct binder_buffer, rb_node);
 -              BUG_ON(buffer->free);
 +static int to_userspace_prio(int policy, int kernel_priority)
 +{
 +      if (is_fair_policy(policy))
 +              return PRIO_TO_NICE(kernel_priority);
 +      else
 +              return MAX_USER_RT_PRIO - 1 - kernel_priority;
 +}
  
 -              if (kern_ptr < buffer)
 -                      n = n->rb_left;
 -              else if (kern_ptr > buffer)
 -                      n = n->rb_right;
 -              else
 -                      return buffer;
 -      }
 -      return NULL;
 +static int to_kernel_prio(int policy, int user_priority)
 +{
 +      if (is_fair_policy(policy))
 +              return NICE_TO_PRIO(user_priority);
 +      else
 +              return MAX_USER_RT_PRIO - 1 - user_priority;
  }
  
 -static int binder_update_page_range(struct binder_proc *proc, int allocate,
 -                                  void *start, void *end,
 -                                  struct vm_area_struct *vma)
 +static void binder_do_set_priority(struct task_struct *task,
 +                                 struct binder_priority desired,
 +                                 bool verify)
  {
 -      void *page_addr;
 -      unsigned long user_page_addr;
 -      struct page **page;
 -      struct mm_struct *mm;
 +      int priority; /* user-space prio value */
 +      bool has_cap_nice;
 +      unsigned int policy = desired.sched_policy;
  
-       if (task->policy == policy && task->normal_prio == desired.prio)
-               return;
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: %s pages %pK-%pK\n", proc->pid,
+                    allocate ? "allocate" : "free", start, end);
  
 -      if (end <= start)
 -              return 0;
 +      has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
  
 -      trace_binder_update_page_range(proc, allocate, start, end);
 +      priority = to_userspace_prio(policy, desired.prio);
  
 -      if (vma)
 -              mm = NULL;
 -      else
 -              mm = get_task_mm(proc->tsk);
 +      if (verify && is_rt_policy(policy) && !has_cap_nice) {
 +              long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
  
-               if (max_rtprio == 0) {
-                       policy = SCHED_NORMAL;
-                       priority = MIN_NICE;
-               } else if (priority > max_rtprio) {
-                       priority = max_rtprio;
+       if (mm) {
+               down_write(&mm->mmap_sem);
+               if (!mmget_still_valid(mm)) {
+                       if (allocate == 0)
+                               goto free_range;
+                       goto err_no_vma;
+               }
+               vma = proc->vma;
+               if (vma && mm != proc->vma_vm_mm) {
+                       pr_err("%d: vma mm and task mm mismatch\n",
+                               proc->pid);
+                       vma = NULL;
                }
        }
  
 -      if (allocate == 0)
 -              goto free_range;
 +      if (verify && is_fair_policy(policy) && !has_cap_nice) {
 +              long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
  
 -      if (vma == NULL) {
 -              pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
 -                      proc->pid);
 -              goto err_no_vma;
 +              if (min_nice > MAX_NICE) {
 +                      binder_user_error("%d RLIMIT_NICE not set\n",
 +                                        task->pid);
 +                      return;
 +              } else if (priority < min_nice) {
 +                      priority = min_nice;
 +              }
        }
  
 -      for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
 -              int ret;
 +      if (policy != desired.sched_policy ||
 +          to_kernel_prio(policy, priority) != desired.prio)
 +              binder_debug(BINDER_DEBUG_PRIORITY_CAP,
 +                           "%d: priority %d not allowed, using %d instead\n",
 +                            task->pid, desired.prio,
 +                            to_kernel_prio(policy, priority));
  
-       trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
-                                 to_kernel_prio(policy, priority),
-                                 desired.prio);
+               page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+               BUG_ON(*page);
+               *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+               if (*page == NULL) {
+                       pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+                               proc->pid, page_addr);
+                       goto err_alloc_page_failed;
+               }
+               ret = map_kernel_range_noflush((unsigned long)page_addr,
+                                       PAGE_SIZE, PAGE_KERNEL, page);
+               flush_cache_vmap((unsigned long)page_addr,
+                               (unsigned long)page_addr + PAGE_SIZE);
+               if (ret != 1) {
+                       pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+                              proc->pid, page_addr);
+                       goto err_map_kernel_failed;
+               }
+               user_page_addr =
+                       (uintptr_t)page_addr + proc->user_buffer_offset;
+               ret = vm_insert_page(vma, user_page_addr, page[0]);
+               if (ret) {
+                       pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+                              proc->pid, user_page_addr);
+                       goto err_vm_insert_page_failed;
+               }
+               /* vm_insert_page does not seem to increment the refcount */
+       }
+       if (mm) {
+               up_write(&mm->mmap_sem);
+               mmput(mm);
+       }
+       return 0;
  
 -free_range:
 -      for (page_addr = end - PAGE_SIZE; page_addr >= start;
 -           page_addr -= PAGE_SIZE) {
 -              page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
 -              if (vma)
 -                      zap_page_range(vma, (uintptr_t)page_addr +
 -                              proc->user_buffer_offset, PAGE_SIZE, NULL);
 -err_vm_insert_page_failed:
 -              unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 -err_map_kernel_failed:
 -              __free_page(*page);
 -              *page = NULL;
 -err_alloc_page_failed:
 -              ;
 -      }
 -err_no_vma:
 -      if (mm) {
 -              up_write(&mm->mmap_sem);
 -              mmput(mm);
 +      /* Set the actual priority */
 +      if (task->policy != policy || is_rt_policy(policy)) {
 +              struct sched_param params;
 +
 +              params.sched_priority = is_rt_policy(policy) ? priority : 0;
 +
 +              sched_setscheduler_nocheck(task,
 +                                         policy | SCHED_RESET_ON_FORK,
 +                                         &params);
        }
 -      return -ENOMEM;
 +      if (is_fair_policy(policy))
 +              set_user_nice(task, priority);
  }
  
 -static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 -                                            size_t data_size,
 -                                            size_t offsets_size, int is_async)
 +static void binder_set_priority(struct task_struct *task,
 +                              struct binder_priority desired)
  {
 -      struct rb_node *n = proc->free_buffers.rb_node;
 -      struct binder_buffer *buffer;
 -      size_t buffer_size;
 -      struct rb_node *best_fit = NULL;
 -      void *has_page_addr;
 -      void *end_page_addr;
 -      size_t size;
 -
 -      if (proc->vma == NULL) {
 -              pr_err("%d: binder_alloc_buf, no vma\n",
 -                     proc->pid);
 -              return NULL;
 -      }
 +      binder_do_set_priority(task, desired, /* verify = */ true);
 +}
  
 -      size = ALIGN(data_size, sizeof(void *)) +
 -              ALIGN(offsets_size, sizeof(void *));
 +static void binder_restore_priority(struct task_struct *task,
 +                                  struct binder_priority desired)
 +{
 +      binder_do_set_priority(task, desired, /* verify = */ false);
 +}
  
 -      if (size < data_size || size < offsets_size) {
 -              binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
 -                              proc->pid, data_size, offsets_size);
 -              return NULL;
 -      }
 +static void binder_transaction_priority(struct task_struct *task,
 +                                      struct binder_transaction *t,
 +                                      struct binder_priority node_prio,
 +                                      bool inherit_rt)
 +{
 +      struct binder_priority desired_prio = t->priority;
  
 -      if (is_async &&
 -          proc->free_async_space < size + sizeof(struct binder_buffer)) {
 -              binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
 -                           "%d: binder_alloc_buf size %zd failed, no async space left\n",
 -                            proc->pid, size);
 -              return NULL;
 +      if (t->set_priority_called)
 +              return;
 +
 +      t->set_priority_called = true;
 +      t->saved_priority.sched_policy = task->policy;
 +      t->saved_priority.prio = task->normal_prio;
 +
 +      if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
 +              desired_prio.prio = NICE_TO_PRIO(0);
 +              desired_prio.sched_policy = SCHED_NORMAL;
 +      }
 +
 +      if (node_prio.prio < t->priority.prio ||
 +          (node_prio.prio == t->priority.prio &&
 +           node_prio.sched_policy == SCHED_FIFO)) {
 +              /*
 +               * In case the minimum priority on the node is
 +               * higher (lower value), use that priority. If
 +               * the priority is the same, but the node uses
 +               * SCHED_FIFO, prefer SCHED_FIFO, since it can
 +               * run unbounded, unlike SCHED_RR.
 +               */
 +              desired_prio = node_prio;
        }
  
-       binder_set_priority(task, desired_prio);
+       while (n) {
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               BUG_ON(!buffer->free);
+               buffer_size = binder_buffer_size(proc, buffer);
+               if (size < buffer_size) {
+                       best_fit = n;
+                       n = n->rb_left;
+               } else if (size > buffer_size)
+                       n = n->rb_right;
+               else {
+                       best_fit = n;
+                       break;
+               }
+       }
+       if (best_fit == NULL) {
+               pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+                       proc->pid, size);
+               return NULL;
+       }
+       if (n == NULL) {
+               buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+               buffer_size = binder_buffer_size(proc, buffer);
+       }
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+                     proc->pid, size, buffer, buffer_size);
+       has_page_addr =
+               (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+       if (n == NULL) {
+               if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+                       buffer_size = size; /* no room for other buffers */
+               else
+                       buffer_size = size + sizeof(struct binder_buffer);
+       }
+       end_page_addr =
+               (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+       if (end_page_addr > has_page_addr)
+               end_page_addr = has_page_addr;
+       if (binder_update_page_range(proc, 1,
+           (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+               return NULL;
+       rb_erase(best_fit, &proc->free_buffers);
+       buffer->free = 0;
+       binder_insert_allocated_buffer(proc, buffer);
+       if (buffer_size != size) {
+               struct binder_buffer *new_buffer = (void *)buffer->data + size;
+               list_add(&new_buffer->entry, &buffer->entry);
+               new_buffer->free = 1;
+               binder_insert_free_buffer(proc, new_buffer);
+       }
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: binder_alloc_buf size %zd got %pK\n",
+                     proc->pid, size, buffer);
+       buffer->data_size = data_size;
+       buffer->offsets_size = offsets_size;
+       buffer->async_transaction = is_async;
+       if (is_async) {
+               proc->free_async_space -= size + sizeof(struct binder_buffer);
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+                            "%d: binder_alloc_buf size %zd async free %zd\n",
+                             proc->pid, size, proc->free_async_space);
+       }
+       return buffer;
+ }
+ 
+ static void *buffer_start_page(struct binder_buffer *buffer)
+ {
+       return (void *)((uintptr_t)buffer & PAGE_MASK);
+ }
+ 
+ static void *buffer_end_page(struct binder_buffer *buffer)
+ {
+       return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+ }
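
buffer_start_page() and buffer_end_page() round the first and the last byte
of the struct binder_buffer header down to a page boundary ((buffer + 1) - 1
is the header's last byte), so the merge logic below can tell which pages a
header shares with its neighbours. A minimal sketch of the mask arithmetic,
assuming 4 KiB pages:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL
	#define PAGE_MASK (~(PAGE_SIZE - 1))

	int main(void)
	{
		/* a hypothetical 64-byte header at 0x1fe0 straddles 0x2000 */
		uintptr_t buffer = 0x1fe0, hdr_size = 64;

		uintptr_t start_page = buffer & PAGE_MASK;		   /* 0x1000 */
		uintptr_t end_page = (buffer + hdr_size - 1) & PAGE_MASK;  /* 0x2000 */

		printf("start page %#lx, end page %#lx\n",
		       (unsigned long)start_page, (unsigned long)end_page);
		return 0;
	}
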
+ 
+ static void binder_delete_free_buffer(struct binder_proc *proc,
+                                     struct binder_buffer *buffer)
+ {
+       struct binder_buffer *prev, *next = NULL;
+       int free_page_end = 1;
+       int free_page_start = 1;
+       BUG_ON(proc->buffers.next == &buffer->entry);
+       prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+       BUG_ON(!prev->free);
+       if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+               free_page_start = 0;
+               if (buffer_end_page(prev) == buffer_end_page(buffer))
+                       free_page_end = 0;
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                            "%d: merge free, buffer %pK share page with %pK\n",
+                             proc->pid, buffer, prev);
+       }
+       if (!list_is_last(&buffer->entry, &proc->buffers)) {
+               next = list_entry(buffer->entry.next,
+                                 struct binder_buffer, entry);
+               if (buffer_start_page(next) == buffer_end_page(buffer)) {
+                       free_page_end = 0;
+                       if (buffer_start_page(next) ==
+                           buffer_start_page(buffer))
+                               free_page_start = 0;
+                       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                    "%d: merge free, buffer %pK share page with %pK\n",
+                                     proc->pid, buffer, prev);
+               }
+       }
+       list_del(&buffer->entry);
+       if (free_page_start || free_page_end) {
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                            "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
+                            proc->pid, buffer, free_page_start ? "" : " end",
+                            free_page_end ? "" : " start", prev, next);
+               binder_update_page_range(proc, 0, free_page_start ?
+                       buffer_start_page(buffer) : buffer_end_page(buffer),
+                       (free_page_end ? buffer_end_page(buffer) :
+                       buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+       }
+ }
+ 
+ static void binder_free_buf(struct binder_proc *proc,
+                           struct binder_buffer *buffer)
+ {
+       size_t size, buffer_size;
+       buffer_size = binder_buffer_size(proc, buffer);
+       size = ALIGN(buffer->data_size, sizeof(void *)) +
+               ALIGN(buffer->offsets_size, sizeof(void *));
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+                     proc->pid, buffer, size, buffer_size);
+       BUG_ON(buffer->free);
+       BUG_ON(size > buffer_size);
+       BUG_ON(buffer->transaction != NULL);
+       BUG_ON((void *)buffer < proc->buffer);
+       BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+       if (buffer->async_transaction) {
+               proc->free_async_space += size + sizeof(struct binder_buffer);
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+                            "%d: binder_free_buf size %zd async free %zd\n",
+                             proc->pid, size, proc->free_async_space);
+       }
+       binder_update_page_range(proc, 0,
+               (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+               (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+               NULL);
+       rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+       buffer->free = 1;
+       if (!list_is_last(&buffer->entry, &proc->buffers)) {
+               struct binder_buffer *next = list_entry(buffer->entry.next,
+                                               struct binder_buffer, entry);
+               if (next->free) {
+                       rb_erase(&next->rb_node, &proc->free_buffers);
+                       binder_delete_free_buffer(proc, next);
+               }
+       }
+       if (proc->buffers.next != &buffer->entry) {
+               struct binder_buffer *prev = list_entry(buffer->entry.prev,
+                                               struct binder_buffer, entry);
+               if (prev->free) {
+                       binder_delete_free_buffer(proc, buffer);
+                       rb_erase(&prev->rb_node, &proc->free_buffers);
+                       buffer = prev;
+               }
+       }
+       binder_insert_free_buffer(proc, buffer);
  }
  
- static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
-                                                  binder_uintptr_t ptr)
+ static struct binder_node *binder_get_node(struct binder_proc *proc,
+                                          binder_uintptr_t ptr)
  {
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;
@@@ -1791,1088 -1157,186 +2011,1091 @@@ static int binder_inc_ref_olocked(struc
        return 0;
  }
  
 -
 -static int binder_dec_ref(struct binder_ref *ref, int strong)
 +/**
 + * binder_dec_ref() - dec the ref for given handle
 + * @ref:      ref to be decremented
 + * @strong:   if true, strong decrement, else weak
 + *
 + * Decrement the ref.
 + *
 + * Return: true if ref is cleaned up and ready to be freed
 + */
 +static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
  {
        if (strong) {
 -              if (ref->strong == 0) {
 +              if (ref->data.strong == 0) {
                        binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
 -                                        ref->proc->pid, ref->debug_id,
 -                                        ref->desc, ref->strong, ref->weak);
 -                      return -EINVAL;
 +                                        ref->proc->pid, ref->data.debug_id,
 +                                        ref->data.desc, ref->data.strong,
 +                                        ref->data.weak);
 +                      return false;
                }
 -              ref->strong--;
 -              if (ref->strong == 0) {
 -                      int ret;
 +              ref->data.strong--;
 +              if (ref->data.strong == 0)
 +                      binder_dec_node(ref->node, strong, 1);
 +      } else {
 +              if (ref->data.weak == 0) {
 +                      binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
 +                                        ref->proc->pid, ref->data.debug_id,
 +                                        ref->data.desc, ref->data.strong,
 +                                        ref->data.weak);
 +                      return false;
 +              }
 +              ref->data.weak--;
 +      }
 +      if (ref->data.strong == 0 && ref->data.weak == 0) {
 +              binder_cleanup_ref_olocked(ref);
 +              return true;
 +      }
 +      return false;
 +}
  
 -                      ret = binder_dec_node(ref->node, strong, 1);
 -                      if (ret)
 -                              return ret;
 +/**
 + * binder_get_node_from_ref() - get the node from the given proc/desc
 + * @proc:     proc containing the ref
 + * @desc:     the handle associated with the ref
 + * @need_strong_ref: if true, only return node if ref is strong
 + * @rdata:    the id/refcount data for the ref
 + *
 + * Given a proc and ref handle, return the associated binder_node
 + *
 + * Return: a binder_node, or NULL if the ref was not found (or if a strong
 + *         ref was required and the ref is not strong)
 + */
 +static struct binder_node *binder_get_node_from_ref(
 +              struct binder_proc *proc,
 +              u32 desc, bool need_strong_ref,
 +              struct binder_ref_data *rdata)
 +{
 +      struct binder_node *node;
 +      struct binder_ref *ref;
 +
 +      binder_proc_lock(proc);
 +      ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
 +      if (!ref)
 +              goto err_no_ref;
 +      node = ref->node;
 +      /*
 +       * Take an implicit reference on the node to ensure
 +       * it stays alive until the call to binder_put_node()
 +       */
 +      binder_inc_node_tmpref(node);
 +      if (rdata)
 +              *rdata = ref->data;
 +      binder_proc_unlock(proc);
 +
 +      return node;
 +
 +err_no_ref:
 +      binder_proc_unlock(proc);
 +      return NULL;
 +}
 +
 +/**
 + * binder_free_ref() - free the binder_ref
 + * @ref:      ref to free
 + *
 + * Free the binder_ref. Free the binder_node indicated by ref->node
 + * (if non-NULL) and the binder_ref_death indicated by ref->death.
 + */
 +static void binder_free_ref(struct binder_ref *ref)
 +{
 +      if (ref->node)
 +              binder_free_node(ref->node);
 +      kfree(ref->death);
 +      kfree(ref);
 +}
 +
 +/**
 + * binder_update_ref_for_handle() - inc/dec the ref for given handle
 + * @proc:     proc containing the ref
 + * @desc:     the handle associated with the ref
 + * @increment:        true=inc reference, false=dec reference
 + * @strong:   true=strong reference, false=weak reference
 + * @rdata:    the id/refcount data for the ref
 + *
 + * Given a proc and ref handle, increment or decrement the ref
 + * according to "increment" arg.
 + *
 + * Return: 0 if successful, else errno
 + */
 +static int binder_update_ref_for_handle(struct binder_proc *proc,
 +              uint32_t desc, bool increment, bool strong,
 +              struct binder_ref_data *rdata)
 +{
 +      int ret = 0;
 +      struct binder_ref *ref;
 +      bool delete_ref = false;
 +
 +      binder_proc_lock(proc);
 +      ref = binder_get_ref_olocked(proc, desc, strong);
 +      if (!ref) {
 +              ret = -EINVAL;
 +              goto err_no_ref;
 +      }
 +      if (increment)
 +              ret = binder_inc_ref_olocked(ref, strong, NULL);
 +      else
 +              delete_ref = binder_dec_ref_olocked(ref, strong);
 +
 +      if (rdata)
 +              *rdata = ref->data;
 +      binder_proc_unlock(proc);
 +
 +      if (delete_ref)
 +              binder_free_ref(ref);
 +      return ret;
 +
 +err_no_ref:
 +      binder_proc_unlock(proc);
 +      return ret;
 +}
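
Note the ordering in binder_update_ref_for_handle() above: the ref is
decremented and unhooked while binder_proc_lock() is held, but
binder_free_ref() only runs after binder_proc_unlock(). A minimal userspace
sketch of this unlock-then-free shape (a pthread mutex stands in for the
proc lock; all names here are hypothetical):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct ref {
		int count;
		/* ... */
	};

	static pthread_mutex_t proc_lock = PTHREAD_MUTEX_INITIALIZER;

	static void put_ref(struct ref *ref)
	{
		bool delete_ref;

		pthread_mutex_lock(&proc_lock);
		delete_ref = (--ref->count == 0);	/* unhook while locked */
		pthread_mutex_unlock(&proc_lock);

		if (delete_ref)
			free(ref);	/* release memory off the lock */
	}
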
 +
 +/**
 + * binder_dec_ref_for_handle() - dec the ref for given handle
 + * @proc:     proc containing the ref
 + * @desc:     the handle associated with the ref
 + * @strong:   true=strong reference, false=weak reference
 + * @rdata:    the id/refcount data for the ref
 + *
 + * Just calls binder_update_ref_for_handle() to decrement the ref.
 + *
 + * Return: 0 if successful, else errno
 + */
 +static int binder_dec_ref_for_handle(struct binder_proc *proc,
 +              uint32_t desc, bool strong, struct binder_ref_data *rdata)
 +{
 +      return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
 +}
 +
 +
 +/**
 + * binder_inc_ref_for_node() - increment the ref for given proc/node
 + * @proc:      proc containing the ref
 + * @node:      target node
 + * @strong:    true=strong reference, false=weak reference
 + * @target_list: worklist to use if node is incremented
 + * @rdata:     the id/refcount data for the ref
 + *
 + * Given a proc and node, increment the ref. Create the ref if it
 + * doesn't already exist
 + *
 + * Return: 0 if successful, else errno
 + */
 +static int binder_inc_ref_for_node(struct binder_proc *proc,
 +                      struct binder_node *node,
 +                      bool strong,
 +                      struct list_head *target_list,
 +                      struct binder_ref_data *rdata)
 +{
 +      struct binder_ref *ref;
 +      struct binder_ref *new_ref = NULL;
 +      int ret = 0;
 +
 +      binder_proc_lock(proc);
 +      ref = binder_get_ref_for_node_olocked(proc, node, NULL);
 +      if (!ref) {
 +              binder_proc_unlock(proc);
 +              new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
 +              if (!new_ref)
 +                      return -ENOMEM;
 +              binder_proc_lock(proc);
 +              ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
 +      }
 +      ret = binder_inc_ref_olocked(ref, strong, target_list);
 +      *rdata = ref->data;
 +      binder_proc_unlock(proc);
 +      if (new_ref && ref != new_ref)
 +              /*
 +               * Another thread created the ref first so
 +               * free the one we allocated
 +               */
 +              kfree(new_ref);
 +      return ret;
 +}
 +
 +static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
 +                                         struct binder_transaction *t)
 +{
 +      BUG_ON(!target_thread);
 +      assert_spin_locked(&target_thread->proc->inner_lock);
 +      BUG_ON(target_thread->transaction_stack != t);
 +      BUG_ON(target_thread->transaction_stack->from != target_thread);
 +      target_thread->transaction_stack =
 +              target_thread->transaction_stack->from_parent;
 +      t->from = NULL;
 +}
 +
 +/**
 + * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 + * @thread:   thread to decrement
 + *
 + * A thread needs to be kept alive while being used to create or
 + * handle a transaction. binder_get_txn_from() is used to safely
 + * extract t->from from a binder_transaction and keep the thread
 + * indicated by t->from from being freed. When done with that
 + * binder_thread, this function is called to decrement the
 + * tmp_ref and free if appropriate (thread has been released
 + * and no transaction being processed by the driver)
 + */
 +static void binder_thread_dec_tmpref(struct binder_thread *thread)
 +{
 +      /*
 +       * atomic is used to protect the counter value while
 +       * it cannot reach zero or thread->is_dead is false
 +       */
 +      binder_inner_proc_lock(thread->proc);
 +      atomic_dec(&thread->tmp_ref);
 +      if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
 +              binder_inner_proc_unlock(thread->proc);
 +              binder_free_thread(thread);
 +              return;
 +      }
 +      binder_inner_proc_unlock(thread->proc);
 +}
 +
 +/**
 + * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 + * @proc:     proc to decrement
 + *
 + * A binder_proc needs to be kept alive while being used to create or
 + * handle a transaction. proc->tmp_ref is incremented when
 + * creating a new transaction or the binder_proc is currently in-use
 + * by threads that are being released. When done with the binder_proc,
 + * this function is called to decrement the counter and free the
 + * proc if appropriate (proc has been released, all threads have
 + * been released and not currently in-use to process a transaction).
 + */
 +static void binder_proc_dec_tmpref(struct binder_proc *proc)
 +{
 +      binder_inner_proc_lock(proc);
 +      proc->tmp_ref--;
 +      if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
 +                      !proc->tmp_ref) {
 +              binder_inner_proc_unlock(proc);
 +              binder_free_proc(proc);
 +              return;
 +      }
 +      binder_inner_proc_unlock(proc);
 +}
 +
 +/**
 + * binder_get_txn_from() - safely extract the "from" thread in transaction
 + * @t:        binder transaction for t->from
 + *
 + * Atomically return the "from" thread and increment the tmp_ref
 + * count for the thread to ensure it stays alive until
 + * binder_thread_dec_tmpref() is called.
 + *
 + * Return: the value of t->from
 + */
 +static struct binder_thread *binder_get_txn_from(
 +              struct binder_transaction *t)
 +{
 +      struct binder_thread *from;
 +
 +      spin_lock(&t->lock);
 +      from = t->from;
 +      if (from)
 +              atomic_inc(&from->tmp_ref);
 +      spin_unlock(&t->lock);
 +      return from;
 +}
 +
 +/**
 + * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 + * @t:        binder transaction for t->from
 + *
 + * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 + * to guarantee that the thread cannot be released while operating on it.
 + * The caller must call binder_inner_proc_unlock() to release the inner lock
 + * as well as call binder_dec_thread_txn() to release the reference.
 + *
 + * Return: the value of t->from
 + */
 +static struct binder_thread *binder_get_txn_from_and_acq_inner(
 +              struct binder_transaction *t)
 +{
 +      struct binder_thread *from;
 +
 +      from = binder_get_txn_from(t);
 +      if (!from)
 +              return NULL;
 +      binder_inner_proc_lock(from->proc);
 +      if (t->from) {
 +              BUG_ON(from != t->from);
 +              return from;
 +      }
 +      binder_inner_proc_unlock(from->proc);
 +      binder_thread_dec_tmpref(from);
 +      return NULL;
 +}
 +
 +static void binder_free_transaction(struct binder_transaction *t)
 +{
 +      if (t->buffer)
 +              t->buffer->transaction = NULL;
 +      kfree(t);
 +      binder_stats_deleted(BINDER_STAT_TRANSACTION);
 +}
 +
 +static void binder_send_failed_reply(struct binder_transaction *t,
 +                                   uint32_t error_code)
 +{
 +      struct binder_thread *target_thread;
 +      struct binder_transaction *next;
 +
 +      BUG_ON(t->flags & TF_ONE_WAY);
 +      while (1) {
 +              target_thread = binder_get_txn_from_and_acq_inner(t);
 +              if (target_thread) {
 +                      binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 +                                   "send failed reply for transaction %d to %d:%d\n",
 +                                    t->debug_id,
 +                                    target_thread->proc->pid,
 +                                    target_thread->pid);
 +
 +                      binder_pop_transaction_ilocked(target_thread, t);
 +                      if (target_thread->reply_error.cmd == BR_OK) {
 +                              target_thread->reply_error.cmd = error_code;
 +                              binder_enqueue_thread_work_ilocked(
 +                                      target_thread,
 +                                      &target_thread->reply_error.work);
 +                              wake_up_interruptible(&target_thread->wait);
 +                      } else {
 +                              /*
 +                               * Cannot get here for normal operation, but
 +                               * we can if multiple synchronous transactions
 +                               * are sent without blocking for responses.
 +                               * Just ignore the 2nd error in this case.
 +                               */
 +                              pr_warn("Unexpected reply error: %u\n",
 +                                      target_thread->reply_error.cmd);
 +                      }
 +                      binder_inner_proc_unlock(target_thread->proc);
 +                      binder_thread_dec_tmpref(target_thread);
 +                      binder_free_transaction(t);
 +                      return;
                }
 +              next = t->from_parent;
 +
 +              binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 +                           "send failed reply for transaction %d, target dead\n",
 +                           t->debug_id);
 +
 +              binder_free_transaction(t);
 +              if (next == NULL) {
 +                      binder_debug(BINDER_DEBUG_DEAD_BINDER,
 +                                   "reply failed, no target thread at root\n");
 +                      return;
 +              }
 +              t = next;
 +              binder_debug(BINDER_DEBUG_DEAD_BINDER,
 +                           "reply failed, no target thread -- retry %d\n",
 +                            t->debug_id);
 +      }
 +}
 +
 +/**
 + * binder_cleanup_transaction() - cleans up undelivered transaction
 + * @t:                transaction that needs to be cleaned up
 + * @reason:   reason the transaction wasn't delivered
 + * @error_code:       error to return to caller (if synchronous call)
 + */
 +static void binder_cleanup_transaction(struct binder_transaction *t,
 +                                     const char *reason,
 +                                     uint32_t error_code)
 +{
 +      if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
 +              binder_send_failed_reply(t, error_code);
        } else {
 -              if (ref->weak == 0) {
 -                      binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
 -                                        ref->proc->pid, ref->debug_id,
 -                                        ref->desc, ref->strong, ref->weak);
 -                      return -EINVAL;
 +              binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
 +                      "undelivered transaction %d, %s\n",
 +                      t->debug_id, reason);
 +              binder_free_transaction(t);
 +      }
 +}
 +
 +/**
 + * binder_validate_object() - checks for a valid metadata object in a buffer.
 + * @buffer:   binder_buffer that we're parsing.
 + * @offset:   offset in the buffer at which to validate an object.
 + *
 + * Return:    If there's a valid metadata object at @offset in @buffer, the
 + *            size of that object. Otherwise, it returns zero.
 + */
 +static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
 +{
 +      /* Check if we can read a header first */
 +      struct binder_object_header *hdr;
 +      size_t object_size = 0;
 +
 +      if (buffer->data_size < sizeof(*hdr) ||
 +          offset > buffer->data_size - sizeof(*hdr) ||
 +          !IS_ALIGNED(offset, sizeof(u32)))
 +              return 0;
 +
 +      /* Ok, now see if we can read a complete object. */
 +      hdr = (struct binder_object_header *)(buffer->data + offset);
 +      switch (hdr->type) {
 +      case BINDER_TYPE_BINDER:
 +      case BINDER_TYPE_WEAK_BINDER:
 +      case BINDER_TYPE_HANDLE:
 +      case BINDER_TYPE_WEAK_HANDLE:
 +              object_size = sizeof(struct flat_binder_object);
 +              break;
 +      case BINDER_TYPE_FD:
 +              object_size = sizeof(struct binder_fd_object);
 +              break;
 +      case BINDER_TYPE_PTR:
 +              object_size = sizeof(struct binder_buffer_object);
 +              break;
 +      case BINDER_TYPE_FDA:
 +              object_size = sizeof(struct binder_fd_array_object);
 +              break;
 +      default:
 +              return 0;
 +      }
 +      if (offset <= buffer->data_size - object_size &&
 +          buffer->data_size >= object_size)
 +              return object_size;
 +      else
 +              return 0;
 +}
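
The bounds test above is deliberately subtraction-first: once
buffer->data_size >= sizeof(*hdr) (or >= object_size) has been established,
comparing offset against data_size - size cannot wrap, whereas computing
offset + size could. The same shape as a standalone sketch (not the kernel
code itself):

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	/* true when [offset, offset + obj_size) lies inside buf_size bytes
	 * and offset is u32-aligned -- the wrap-safe pattern used by
	 * binder_validate_object() */
	static bool object_in_bounds(size_t buf_size, size_t offset,
				     size_t obj_size)
	{
		if (buf_size < obj_size)	/* makes the subtraction safe */
			return false;
		if (offset > buf_size - obj_size)	/* no offset + obj_size */
			return false;
		return (offset % sizeof(uint32_t)) == 0;
	}
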
 +
 +/**
 + * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 + * @b:                binder_buffer containing the object
 + * @index:    index in offset array at which the binder_buffer_object is
 + *            located
 + * @start:    points to the start of the offset array
 + * @num_valid:        the number of valid offsets in the offset array
 + *
 + * Return:    If @index is within the valid range of the offset array
 + *            described by @start and @num_valid, and if there's a valid
 + *            binder_buffer_object at the offset found in index @index
 + *            of the offset array, that object is returned. Otherwise,
 + *            %NULL is returned.
 + *            Note that the offset found in index @index itself is not
 + *            verified; this function assumes that @num_valid elements
 + *            from @start were previously verified to have valid offsets.
 + */
 +static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
 +                                                      binder_size_t index,
 +                                                      binder_size_t *start,
 +                                                      binder_size_t num_valid)
 +{
 +      struct binder_buffer_object *buffer_obj;
 +      binder_size_t *offp;
 +
 +      if (index >= num_valid)
 +              return NULL;
 +
 +      offp = start + index;
 +      buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
 +      if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
 +              return NULL;
 +
 +      return buffer_obj;
 +}
 +
 +/**
 + * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 + * @b:                        transaction buffer
 + * @objects_start     start of objects buffer
 + * @buffer:           binder_buffer_object in which to fix up
 + * @offset:           start offset in @buffer to fix up
 + * @last_obj:         last binder_buffer_object that we fixed up in
 + * @last_min_offset:  minimum fixup offset in @last_obj
 + *
 + * Return:            %true if a fixup in buffer @buffer at offset @offset is
 + *                    allowed.
 + *
 + * For safety reasons, we only allow fixups inside a buffer to happen
 + * at increasing offsets; additionally, we only allow fixup on the last
 + * buffer object that was verified, or one of its parents.
 + *
 + * Example of what is allowed:
 + *
 + * A
 + *   B (parent = A, offset = 0)
 + *   C (parent = A, offset = 16)
 + *     D (parent = C, offset = 0)
 + *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 + *
 + * Examples of what is not allowed:
 + *
 + * Decreasing offsets within the same parent:
 + * A
 + *   C (parent = A, offset = 16)
 + *   B (parent = A, offset = 0) // decreasing offset within A
 + *
 + * Referring to a parent that wasn't the last object or any of its parents:
 + * A
 + *   B (parent = A, offset = 0)
 + *   C (parent = A, offset = 0)
 + *   C (parent = A, offset = 16)
 + *     D (parent = B, offset = 0) // B is not A or any of A's parents
 + */
 +static bool binder_validate_fixup(struct binder_buffer *b,
 +                                binder_size_t *objects_start,
 +                                struct binder_buffer_object *buffer,
 +                                binder_size_t fixup_offset,
 +                                struct binder_buffer_object *last_obj,
 +                                binder_size_t last_min_offset)
 +{
 +      if (!last_obj) {
 +              /* Nothing to fix up in */
 +              return false;
 +      }
 +
 +      while (last_obj != buffer) {
 +              /*
 +               * Safe to retrieve the parent of last_obj, since it
 +               * was already previously verified by the driver.
 +               */
 +              if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
 +                      return false;
 +              last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
 +              last_obj = (struct binder_buffer_object *)
 +                      (b->data + *(objects_start + last_obj->parent));
 +      }
 +      return (fixup_offset >= last_min_offset);
 +}
 +
 +static void binder_transaction_buffer_release(struct binder_proc *proc,
 +                                            struct binder_buffer *buffer,
 +                                            binder_size_t *failed_at)
 +{
 +      binder_size_t *offp, *off_start, *off_end;
 +      int debug_id = buffer->debug_id;
 +
 +      binder_debug(BINDER_DEBUG_TRANSACTION,
 +                   "%d buffer release %d, size %zd-%zd, failed at %pK\n",
 +                   proc->pid, buffer->debug_id,
 +                   buffer->data_size, buffer->offsets_size, failed_at);
 +
 +      if (buffer->target_node)
 +              binder_dec_node(buffer->target_node, 1, 0);
 +
 +      off_start = (binder_size_t *)(buffer->data +
 +                                    ALIGN(buffer->data_size, sizeof(void *)));
 +      if (failed_at)
 +              off_end = failed_at;
 +      else
 +              off_end = (void *)off_start + buffer->offsets_size;
 +      for (offp = off_start; offp < off_end; offp++) {
 +              struct binder_object_header *hdr;
 +              size_t object_size = binder_validate_object(buffer, *offp);
 +
 +              if (object_size == 0) {
 +                      pr_err("transaction release %d bad object at offset %lld, size %zd\n",
 +                             debug_id, (u64)*offp, buffer->data_size);
 +                      continue;
 +              }
 +              hdr = (struct binder_object_header *)(buffer->data + *offp);
 +              switch (hdr->type) {
 +              case BINDER_TYPE_BINDER:
 +              case BINDER_TYPE_WEAK_BINDER: {
 +                      struct flat_binder_object *fp;
 +                      struct binder_node *node;
 +
 +                      fp = to_flat_binder_object(hdr);
 +                      node = binder_get_node(proc, fp->binder);
 +                      if (node == NULL) {
 +                              pr_err("transaction release %d bad node %016llx\n",
 +                                     debug_id, (u64)fp->binder);
 +                              break;
 +                      }
 +                      binder_debug(BINDER_DEBUG_TRANSACTION,
 +                                   "        node %d u%016llx\n",
 +                                   node->debug_id, (u64)node->ptr);
 +                      binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
 +                                      0);
 +                      binder_put_node(node);
 +              } break;
 +              case BINDER_TYPE_HANDLE:
 +              case BINDER_TYPE_WEAK_HANDLE: {
 +                      struct flat_binder_object *fp;
 +                      struct binder_ref_data rdata;
 +                      int ret;
 +
 +                      fp = to_flat_binder_object(hdr);
 +                      ret = binder_dec_ref_for_handle(proc, fp->handle,
 +                              hdr->type == BINDER_TYPE_HANDLE, &rdata);
 +
 +                      if (ret) {
 +                              pr_err("transaction release %d bad handle %d, ret = %d\n",
 +                               debug_id, fp->handle, ret);
 +                              break;
 +                      }
 +                      binder_debug(BINDER_DEBUG_TRANSACTION,
 +                                   "        ref %d desc %d\n",
 +                                   rdata.debug_id, rdata.desc);
 +              } break;
 +
 +              case BINDER_TYPE_FD: {
 +                      struct binder_fd_object *fp = to_binder_fd_object(hdr);
 +
 +                      binder_debug(BINDER_DEBUG_TRANSACTION,
 +                                   "        fd %d\n", fp->fd);
 +                      if (failed_at)
 +                              task_close_fd(proc, fp->fd);
 +              } break;
 +              case BINDER_TYPE_PTR:
 +                      /*
 +                       * Nothing to do here, this will get cleaned up when the
 +                       * transaction buffer gets freed
 +                       */
 +                      break;
 +              case BINDER_TYPE_FDA: {
 +                      struct binder_fd_array_object *fda;
 +                      struct binder_buffer_object *parent;
 +                      uintptr_t parent_buffer;
 +                      u32 *fd_array;
 +                      size_t fd_index;
 +                      binder_size_t fd_buf_size;
 +
 +                      fda = to_binder_fd_array_object(hdr);
 +                      parent = binder_validate_ptr(buffer, fda->parent,
 +                                                   off_start,
 +                                                   offp - off_start);
 +                      if (!parent) {
 +                      pr_err("transaction release %d bad parent offset\n",
 +                                     debug_id);
 +                              continue;
 +                      }
 +                      /*
 +                       * Since the parent was already fixed up, convert it
 +                       * back to the kernel address space to access it
 +                       */
 +                      parent_buffer = parent->buffer -
 +                              binder_alloc_get_user_buffer_offset(
 +                                              &proc->alloc);
 +
 +                      fd_buf_size = sizeof(u32) * fda->num_fds;
 +                      if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
 +                              pr_err("transaction release %d invalid number of fds (%lld)\n",
 +                                     debug_id, (u64)fda->num_fds);
 +                              continue;
 +                      }
 +                      if (fd_buf_size > parent->length ||
 +                          fda->parent_offset > parent->length - fd_buf_size) {
 +                              /* No space for all file descriptors here. */
 +                              pr_err("transaction release %d not enough space for %lld fds in buffer\n",
 +                                     debug_id, (u64)fda->num_fds);
 +                              continue;
 +                      }
 +                      fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
 +                      for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
 +                              task_close_fd(proc, fd_array[fd_index]);
 +              } break;
 +              default:
 +                      pr_err("transaction release %d bad object type %x\n",
 +                              debug_id, hdr->type);
 +                      break;
                }
 -              ref->weak--;
        }
 -      if (ref->strong == 0 && ref->weak == 0)
 -              binder_delete_ref(ref);
 -      return 0;
  }
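/*
 * Editor's sketch: the layout the release loop above walks. The offsets
 * array follows the data area, aligned to sizeof(void *); each entry is
 * the byte offset of an object header within the data area. A minimal
 * reconstruction of the cursor setup, assuming the 4.9-era struct
 * binder_buffer fields used in this file:
 *
 *	offp    = (binder_size_t *)(buffer->data +
 *				    ALIGN(buffer->data_size, sizeof(void *)));
 *	off_end = failed_at ? failed_at
 *			    : (void *)offp + buffer->offsets_size;
 *
 * When a transaction fails mid-translation, failed_at points at the
 * first offset that was never translated, so only objects whose
 * references were actually taken get them dropped here.
 */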
  
 -static void binder_pop_transaction(struct binder_thread *target_thread,
 -                                 struct binder_transaction *t)
 +static int binder_translate_binder(struct flat_binder_object *fp,
 +                                 struct binder_transaction *t,
 +                                 struct binder_thread *thread)
 +{
 +      struct binder_node *node;
 +      struct binder_proc *proc = thread->proc;
 +      struct binder_proc *target_proc = t->to_proc;
 +      struct binder_ref_data rdata;
 +      int ret = 0;
 +
 +      node = binder_get_node(proc, fp->binder);
 +      if (!node) {
 +              node = binder_new_node(proc, fp);
 +              if (!node)
 +                      return -ENOMEM;
 +      }
 +      if (fp->cookie != node->cookie) {
 +              binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
 +                                proc->pid, thread->pid, (u64)fp->binder,
 +                                node->debug_id, (u64)fp->cookie,
 +                                (u64)node->cookie);
 +              ret = -EINVAL;
 +              goto done;
 +      }
 +      if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
 +              ret = -EPERM;
 +              goto done;
 +      }
 +
 +      ret = binder_inc_ref_for_node(target_proc, node,
 +                      fp->hdr.type == BINDER_TYPE_BINDER,
 +                      &thread->todo, &rdata);
 +      if (ret)
 +              goto done;
 +
 +      if (fp->hdr.type == BINDER_TYPE_BINDER)
 +              fp->hdr.type = BINDER_TYPE_HANDLE;
 +      else
 +              fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
 +      fp->binder = 0;
 +      fp->handle = rdata.desc;
 +      fp->cookie = 0;
 +
 +      trace_binder_transaction_node_to_ref(t, node, &rdata);
 +      binder_debug(BINDER_DEBUG_TRANSACTION,
 +                   "        node %d u%016llx -> ref %d desc %d\n",
 +                   node->debug_id, (u64)node->ptr,
 +                   rdata.debug_id, rdata.desc);
 +done:
 +      binder_put_node(node);
 +      return ret;
 +}
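/*
 * Editor's note: the net effect of binder_translate_binder() on the
 * flat_binder_object as delivered to the receiver (values illustrative):
 *
 *	sender's copy                        receiver's copy
 *	hdr.type = BINDER_TYPE_BINDER    ->  hdr.type = BINDER_TYPE_HANDLE
 *	binder   = sender userspace ptr  ->  binder   = 0
 *	cookie   = sender cookie         ->  cookie   = 0
 *	handle   = (unused)              ->  handle   = rdata.desc
 *
 * The sender's raw pointer never crosses the process boundary; the
 * receiver only ever sees a descriptor it can hand back to the driver.
 */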
 +
 +static int binder_translate_handle(struct flat_binder_object *fp,
 +                                 struct binder_transaction *t,
 +                                 struct binder_thread *thread)
 +{
 +      struct binder_proc *proc = thread->proc;
 +      struct binder_proc *target_proc = t->to_proc;
 +      struct binder_node *node;
 +      struct binder_ref_data src_rdata;
 +      int ret = 0;
 +
 +      node = binder_get_node_from_ref(proc, fp->handle,
 +                      fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
 +      if (!node) {
 +              binder_user_error("%d:%d got transaction with invalid handle, %d\n",
 +                                proc->pid, thread->pid, fp->handle);
 +              return -EINVAL;
 +      }
 +      if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
 +              ret = -EPERM;
 +              goto done;
 +      }
 +
 +      binder_node_lock(node);
 +      if (node->proc == target_proc) {
 +              if (fp->hdr.type == BINDER_TYPE_HANDLE)
 +                      fp->hdr.type = BINDER_TYPE_BINDER;
 +              else
 +                      fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
 +              fp->binder = node->ptr;
 +              fp->cookie = node->cookie;
 +              if (node->proc)
 +                      binder_inner_proc_lock(node->proc);
 +              binder_inc_node_nilocked(node,
 +                                       fp->hdr.type == BINDER_TYPE_BINDER,
 +                                       0, NULL);
 +              if (node->proc)
 +                      binder_inner_proc_unlock(node->proc);
 +              trace_binder_transaction_ref_to_node(t, node, &src_rdata);
 +              binder_debug(BINDER_DEBUG_TRANSACTION,
 +                           "        ref %d desc %d -> node %d u%016llx\n",
 +                           src_rdata.debug_id, src_rdata.desc, node->debug_id,
 +                           (u64)node->ptr);
 +              binder_node_unlock(node);
 +      } else {
 +              struct binder_ref_data dest_rdata;
 +
 +              binder_node_unlock(node);
 +              ret = binder_inc_ref_for_node(target_proc, node,
 +                              fp->hdr.type == BINDER_TYPE_HANDLE,
 +                              NULL, &dest_rdata);
 +              if (ret)
 +                      goto done;
 +
 +              fp->binder = 0;
 +              fp->handle = dest_rdata.desc;
 +              fp->cookie = 0;
 +              trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
 +                                                  &dest_rdata);
 +              binder_debug(BINDER_DEBUG_TRANSACTION,
 +                           "        ref %d desc %d -> ref %d desc %d (node %d)\n",
 +                           src_rdata.debug_id, src_rdata.desc,
 +                           dest_rdata.debug_id, dest_rdata.desc,
 +                           node->debug_id);
 +      }
 +done:
 +      binder_put_node(node);
 +      return ret;
 +}
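/*
 * Editor's note: binder_translate_handle() has two outcomes. If the node
 * lives in the target process itself, the handle collapses back into a
 * direct BINDER_TYPE_BINDER/WEAK_BINDER object with the original
 * ptr/cookie restored, so a process receiving a reference to one of its
 * own nodes gets its own pointers back. Otherwise a ref is taken (or
 * reused) in the target process and the handle is rewritten to the
 * target-local descriptor.
 */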
 +
 +static int binder_translate_fd(int fd,
 +                             struct binder_transaction *t,
 +                             struct binder_thread *thread,
 +                             struct binder_transaction *in_reply_to)
 +{
 +      struct binder_proc *proc = thread->proc;
 +      struct binder_proc *target_proc = t->to_proc;
 +      int target_fd;
 +      struct file *file;
 +      int ret;
 +      bool target_allows_fd;
 +
 +      if (in_reply_to)
 +              target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
 +      else
 +              target_allows_fd = t->buffer->target_node->accept_fds;
 +      if (!target_allows_fd) {
 +              binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
 +                                proc->pid, thread->pid,
 +                                in_reply_to ? "reply" : "transaction",
 +                                fd);
 +              ret = -EPERM;
 +              goto err_fd_not_accepted;
 +      }
 +
 +      file = fget(fd);
 +      if (!file) {
 +              binder_user_error("%d:%d got transaction with invalid fd, %d\n",
 +                                proc->pid, thread->pid, fd);
 +              ret = -EBADF;
 +              goto err_fget;
 +      }
 +      ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
 +      if (ret < 0) {
 +              ret = -EPERM;
 +              goto err_security;
 +      }
 +
 +      target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 +      if (target_fd < 0) {
 +              ret = -ENOMEM;
 +              goto err_get_unused_fd;
 +      }
 +      task_fd_install(target_proc, target_fd, file);
 +      trace_binder_transaction_fd(t, fd, target_fd);
 +      binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
 +                   fd, target_fd);
 +
 +      return target_fd;
 +
 +err_get_unused_fd:
 +err_security:
 +      fput(file);
 +err_fget:
 +err_fd_not_accepted:
 +      return ret;
 +}
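/*
 * Editor's sketch (userspace side): how a sender embeds an fd so the
 * code above can translate it. struct binder_fd_object and
 * BINDER_TYPE_FD come from the binder UAPI header; fill_fd_object() is
 * a hypothetical helper, not part of the driver. The driver dups the
 * file into the target process and rewrites obj->fd with the
 * target-side descriptor before delivery.
 */
#include <linux/android/binder.h>
#include <string.h>

static void fill_fd_object(struct binder_fd_object *obj, int fd)
{
	memset(obj, 0, sizeof(*obj));
	obj->hdr.type = BINDER_TYPE_FD;
	obj->fd = fd;	/* replaced in-flight with the target's fd */
}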
 +
 +static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 +                                   struct binder_buffer_object *parent,
 +                                   struct binder_transaction *t,
 +                                   struct binder_thread *thread,
 +                                   struct binder_transaction *in_reply_to)
  {
 -      if (target_thread) {
 -              BUG_ON(target_thread->transaction_stack != t);
 -              BUG_ON(target_thread->transaction_stack->from != target_thread);
 -              target_thread->transaction_stack =
 -                      target_thread->transaction_stack->from_parent;
 -              t->from = NULL;
 +      binder_size_t fdi, fd_buf_size, num_installed_fds;
 +      int target_fd;
 +      uintptr_t parent_buffer;
 +      u32 *fd_array;
 +      struct binder_proc *proc = thread->proc;
 +      struct binder_proc *target_proc = t->to_proc;
 +
 +      fd_buf_size = sizeof(u32) * fda->num_fds;
 +      if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
 +              binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
 +                                proc->pid, thread->pid, (u64)fda->num_fds);
 +              return -EINVAL;
        }
 -      t->need_reply = 0;
 -      if (t->buffer)
 -              t->buffer->transaction = NULL;
 -      kfree(t);
 -      binder_stats_deleted(BINDER_STAT_TRANSACTION);
 +      if (fd_buf_size > parent->length ||
 +          fda->parent_offset > parent->length - fd_buf_size) {
 +              /* No space for all file descriptors here. */
 +              binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
 +                                proc->pid, thread->pid, (u64)fda->num_fds);
 +              return -EINVAL;
 +      }
 +      /*
 +       * Since the parent was already fixed up, convert it
 +       * back to the kernel address space to access it
 +       */
 +      parent_buffer = parent->buffer -
 +              binder_alloc_get_user_buffer_offset(&target_proc->alloc);
 +      fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
 +      if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
 +              binder_user_error("%d:%d parent offset not aligned correctly.\n",
 +                                proc->pid, thread->pid);
 +              return -EINVAL;
 +      }
 +      for (fdi = 0; fdi < fda->num_fds; fdi++) {
 +              target_fd = binder_translate_fd(fd_array[fdi], t, thread,
 +                                              in_reply_to);
 +              if (target_fd < 0)
 +                      goto err_translate_fd_failed;
 +              fd_array[fdi] = target_fd;
 +      }
 +      return 0;
 +
 +err_translate_fd_failed:
 +      /*
 +       * Failed to allocate an fd or hit a security error; free the
 +       * fds installed so far.
 +       */
 +      num_installed_fds = fdi;
 +      for (fdi = 0; fdi < num_installed_fds; fdi++)
 +              task_close_fd(target_proc, fd_array[fdi]);
 +      return target_fd;
  }
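/*
 * Editor's sketch: the overflow/bounds discipline used above, as a
 * self-contained helper (names illustrative; only the checks mirror the
 * driver). num_fds is compared against the maximum before multiplying,
 * so fd_buf_size cannot wrap; the offset test is written as
 * "offset > length - size" only after the size test, so the subtraction
 * cannot underflow either.
 */
#include <stdbool.h>
#include <stdint.h>

static bool fd_array_fits(uint64_t num_fds, uint64_t parent_offset,
			  uint64_t parent_length)
{
	uint64_t fd_buf_size;

	if (num_fds >= UINT64_MAX / sizeof(uint32_t))
		return false;		/* multiplication would overflow */
	fd_buf_size = sizeof(uint32_t) * num_fds;
	if (fd_buf_size > parent_length)
		return false;		/* array larger than the buffer */
	if (parent_offset > parent_length - fd_buf_size)
		return false;		/* array would run past the end */
	return true;
}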
  
 -static void binder_send_failed_reply(struct binder_transaction *t,
 -                                   uint32_t error_code)
 +static int binder_fixup_parent(struct binder_transaction *t,
 +                             struct binder_thread *thread,
 +                             struct binder_buffer_object *bp,
 +                             binder_size_t *off_start,
 +                             binder_size_t num_valid,
 +                             struct binder_buffer_object *last_fixup_obj,
 +                             binder_size_t last_fixup_min_off)
  {
 -      struct binder_thread *target_thread;
 -      struct binder_transaction *next;
 +      struct binder_buffer_object *parent;
 +      u8 *parent_buffer;
 +      struct binder_buffer *b = t->buffer;
 +      struct binder_proc *proc = thread->proc;
 +      struct binder_proc *target_proc = t->to_proc;
  
 -      BUG_ON(t->flags & TF_ONE_WAY);
 -      while (1) {
 -              target_thread = t->from;
 -              if (target_thread) {
 -                      if (target_thread->return_error != BR_OK &&
 -                         target_thread->return_error2 == BR_OK) {
 -                              target_thread->return_error2 =
 -                                      target_thread->return_error;
 -                              target_thread->return_error = BR_OK;
 -                      }
 -                      if (target_thread->return_error == BR_OK) {
 -                              binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 -                                           "send failed reply for transaction %d to %d:%d\n",
 -                                            t->debug_id,
 -                                            target_thread->proc->pid,
 -                                            target_thread->pid);
 -
 -                              binder_pop_transaction(target_thread, t);
 -                              target_thread->return_error = error_code;
 -                              wake_up_interruptible(&target_thread->wait);
 -                      } else {
 -                              pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
 -                                      target_thread->proc->pid,
 -                                      target_thread->pid,
 -                                      target_thread->return_error);
 -                      }
 -                      return;
 -              }
 -              next = t->from_parent;
 +      if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
 +              return 0;
  
 -              binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 -                           "send failed reply for transaction %d, target dead\n",
 -                           t->debug_id);
 +      parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
 +      if (!parent) {
 +              binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
 +                                proc->pid, thread->pid);
 +              return -EINVAL;
 +      }
  
 -              binder_pop_transaction(target_thread, t);
 -              if (next == NULL) {
 -                      binder_debug(BINDER_DEBUG_DEAD_BINDER,
 -                                   "reply failed, no target thread at root\n");
 -                      return;
 -              }
 -              t = next;
 -              binder_debug(BINDER_DEBUG_DEAD_BINDER,
 -                           "reply failed, no target thread -- retry %d\n",
 -                            t->debug_id);
 +      if (!binder_validate_fixup(b, off_start,
 +                                 parent, bp->parent_offset,
 +                                 last_fixup_obj,
 +                                 last_fixup_min_off)) {
 +              binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
 +                                proc->pid, thread->pid);
 +              return -EINVAL;
 +      }
 +
 +      if (parent->length < sizeof(binder_uintptr_t) ||
 +          bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
 +              /* No space for a pointer here! */
 +              binder_user_error("%d:%d got transaction with invalid parent offset\n",
 +                                proc->pid, thread->pid);
 +              return -EINVAL;
        }
 +      parent_buffer = (u8 *)((uintptr_t)parent->buffer -
 +                      binder_alloc_get_user_buffer_offset(
 +                              &target_proc->alloc));
 +      *(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
 +
 +      return 0;
  }
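/*
 * Editor's note: the final store in binder_fixup_parent() is the whole
 * point of BINDER_BUFFER_FLAG_HAS_PARENT. After the parent buffer object
 * has been copied to the target, the pointer slot inside it still holds
 * the sender's address of the child buffer; the driver patches that slot
 * with bp->buffer, the child's address as fixed up for the target.
 * binder_validate_fixup() additionally enforces that such patches land
 * at strictly increasing offsets, which is why last_fixup_obj and
 * last_fixup_min_off are threaded through from the caller.
 */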
  
 -static void binder_transaction_buffer_release(struct binder_proc *proc,
 -                                            struct binder_buffer *buffer,
 -                                            binder_size_t *failed_at)
 +/**
 + * binder_proc_transaction() - sends a transaction to a process and wakes it up
 + * @t:                transaction to send
 + * @proc:     process to send the transaction to
 + * @thread:   thread in @proc to send the transaction to (may be NULL)
 + *
 + * This function queues a transaction to the specified process. It will try
 + * to find a thread in the target process to handle the transaction and
 + * wake it up. If no thread is found, the work is queued to the process
 + * todo list and picked up by the next thread that asks for proc work.
 + *
 + * If the @thread parameter is not NULL, the transaction is always queued
 + * to the todo list of that specific thread.
 + *
 + * Return:    true if the transaction was successfully queued
 + *            false if the target process or thread is dead
 + */
 +static bool binder_proc_transaction(struct binder_transaction *t,
 +                                  struct binder_proc *proc,
 +                                  struct binder_thread *thread)
  {
 -      binder_size_t *offp, *off_end;
 -      int debug_id = buffer->debug_id;
 +      struct binder_node *node = t->buffer->target_node;
 +      struct binder_priority node_prio;
 +      bool oneway = !!(t->flags & TF_ONE_WAY);
 +      bool pending_async = false;
 +
 +      BUG_ON(!node);
 +      binder_node_lock(node);
 +      node_prio.prio = node->min_priority;
 +      node_prio.sched_policy = node->sched_policy;
 +
 +      if (oneway) {
 +              BUG_ON(thread);
 +              if (node->has_async_transaction) {
 +                      pending_async = true;
 +              } else {
 +                      node->has_async_transaction = true;
 +              }
 +      }
  
-       binder_inner_proc_lock(proc);
+       binder_debug(BINDER_DEBUG_TRANSACTION,
+                    "%d buffer release %d, size %zd-%zd, failed at %pK\n",
+                    proc->pid, buffer->debug_id,
+                    buffer->data_size, buffer->offsets_size, failed_at);
  
 -      if (buffer->target_node)
 -              binder_dec_node(buffer->target_node, 1, 0);
 +      if (proc->is_dead || (thread && thread->is_dead)) {
 +              binder_inner_proc_unlock(proc);
 +              binder_node_unlock(node);
 +              return false;
 +      }
  
 -      offp = (binder_size_t *)(buffer->data +
 -                               ALIGN(buffer->data_size, sizeof(void *)));
 -      if (failed_at)
 -              off_end = failed_at;
 -      else
 -              off_end = (void *)offp + buffer->offsets_size;
 -      for (; offp < off_end; offp++) {
 -              struct flat_binder_object *fp;
 +      if (!thread && !pending_async)
 +              thread = binder_select_thread_ilocked(proc);
  
 -              if (*offp > buffer->data_size - sizeof(*fp) ||
 -                  buffer->data_size < sizeof(*fp) ||
 -                  !IS_ALIGNED(*offp, sizeof(u32))) {
 -                      pr_err("transaction release %d bad offset %lld, size %zd\n",
 -                             debug_id, (u64)*offp, buffer->data_size);
 -                      continue;
 -              }
 -              fp = (struct flat_binder_object *)(buffer->data + *offp);
 -              switch (fp->type) {
 -              case BINDER_TYPE_BINDER:
 -              case BINDER_TYPE_WEAK_BINDER: {
 -                      struct binder_node *node = binder_get_node(proc, fp->binder);
 +      if (thread) {
 +              binder_transaction_priority(thread->task, t, node_prio,
 +                                          node->inherit_rt);
 +              binder_enqueue_thread_work_ilocked(thread, &t->work);
 +      } else if (!pending_async) {
 +              binder_enqueue_work_ilocked(&t->work, &proc->todo);
 +      } else {
 +              binder_enqueue_work_ilocked(&t->work, &node->async_todo);
 +      }
  
 -                      if (node == NULL) {
 -                              pr_err("transaction release %d bad node %016llx\n",
 -                                     debug_id, (u64)fp->binder);
 -                              break;
 -                      }
 -                      binder_debug(BINDER_DEBUG_TRANSACTION,
 -                                   "        node %d u%016llx\n",
 -                                   node->debug_id, (u64)node->ptr);
 -                      binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
 -              } break;
 -              case BINDER_TYPE_HANDLE:
 -              case BINDER_TYPE_WEAK_HANDLE: {
 -                      struct binder_ref *ref;
 +      if (!pending_async)
 +              binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
  
 -                      ref = binder_get_ref(proc, fp->handle,
 -                                           fp->type == BINDER_TYPE_HANDLE);
 +      binder_inner_proc_unlock(proc);
 +      binder_node_unlock(node);
  
 -                      if (ref == NULL) {
 -                              pr_err("transaction release %d bad handle %d\n",
 -                               debug_id, fp->handle);
 -                              break;
 -                      }
 -                      binder_debug(BINDER_DEBUG_TRANSACTION,
 -                                   "        ref %d desc %d (node %d)\n",
 -                                   ref->debug_id, ref->desc, ref->node->debug_id);
 -                      binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
 -              } break;
 +      return true;
 +}
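/*
 * Editor's summary of the queuing decision above:
 *
 *	thread != NULL             -> thread->todo, wake that thread
 *	!thread && !pending_async  -> proc->todo, wake one waiting thread
 *	pending_async              -> node->async_todo, no wakeup (the
 *				      next async transaction is released
 *				      when the current one's buffer is
 *				      freed)
 *
 * Only one oneway transaction per node is in flight at a time;
 * node->has_async_transaction is the gate.
 */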
  
 -              case BINDER_TYPE_FD:
 -                      binder_debug(BINDER_DEBUG_TRANSACTION,
 -                                   "        fd %d\n", fp->handle);
 -                      if (failed_at)
 -                              task_close_fd(proc, fp->handle);
 -                      break;
 +/**
 + * binder_get_node_refs_for_txn() - Get required refs on node for txn
 + * @node:         struct binder_node for which to get refs
 + * @procp:        returns @node->proc if valid
 + * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
 + *
 + * User-space normally keeps the node alive when creating a transaction
 + * since it has a reference to the target. The local strong ref keeps it
 + * alive if the sending process dies before the target process processes
 + * the transaction. If the source process is malicious or has a reference
 + * counting bug, relying on the local strong ref can fail.
 + *
 + * Since user-space can cause the local strong ref to go away, we also take
 + * a tmpref on the node to ensure it survives while we are constructing
 + * the transaction. We also need a tmpref on the proc while we are
 + * constructing the transaction, so we take that here as well.
 + *
 + * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 + * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 + * target proc has died, @error is set to BR_DEAD_REPLY.
 + */
 +static struct binder_node *binder_get_node_refs_for_txn(
 +              struct binder_node *node,
 +              struct binder_proc **procp,
 +              uint32_t *error)
 +{
 +      struct binder_node *target_node = NULL;
  
 -              default:
 -                      pr_err("transaction release %d bad object type %x\n",
 -                              debug_id, fp->type);
 -                      break;
 -              }
 -      }
 +      binder_node_inner_lock(node);
 +      if (node->proc) {
 +              target_node = node;
 +              binder_inc_node_nilocked(node, 1, 0, NULL);
 +              binder_inc_node_tmpref_ilocked(node);
 +              node->proc->tmp_ref++;
 +              *procp = node->proc;
 +      } else
 +              *error = BR_DEAD_REPLY;
 +      binder_node_inner_unlock(node);
 +
 +      return target_node;
  }
  
  static void binder_transaction(struct binder_proc *proc,
@@@ -4549,23 -2637,18 +4772,32 @@@ static int binder_thread_release(struc
                        t = t->from_parent;
                } else
                        BUG();
 +              spin_unlock(&last_t->lock);
 +              if (t)
 +                      spin_lock(&t->lock);
 +      }
 +
 +      /*
 +       * If this thread used poll, make sure we remove the waitqueue
 +       * from any epoll data structures holding it with POLLFREE.
 +       * waitqueue_active() is safe to use here because we're holding
 +       * the inner lock.
 +       */
 +      if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
 +          waitqueue_active(&thread->wait)) {
 +              wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
        }
  
-       binder_inner_proc_unlock(thread->proc);
+       /*
+        * If this thread used poll, make sure we remove the waitqueue
+        * from any epoll data structures holding it with POLLFREE.
+        * waitqueue_active() is safe to use here because we're holding
+        * the global lock.
+        */
+       if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+           waitqueue_active(&thread->wait)) {
+               wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
+       }
  
        /*
         * This is needed to avoid races between wake_up_poll() above and
@@@ -4588,23 -2672,36 +4820,24 @@@ static unsigned int binder_poll(struct 
  {
        struct binder_proc *proc = filp->private_data;
        struct binder_thread *thread = NULL;
 -      int wait_for_proc_work;
 -
 -      binder_lock(__func__);
 +      bool wait_for_proc_work;
  
        thread = binder_get_thread(proc);
 -      if (!thread) {
 -              binder_unlock(__func__);
 +      if (!thread)
                return POLLERR;
 -      }
  
-       binder_inner_proc_lock(thread->proc);
        thread->looper |= BINDER_LOOPER_STATE_POLL;
-       wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+       wait_for_proc_work = thread->transaction_stack == NULL &&
+               list_empty(&thread->todo) && thread->return_error == BR_OK;
  
 -      binder_unlock(__func__);
 +      binder_inner_proc_unlock(thread->proc);
 +
 +      poll_wait(filp, &thread->wait, wait);
 +
 +      if (binder_has_work(thread, wait_for_proc_work))
 +              return POLLIN;
  
 -      if (wait_for_proc_work) {
 -              if (binder_has_proc_work(proc, thread))
 -                      return POLLIN;
 -              poll_wait(filp, &proc->wait, wait);
 -              if (binder_has_proc_work(proc, thread))
 -                      return POLLIN;
 -      } else {
 -              if (binder_has_thread_work(thread))
 -                      return POLLIN;
 -              poll_wait(filp, &thread->wait, wait);
 -              if (binder_has_thread_work(thread))
 -                      return POLLIN;
 -      }
        return 0;
  }
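/*
 * Editor's sketch (userspace side): driving the poll support above. A
 * looper thread can poll its binder fd instead of blocking inside the
 * BINDER_WRITE_READ ioctl; POLLIN means binder_has_work() would find
 * thread or process work. Minimal example, error handling trimmed:
 */
#include <poll.h>

static int wait_for_binder_work(int binder_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };

	return poll(&pfd, 1, timeout_ms);	/* > 0: work is queued */
}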
  
@@@ -5160,22 -3191,63 +5425,65 @@@ static void binder_deferred_release(str
  
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                outgoing_refs++;
-               binder_cleanup_ref_olocked(ref);
-               binder_proc_unlock(proc);
-               binder_free_ref(ref);
-               binder_proc_lock(proc);
+               binder_delete_ref(ref);
+       }
+       binder_release_work(&proc->todo);
+       binder_release_work(&proc->delivered_death);
+       buffers = 0;
+       while ((n = rb_first(&proc->allocated_buffers))) {
+               struct binder_buffer *buffer;
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               t = buffer->transaction;
+               if (t) {
+                       t->buffer = NULL;
+                       buffer->transaction = NULL;
+                       pr_err("release proc %d, transaction %d, not freed\n",
+                              proc->pid, t->debug_id);
+                       /*BUG();*/
+               }
+               binder_free_buf(proc, buffer);
+               buffers++;
+       }
+       binder_stats_deleted(BINDER_STAT_PROC);
+       page_count = 0;
+       if (proc->pages) {
+               int i;
+               for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+                       void *page_addr;
+                       if (!proc->pages[i])
+                               continue;
+                       page_addr = proc->buffer + i * PAGE_SIZE;
+                       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                    "%s: %d: page %d at %pK not freed\n",
+                                    __func__, proc->pid, i, page_addr);
+                       unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+                       __free_page(proc->pages[i]);
+                       page_count++;
+               }
+               kfree(proc->pages);
+               vfree(proc->buffer);
        }
 +      binder_proc_unlock(proc);
  
 -      put_task_struct(proc->tsk);
 +      binder_release_work(proc, &proc->todo);
 +      binder_release_work(proc, &proc->delivered_death);
  
        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 -                   "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
 +                   "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
                     __func__, proc->pid, threads, nodes, incoming_refs,
 -                   outgoing_refs, active_transactions, buffers, page_count);
 +                   outgoing_refs, active_transactions);
  
 -      kfree(proc);
 +      binder_proc_dec_tmpref(proc);
  }
  
  static void binder_deferred_func(struct work_struct *work)
@@@ -5233,52 -3307,41 +5541,57 @@@ binder_defer_work(struct binder_proc *p
        mutex_unlock(&binder_deferred_lock);
  }
  
 -static void print_binder_transaction(struct seq_file *m, const char *prefix,
 -                                   struct binder_transaction *t)
 +static void print_binder_transaction_ilocked(struct seq_file *m,
 +                                           struct binder_proc *proc,
 +                                           const char *prefix,
 +                                           struct binder_transaction *t)
  {
 +      struct binder_proc *to_proc;
 +      struct binder_buffer *buffer = t->buffer;
 +
 +      spin_lock(&t->lock);
 +      to_proc = t->to_proc;
        seq_printf(m,
-                  "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
+                  "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
                   prefix, t->debug_id, t,
                   t->from ? t->from->proc->pid : 0,
                   t->from ? t->from->pid : 0,
 -                 t->to_proc ? t->to_proc->pid : 0,
 +                 to_proc ? to_proc->pid : 0,
                   t->to_thread ? t->to_thread->pid : 0,
 -                 t->code, t->flags, t->priority, t->need_reply);
 -      if (t->buffer == NULL) {
 -              seq_puts(m, " buffer free\n");
 +                 t->code, t->flags, t->priority.sched_policy,
 +                 t->priority.prio, t->need_reply);
 +      spin_unlock(&t->lock);
 +
 +      if (proc != to_proc) {
 +              /*
 +               * Can only safely deref buffer if we are holding the
 +               * correct proc inner lock for this node
 +               */
 +              seq_puts(m, "\n");
                return;
        }
-       if (buffer == NULL) {
-               seq_puts(m, " buffer free\n");
-               return;
-       }
-       if (buffer->target_node)
-               seq_printf(m, " node %d", buffer->target_node->debug_id);
+       if (t->buffer->target_node)
+               seq_printf(m, " node %d",
+                          t->buffer->target_node->debug_id);
        seq_printf(m, " size %zd:%zd data %pK\n",
+                  t->buffer->data_size, t->buffer->offsets_size,
+                  t->buffer->data);
+ }
+ static void print_binder_buffer(struct seq_file *m, const char *prefix,
+                               struct binder_buffer *buffer)
+ {
+       seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
+                  prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
 -                 buffer->transaction ? "active" : "delivered");
 +                 buffer->data);
  }
  
 -static void print_binder_work(struct seq_file *m, const char *prefix,
 -                            const char *transaction_prefix,
 -                            struct binder_work *w)
 +static void print_binder_work_ilocked(struct seq_file *m,
 +                                   struct binder_proc *proc,
 +                                   const char *prefix,
 +                                   const char *transaction_prefix,
 +                                   struct binder_work *w)
  {
        struct binder_node *node;
        struct binder_transaction *t;
@@@ -5381,23 -3429,16 +5694,19 @@@ static void print_binder_node_nilocked(
                        seq_printf(m, " %d", ref->proc->pid);
        }
        seq_puts(m, "\n");
 -      list_for_each_entry(w, &node->async_todo, entry)
 -              print_binder_work(m, "    ",
 -                                "    pending async transaction", w);
 +      if (node->proc) {
 +              list_for_each_entry(w, &node->async_todo, entry)
 +                      print_binder_work_ilocked(m, node->proc, "    ",
 +                                        "    pending async transaction", w);
 +      }
  }
  
 -static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
 +static void print_binder_ref_olocked(struct seq_file *m,
 +                                   struct binder_ref *ref)
  {
-       binder_node_lock(ref->node);
        seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
-                  ref->data.debug_id, ref->data.desc,
-                  ref->node->proc ? "" : "dead ",
-                  ref->node->debug_id, ref->data.strong,
-                  ref->data.weak, ref->death);
-       binder_node_unlock(ref->node);
+                  ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+                  ref->node->debug_id, ref->strong, ref->weak, ref->death);
  }
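/*
 * Editor's note: the format above yields lines like the following in the
 * binder debugfs proc output (values illustrative; %pK may print a
 * hashed or zeroed pointer depending on kptr_restrict):
 *
 *	  ref 11: desc 1 node 9 s 1 w 1 d 0000000000000000
 *
 * "s"/"w" are the strong/weak reference counts, and "dead " is inserted
 * before "node" once the node's owning process has exited.
 */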
  
  static void print_binder_proc(struct seq_file *m,
Simple merge
Simple merge
Simple merge
diff --cc fs/ext4/inode.c
Simple merge
diff --cc fs/f2fs/dir.c
@@@ -828,15 -870,11 +839,14 @@@ int f2fs_fill_dentries(struct dir_conte
  
                if (!dir_emit(ctx, de_name.name, de_name.len,
                                        le32_to_cpu(de->ino), d_type))
 -                      return true;
 +                      return 1;
 +
 +              if (sbi->readdir_ra == 1)
 +                      ra_node_page(sbi, le32_to_cpu(de->ino));
  
-               bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
                ctx->pos = start_pos + bit_pos;
        }
 -      return false;
 +      return 0;
  }
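/*
 * Editor's note on the readdir_ra branch above: when the flag is set,
 * every entry emitted also kicks off readahead of that inode's node page
 * via ra_node_page(), so a subsequent stat()/open() of the entry avoids
 * a synchronous metadata read. The flag is a runtime f2fs tunable
 * (exposed through sysfs on kernels that carry this feature).
 */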
  
  static int f2fs_readdir(struct file *file, struct dir_context *ctx)
diff --cc fs/xattr.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc init/main.c
Simple merge
diff --cc kernel/cgroup.c
Simple merge
Simple merge
diff --cc mm/mmap.c
Simple merge
diff --cc mm/page_alloc.c
Simple merge
diff --cc net/core/sock.c
Simple merge
diff --cc net/socket.c
Simple merge