binder: fix sparse warnings on locking context
author Todd Kjos <tkjos@android.com>
Tue, 6 Nov 2018 23:56:31 +0000 (15:56 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 26 Nov 2018 19:12:05 +0000 (20:12 +0100)
Add __acquire()/__release() annotations to fix warnings
in sparse context checking

There is one case where the warning was due to a lack of
a "default:" case in a switch statement where a lock was
being released in each of the cases, so the default
case was added.

Signed-off-by: Todd Kjos <tkjos@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/android/binder.c
drivers/android/binder_alloc.c

index cb30a52..54fdd99 100644 (file)
@@ -660,6 +660,7 @@ struct binder_transaction {
 #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
 static void
 _binder_proc_lock(struct binder_proc *proc, int line)
+       __acquires(&proc->outer_lock)
 {
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
@@ -675,6 +676,7 @@ _binder_proc_lock(struct binder_proc *proc, int line)
 #define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
 static void
 _binder_proc_unlock(struct binder_proc *proc, int line)
+       __releases(&proc->outer_lock)
 {
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
@@ -690,6 +692,7 @@ _binder_proc_unlock(struct binder_proc *proc, int line)
 #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
 static void
 _binder_inner_proc_lock(struct binder_proc *proc, int line)
+       __acquires(&proc->inner_lock)
 {
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
@@ -705,6 +708,7 @@ _binder_inner_proc_lock(struct binder_proc *proc, int line)
 #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
 static void
 _binder_inner_proc_unlock(struct binder_proc *proc, int line)
+       __releases(&proc->inner_lock)
 {
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
@@ -720,6 +724,7 @@ _binder_inner_proc_unlock(struct binder_proc *proc, int line)
 #define binder_node_lock(node) _binder_node_lock(node, __LINE__)
 static void
 _binder_node_lock(struct binder_node *node, int line)
+       __acquires(&node->lock)
 {
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
@@ -735,6 +740,7 @@ _binder_node_lock(struct binder_node *node, int line)
 #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
 static void
 _binder_node_unlock(struct binder_node *node, int line)
+       __releases(&node->lock)
 {
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
@@ -751,12 +757,16 @@ _binder_node_unlock(struct binder_node *node, int line)
 #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
 static void
 _binder_node_inner_lock(struct binder_node *node, int line)
+       __acquires(&node->lock) __acquires(&node->proc->inner_lock)
 {
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
+       else
+               /* annotation for sparse */
+               __acquire(&node->proc->inner_lock);
 }
 
 /**
@@ -768,6 +778,7 @@ _binder_node_inner_lock(struct binder_node *node, int line)
 #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
 static void
 _binder_node_inner_unlock(struct binder_node *node, int line)
+       __releases(&node->lock) __releases(&node->proc->inner_lock)
 {
        struct binder_proc *proc = node->proc;
 
@@ -775,6 +786,9 @@ _binder_node_inner_unlock(struct binder_node *node, int line)
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
+       else
+               /* annotation for sparse */
+               __release(&node->proc->inner_lock);
        spin_unlock(&node->lock);
 }
 
@@ -1384,10 +1398,14 @@ static void binder_dec_node_tmpref(struct binder_node *node)
        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
+       else
+               __acquire(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
+       else
+               __release(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node() to check if all refcounts are 0
         * and cleanup is needed. Calling with strong=0 and internal=1
@@ -1890,18 +1908,22 @@ static struct binder_thread *binder_get_txn_from(
  */
 static struct binder_thread *binder_get_txn_from_and_acq_inner(
                struct binder_transaction *t)
+       __acquires(&t->from->proc->inner_lock)
 {
        struct binder_thread *from;
 
        from = binder_get_txn_from(t);
-       if (!from)
+       if (!from) {
+               __acquire(&from->proc->inner_lock);
                return NULL;
+       }
        binder_inner_proc_lock(from->proc);
        if (t->from) {
                BUG_ON(from != t->from);
                return from;
        }
        binder_inner_proc_unlock(from->proc);
+       __acquire(&from->proc->inner_lock);
        binder_thread_dec_tmpref(from);
        return NULL;
 }
@@ -1973,6 +1995,8 @@ static void binder_send_failed_reply(struct binder_transaction *t,
                        binder_thread_dec_tmpref(target_thread);
                        binder_free_transaction(t);
                        return;
+               } else {
+                       __release(&target_thread->proc->inner_lock);
                }
                next = t->from_parent;
 
@@ -2394,11 +2418,15 @@ static int binder_translate_handle(struct flat_binder_object *fp,
                fp->cookie = node->cookie;
                if (node->proc)
                        binder_inner_proc_lock(node->proc);
+               else
+                       __acquire(&node->proc->inner_lock);
                binder_inc_node_nilocked(node,
                                         fp->hdr.type == BINDER_TYPE_BINDER,
                                         0, NULL);
                if (node->proc)
                        binder_inner_proc_unlock(node->proc);
+               else
+                       __release(&node->proc->inner_lock);
                trace_binder_transaction_ref_to_node(t, node, &src_rdata);
                binder_debug(BINDER_DEBUG_TRANSACTION,
                             "        ref %d desc %d -> node %d u%016llx\n",
@@ -2762,6 +2790,8 @@ static void binder_transaction(struct binder_proc *proc,
                binder_set_nice(in_reply_to->saved_priority);
                target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
                if (target_thread == NULL) {
+                       /* annotation for sparse */
+                       __release(&target_thread->proc->inner_lock);
                        return_error = BR_DEAD_REPLY;
                        return_error_line = __LINE__;
                        goto err_dead_binder;
@@ -4161,6 +4191,11 @@ retry:
                        if (cmd == BR_DEAD_BINDER)
                                goto done; /* DEAD_BINDER notifications can cause transactions */
                } break;
+               default:
+                       binder_inner_proc_unlock(proc);
+                       pr_err("%d:%d: bad work type %d\n",
+                              proc->pid, thread->pid, w->type);
+                       break;
                }
 
                if (!t)
@@ -4464,6 +4499,8 @@ static int binder_thread_release(struct binder_proc *proc,
                spin_lock(&t->lock);
                if (t->to_thread == thread)
                        send_reply = t;
+       } else {
+               __acquire(&t->lock);
        }
        thread->is_dead = true;
 
@@ -4492,7 +4529,11 @@ static int binder_thread_release(struct binder_proc *proc,
                spin_unlock(&last_t->lock);
                if (t)
                        spin_lock(&t->lock);
+               else
+                       __acquire(&t->lock);
        }
+       /* annotation for sparse, lock not acquired in last iteration above */
+       __release(&t->lock);
 
        /*
         * If this thread used poll, make sure we remove the waitqueue
index 64fd96e..52eb11e 100644 (file)
@@ -943,6 +943,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock,
                                       void *cb_arg)
+       __must_hold(lock)
 {
        struct mm_struct *mm = NULL;
        struct binder_lru_page *page = container_of(item,