lockdep: Assign lock keys on registration
Author:     Matthew Wilcox <mawilcox@microsoft.com>
AuthorDate: Wed, 17 Jan 2018 15:14:12 +0000 (07:14 -0800)
Commit:     Thomas Gleixner <tglx@linutronix.de>
CommitDate: Thu, 18 Jan 2018 10:56:48 +0000 (11:56 +0100)
Lockdep currently assigns lock keys when a lock is first looked up.  This
is unnecessary: if the lock has never been registered, then it is known
that it is not locked.  It also complicates the calling convention.
Switch to assigning the lock key in register_lock_class().

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "David S. Miller" <davem@davemloft.net>
Link: https://lkml.kernel.org/r/20180117151414.23686-2-willy@infradead.org
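
To make the new calling convention concrete, here is a standalone C sketch
(not kernel code -- every type and helper below is a simplified stand-in,
and the real hash-table walk, class allocation, and per-cpu handling are
elided) of the control flow after this patch: look_up_lock_class() returns
NULL for a never-registered lock, and key assignment happens only inside
register_lock_class().

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the real lockdep structures. */
    struct lock_class { const char *name; };
    struct lockdep_map { void *key; struct lock_class *class; };

    /* Stand-in: the kernel checks that the object is in static storage. */
    static bool static_obj(const void *addr) { (void)addr; return true; }

    /*
     * After the patch: lookup never assigns a key.  A lock with no key
     * has never been registered, so it cannot be in the hash table --
     * return NULL instead of the old ERR_PTR(-EINVAL) convention.
     */
    static struct lock_class *look_up_lock_class(struct lockdep_map *lock)
    {
            if (!lock->key)
                    return NULL;
            return lock->class;     /* real hash-table walk elided */
    }

    /* Key assignment now lives in one place, used only at registration. */
    static bool assign_lock_key(struct lockdep_map *lock)
    {
            /* per-cpu canonicalisation elided (see the real patch) */
            if (static_obj(lock)) {
                    lock->key = lock;  /* the lock object itself is the key */
                    return true;
            }
            return false;              /* non-persistent key: give up */
    }

    static struct lock_class *register_lock_class(struct lockdep_map *lock)
    {
            static struct lock_class the_class = { .name = "sketch-class" };
            struct lock_class *class = look_up_lock_class(lock);

            if (class)
                    return class;      /* fast path: already registered */

            if (!lock->key) {
                    if (!assign_lock_key(lock))
                            return NULL;
            } else if (!static_obj(lock->key)) {
                    return NULL;
            }

            lock->class = &the_class;  /* allocate-and-hash elided */
            return lock->class;
    }

    int main(void)
    {
            struct lockdep_map m = { .key = NULL, .class = NULL };

            /* Unregistered lock: lookup sees no key, reports "not found". */
            printf("before: %p\n", (void *)look_up_lock_class(&m));
            printf("after:  %s\n", register_lock_class(&m)->name);
            return 0;
    }

The payoff shows up in the callers: every IS_ERR_OR_NULL(class) check in
the diff below collapses to a plain NULL test, because the lookup path no
longer has an error return.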
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 5fa1324..472547d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -647,18 +647,12 @@ static int count_matching_names(struct lock_class *new_class)
        return count + 1;
 }
 
-/*
- * Register a lock's class in the hash-table, if the class is not present
- * yet. Otherwise we look it up. We cache the result in the lock object
- * itself, so actual lookup of the hash should be once per lock object.
- */
 static inline struct lock_class *
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
        struct lockdep_subclass_key *key;
        struct hlist_head *hash_head;
        struct lock_class *class;
-       bool is_static = false;
 
        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
                debug_locks_off();
@@ -671,24 +665,11 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        }
 
        /*
-        * Static locks do not have their class-keys yet - for them the key
-        * is the lock object itself. If the lock is in the per cpu area,
-        * the canonical address of the lock (per cpu offset removed) is
-        * used.
+        * If it is not initialised then it has never been locked,
+        * so it won't be present in the hash table.
         */
-       if (unlikely(!lock->key)) {
-               unsigned long can_addr, addr = (unsigned long)lock;
-
-               if (__is_kernel_percpu_address(addr, &can_addr))
-                       lock->key = (void *)can_addr;
-               else if (__is_module_percpu_address(addr, &can_addr))
-                       lock->key = (void *)can_addr;
-               else if (static_obj(lock))
-                       lock->key = (void *)lock;
-               else
-                       return ERR_PTR(-EINVAL);
-               is_static = true;
-       }
+       if (unlikely(!lock->key))
+               return NULL;
 
        /*
         * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -720,7 +701,35 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
                }
        }
 
-       return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
+       return NULL;
+}
+
+/*
+ * Static locks do not have their class-keys yet - for them the key is
+ * the lock object itself. If the lock is in the per cpu area, the
+ * canonical address of the lock (per cpu offset removed) is used.
+ */
+static bool assign_lock_key(struct lockdep_map *lock)
+{
+       unsigned long can_addr, addr = (unsigned long)lock;
+
+       if (__is_kernel_percpu_address(addr, &can_addr))
+               lock->key = (void *)can_addr;
+       else if (__is_module_percpu_address(addr, &can_addr))
+               lock->key = (void *)can_addr;
+       else if (static_obj(lock))
+               lock->key = (void *)lock;
+       else {
+               /* Debug-check: all keys must be persistent! */
+               debug_locks_off();
+               pr_err("INFO: trying to register non-static key.\n");
+               pr_err("the code is fine but needs lockdep annotation.\n");
+               pr_err("turning off the locking correctness validator.\n");
+               dump_stack();
+               return false;
+       }
+
+       return true;
 }
 
 /*
@@ -738,18 +747,13 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
 
        class = look_up_lock_class(lock, subclass);
-       if (likely(!IS_ERR_OR_NULL(class)))
+       if (likely(class))
                goto out_set_class_cache;
 
-       /*
-        * Debug-check: all keys must be persistent!
-        */
-       if (IS_ERR(class)) {
-               debug_locks_off();
-               printk("INFO: trying to register non-static key.\n");
-               printk("the code is fine but needs lockdep annotation.\n");
-               printk("turning off the locking correctness validator.\n");
-               dump_stack();
+       if (!lock->key) {
+               if (!assign_lock_key(lock))
+                       return NULL;
+       } else if (!static_obj(lock->key)) {
                return NULL;
        }
 
@@ -3498,7 +3502,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
                 * Clearly if the lock hasn't been acquired _ever_, we're not
                 * holding it either, so report failure.
                 */
-               if (IS_ERR_OR_NULL(class))
+               if (!class)
                        return 0;
 
                /*
@@ -4294,7 +4298,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
                 * If the class exists we look it up and zap it:
                 */
                class = look_up_lock_class(lock, j);
-               if (!IS_ERR_OR_NULL(class))
+               if (class)
                        zap_class(class);
        }
        /*
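
A note on the comment above assign_lock_key() in the diff: for per-cpu
locks, the canonical (per-cpu-offset-removed) address is used as the key,
so that every CPU's copy of the same lock maps to a single lock class.
Below is a minimal userspace model of that canonicalisation -- the base
addresses and area size are invented for illustration and are not the
kernel's real per-cpu layout:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS   4
    #define PCPU_SIZE 0x1000UL

    /* Made-up base addresses standing in for each CPU's per-cpu area. */
    static const unsigned long pcpu_base[NR_CPUS] = {
            0x10000, 0x20000, 0x30000, 0x40000
    };

    /*
     * Model of the canonicalisation done by __is_kernel_percpu_address():
     * if addr falls inside some CPU's per-cpu area, strip that CPU's
     * offset, so every CPU's copy of the same lock yields one key.
     */
    static bool percpu_canonical(unsigned long addr, unsigned long *can_addr)
    {
            for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                    if (addr >= pcpu_base[cpu] &&
                        addr < pcpu_base[cpu] + PCPU_SIZE) {
                            *can_addr = addr - pcpu_base[cpu];
                            return true;
                    }
            }
            return false;
    }

    int main(void)
    {
            unsigned long key0 = 0, key2 = 0;

            /* The "same" per-cpu lock at offset 0x80 on CPU 0 and CPU 2. */
            percpu_canonical(pcpu_base[0] + 0x80, &key0);
            percpu_canonical(pcpu_base[2] + 0x80, &key2);
            printf("canonical keys equal: %s\n", key0 == key2 ? "yes" : "no");
            return 0;
    }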