locking/rwsem: Check for operations on an uninitialized rwsem
author Davidlohr Bueso <dave@stgolabs.net>
Mon, 29 Jul 2019 04:47:35 +0000 (21:47 -0700)
committer Peter Zijlstra <peterz@infradead.org>
Tue, 6 Aug 2019 10:49:15 +0000 (12:49 +0200)
Currently the rwsem is the only locking primitive that lacks this
debug feature: a magic field that lets us detect operations on an
uninitialized (or already destroyed) lock. Add it under
CONFIG_DEBUG_RWSEMS and do the magic checking in the locking fastpath
(trylock) operation such that we cover all cases. The unlocking part
is pretty straightforward.
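
To illustrate the kind of bug this catches (the snippet below is a
made-up example, not part of the patch): with CONFIG_DEBUG_RWSEMS=y, a
zero-initialized rwsem has magic == NULL != sem, so the first trylock
or unlock operation on it now triggers the warning.

	#include <linux/rwsem.h>
	#include <linux/slab.h>

	struct foo {
		struct rw_semaphore lock;
		int data;
	};

	static struct foo *foo_create(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return NULL;

		/*
		 * BUG: init_rwsem(&f->lock) was forgotten, so
		 * f->lock.magic is NULL rather than &f->lock and the
		 * trylock below fires DEBUG_RWSEMS_WARN_ON().
		 */
		if (down_read_trylock(&f->lock))
			up_read(&f->lock);

		return f;
	}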

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: mingo@kernel.org
Cc: Davidlohr Bueso <dave@stgolabs.net>
Link: https://lkml.kernel.org/r/20190729044735.9632-1-dave@stgolabs.net
include/linux/rwsem.h
kernel/locking/rwsem.c

index 9d9c663987d87ee9d13bb3650342c2237768daa6..00d6054687dd2b46a05b48fd69b8cfef1fde384b 100644 (file)
@@ -45,6 +45,9 @@ struct rw_semaphore {
 #endif
        raw_spinlock_t wait_lock;
        struct list_head wait_list;
+#ifdef CONFIG_DEBUG_RWSEMS
+       void *magic;
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
 #endif
@@ -73,6 +76,12 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
+#ifdef CONFIG_DEBUG_RWSEMS
+# define __DEBUG_RWSEM_INITIALIZER(lockname) , .magic = &lockname
+#else
+# define __DEBUG_RWSEM_INITIALIZER(lockname)
+#endif
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED
 #else
@@ -85,6 +94,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
          .wait_list = LIST_HEAD_INIT((name).wait_list),        \
          .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
          __RWSEM_OPT_INIT(name)                                \
+         __DEBUG_RWSEM_INITIALIZER(name)                       \
          __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
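
With the new initializer, a statically declared rwsem carries its own
address in .magic. Roughly, and with the count/owner/osq/dep_map
initializers elided, DECLARE_RWSEM(my_sem) (my_sem being a made-up
name) under CONFIG_DEBUG_RWSEMS=y now expands along these lines:

	struct rw_semaphore my_sem = {
		/* count, owner, osq, dep_map initialization elided */
		.wait_list = LIST_HEAD_INIT(my_sem.wait_list),
		.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(my_sem.wait_lock),
		.magic     = &my_sem,	/* self-pointer checked at runtime */
	};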
index 354238a08b7aea46fcd3394ae7a9239a8634fe43..eef04551eae712e02b8c44c02aaa2c06fbb49dc0 100644 (file)
 #ifdef CONFIG_DEBUG_RWSEMS
 # define DEBUG_RWSEMS_WARN_ON(c, sem)  do {                    \
        if (!debug_locks_silent &&                              \
-           WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
+           WARN_ONCE(c, "DEBUG_RWSEMS_WARN_ON(%s): count = 0x%lx, magic = 0x%lx, owner = 0x%lx, curr 0x%lx, list %sempty\n",\
                #c, atomic_long_read(&(sem)->count),            \
+               (unsigned long)(sem)->magic,                    \
                atomic_long_read(&(sem)->owner), (long)current, \
                list_empty(&(sem)->wait_list) ? "" : "not "))   \
                        debug_locks_off();                      \
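
For reference, when a check trips, the warning now reports the magic
value alongside the existing state; a sample message (all values here
are hypothetical) would look like:

	DEBUG_RWSEMS_WARN_ON(sem->magic != sem): count = 0x0, magic = 0x0, owner = 0x0, curr 0xffff888103a42c80, list empty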
@@ -329,6 +330,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
+#endif
+#ifdef CONFIG_DEBUG_RWSEMS
+       sem->magic = sem;
 #endif
        atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
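
Conversely, a lock that is properly run through init_rwsem() (which
wraps the __init_rwsem() above) has its magic stamped with its own
address and passes all of the new checks. A hypothetical correct
counterpart to the earlier snippet:

	init_rwsem(&f->lock);		/* sets f->lock.magic = &f->lock */

	if (down_write_trylock(&f->lock)) {	/* magic == &f->lock: no warning */
		f->data = 1;
		up_write(&f->lock);	/* unlock-side check passes too */
	}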
@@ -1358,11 +1362,14 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
+       long tmp;
+
+       DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+
        /*
         * Optimize for the case when the rwsem is not locked at all.
         */
-       long tmp = RWSEM_UNLOCKED_VALUE;
-
+       tmp = RWSEM_UNLOCKED_VALUE;
        do {
                if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
                                        tmp + RWSEM_READER_BIAS)) {
@@ -1403,8 +1410,11 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       long tmp = RWSEM_UNLOCKED_VALUE;
+       long tmp;
 
+       DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
+
+       tmp = RWSEM_UNLOCKED_VALUE;
        if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
                                            RWSEM_WRITER_LOCKED)) {
                rwsem_set_owner(sem);
@@ -1420,7 +1430,9 @@ inline void __up_read(struct rw_semaphore *sem)
 {
        long tmp;
 
+       DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
        DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
+
        rwsem_clear_reader_owned(sem);
        tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
        DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@@ -1438,12 +1450,14 @@ static inline void __up_write(struct rw_semaphore *sem)
 {
        long tmp;
 
+       DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
        /*
         * sem->owner may differ from current if the ownership is transferred
         * to an anonymous writer by setting the RWSEM_NONSPINNABLE bits.
         */
        DEBUG_RWSEMS_WARN_ON((rwsem_owner(sem) != current) &&
                            !rwsem_test_oflags(sem, RWSEM_NONSPINNABLE), sem);
+
        rwsem_clear_owner(sem);
        tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
        if (unlikely(tmp & RWSEM_FLAG_WAITERS))