Merge tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 62ef295..4dfd2f3 100644
@@ -1881,6 +1881,8 @@ print_circular_lock_scenario(struct held_lock *src,
        struct lock_class *source = hlock_class(src);
        struct lock_class *target = hlock_class(tgt);
        struct lock_class *parent = prt->class;
+       int src_read = src->read;
+       int tgt_read = tgt->read;
 
        /*
         * A direct locking problem where unsafe_class lock is taken
@@ -1908,7 +1910,10 @@ print_circular_lock_scenario(struct held_lock *src,
        printk(" Possible unsafe locking scenario:\n\n");
        printk("       CPU0                    CPU1\n");
        printk("       ----                    ----\n");
-       printk("  lock(");
+       if (tgt_read != 0)
+               printk("  rlock(");
+       else
+               printk("  lock(");
        __print_lock_name(target);
        printk(KERN_CONT ");\n");
        printk("                               lock(");
@@ -1917,7 +1922,12 @@ print_circular_lock_scenario(struct held_lock *src,
        printk("                               lock(");
        __print_lock_name(target);
        printk(KERN_CONT ");\n");
-       printk("  lock(");
+       if (src_read != 0)
+               printk("  rlock(");
+       else if (src->sync)
+               printk("  sync(");
+       else
+               printk("  lock(");
        __print_lock_name(source);
        printk(KERN_CONT ");\n");
        printk("\n *** DEADLOCK ***\n\n");
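
With the branches above, the "possible unsafe locking scenario" block now distinguishes read acquisitions (rlock) and sync-style waits (sync) from plain write acquisitions. Reconstructed directly from the printk() calls above (not captured from a running kernel), a cycle between two hypothetical lock classes A and B, where CPU0 read-holds A and then waits on B while CPU1 holds B and takes A, renders as:

       CPU0                    CPU1
       ----                    ----
  rlock(A);
                               lock(B);
                               lock(A);
  sync(B);

 *** DEADLOCK ***
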
@@ -4536,7 +4546,13 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
                                        return 0;
                }
        }
-       if (!hlock->hardirqs_off) {
+
+       /*
+        * For lock_sync(), don't mark the ENABLED usage, since lock_sync()
+        * creates no critical section and no extra dependency can be introduced
+        * by interrupts.
+        */
+       if (!hlock->hardirqs_off && !hlock->sync) {
                if (hlock->read) {
                        if (!mark_lock(curr, hlock,
                                        LOCK_ENABLED_HARDIRQ_READ))
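
Skipping the ENABLED marking here is what prevents sync-style waiters from producing false irq-inversion reports: the waiter holds nothing, so an interrupt arriving mid-wait cannot close a dependency cycle through it. A minimal sketch of the pattern this exempts, assuming synchronize_srcu() carries a lock_sync() annotation as the lock_sync() comment further down suggests for synchronize_{s,}rcu()-like APIs (the srcu_struct and function names are hypothetical):

#include <linux/interrupt.h>
#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);	/* hypothetical SRCU domain */

/* Process context, hardirqs enabled: must NOT be marked LOCK_ENABLED_HARDIRQ. */
static void my_waiter(void)
{
	synchronize_srcu(&my_srcu);	/* waits for readers, holds nothing */
}

/* A read-side critical section in hardirq context (USED_IN_HARDIRQ_READ). */
static irqreturn_t my_irq_handler(int irq, void *dev)
{
	int idx = srcu_read_lock(&my_srcu);
	srcu_read_unlock(&my_srcu, idx);
	return IRQ_HANDLED;
}

Without the !hlock->sync test, my_waiter() would record an ENABLED usage with interrupts on, and the hardirq reader would then trigger a bogus inversion report even though no deadlock is possible.
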
@@ -4924,7 +4940,7 @@ static int __lock_is_held(const struct lockdep_map *lock, int read);
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                          int trylock, int read, int check, int hardirqs_off,
                          struct lockdep_map *nest_lock, unsigned long ip,
-                         int references, int pin_count)
+                         int references, int pin_count, int sync)
 {
        struct task_struct *curr = current;
        struct lock_class *class = NULL;
@@ -4975,7 +4991,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
        class_idx = class - lock_classes;
 
-       if (depth) { /* we're holding locks */
+       if (depth && !sync) {
+               /* we're holding locks and the new held lock is not a sync */
                hlock = curr->held_locks + depth - 1;
                if (hlock->class_idx == class_idx && nest_lock) {
                        if (!references)
@@ -5009,6 +5026,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        hlock->trylock = trylock;
        hlock->read = read;
        hlock->check = check;
+       hlock->sync = !!sync;
        hlock->hardirqs_off = !!hardirqs_off;
        hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
@@ -5070,6 +5088,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (!validate_chain(curr, hlock, chain_head, chain_key))
                return 0;
 
+       /* For lock_sync(), we are done here since there is no actual critical section */
+       if (hlock->sync)
+               return 1;
+
        curr->curr_chain_key = chain_key;
        curr->lockdep_depth++;
        check_chain_key(curr);
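
Because of this early return, a sync annotation runs the full chain validation but is dropped before lockdep_depth is bumped, so it never stays on the held-lock stack. An illustrative snippet, not kernel code (my_map is a made-up lockdep_map, assumed registered via lockdep_init_map(); lock_sync() is called with the signature introduced below):

static struct lockdep_map my_map;	/* hypothetical, registered elsewhere */

static void demo(void)
{
	unsigned int depth = current->lockdep_depth;

	lock_sync(&my_map, 0, 0, 1, NULL, _THIS_IP_);

	/* Dependencies were validated, but nothing is left held. */
	WARN_ON(current->lockdep_depth != depth);
}
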
@@ -5211,7 +5233,7 @@ static int reacquire_held_locks(struct task_struct *curr, unsigned int depth,
                                    hlock->read, hlock->check,
                                    hlock->hardirqs_off,
                                    hlock->nest_lock, hlock->acquire_ip,
-                                   hlock->references, hlock->pin_count)) {
+                                   hlock->references, hlock->pin_count, 0)) {
                case 0:
                        return 1;
                case 1:
@@ -5681,7 +5703,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
        lockdep_recursion_inc();
        __lock_acquire(lock, subclass, trylock, read, check,
-                      irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
+                      irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 0);
        lockdep_recursion_finish();
        raw_local_irq_restore(flags);
 }
@@ -5707,6 +5729,34 @@ void lock_release(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+/*
+ * lock_sync() - A special annotation for synchronize_{s,}rcu()-like APIs.
+ *
+ * No actual critical section is created by the APIs annotated with this: these
+ * APIs are used to wait for one or multiple critical sections (on other CPUs
+ * or threads), which means that calling these APIs inside one of those
+ * critical sections is a potential deadlock.
+ */
+void lock_sync(struct lockdep_map *lock, unsigned subclass, int read,
+              int check, struct lockdep_map *nest_lock, unsigned long ip)
+{
+       unsigned long flags;
+
+       if (unlikely(!lockdep_enabled()))
+               return;
+
+       raw_local_irq_save(flags);
+       check_flags(flags);
+
+       lockdep_recursion_inc();
+       __lock_acquire(lock, subclass, 0, read, check,
+                      irqs_disabled_flags(flags), nest_lock, ip, 0, 0, 1);
+       check_chain_key(current);
+       lockdep_recursion_finish();
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_sync);
+
 noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
 {
        unsigned long flags;
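
Putting the pieces together, a primitive annotated this way turns an otherwise silent hang into a lockdep report. A hedged usage sketch (srcu_read_lock(), srcu_read_unlock() and synchronize_srcu() are real APIs; the function name and SRCU domain are hypothetical, and synchronize_srcu() is assumed to carry the lock_sync() annotation described in the comment above):

#include <linux/srcu.h>

DEFINE_STATIC_SRCU(my_srcu);	/* hypothetical SRCU domain */

static void never_call_from_a_reader(void)
{
	int idx = srcu_read_lock(&my_srcu);

	/*
	 * Waiting for all SRCU read-side critical sections while inside
	 * one can never complete. With the sync annotation, lockdep sees
	 * rlock() followed by sync() on the same class and prints the
	 * deadlock scenario instead of the machine hanging silently.
	 */
	synchronize_srcu(&my_srcu);

	srcu_read_unlock(&my_srcu, idx);
}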