locking: ipipe: add pipeline-aware locks
author Philippe Gerum <rpm@xenomai.org>
Sun, 3 Dec 2017 10:57:56 +0000 (11:57 +0100)
committer Marek Szyprowski <m.szyprowski@samsung.com>
Fri, 27 Apr 2018 09:21:34 +0000 (11:21 +0200)
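
Turn the generic spinlock/rwlock entry points into macros dispatched
through PICK_SPINOP()/PICK_RWOP() and friends from <linux/ipipe_lock.h>,
so that the same call sites transparently operate on either regular
kernel locks or interrupt-pipeline locks. Also make lockdep's IRQ state
tracing aware of interrupt pipelining: tracing is skipped outside the
root domain, and the sanity checks accept hard interrupt masking
(hard_irqs_disabled()) in place of the root domain's virtual mask.
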
include/linux/rwlock.h
include/linux/rwlock_api_smp.h
include/linux/spinlock.h
include/linux/spinlock_api_smp.h
include/linux/spinlock_up.h
kernel/locking/lockdep.c
kernel/locking/spinlock.c
lib/bust_spinlocks.c

diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bc2994ed66e1c0e915ab7b9dbae61a470b282e50..5e2da8d9a60be00afdce81789fd9c74116f70405 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -61,8 +61,8 @@ do {                                                          \
 #define read_trylock(lock)     __cond_lock(lock, _raw_read_trylock(lock))
 #define write_trylock(lock)    __cond_lock(lock, _raw_write_trylock(lock))
 
-#define write_lock(lock)       _raw_write_lock(lock)
-#define read_lock(lock)                _raw_read_lock(lock)
+#define write_lock(lock)       PICK_RWOP(_write_lock, lock)
+#define read_lock(lock)                PICK_RWOP(_read_lock, lock)
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
@@ -96,8 +96,8 @@ do {                                                          \
 #define read_lock_bh(lock)             _raw_read_lock_bh(lock)
 #define write_lock_irq(lock)           _raw_write_lock_irq(lock)
 #define write_lock_bh(lock)            _raw_write_lock_bh(lock)
-#define read_unlock(lock)              _raw_read_unlock(lock)
-#define write_unlock(lock)             _raw_write_unlock(lock)
+#define read_unlock(lock)              PICK_RWOP(_read_unlock, lock)
+#define write_unlock(lock)             PICK_RWOP(_write_unlock, lock)
 #define read_unlock_irq(lock)          _raw_read_unlock_irq(lock)
 #define write_unlock_irq(lock)         _raw_write_unlock_irq(lock)
 
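
PICK_RWOP() and the PICK_SPINOP*() family come from <linux/ipipe_lock.h>,
which this patch does not show. The dispatching idea can be illustrated
with a self-contained userspace sketch; every name below (sample_rwlock_t,
SAMPLE_PICK_RWOP(), ...) is invented for illustration, and the real I-pipe
macros differ in detail:

#include <stdio.h>

typedef struct { int raw; } sample_rwlock_t;        /* stands in for rwlock_t */
typedef struct { int raw; } sample_ipipe_rwlock_t;  /* pipeline-aware variant */

static void _sample_read_lock(sample_rwlock_t *lock)
{
        (void)lock;
        printf("root-domain path (virtual interrupt masking)\n");
}

static void __ipipe_sample_read_lock(sample_ipipe_rwlock_t *lock)
{
        (void)lock;
        printf("pipeline path (hard interrupt masking)\n");
}

/* Pick the implementation from the lock's C type, at compile time. */
#define SAMPLE_PICK_RWOP(op, lock)                                      \
        __builtin_choose_expr(                                          \
                __builtin_types_compatible_p(typeof(lock),              \
                                             sample_ipipe_rwlock_t *),  \
                __ipipe_sample##op((sample_ipipe_rwlock_t *)(lock)),    \
                _sample##op((sample_rwlock_t *)(lock)))

int main(void)
{
        sample_rwlock_t a = { 0 };
        sample_ipipe_rwlock_t b = { 0 };

        SAMPLE_PICK_RWOP(_read_lock, &a);       /* root-domain path */
        SAMPLE_PICK_RWOP(_read_lock, &b);       /* pipeline path */
        return 0;
}

Since the selection rests on __builtin_types_compatible_p(), it is
resolved entirely at compile time, so call sites using a plain rwlock_t
pay nothing for the pipeline support.
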
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 5b9b84b204070f8e6780e3836ea3c9efdc4a1e61..6c8bb4dd73e66343af33e3529765b6c296ad4891 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -141,7 +141,9 @@ static inline int __raw_write_trylock(rwlock_t *lock)
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) ||      \
+       defined(CONFIG_DEBUG_LOCK_ALLOC) ||     \
+       defined(CONFIG_IPIPE)
 
 static inline void __raw_read_lock(rwlock_t *lock)
 {
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 341e1a12bfc78c99c62e21857c2f9e304800db52..f2f45edcc476d39fe3b637868fc57c9a7402fe34 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
 # include <linux/spinlock_up.h>
 #endif
 
+#include <linux/ipipe_lock.h>
+
 #ifdef CONFIG_DEBUG_SPINLOCK
   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
                                   struct lock_class_key *key);
-# define raw_spin_lock_init(lock)                              \
+# define __real_raw_spin_lock_init(lock)                       \
 do {                                                           \
        static struct lock_class_key __key;                     \
                                                                \
@@ -101,11 +103,14 @@ do {                                                              \
 } while (0)
 
 #else
-# define raw_spin_lock_init(lock)                              \
+# define __real_raw_spin_lock_init(lock)                       \
        do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
 #endif
+#define raw_spin_lock_init(lock)       PICK_SPINOP(_lock_init, lock)
 
-#define raw_spin_is_locked(lock)       arch_spin_is_locked(&(lock)->raw_lock)
+#define __real_raw_spin_is_locked(lock)                                \
+       arch_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock)       PICK_SPINOP_RET(_is_locked, lock, int)
 
 #ifdef CONFIG_GENERIC_LOCKBREAK
 #define raw_spin_is_contended(lock) ((lock)->break_lock)
@@ -191,9 +196,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
  * various methods are defined as nops in the case they are not
  * required.
  */
-#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define __real_raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))
+#define raw_spin_trylock(lock)         PICK_SPINOP_RET(_trylock, lock, int)
 
-#define raw_spin_lock(lock)    _raw_spin_lock(lock)
+#define __real_raw_spin_lock(lock)     _raw_spin_lock(lock)
+#define raw_spin_lock(lock)            PICK_SPINOP(_lock, lock)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define raw_spin_lock_nested(lock, subclass) \
@@ -217,7 +224,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
-#define raw_spin_lock_irqsave(lock, flags)                     \
+#define __real_raw_spin_lock_irqsave(lock, flags)      \
        do {                                            \
                typecheck(unsigned long, flags);        \
                flags = _raw_spin_lock_irqsave(lock);   \
@@ -239,7 +246,7 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 
 #else
 
-#define raw_spin_lock_irqsave(lock, flags)             \
+#define __real_raw_spin_lock_irqsave(lock, flags)      \
        do {                                            \
                typecheck(unsigned long, flags);        \
                _raw_spin_lock_irqsave(lock, flags);    \
@@ -250,34 +257,46 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 
 #endif
 
-#define raw_spin_lock_irq(lock)                _raw_spin_lock_irq(lock)
+#define raw_spin_lock_irqsave(lock, flags)  \
+       PICK_SPINLOCK_IRQSAVE(lock, flags)
+
+#define __real_raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_irq(lock)                PICK_SPINOP(_lock_irq, lock)
 #define raw_spin_lock_bh(lock)         _raw_spin_lock_bh(lock)
-#define raw_spin_unlock(lock)          _raw_spin_unlock(lock)
-#define raw_spin_unlock_irq(lock)      _raw_spin_unlock_irq(lock)
+#define __real_raw_spin_unlock(lock)   _raw_spin_unlock(lock)
+#define raw_spin_unlock(lock)          PICK_SPINOP(_unlock, lock)
+#define __real_raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
+#define raw_spin_unlock_irq(lock)      PICK_SPINOP(_unlock_irq, lock)
 
-#define raw_spin_unlock_irqrestore(lock, flags)                \
+#define __real_raw_spin_unlock_irqrestore(lock, flags)         \
        do {                                                    \
                typecheck(unsigned long, flags);                \
                _raw_spin_unlock_irqrestore(lock, flags);       \
        } while (0)
+#define raw_spin_unlock_irqrestore(lock, flags)        \
+       PICK_SPINUNLOCK_IRQRESTORE(lock, flags)
+
 #define raw_spin_unlock_bh(lock)       _raw_spin_unlock_bh(lock)
 
 #define raw_spin_trylock_bh(lock) \
        __cond_lock(lock, _raw_spin_trylock_bh(lock))
 
-#define raw_spin_trylock_irq(lock) \
+#define __real_raw_spin_trylock_irq(lock) \
 ({ \
        local_irq_disable(); \
-       raw_spin_trylock(lock) ? \
+       __real_raw_spin_trylock(lock) ? \
        1 : ({ local_irq_enable(); 0;  }); \
 })
+#define raw_spin_trylock_irq(lock)     PICK_SPINTRYLOCK_IRQ(lock)
 
-#define raw_spin_trylock_irqsave(lock, flags) \
+#define __real_raw_spin_trylock_irqsave(lock, flags) \
 ({ \
        local_irq_save(flags); \
        raw_spin_trylock(lock) ? \
        1 : ({ local_irq_restore(flags); 0; }); \
 })
+#define raw_spin_trylock_irqsave(lock, flags)  \
+       PICK_SPINTRYLOCK_IRQSAVE(lock, flags)
 
 /**
  * raw_spin_can_lock - would raw_spin_trylock() succeed?
@@ -308,24 +327,17 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 
 #define spin_lock_init(_lock)                          \
 do {                                                   \
-       spinlock_check(_lock);                          \
-       raw_spin_lock_init(&(_lock)->rlock);            \
+       raw_spin_lock_init(_lock);                      \
 } while (0)
 
-static __always_inline void spin_lock(spinlock_t *lock)
-{
-       raw_spin_lock(&lock->rlock);
-}
+#define spin_lock(lock)                raw_spin_lock(lock)
 
 static __always_inline void spin_lock_bh(spinlock_t *lock)
 {
        raw_spin_lock_bh(&lock->rlock);
 }
 
-static __always_inline int spin_trylock(spinlock_t *lock)
-{
-       return raw_spin_trylock(&lock->rlock);
-}
+#define spin_trylock(lock)     raw_spin_trylock(lock)
 
 #define spin_lock_nested(lock, subclass)                       \
 do {                                                           \
@@ -337,14 +349,11 @@ do {                                                                      \
        raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
 } while (0)
 
-static __always_inline void spin_lock_irq(spinlock_t *lock)
-{
-       raw_spin_lock_irq(&lock->rlock);
-}
+#define spin_lock_irq(lock)    raw_spin_lock_irq(lock)
 
 #define spin_lock_irqsave(lock, flags)                         \
 do {                                                           \
-       raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
+       raw_spin_lock_irqsave(lock, flags);                     \
 } while (0)
 
 #define spin_lock_irqsave_nested(lock, flags, subclass)                        \
@@ -352,39 +361,28 @@ do {                                                                      \
        raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
 } while (0)
 
-static __always_inline void spin_unlock(spinlock_t *lock)
-{
-       raw_spin_unlock(&lock->rlock);
-}
+#define spin_unlock(lock)      raw_spin_unlock(lock)
 
 static __always_inline void spin_unlock_bh(spinlock_t *lock)
 {
        raw_spin_unlock_bh(&lock->rlock);
 }
 
-static __always_inline void spin_unlock_irq(spinlock_t *lock)
-{
-       raw_spin_unlock_irq(&lock->rlock);
-}
+#define spin_unlock_irq(lock)  raw_spin_unlock_irq(lock)
 
-static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-{
-       raw_spin_unlock_irqrestore(&lock->rlock, flags);
-}
+#define spin_unlock_irqrestore(lock, flags)    \
+       raw_spin_unlock_irqrestore(lock, flags)
 
 static __always_inline int spin_trylock_bh(spinlock_t *lock)
 {
        return raw_spin_trylock_bh(&lock->rlock);
 }
 
-static __always_inline int spin_trylock_irq(spinlock_t *lock)
-{
-       return raw_spin_trylock_irq(&lock->rlock);
-}
+#define spin_trylock_irq(lock) raw_spin_trylock_irq(lock)
 
 #define spin_trylock_irqsave(lock, flags)                      \
 ({                                                             \
-       raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
+       raw_spin_trylock_irqsave(lock, flags);                  \
 })
 
 static __always_inline int spin_is_locked(spinlock_t *lock)
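
For pipeline locks, the branch selected by PICK_SPINLOCK_IRQSAVE() must
mask interrupts in the hardware, not merely in the root domain's
virtualized interrupt state, or the head domain could still preempt the
critical section. A minimal sketch of what that branch plausibly reduces
to, assuming an ipipe_spinlock_t with an arch_lock member and I-pipe's
hard_local_irq_save()/hard_local_irq_restore() primitives (the helper
names are invented; see <linux/ipipe_lock.h> for the real code):

static inline unsigned long
__sketch_ipipe_spin_lock_irqsave(ipipe_spinlock_t *lock)
{
        unsigned long flags;

        /* Really disable IRQs in the CPU, not just virtually. */
        flags = hard_local_irq_save();
        arch_spin_lock(&lock->arch_lock);
        return flags;
}

static inline void
__sketch_ipipe_spin_unlock_irqrestore(ipipe_spinlock_t *lock,
                                      unsigned long flags)
{
        arch_spin_unlock(&lock->arch_lock);
        hard_local_irq_restore(flags);
}
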
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 42dfab89e740aeb08de1896e491607789eacda4b..ebdc2a9116eccb0fad10028485608193681bf390 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -99,7 +99,9 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) ||      \
+       defined(CONFIG_DEBUG_LOCK_ALLOC) ||     \
+       defined(CONFIG_IPIPE)
 
 static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
 {
@@ -113,7 +115,7 @@ static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
         * do_raw_spin_lock_flags() code, because lockdep assumes
         * that interrupts are not re-enabled during lock-acquire:
         */
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_IPIPE)
        LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 #else
        do_raw_spin_lock_flags(lock, &flags);
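
The same #if relaxation recurs in include/linux/rwlock_api_smp.h above
and in kernel/locking/spinlock.c below. The intent, presumably, is that
with CONFIG_IPIPE the inline trylock/LOCK_CONTENDED() sequence is always
used instead of do_raw_spin_lock_flags(): the flags-based fallback may
briefly re-enable interrupts while spinning (as the comment above notes
for the preempt-spin-ops), which a pipeline-aware lock relying on hard
interrupt masking cannot tolerate.
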
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 612fb530af41b40f2e164cfe5f302a3cc48a3cf7..5bac4d6f730a9b725c25d2108cb6a16a6e064a90 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -56,16 +56,6 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        lock->slock = 1;
 }
 
-/*
- * Read-write spinlocks. No debug version.
- */
-#define arch_read_lock(lock)           do { barrier(); (void)(lock); } while (0)
-#define arch_write_lock(lock)          do { barrier(); (void)(lock); } while (0)
-#define arch_read_trylock(lock)        ({ barrier(); (void)(lock); 1; })
-#define arch_write_trylock(lock)       ({ barrier(); (void)(lock); 1; })
-#define arch_read_unlock(lock)         do { barrier(); (void)(lock); } while (0)
-#define arch_write_unlock(lock)        do { barrier(); (void)(lock); } while (0)
-
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)      ((void)(lock), 0)
 /* for sched/core.c and kernel_lock.c: */
@@ -75,6 +65,13 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 # define arch_spin_trylock(lock)       ({ barrier(); (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
+#define arch_read_lock(lock)           do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock)          do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock)        ({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock)       ({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock)         do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock)        do { barrier(); (void)(lock); } while (0)
+
 #define arch_spin_is_contended(lock)   (((void)(lock), 0))
 
 #define arch_read_can_lock(lock)       (((void)(lock), 1))
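
The UP read-write lock stubs were previously defined only in the
CONFIG_DEBUG_SPINLOCK branch; moving them below the #endif makes them
available to debug and non-debug builds alike.
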
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e36e652d996fe682157c52768949f0b9c7f1e6d1..8e103f21b98b615dbe87be2cdcbda22fa941e42c 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2887,6 +2887,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
 
 __visible void trace_hardirqs_on_caller(unsigned long ip)
 {
+       if (!ipipe_root_p)
+               return;
+
        time_hardirqs_on(CALLER_ADDR0, ip);
 
        if (unlikely(!debug_locks || current->lockdep_recursion))
@@ -2907,7 +2910,7 @@ __visible void trace_hardirqs_on_caller(unsigned long ip)
         * already enabled, yet we find the hardware thinks they are in fact
         * enabled.. someone messed up their IRQ state tracing.
         */
-       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
                return;
 
        /*
@@ -2940,7 +2943,12 @@ EXPORT_SYMBOL(trace_hardirqs_on);
  */
 __visible void trace_hardirqs_off_caller(unsigned long ip)
 {
-       struct task_struct *curr = current;
+       struct task_struct *curr;
+
+       if (!ipipe_root_p)
+               return;
+
+       curr = current;
 
        time_hardirqs_off(CALLER_ADDR0, ip);
 
@@ -2951,7 +2959,7 @@ __visible void trace_hardirqs_off_caller(unsigned long ip)
         * So we're supposed to get called after you mask local IRQs, but for
         * some reason the hardware doesn't quite think you did a proper job.
         */
-       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
                return;
 
        if (curr->hardirqs_enabled) {
@@ -2987,7 +2995,7 @@ void trace_softirqs_on(unsigned long ip)
         * We fancy IRQs being disabled here, see softirq.c, avoids
         * funny state and nesting things.
         */
-       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
                return;
 
        if (curr->softirqs_enabled) {
@@ -3026,7 +3034,7 @@ void trace_softirqs_off(unsigned long ip)
        /*
         * We fancy IRQs being disabled here, see softirq.c
         */
-       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
                return;
 
        if (curr->softirqs_enabled) {
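
ipipe_root_p is true when the CPU is currently running over the root
(Linux) domain, so hardirq tracing requests issued from the head domain
are now ignored outright. The relaxed DEBUG_LOCKS_WARN_ON() checks
additionally accept hard interrupt masking, i.e. hard_irqs_disabled()
inspecting the real CPU flag, as an alternative to the virtual masking
reported by irqs_disabled(), since pipelined code paths mask interrupts
in hardware without updating the virtualized state.
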
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index 6e40fdfba326b9e415dbea410eb7cb2166d51e45..46698d68e2c5fdafbbbf11f35ffd68f3285c3ed0 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -27,7 +27,9 @@
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) ||                      \
+       defined(CONFIG_DEBUG_LOCK_ALLOC) ||                     \
+       defined(CONFIG_IPIPE)
 /*
  * The __lock_function inlines are taken from
  * include/linux/spinlock_api_smp.h
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index ab719495e2cb42ba7b8c85e7ef5dfadff4695946..35e3184e2d176c62de553092dec102db4773af4a 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -15,6 +15,7 @@
 #include <linux/wait.h>
 #include <linux/vt_kern.h>
 #include <linux/console.h>
+#include <linux/ipipe_trace.h>
 
 
 void __attribute__((weak)) bust_spinlocks(int yes)
@@ -26,6 +27,7 @@ void __attribute__((weak)) bust_spinlocks(int yes)
                unblank_screen();
 #endif
                console_unblank();
+               ipipe_trace_panic_dump();
                if (--oops_in_progress == 0)
                        wake_up_klogd();
        }
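
In the oops-unwinding path (yes == 0), the weak bust_spinlocks() now
also flushes the I-pipe tracer via ipipe_trace_panic_dump(), so the
pipeline trace leading up to the crash reaches the console along with
the rest of the oops output.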