sched: Add cond_resched_rwlock
Author: Ben Gardon <bgardon@google.com>
Tue, 2 Feb 2021 18:57:14 +0000 (10:57 -0800)
Committer: Paolo Bonzini <pbonzini@redhat.com>
Thu, 4 Feb 2021 10:27:43 +0000 (05:27 -0500)
Safely rescheduling while holding a spin lock is essential for keeping
long-running kernel operations running smoothly. Extend this facility to
rwlocks by adding cond_resched_rwlock_read() and cond_resched_rwlock_write().

CC: Ingo Molnar <mingo@redhat.com>
CC: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Waiman Long <longman@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Ben Gardon <bgardon@google.com>
Message-Id: <20210202185734.1680553-9-bgardon@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
include/linux/sched.h
kernel/sched/core.c

index 5d1378e5a04005bdc80a1b0d358d4cd628d8c4fb..3052d16da3cfc8551a63e414c666ca4a287f2b48 100644 (file)
@@ -1883,12 +1883,24 @@ static inline int _cond_resched(void) { return 0; }
 })
 
 extern int __cond_resched_lock(spinlock_t *lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock);
 
 #define cond_resched_lock(lock) ({                             \
        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
        __cond_resched_lock(lock);                              \
 })
 
+#define cond_resched_rwlock_read(lock) ({                      \
+       __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+       __cond_resched_rwlock_read(lock);                       \
+})
+
+#define cond_resched_rwlock_write(lock) ({                     \
+       __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \
+       __cond_resched_rwlock_write(lock);                      \
+})
+
 static inline void cond_resched_rcu(void)
 {
 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
index 15d2562118d1727aa197bd5f9cc6314cfebace6c..ade357642279191543d3aa3f91c21dfeaf15932e 100644 (file)
@@ -6695,6 +6695,46 @@ int __cond_resched_lock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(__cond_resched_lock);
 
+int __cond_resched_rwlock_read(rwlock_t *lock)
+{
+       int resched = should_resched(PREEMPT_LOCK_OFFSET);
+       int ret = 0;
+
+       lockdep_assert_held_read(lock);
+
+       if (rwlock_needbreak(lock) || resched) {
+               read_unlock(lock);
+               if (resched)
+                       preempt_schedule_common();
+               else
+                       cpu_relax();
+               ret = 1;
+               read_lock(lock);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(__cond_resched_rwlock_read);
+
+int __cond_resched_rwlock_write(rwlock_t *lock)
+{
+       int resched = should_resched(PREEMPT_LOCK_OFFSET);
+       int ret = 0;
+
+       lockdep_assert_held_write(lock);
+
+       if (rwlock_needbreak(lock) || resched) {
+               write_unlock(lock);
+               if (resched)
+                       preempt_schedule_common();
+               else
+                       cpu_relax();
+               ret = 1;
+               write_lock(lock);
+       }
+       return ret;
+}
+EXPORT_SYMBOL(__cond_resched_rwlock_write);
+
 /**
  * yield - yield the current processor to other threads.
  *