tick/nohz: Only check for RCU deferred wakeup on user/guest entry when needed
author    Frederic Weisbecker <frederic@kernel.org>
          Thu, 27 May 2021 11:34:41 +0000 (13:34 +0200)
committer Peter Zijlstra <peterz@infradead.org>
          Mon, 31 May 2021 08:14:49 +0000 (10:14 +0200)
Checking for and processing RCU-nocb deferred wakeup upon user/guest
entry is only relevant when nohz_full runs on the local CPU; otherwise
the periodic tick takes care of it.

Make sure we don't needlessly pollute these fast paths: a -3%
performance regression on the will-it-scale.per_process_ops benchmark
has been reported so far.
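
Concretely, both the user-entry and guest-entry paths now go through a
small gate instead of calling rcu_nocb_flush_deferred_wakeup()
unconditionally; a sketch of the helper added to <linux/tick.h> below:

    static inline void tick_nohz_user_enter_prepare(void)
    {
            /* Only nohz_full CPUs rely on this path to flush rcuog wakeups */
            if (tick_nohz_full_cpu(smp_processor_id()))
                    rcu_nocb_flush_deferred_wakeup();
    }

On kernels built without CONFIG_NO_HZ_FULL, or on CPUs outside
tick_nohz_full_mask, the check evaluates to false and the flush is
skipped, leaving the fast path untouched.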

Fixes: 47b8ff194c1f ("entry: Explicitly flush pending rcuog wakeup before last rescheduling point")
Fixes: 4ae7dc97f726 ("entry/kvm: Explicitly flush pending rcuog wakeup before last rescheduling point")
Reported-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20210527113441.465489-1-frederic@kernel.org
include/linux/entry-kvm.h
include/linux/tick.h
kernel/entry/common.c
kernel/time/tick-sched.c

diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 8b2b1d6..136b8d9 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -3,6 +3,7 @@
 #define __LINUX_ENTRYKVM_H
 
 #include <linux/entry-common.h>
+#include <linux/tick.h>
 
 /* Transfer to guest mode work */
 #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
@@ -57,7 +58,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
 static inline void xfer_to_guest_mode_prepare(void)
 {
        lockdep_assert_irqs_disabled();
-       rcu_nocb_flush_deferred_wakeup();
+       tick_nohz_user_enter_prepare();
 }
 
 /**
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 7340613..1a0ff88 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -11,6 +11,7 @@
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
 #include <linux/sched.h>
+#include <linux/rcupdate.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 extern void __init tick_init(void);
@@ -300,4 +301,10 @@ static inline void tick_nohz_task_switch(void)
                __tick_nohz_task_switch();
 }
 
+static inline void tick_nohz_user_enter_prepare(void)
+{
+       if (tick_nohz_full_cpu(smp_processor_id()))
+               rcu_nocb_flush_deferred_wakeup();
+}
+
 #endif
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index a0b3b04..bf16395 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -5,6 +5,7 @@
 #include <linux/highmem.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
+#include <linux/tick.h>
 
 #include "common.h"
 
@@ -186,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
                local_irq_disable_exit_to_user();
 
                /* Check if any of the above work has queued a deferred wakeup */
-               rcu_nocb_flush_deferred_wakeup();
+               tick_nohz_user_enter_prepare();
 
                ti_work = READ_ONCE(current_thread_info()->flags);
        }
@@ -202,7 +203,7 @@ static void exit_to_user_mode_prepare(struct pt_regs *regs)
        lockdep_assert_irqs_disabled();
 
        /* Flush pending rcuog wakeup before the last need_resched() check */
-       rcu_nocb_flush_deferred_wakeup();
+       tick_nohz_user_enter_prepare();
 
        if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
                ti_work = exit_to_user_mode_loop(regs, ti_work);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 828b091..6784f27 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -230,6 +230,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
+EXPORT_SYMBOL_GPL(tick_nohz_full_mask);
 bool tick_nohz_full_running;
 EXPORT_SYMBOL_GPL(tick_nohz_full_running);
 static atomic_t tick_dep_mask;