sched/core: Convert nohz_flags to atomic_t
authorPeter Zijlstra <peterz@infradead.org>
Thu, 21 Dec 2017 09:01:24 +0000 (10:01 +0100)
committerIngo Molnar <mingo@kernel.org>
Fri, 9 Mar 2018 06:59:16 +0000 (07:59 +0100)
Using atomic_t allows us to use the more flexible bitops provided
there. Also, it's smaller.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

index 4f5eeb6..96ad1c0 100644 (file)
@@ -583,7 +583,7 @@ static inline bool got_nohz_idle_kick(void)
 {
        int cpu = smp_processor_id();
 
-       if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+       if (!(atomic_read(nohz_flags(cpu)) & NOHZ_BALANCE_KICK))
                return false;
 
        if (idle_cpu(cpu) && !need_resched())
@@ -593,7 +593,7 @@ static inline bool got_nohz_idle_kick(void)
         * We can't run Idle Load Balance on this CPU for this time so we
         * cancel it and clear NOHZ_BALANCE_KICK
         */
-       clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+       atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(cpu));
        return false;
 }
 
@@ -6074,7 +6074,7 @@ void __init sched_init(void)
                rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
                rq->last_load_update_tick = jiffies;
-               rq->nohz_flags = 0;
+               atomic_set(&rq->nohz_flags, 0);
 #endif
 #endif /* CONFIG_SMP */
                hrtick_rq_init(rq);
index 097db34..5d15047 100644 (file)
@@ -9072,6 +9072,7 @@ static inline int find_new_ilb(void)
  */
 static void nohz_balancer_kick(void)
 {
+       unsigned int flags;
        int ilb_cpu;
 
        nohz.next_balance++;
@@ -9081,7 +9082,8 @@ static void nohz_balancer_kick(void)
        if (ilb_cpu >= nr_cpu_ids)
                return;
 
-       if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+       flags = atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu));
+       if (flags & NOHZ_BALANCE_KICK)
                return;
        /*
         * Use smp_send_reschedule() instead of resched_cpu().
@@ -9095,7 +9097,9 @@ static void nohz_balancer_kick(void)
 
 void nohz_balance_exit_idle(unsigned int cpu)
 {
-       if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+       unsigned int flags = atomic_read(nohz_flags(cpu));
+
+       if (unlikely(flags & NOHZ_TICK_STOPPED)) {
                /*
                 * Completely isolated CPUs don't ever set, so we must test.
                 */
@@ -9103,7 +9107,8 @@ void nohz_balance_exit_idle(unsigned int cpu)
                        cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
                        atomic_dec(&nohz.nr_cpus);
                }
-               clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+               atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu));
        }
 }
 
@@ -9155,7 +9160,7 @@ void nohz_balance_enter_idle(int cpu)
        if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
                return;
 
-       if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
+       if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED)
                return;
 
        /* If we're a completely isolated CPU, we don't play: */
@@ -9164,7 +9169,7 @@ void nohz_balance_enter_idle(int cpu)
 
        cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
        atomic_inc(&nohz.nr_cpus);
-       set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+       atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
 #endif
 
@@ -9302,8 +9307,10 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
        unsigned long next_balance = jiffies + 60*HZ;
        int update_next_balance = 0;
 
-       if (idle != CPU_IDLE ||
-           !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+       if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_BALANCE_KICK))
+               return;
+
+       if (idle != CPU_IDLE)
                goto end;
 
        for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
@@ -9349,7 +9356,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
        if (likely(update_next_balance))
                nohz.next_balance = next_balance;
 end:
-       clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
+       atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
 /*
index 23ba4dd..d98e761 100644 (file)
@@ -763,7 +763,7 @@ struct rq {
 #ifdef CONFIG_SMP
        unsigned long           last_load_update_tick;
 #endif /* CONFIG_SMP */
-       unsigned long           nohz_flags;
+       atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
 
        /* capture load from *all* tasks on this CPU: */
@@ -2034,10 +2034,11 @@ extern void cfs_bandwidth_usage_inc(void);
 extern void cfs_bandwidth_usage_dec(void);
 
 #ifdef CONFIG_NO_HZ_COMMON
-enum rq_nohz_flag_bits {
-       NOHZ_TICK_STOPPED,
-       NOHZ_BALANCE_KICK,
-};
+#define NOHZ_TICK_STOPPED_BIT  0
+#define NOHZ_BALANCE_KICK_BIT  1
+
+#define NOHZ_TICK_STOPPED      BIT(NOHZ_TICK_STOPPED_BIT)
+#define NOHZ_BALANCE_KICK      BIT(NOHZ_BALANCE_KICK_BIT)
 
 #define nohz_flags(cpu)        (&cpu_rq(cpu)->nohz_flags)