sched: Add sched_smt_active()
author		Ben Hutchings <ben@decadent.org.uk>
		Thu, 9 May 2019 23:46:25 +0000 (00:46 +0100)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
		Tue, 14 May 2019 17:19:36 +0000 (19:19 +0200)
Add the sched_smt_active() function needed for some x86 speculation
mitigations.  This was introduced upstream by commits 1b568f0aabf2
"sched/core: Optimize SCHED_SMT", ba2591a5993e "sched/smt: Update
sched_smt_present at runtime", c5511d03ec09 "sched/smt: Make
sched_smt_present track topology", and 321a874a7ef8 "sched/smt: Expose
sched_smt_present static key".  The upstream implementation uses the
static_key_{disable,enable}_cpuslocked() functions, which aren't
practical to backport.
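
For reference, the upstream helper from the commits above is built on a
static key rather than an atomic counter; approximately (reconstructed
from the cited upstream commits, not part of this backport):

	#ifdef CONFIG_SCHED_SMT
	extern struct static_key_false sched_smt_present;

	static __always_inline bool sched_smt_active(void)
	{
		return static_branch_likely(&sched_smt_present);
	}
	#else
	static inline bool sched_smt_active(void) { return false; }
	#endif

Flipping that static key from the hotplug path needs the
static_key_{disable,enable}_cpuslocked() infrastructure mentioned
above; an atomic counter read with atomic_read() needs none of it.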

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/sched/smt.h [new file with mode: 0644]
kernel/sched/core.c
kernel/sched/sched.h

diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644
index 0000000..5209c26
--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/atomic.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern atomic_t sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+       return atomic_read(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6b3fff6..50e80b1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7355,11 +7355,22 @@ static int cpuset_cpu_inactive(unsigned int cpu)
        return 0;
 }
 
+#ifdef CONFIG_SCHED_SMT
+atomic_t sched_smt_present = ATOMIC_INIT(0);
+#endif
+
 int sched_cpu_activate(unsigned int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;
 
+#ifdef CONFIG_SCHED_SMT
+       /*
+        * When going up, increment the number of cores with SMT present.
+        */
+       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+               atomic_inc(&sched_smt_present);
+#endif
        set_cpu_active(cpu, true);
 
        if (sched_smp_initialized) {
@@ -7408,6 +7419,14 @@ int sched_cpu_deactivate(unsigned int cpu)
        else
                synchronize_rcu();
 
+#ifdef CONFIG_SCHED_SMT
+       /*
+        * When going down, decrement the number of cores with SMT present.
+        */
+       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+               atomic_dec(&sched_smt_present);
+#endif
+
        if (!sched_smp_initialized)
                return 0;
 
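The "== 2" tests above deserve a note: cpu_smt_mask() covers a CPU's
online siblings, and the sibling map is updated before activation and
after deactivation, so the weight is exactly 2 when a core gains its
second online thread (going up) or is about to drop back to one (going
down).  A standalone userspace sketch of that counting invariant, with
hypothetical helpers mirroring the hunks above:

	#include <stdio.h>

	static int sched_smt_present;	/* cores with >= 2 threads online */

	/* weight: online siblings in the core, incoming CPU included */
	static void cpu_activate(int weight)
	{
		if (weight == 2)	/* second sibling came up */
			sched_smt_present++;
	}

	/* weight: online siblings in the core, departing CPU included */
	static void cpu_deactivate(int weight)
	{
		if (weight == 2)	/* dropping back to one sibling */
			sched_smt_present--;
	}

	int main(void)
	{
		cpu_activate(1);	/* first thread of a core: no SMT yet */
		cpu_activate(2);	/* second thread: SMT becomes active */
		printf("smt active: %d\n", sched_smt_present != 0);	/* 1 */
		cpu_deactivate(2);	/* one thread will remain */
		printf("smt active: %d\n", sched_smt_present != 0);	/* 0 */
		return 0;
	}
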
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ec6e838..15c0875 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/smt.h>
 #include <linux/u64_stats_sync.h>
 #include <linux/sched/deadline.h>
 #include <linux/kernel_stat.h>
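
To show the intended consumer, a mitigation call site would use the new
helper roughly as below; everything here other than sched_smt_active()
is a hypothetical stand-in for the real x86 speculation patches:

	#include <linux/sched/smt.h>

	/*
	 * Hypothetical sketch: STIBP only matters while a sibling
	 * hardware thread can run other code concurrently, so the
	 * mitigation tracks the current SMT state.
	 */
	static void update_smt_mitigation(void)
	{
		if (sched_smt_active())
			stibp_enable();		/* hypothetical helper */
		else
			stibp_disable();	/* hypothetical helper */
	}

Because sched_smt_active() degrades here to a plain atomic_read()
rather than a patched static branch, callers pay a load on every
invocation; presumably an acceptable trade-off for a stable backport.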