sched/numa: Calculate node scores in complex NUMA topologies
[platform/kernel/linux-rpi.git] kernel/sched/fair.c
index b78280c..7e5712a 100644
@@ -828,11 +828,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
+       unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
        unsigned int scan, floor;
        unsigned int windows = 1;
 
-       if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
-               windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
+       if (scan_size < MAX_SCAN_WINDOW)
+               windows = MAX_SCAN_WINDOW / scan_size;
        floor = 1000 / windows;
 
        scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
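A note on the hunk above: sysctl_numa_balancing_scan_size can be rewritten from userspace at any moment, so reading it once for the bounds check and again for the division could divide by a value that never passed the check. Taking a single snapshot keeps the two uses consistent. Below is a minimal userspace sketch of the same read-once pattern; the names and the MAX_SCAN_WINDOW value are stand-ins for illustration, not kernel API.

    /*
     * Illustration only: snapshot a concurrently written value so the
     * check and the division see the same number.
     */
    #include <stdio.h>

    #define MAX_SCAN_WINDOW 2560                     /* placeholder value */

    static volatile unsigned int scan_size_mb = 256; /* models the sysctl */

    static unsigned int scan_windows(void)
    {
        unsigned int scan_size = scan_size_mb;  /* single read, like ACCESS_ONCE() */
        unsigned int windows = 1;

        if (scan_size < MAX_SCAN_WINDOW)
            windows = MAX_SCAN_WINDOW / scan_size;
        return windows;
    }

    int main(void)
    {
        printf("windows = %u\n", scan_windows());   /* 10 with the values above */
        return 0;
    }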
@@ -924,15 +925,81 @@ static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
                group->faults_cpu[task_faults_idx(nid, 1)];
 }
 
+/* Handle placement on systems where not all nodes are directly connected. */
+static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
+                                       int maxdist, bool task)
+{
+       unsigned long score = 0;
+       int node;
+
+       /*
+        * All nodes are directly connected, and the same distance
+        * from each other. No need for fancy placement algorithms.
+        */
+       if (sched_numa_topology_type == NUMA_DIRECT)
+               return 0;
+
+       /*
+        * This code is called for each node, introducing N^2 complexity,
+        * which should be ok given the number of nodes rarely exceeds 8.
+        */
+       for_each_online_node(node) {
+               unsigned long faults;
+               int dist = node_distance(nid, node);
+
+               /*
+                * The furthest away nodes in the system are not interesting
+                * for placement; nid was already counted.
+                */
+               if (dist == sched_max_numa_distance || node == nid)
+                       continue;
+
+               /*
+                * On systems with a backplane NUMA topology, compare groups
+                * of nodes, and move tasks towards the group with the most
+                * memory accesses. When comparing two nodes at distance
+                * "hoplimit", only nodes closer by than "hoplimit" are part
+                * of each group. Skip other nodes.
+                */
+               if (sched_numa_topology_type == NUMA_BACKPLANE &&
+                                       dist > maxdist)
+                       continue;
+
+               /* Add up the faults from nearby nodes. */
+               if (task)
+                       faults = task_faults(p, node);
+               else
+                       faults = group_faults(p, node);
+
+               /*
+                * On systems with a glueless mesh NUMA topology, there are
+                * no fixed "groups of nodes". Instead, nodes that are not
+                * directly connected bounce traffic through intermediate
+                * nodes; a numa_group can occupy any set of nodes.
+                * The further away a node is, the less the faults count.
+                * This seems to result in good task placement.
+                */
+               if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
+                       faults *= (sched_max_numa_distance - dist);
+                       faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
+               }
+
+               score += faults;
+       }
+
+       return score;
+}
+
 /*
  * These return the fraction of accesses done by a particular task, or
  * task group, on a particular numa node.  The group weight is given a
  * larger multiplier, in order to group tasks together that are almost
  * evenly spread out between numa nodes.
  */
-static inline unsigned long task_weight(struct task_struct *p, int nid)
+static inline unsigned long task_weight(struct task_struct *p, int nid,
+                                       int dist)
 {
-       unsigned long total_faults;
+       unsigned long faults, total_faults;
 
        if (!p->numa_faults_memory)
                return 0;
@@ -942,15 +1009,29 @@ static inline unsigned long task_weight(struct task_struct *p, int nid)
        if (!total_faults)
                return 0;
 
-       return 1000 * task_faults(p, nid) / total_faults;
+       faults = task_faults(p, nid);
+       faults += score_nearby_nodes(p, nid, dist, true);
+
+       return 1000 * faults / total_faults;
 }
 
-static inline unsigned long group_weight(struct task_struct *p, int nid)
+static inline unsigned long group_weight(struct task_struct *p, int nid,
+                                        int dist)
 {
-       if (!p->numa_group || !p->numa_group->total_faults)
+       unsigned long faults, total_faults;
+
+       if (!p->numa_group)
                return 0;
 
-       return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
+       total_faults = p->numa_group->total_faults;
+
+       if (!total_faults)
+               return 0;
+
+       faults = group_faults(p, nid);
+       faults += score_nearby_nodes(p, nid, dist, false);
+
+       return 1000 * faults / total_faults;
 }
 
 bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
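To make the NUMA_GLUELESS_MESH branch of score_nearby_nodes() above concrete: faults seen on a node at distance dist are scaled by (sched_max_numa_distance - dist) / (sched_max_numa_distance - LOCAL_DISTANCE), so a nearby node contributes most of its fault count while distant nodes contribute little. The standalone sketch below uses invented distances; only LOCAL_DISTANCE (10, the SLIT distance of a node to itself) matches the kernel convention.

    /* Sketch of the glueless-mesh scaling in score_nearby_nodes(). */
    #include <stdio.h>

    #define LOCAL_DISTANCE 10   /* SLIT distance of a node to itself */

    static unsigned long scaled_faults(unsigned long faults, int dist, int max_dist)
    {
        faults *= (max_dist - dist);
        faults /= (max_dist - LOCAL_DISTANCE);
        return faults;
    }

    int main(void)
    {
        const int max_dist = 40;    /* stands in for sched_max_numa_distance */

        printf("%lu\n", scaled_faults(1000, 20, max_dist)); /* 666: one hop away */
        printf("%lu\n", scaled_faults(1000, 30, max_dist)); /* 333: two hops away */
        printf("%lu\n", scaled_faults(1000, 40, max_dist)); /*   0: maximum distance */
        return 0;
    }

In the kernel function, nodes at sched_max_numa_distance are skipped before the scaling is reached; the last line only shows that the formula would give them no weight anyway.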
@@ -1083,6 +1164,7 @@ struct task_numa_env {
        struct numa_stats src_stats, dst_stats;
 
        int imbalance_pct;
+       int dist;
 
        struct task_struct *best_task;
        long best_imp;
@@ -1162,11 +1244,22 @@ static void task_numa_compare(struct task_numa_env *env,
        long load;
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;
+       int dist = env->dist;
 
        rcu_read_lock();
-       cur = ACCESS_ONCE(dst_rq->curr);
-       if (cur->pid == 0) /* idle */
+
+       raw_spin_lock_irq(&dst_rq->lock);
+       cur = dst_rq->curr;
+       /*
+        * No need to move the exiting task, and this ensures that ->curr
+        * wasn't reaped and thus get_task_struct() in task_numa_assign()
+        * is safe under RCU read lock.
+        * Note that rcu_read_lock() itself can't protect from the final
+        * put_task_struct() after the last schedule().
+        */
+       if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                cur = NULL;
+       raw_spin_unlock_irq(&dst_rq->lock);
 
        /*
         * "imp" is the fault differential for the source task between the
@@ -1185,8 +1278,8 @@ static void task_numa_compare(struct task_numa_env *env,
                 * in any group then look only at task weights.
                 */
                if (cur->numa_group == env->p->numa_group) {
-                       imp = taskimp + task_weight(cur, env->src_nid) -
-                             task_weight(cur, env->dst_nid);
+                       imp = taskimp + task_weight(cur, env->src_nid, dist) -
+                             task_weight(cur, env->dst_nid, dist);
                        /*
                         * Add some hysteresis to prevent swapping the
                         * tasks within a group over tiny differences.
@@ -1200,11 +1293,11 @@ static void task_numa_compare(struct task_numa_env *env,
                         * instead.
                         */
                        if (cur->numa_group)
-                               imp += group_weight(cur, env->src_nid) -
-                                      group_weight(cur, env->dst_nid);
+                               imp += group_weight(cur, env->src_nid, dist) -
+                                      group_weight(cur, env->dst_nid, dist);
                        else
-                               imp += task_weight(cur, env->src_nid) -
-                                      task_weight(cur, env->dst_nid);
+                               imp += task_weight(cur, env->src_nid, dist) -
+                                      task_weight(cur, env->dst_nid, dist);
                }
        }
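A worked example of the same-group comparison above, with invented numbers: say the source task p would gain taskimp = 300 (per mille) by moving to dst_nid, and the task cur currently running there has weights of 400 on src_nid and 250 on dst_nid. Swapping the two helps both tasks, and the value the code computes reflects that:

    /* Toy version of the same-group branch in task_numa_compare(). */
    #include <stdio.h>

    int main(void)
    {
        long taskimp = 300;     /* p's weight gain from moving to dst_nid */
        long cur_src = 400;     /* cur's weight on src_nid, where it would land */
        long cur_dst = 250;     /* cur's weight on dst_nid, where it runs now */

        /* imp = taskimp + task_weight(cur, src_nid) - task_weight(cur, dst_nid) */
        long imp = taskimp + cur_src - cur_dst;

        printf("imp = %ld\n", imp);  /* 450: a swap improves placement for both */
        return 0;
    }

The hysteresis mentioned in the comment is then applied on top of this value, so near-zero differences between two tasks in the same group do not cause them to be swapped back and forth.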
 
@@ -1303,7 +1396,7 @@ static int task_numa_migrate(struct task_struct *p)
        };
        struct sched_domain *sd;
        unsigned long taskweight, groupweight;
-       int nid, ret;
+       int nid, ret, dist;
        long taskimp, groupimp;
 
        /*
@@ -1331,12 +1424,13 @@ static int task_numa_migrate(struct task_struct *p)
                return -EINVAL;
        }
 
-       taskweight = task_weight(p, env.src_nid);
-       groupweight = group_weight(p, env.src_nid);
-       update_numa_stats(&env.src_stats, env.src_nid);
        env.dst_nid = p->numa_preferred_nid;
-       taskimp = task_weight(p, env.dst_nid) - taskweight;
-       groupimp = group_weight(p, env.dst_nid) - groupweight;
+       dist = env.dist = node_distance(env.src_nid, env.dst_nid);
+       taskweight = task_weight(p, env.src_nid, dist);
+       groupweight = group_weight(p, env.src_nid, dist);
+       update_numa_stats(&env.src_stats, env.src_nid);
+       taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
+       groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);
 
        /* Try to find a spot on the preferred nid. */
@@ -1348,12 +1442,20 @@ static int task_numa_migrate(struct task_struct *p)
                        if (nid == env.src_nid || nid == p->numa_preferred_nid)
                                continue;
 
+                       dist = node_distance(env.src_nid, env.dst_nid);
+                       if (sched_numa_topology_type == NUMA_BACKPLANE &&
+                                               dist != env.dist) {
+                               taskweight = task_weight(p, env.src_nid, dist);
+                               groupweight = group_weight(p, env.src_nid, dist);
+                       }
+
                        /* Only consider nodes where both task and groups benefit */
-                       taskimp = task_weight(p, nid) - taskweight;
-                       groupimp = group_weight(p, nid) - groupweight;
+                       taskimp = task_weight(p, nid, dist) - taskweight;
+                       groupimp = group_weight(p, nid, dist) - groupweight;
                        if (taskimp < 0 && groupimp < 0)
                                continue;
 
+                       env.dist = dist;
                        env.dst_nid = nid;
                        update_numa_stats(&env.dst_stats, env.dst_nid);
                        task_numa_find_cpu(&env, taskimp, groupimp);
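For the loop above, a small numeric sketch (fault counts invented) of how the per-mille weights turn into taskimp: a candidate node survives the "taskimp < 0 && groupimp < 0" filter only if the task or its group sees a larger share of its faults there than on env.src_nid. The real weights also fold in the score_nearby_nodes() term on non-direct topologies; this sketch leaves that out.

    /* Toy model of the improvement terms used in task_numa_migrate(). */
    #include <stdio.h>

    static long node_weight(unsigned long faults, unsigned long total_faults)
    {
        if (!total_faults)
            return 0;
        return 1000 * faults / total_faults;    /* per mille, as in task_weight() */
    }

    int main(void)
    {
        unsigned long total = 4000;             /* all recorded faults for the task */
        long src = node_weight(500, total);     /* 125 on the current node */
        long dst = node_weight(2500, total);    /* 625 on the candidate node */

        printf("taskimp = %ld\n", dst - src);   /* 500: worth looking for a CPU there */
        return 0;
    }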
@@ -1520,7 +1622,7 @@ static void update_task_scan_period(struct task_struct *p,
                 * scanning faster if shared accesses dominate as it may
                 * simply bounce migrations uselessly
                 */
-               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
+               ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
                diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
        }
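The "+ 1" in the denominator above is a divide-by-zero guard: if neither private nor shared faults were recorded during the window, the old expression triggered a divide error, while the new one simply yields a ratio of 0; with non-zero counts the result is essentially unchanged. A tiny standalone check (NUMA_PERIOD_SLOTS is 10 in this fair.c):

    /* Why the "+ 1" matters when no faults were recorded in a window. */
    #include <stdio.h>

    #define NUMA_PERIOD_SLOTS 10
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long priv = 0, shared = 0;

        /* priv + shared == 0: the old divisor would have been zero. */
        printf("%lu\n", DIV_ROUND_UP(priv * NUMA_PERIOD_SLOTS,
                                     priv + shared + 1));   /* 0 */

        priv = 30;
        shared = 10;
        printf("%lu\n", DIV_ROUND_UP(priv * NUMA_PERIOD_SLOTS,
                                     priv + shared + 1));   /* 8, same as without the +1 */
        return 0;
    }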
 
@@ -6615,7 +6717,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
        struct sched_group *group;
        struct rq *busiest;
        unsigned long flags;
-       struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+       struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
 
        struct lb_env env = {
                .sd             = sd,