summary |
shortlog |
log |
commit | commitdiff |
tree
raw |
patch |
inline | side by side (from parent 1:
fb13c7e)
A task's preferred node is selected based on the number of faults
recorded for a node but the actual task_numa_migrate() conducts a global
search regardless of the preferred nid. This patch checks if the
preferred nid has capacity and if so, searches for a CPU within that
node. This avoids a global search when the preferred node is not
overloaded.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-41-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
+/*
+ * Scan every CPU on the destination node and evaluate each one as a
+ * migration target for env->p via task_numa_compare().
+ *
+ * @env: NUMA migration environment; env->dst_nid selects the node to
+ *       scan, and env->dst_cpu is set to each candidate in turn.
+ * @imp: fault-count improvement of dst_nid over the source node,
+ *       passed through to task_numa_compare() for weighing candidates.
+ */
+static void task_numa_find_cpu(struct task_numa_env *env, long imp)
+{
+	int cpu;
+
+	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
+		/* Skip this CPU if the source task cannot migrate */
+		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
+			continue;
+
+		env->dst_cpu = cpu;
+		task_numa_compare(env, imp);
+	}
+}
+
static int task_numa_migrate(struct task_struct *p)
{
struct task_numa_env env = {
static int task_numa_migrate(struct task_struct *p)
{
struct task_numa_env env = {
};
struct sched_domain *sd;
unsigned long faults;
};
struct sched_domain *sd;
unsigned long faults;
+ int nid, ret;
+ long imp;
/*
* Pick the lowest SD_NUMA domain, as that would have the smallest
/*
* Pick the lowest SD_NUMA domain, as that would have the smallest
faults = task_faults(p, env.src_nid);
update_numa_stats(&env.src_stats, env.src_nid);
faults = task_faults(p, env.src_nid);
update_numa_stats(&env.src_stats, env.src_nid);
+ env.dst_nid = p->numa_preferred_nid;
+ imp = task_faults(env.p, env.dst_nid) - faults;
+ update_numa_stats(&env.dst_stats, env.dst_nid);
- /* Find an alternative node with relatively better statistics */
- for_each_online_node(nid) {
- long imp;
-
- if (nid == env.src_nid)
- continue;
-
- /* Only consider nodes that recorded more faults */
- imp = task_faults(p, nid) - faults;
- if (imp < 0)
- continue;
+ /*
+ * If the preferred nid has capacity then use it. Otherwise find an
+ * alternative node with relatively better statistics.
+ */
+ if (env.dst_stats.has_capacity) {
+ task_numa_find_cpu(&env, imp);
+ } else {
+ for_each_online_node(nid) {
+ if (nid == env.src_nid || nid == p->numa_preferred_nid)
+ continue;
- env.dst_nid = nid;
- update_numa_stats(&env.dst_stats, env.dst_nid);
- for_each_cpu(cpu, cpumask_of_node(nid)) {
- /* Skip this CPU if the source task cannot migrate */
- if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
+ /* Only consider nodes that recorded more faults */
+ imp = task_faults(env.p, nid) - faults;
+ if (imp < 0)
- env.dst_cpu = cpu;
- task_numa_compare(&env, imp);
+ env.dst_nid = nid;
+ update_numa_stats(&env.dst_stats, env.dst_nid);
+ task_numa_find_cpu(&env, imp);