  * the preferred node but still allow the scheduler to move the task again if
  * the node's CPUs are overloaded.
  */
-unsigned int sysctl_numa_balancing_settle_count __read_mostly = 3;
+unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;
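For orientation, the settle count is consumed on the load-balancing side: until a task has survived this many PTE scans since it last changed nodes, the balancer keeps favouring moves towards the preferred node. A minimal sketch of that check, modeled on migrate_improves_locality() from this series with the scheduler-feature and sched-domain guards elided, so treat the details as approximate:

static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
{
	int src_nid = cpu_to_node(env->src_cpu);
	int dst_nid = cpu_to_node(env->dst_cpu);

	/* A task that has settled gets no preferential treatment */
	if (src_nid == dst_nid ||
	    p->numa_migrate_seq >= sysctl_numa_balancing_settle_count)
		return false;

	/* Moving towards the preferred node improves locality */
	return dst_nid == p->numa_preferred_nid;
}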
 static inline int task_faults_idx(int nid, int priv)
 {
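With private and shared faults tracked separately below, the per-task fault buffer appears to hold two counters per node, and this helper picks the slot. The hunk cuts the body off; it is a one-liner, reproduced here for reference, assuming a [shared, private] layout per node:

static inline int task_faults_idx(int nid, int priv)
{
	return 2 * nid + priv;	/* slot 2*nid is shared, 2*nid + 1 private */
}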
 		/* Update the preferred nid and migrate task if possible */
 		p->numa_preferred_nid = max_nid;
-		p->numa_migrate_seq = 0;
+		p->numa_migrate_seq = 1;
 		migrate_task_to(p, preferred_cpu);
 	}
 }
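The counter's lifecycle, as far as this excerpt shows it: set to 1 on a deliberate NUMA placement (above), zeroed when the load balancer moves the task off-node (last hunk of this section), and presumably advanced once per completed scan in task_numa_placement(). Starting at 1 rather than 0 lets the fault path distinguish a deliberate placement from a balancer move, and plausibly accounts for the settle-count default rising from 3 to 4 in the first hunk. A sketch of the assumed increment site, illustrative only:

static void task_numa_placement(struct task_struct *p)
{
	int seq = ACCESS_ONCE(p->mm->numa_scan_seq);

	if (p->numa_scan_seq == seq)
		return;			/* no new scan since last placement */
	p->numa_scan_seq = seq;
	p->numa_migrate_seq++;		/* one scan closer to being settled */

	/* fault statistics and preferred-node selection follow */
}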
 /*
  * Got a PROT_NONE fault for a page on @node.
  */
-void task_numa_fault(int last_nid, int node, int pages, bool migrated)
+void task_numa_fault(int last_nidpid, int node, int pages, bool migrated)
 {
 	struct task_struct *p = current;
 	int priv;
 	if (!p->mm)
 		return;
-	/* For now, do not attempt to detect private/shared accesses */
-	priv = 1;
+	/*
+	 * First accesses are treated as private; otherwise an access is
+	 * considered private if the accessing pid has not changed.
+	 */
+	if (!nidpid_pid_unset(last_nidpid))
+		priv = ((p->pid & LAST__PID_MASK) == nidpid_to_pid(last_nidpid));
+	else
+		priv = 1;
 	/* Allocate buffer to track faults on a per-node basis */
 	if (unlikely(!p->numa_faults)) {
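The last_nidpid value above packs the last faulting node and the low bits of the last accessing pid into spare page-flag bits. The decoding helpers are roughly as follows; the field widths are from memory and may differ, so treat this as a sketch of the encoding rather than the series' exact definitions:

#define LAST__PID_SHIFT	8
#define LAST__PID_MASK	((1 << LAST__PID_SHIFT) - 1)

#define LAST__NID_SHIFT	NODES_SHIFT
#define LAST__NID_MASK	((1 << LAST__NID_SHIFT) - 1)

/* Low bits hold the pid, the bits above them hold the nid */
static inline int nidpid_to_pid(int nidpid)
{
	return nidpid & LAST__PID_MASK;
}

static inline int nidpid_to_nid(int nidpid)
{
	return (nidpid >> LAST__PID_SHIFT) & LAST__NID_MASK;
}

/* An all-ones pid field means no task has been recorded yet */
static inline bool nidpid_pid_unset(int nidpid)
{
	return nidpid_to_pid(nidpid) == (-1 & LAST__PID_MASK);
}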
 		vma = mm->mmap;
 	}
 	for (; vma; vma = vma->vm_next) {
-		if (!vma_migratable(vma))
-			continue;
-
-		/* Skip small VMAs. They are not likely to be of relevance */
-		if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+		if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
 			continue;
 		do {
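The size-based filter above is replaced by a policy check: hinting faults are only worth trapping in VMAs whose effective memory policy allows migrate-on-fault. A sketch of what vma_policy_mof() tests, assuming the MPOL_F_MOF flag and eliding the reference counting that per-VMA policies need:

bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
{
	struct mempolicy *pol;

	/* Use the mapping's own policy when it provides one, else the task's */
	if (vma->vm_ops && vma->vm_ops->get_policy)
		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
	else
		pol = task->mempolicy;

	/* Scanning only pays off if a fault may migrate the page */
	return pol && (pol->flags & MPOL_F_MOF);
}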
 	set_task_cpu(p, env->dst_cpu);
 	activate_task(env->dst_rq, p, 0);
 	check_preempt_curr(env->dst_rq, p, 0);
+#ifdef CONFIG_NUMA_BALANCING
+	if (p->numa_preferred_nid != -1) {
+		int src_nid = cpu_to_node(env->src_cpu);
+		int dst_nid = cpu_to_node(env->dst_cpu);
+
+		/*
+		 * If the load balancer has moved the task to another node,
+		 * away from its preferred one, hold off migrating its memory
+		 * in the short term in case the move itself is short-lived.
+		 */
+		if (src_nid != dst_nid && dst_nid != p->numa_preferred_nid)
+			p->numa_migrate_seq = 0;
+	}
+#endif
 }
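This is where the two reset values diverge: a deliberate placement leaves the counter at 1, while a balancer-initiated move zeroes it, so the fault path can tell them apart and decline to migrate memory straight after a balancer move. A hypothetical predicate for that test (the series open-codes whatever comparison it actually uses):

/* Hypothetical helper, not in the patch: was the task just moved by the balancer? */
static inline bool task_numa_freshly_moved(struct task_struct *p)
{
	return p->numa_migrate_seq == 0;
}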
 /*