.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
- .newidle_idx = 0, /* unused */ \
+ .newidle_idx = 2, \
.wake_idx = 1, \
.forkexec_idx = 1, \
.flags = SD_LOAD_BALANCE \
.cache_nice_tries = 2, \
.busy_idx = 3, \
.idle_idx = 2, \
- .newidle_idx = 0, \
+ .newidle_idx = 2, \
.wake_idx = 1, \
.forkexec_idx = 1, \
.flags = SD_LOAD_BALANCE \
# define SD_CACHE_NICE_TRIES 2
# define SD_IDLE_IDX 2
-# define SD_NEWIDLE_IDX 0
+# define SD_NEWIDLE_IDX 2
# define SD_FORKEXEC_IDX 1
#endif
#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
#define BALANCE_FOR_MC_POWER \
(sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
u32 reciprocal_cpu_power;
};
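+/*
+ * Topology level of a sched domain, ordered from the smallest span
+ * (SMT siblings) to the widest (all NUMA nodes); compared against a
+ * requested relax_domain_level in set_domain_attribute().
+ */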
+enum sched_domain_level {
+ SD_LV_NONE = 0,
+ SD_LV_SIBLING,
+ SD_LV_MC,
+ SD_LV_CPU,
+ SD_LV_NODE,
+ SD_LV_ALLNODES,
+ SD_LV_MAX
+};
+
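+/*
+ * Attributes for one sched-domain partition, passed to
+ * partition_sched_domains() in an array parallel to doms_new[];
+ * relax_domain_level < 0 means "no request, use the system default".
+ */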
+struct sched_domain_attr {
+ int relax_domain_level;
+};
+
+#define SD_ATTR_INIT (struct sched_domain_attr) { \
+ .relax_domain_level = -1, \
+}
+
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
unsigned int wake_idx;
unsigned int forkexec_idx;
int flags; /* See SD_* */
+ enum sched_domain_level level;
/* Runtime fields. */
unsigned long last_balance; /* init to jiffies. units in jiffies */
#endif
};
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+ struct sched_domain_attr *dattr_new);
extern int arch_reinit_sched_domains(void);
#endif /* CONFIG_SMP */
/* partition number for rebuild_sched_domains() */
int pn;
+ /* for custom sched domain */
+ int relax_domain_level;
+
	/* used for walking a cpuset hierarchy */
struct list_head stack_list;
};
return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
}
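+/*
+ * Raise dattr's relax_domain_level to the largest level requested by
+ * any cpuset merged into this domain partition so far.
+ */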
+static void
+update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
+{
+ if (!dattr)
+ return;
+ if (dattr->relax_domain_level < c->relax_domain_level)
+ dattr->relax_domain_level = c->relax_domain_level;
+}
+
/*
* rebuild_sched_domains()
*
int csn; /* how many cpuset ptrs in csa so far */
int i, j, k; /* indices for partition finding loops */
cpumask_t *doms; /* resulting partition; i.e. sched domains */
+ struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms; /* number of sched domains in result */
int nslot; /* next empty doms[] cpumask_t slot */
q = NULL;
csa = NULL;
doms = NULL;
+ dattr = NULL;
/* Special case for the 99% of systems with one, full, sched domain */
if (is_sched_load_balance(&top_cpuset)) {
doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
if (!doms)
goto rebuild;
+ dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
+ if (dattr) {
+ *dattr = SD_ATTR_INIT;
+ update_domain_attr(dattr, &top_cpuset);
+ }
*doms = top_cpuset.cpus_allowed;
goto rebuild;
}
doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
if (!doms)
goto rebuild;
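+	/* a failed attr allocation is not fatal: NULL means default attrs */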
+ dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
for (nslot = 0, i = 0; i < csn; i++) {
struct cpuset *a = csa[i];
}
cpus_clear(*dp);
+ if (dattr)
+ *(dattr + nslot) = SD_ATTR_INIT;
for (j = i; j < csn; j++) {
struct cpuset *b = csa[j];
if (apn == b->pn) {
cpus_or(*dp, *dp, b->cpus_allowed);
b->pn = -1;
+				if (dattr)
+					update_domain_attr(dattr + nslot, b);
}
}
nslot++;
rebuild:
/* Have scheduler rebuild sched domains */
get_online_cpus();
- partition_sched_domains(ndoms, doms);
+ partition_sched_domains(ndoms, doms, dattr);
put_online_cpus();
done:
kfifo_free(q);
kfree(csa);
/* Don't kfree(doms) -- partition_sched_domains() does that. */
+ /* Don't kfree(dattr) -- partition_sched_domains() does that. */
}
static inline int started_after_time(struct task_struct *t1,
return 0;
}
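+/*
+ * Handle a write to the sched_relax_domain_level file: negative input is
+ * clamped to -1 ("use the default"), and any change of level triggers a
+ * rebuild of the sched domains.
+ */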
+static int update_relax_domain_level(struct cpuset *cs, char *buf)
+{
+ int val = simple_strtol(buf, NULL, 10);
+
+ if (val < 0)
+ val = -1;
+
+ if (val != cs->relax_domain_level) {
+ cs->relax_domain_level = val;
+ rebuild_sched_domains();
+ }
+
+ return 0;
+}
+
/*
* update_flag - read a 0 or a 1 in a file and update associated flag
* bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
FILE_CPU_EXCLUSIVE,
FILE_MEM_EXCLUSIVE,
FILE_SCHED_LOAD_BALANCE,
+ FILE_SCHED_RELAX_DOMAIN_LEVEL,
FILE_MEMORY_PRESSURE_ENABLED,
FILE_MEMORY_PRESSURE,
FILE_SPREAD_PAGE,
case FILE_SCHED_LOAD_BALANCE:
retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
break;
+ case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+ retval = update_relax_domain_level(cs, buffer);
+ break;
case FILE_MEMORY_MIGRATE:
retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
break;
case FILE_SCHED_LOAD_BALANCE:
*s++ = is_sched_load_balance(cs) ? '1' : '0';
break;
+ case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+ s += sprintf(s, "%d", cs->relax_domain_level);
+ break;
case FILE_MEMORY_MIGRATE:
*s++ = is_memory_migrate(cs) ? '1' : '0';
break;
.private = FILE_SCHED_LOAD_BALANCE,
};
+static struct cftype cft_sched_relax_domain_level = {
+ .name = "sched_relax_domain_level",
+ .read = cpuset_common_file_read,
+ .write = cpuset_common_file_write,
+ .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
+};
+
static struct cftype cft_memory_migrate = {
.name = "memory_migrate",
.read = cpuset_common_file_read,
return err;
if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
return err;
+ if ((err = cgroup_add_file(cont, ss,
+ &cft_sched_relax_domain_level)) < 0)
+ return err;
if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
return err;
if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
nodes_clear(cs->mems_allowed);
cs->mems_generation = cpuset_mems_generation++;
fmeter_init(&cs->fmeter);
+ cs->relax_domain_level = -1;
cs->parent = parent;
number_of_cpusets++;
fmeter_init(&top_cpuset.fmeter);
top_cpuset.mems_generation = cpuset_mems_generation++;
set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
+ top_cpuset.relax_domain_level = -1;
err = register_filesystem(&cpuset_fs_type);
if (err < 0)
{ \
memset(sd, 0, sizeof(*sd)); \
*sd = SD_##type##_INIT; \
+ sd->level = SD_LV_##type; \
}
SD_INIT_FUNC(CPU)
#define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \
((unsigned long)(a) + offsetof(struct allmasks, v))
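+/*
+ * System-wide default relax_domain_level, used when a domain partition has
+ * no explicit request; can be set at boot with "relax_domain_level=".
+ */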
+static int default_relax_domain_level = -1;
+
+static int __init setup_relax_domain_level(char *str)
+{
+ default_relax_domain_level = simple_strtoul(str, NULL, 0);
+ return 1;
+}
+__setup("relax_domain_level=", setup_relax_domain_level);
+
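+/*
+ * Apply the requested relax level to one domain: domains at or below the
+ * requested level keep cheap idle balancing (SD_WAKE_IDLE_FAR,
+ * SD_BALANCE_NEWIDLE); wider domains get SD_WAKE_IDLE and
+ * SD_BALANCE_NEWIDLE cleared.
+ */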
+static void set_domain_attribute(struct sched_domain *sd,
+ struct sched_domain_attr *attr)
+{
+ int request;
+
+ if (!attr || attr->relax_domain_level < 0) {
+ if (default_relax_domain_level < 0)
+ return;
+ else
+ request = default_relax_domain_level;
+ } else
+ request = attr->relax_domain_level;
+ if (request < sd->level) {
+ /* turn off idle balance on this domain */
+ sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
+ } else {
+ /* turn on idle balance on this domain */
+ sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
+ }
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
*/
-static int build_sched_domains(const cpumask_t *cpu_map)
+static int __build_sched_domains(const cpumask_t *cpu_map,
+ struct sched_domain_attr *attr)
{
int i;
struct root_domain *rd;
SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) {
sd = &per_cpu(allnodes_domains, i);
SD_INIT(sd, ALLNODES);
+ set_domain_attribute(sd, attr);
sd->span = *cpu_map;
cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
p = sd;
sd = &per_cpu(node_domains, i);
SD_INIT(sd, NODE);
+ set_domain_attribute(sd, attr);
sched_domain_node_span(cpu_to_node(i), &sd->span);
sd->parent = p;
if (p)
p = sd;
sd = &per_cpu(phys_domains, i);
SD_INIT(sd, CPU);
+ set_domain_attribute(sd, attr);
sd->span = *nodemask;
sd->parent = p;
if (p)
p = sd;
sd = &per_cpu(core_domains, i);
SD_INIT(sd, MC);
+ set_domain_attribute(sd, attr);
sd->span = cpu_coregroup_map(i);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
p = sd;
sd = &per_cpu(cpu_domains, i);
SD_INIT(sd, SIBLING);
+ set_domain_attribute(sd, attr);
sd->span = per_cpu(cpu_sibling_map, i);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
#endif
}
+static int build_sched_domains(const cpumask_t *cpu_map)
+{
+ return __build_sched_domains(cpu_map, NULL);
+}
+
static cpumask_t *doms_cur; /* current sched domains */
static int ndoms_cur; /* number of sched domains in 'doms_cur' */
+static struct sched_domain_attr *dattr_cur;	/* attributes of custom domains
+						   in 'doms_cur' */
/*
* Special case: If a kmalloc of a doms_cur partition (array of
if (!doms_cur)
doms_cur = &fallback_doms;
cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map);
+ dattr_cur = NULL;
err = build_sched_domains(doms_cur);
register_sched_domain_sysctl();
arch_destroy_sched_domains(cpu_map, &tmpmask);
}
+/* handle NULL as "default": a missing attr array compares as all-default */
+static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
+ struct sched_domain_attr *new, int idx_new)
+{
+ struct sched_domain_attr tmp;
+
+ /* fast path */
+ if (!new && !cur)
+ return 1;
+
+ tmp = SD_ATTR_INIT;
+ return !memcmp(cur ? (cur + idx_cur) : &tmp,
+ new ? (new + idx_new) : &tmp,
+ sizeof(struct sched_domain_attr));
+}
+
/*
* Partition sched domains as specified by the 'ndoms_new'
* cpumasks in the array doms_new[] of cpumasks. This compares
*
* Call with hotplug lock held
*/
-void partition_sched_domains(int ndoms_new, cpumask_t *doms_new)
+void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+ struct sched_domain_attr *dattr_new)
{
int i, j;
ndoms_new = 1;
doms_new = &fallback_doms;
cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
+ dattr_new = NULL;
}
/* Destroy deleted domains */
for (i = 0; i < ndoms_cur; i++) {
for (j = 0; j < ndoms_new; j++) {
- if (cpus_equal(doms_cur[i], doms_new[j]))
+ if (cpus_equal(doms_cur[i], doms_new[j])
+ && dattrs_equal(dattr_cur, i, dattr_new, j))
goto match1;
}
/* no match - a current sched domain not in new doms_new[] */
/* Build new domains */
for (i = 0; i < ndoms_new; i++) {
for (j = 0; j < ndoms_cur; j++) {
- if (cpus_equal(doms_new[i], doms_cur[j]))
+ if (cpus_equal(doms_new[i], doms_cur[j])
+ && dattrs_equal(dattr_new, i, dattr_cur, j))
goto match2;
}
/* no match - add a new doms_new */
- build_sched_domains(doms_new + i);
+ __build_sched_domains(doms_new + i,
+ dattr_new ? dattr_new + i : NULL);
match2:
;
}
/* Remember the new sched domains */
if (doms_cur != &fallback_doms)
kfree(doms_cur);
+ kfree(dattr_cur); /* kfree(NULL) is safe */
doms_cur = doms_new;
+ dattr_cur = dattr_new;
ndoms_cur = ndoms_new;
register_sched_domain_sysctl();
return cpu;
for_each_domain(cpu, sd) {
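+		/*
+		 * SD_WAKE_IDLE_FAR also allows the idle-CPU search here, but
+		 * only while the waking task is not cache hot on its old CPU.
+		 */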
- if (sd->flags & SD_WAKE_IDLE) {
+ if ((sd->flags & SD_WAKE_IDLE)
+ || ((sd->flags & SD_WAKE_IDLE_FAR)
+ && !task_hot(p, task_rq(p)->clock, sd))) {
cpus_and(tmp, sd->span, p->cpus_allowed);
for_each_cpu_mask(i, tmp) {
if (idle_cpu(i)) {