projects
/
platform
/
adaptation
/
renesas_rcar
/
renesas_kernel.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Merge branch 'for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
[platform/adaptation/renesas_rcar/renesas_kernel.git]
/
kernel
/
sched
/
core.c
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 05c39f0..e53bda3 100644 (file)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6815,7 +6815,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
 
-	tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
 				lockdep_is_held(&tsk->sighand->siglock)),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
@@ -7137,23 +7137,22 @@ int sched_rt_handler(struct ctl_table *table, int write,
 
 #ifdef CONFIG_CGROUP_SCHED
 
-/* return corresponding task_group object of a cgroup */
-static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
 {
-	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
-			    struct task_group, css);
+	return css ? container_of(css, struct task_group, css) : NULL;
 }
 
-static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 {
-	struct task_group *tg, *parent;
+	struct task_group *parent = css_tg(parent_css);
+	struct task_group *tg;
 
-	if (!cgrp->parent) {
+	if (!parent) {
 		/* This is early initialization for the top cgroup */
 		return &root_task_group.css;
 	}
 
-	parent = cgroup_tg(cgrp->parent);
 	tg = sched_create_group(parent);
 	if (IS_ERR(tg))
 		return ERR_PTR(-ENOMEM);
@@ -7161,41 +7160,38 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static int cpu_cgroup_css_online(struct cgroup *cgrp)
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
-	struct task_group *parent;
-
-	if (!cgrp->parent)
-		return 0;
+	struct task_group *tg = css_tg(css);
+	struct task_group *parent = css_tg(css_parent(css));
 
-	parent = cgroup_tg(cgrp->parent);
-	sched_online_group(tg, parent);
+	if (parent)
+		sched_online_group(tg, parent);
 	return 0;
 }
 
-static void cpu_cgroup_css_free(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 
 	sched_destroy_group(tg);
 }
 
-static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 
 	sched_offline_group(tg);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 				 struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, cgrp, tset) {
+	cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
-		if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;
 #else
 		/* We don't support RT-tasks being in separate groups */
@@ -7206,18 +7202,18 @@ static int cpu_cgroup_can_attach(struct cgroup *cgrp,
 	return 0;
 }
 
-static void cpu_cgroup_attach(struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 			      struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 
-	cgroup_taskset_for_each(task, cgrp, tset)
+	cgroup_taskset_for_each(task, css, tset)
 		sched_move_task(task);
 }
 
-static void
-cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
-		struct task_struct *task)
+static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
+			    struct cgroup_subsys_state *old_css,
+			    struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.
@@ -7231,15 +7227,16 @@ cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-				u64 shareval)
+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
+				struct cftype *cftype, u64 shareval)
 {
-	return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
+	return sched_group_set_shares(css_tg(css), scale_load(shareval));
 }
 
-static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
+			       struct cftype *cft)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 
 	return (u64) scale_load_down(tg->shares);
 }
@@ -7361,26 +7358,28 @@ long tg_get_cfs_period(struct task_group *tg)
 	return cfs_period_us;
 }
 
-static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
+				  struct cftype *cft)
 {
-	return tg_get_cfs_quota(cgroup_tg(cgrp));
+	return tg_get_cfs_quota(css_tg(css));
 }
 
-static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
-				   s64 cfs_quota_us)
+static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
+				   struct cftype *cftype, s64 cfs_quota_us)
 {
-	return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
+	return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
 }
 
-static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
+				   struct cftype *cft)
 {
-	return tg_get_cfs_period(cgroup_tg(cgrp));
+	return tg_get_cfs_period(css_tg(css));
 }
 
-static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
-				    u64 cfs_period_us)
+static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
+				    struct cftype *cftype, u64 cfs_period_us)
 {
-	return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
+	return tg_set_cfs_period(css_tg(css), cfs_period_us);
 }
 
 struct cfs_schedulable_data {
@@ -7461,10 +7460,10 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
 	return ret;
 }
 
-static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
+static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft,
 			  struct cgroup_map_cb *cb)
 {
-	struct task_group *tg = cgroup_tg(cgrp);
+	struct task_group *tg = css_tg(css);
 	struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
 
 	cb->fill(cb, "nr_periods", cfs_b->nr_periods);
@@ -7477,26 +7476,28 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
-static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
-				s64 val)
+static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
+				struct cftype *cft, s64 val)
 {
-	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
+	return sched_group_set_rt_runtime(css_tg(css), val);
 }
 
-static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
+			       struct cftype *cft)
 {
-	return sched_group_rt_runtime(cgroup_tg(cgrp));
+	return sched_group_rt_runtime(css_tg(css));
 }
 
-static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
-				    u64 rt_period_us)
+static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
+				    struct cftype *cftype, u64 rt_period_us)
 {
-	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
+	return sched_group_set_rt_period(css_tg(css), rt_period_us);
 }
 
-static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
+				   struct cftype *cft)
 {
-	return sched_group_rt_period(cgroup_tg(cgrp));
+	return sched_group_rt_period(css_tg(css));
 }
 #endif /* CONFIG_RT_GROUP_SCHED */