From: Srivatsa Vaddagiri
Date: Fri, 25 Jan 2008 20:07:59 +0000 (+0100)
Subject: sched: group scheduling code cleanup
X-Git-Tag: upstream/snapshot3+hdmi~28418^2~91
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=93f992ccc008dd4030381caeebb252e85e66684b;p=platform%2Fadaptation%2Frenesas_rcar%2Frenesas_kernel.git

sched: group scheduling code cleanup

Minor cleanups:

- Fix coding style
- remove obsolete comment

Signed-off-by: Srivatsa Vaddagiri
Signed-off-by: Ingo Molnar
---

diff --git a/kernel/sched.c b/kernel/sched.c
index e76b11c..7f827b7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -191,12 +191,12 @@ struct task_group init_task_group = {
 };
 
 #ifdef CONFIG_FAIR_USER_SCHED
-# define INIT_TASK_GRP_LOAD 2*NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD 2*NICE_0_LOAD
 #else
-# define INIT_TASK_GRP_LOAD NICE_0_LOAD
+# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
 #endif
 
-static int init_task_group_load = INIT_TASK_GRP_LOAD;
+static int init_task_group_load = INIT_TASK_GROUP_LOAD;
 
 /* return group to which a task belongs */
 static inline struct task_group *task_group(struct task_struct *p)
@@ -881,21 +881,6 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
 
 #define sched_class_highest (&rt_sched_class)
 
-/*
- * Update delta_exec, delta_fair fields for rq.
- *
- * delta_fair clock advances at a rate inversely proportional to
- * total load (rq->load.weight) on the runqueue, while
- * delta_exec advances at the same rate as wall-clock (provided
- * cpu is not idle).
- *
- * delta_exec / delta_fair is a measure of the (smoothened) load on this
- * runqueue over any given interval. This (smoothened) load is used
- * during load balance.
- *
- * This function is called /before/ updating rq->load
- * and when switching tasks.
- */
 static inline void inc_load(struct rq *rq, const struct task_struct *p)
 {
 	update_load_add(&rq->load, p->se.load.weight);
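
For reference, the inc_load() path touched by the second hunk simply folds a
task's scheduling weight into the runqueue's aggregate load. The standalone C
sketch below illustrates that accounting with stripped-down stand-in
structures; the real task_struct, rq and load_weight carry far more state and
the real helpers do additional bookkeeping, so this is an illustration under
simplified assumptions, not the kernel's actual definitions:

	/* Stand-in types for the scheduler's load bookkeeping; illustrative only. */
	struct load_weight {
		unsigned long weight;		/* summed weights of queued entities */
	};

	struct sched_entity {
		struct load_weight load;	/* this entity's own weight */
	};

	struct task_struct {
		struct sched_entity se;
	};

	struct rq {
		struct load_weight load;	/* aggregate load of this runqueue */
	};

	/* Simplified stand-in for update_load_add(): add one weight to a total. */
	static inline void update_load_add(struct load_weight *lw, unsigned long inc)
	{
		lw->weight += inc;
	}

	/* Same shape as inc_load() in the hunk above: account a task's weight
	 * into the runqueue's total load when the task is added to it. */
	static inline void inc_load(struct rq *rq, const struct task_struct *p)
	{
		update_load_add(&rq->load, p->se.load.weight);
	}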