From a56900fabe126d27dc801aad6ff88bd5af9c7040 Mon Sep 17 00:00:00 2001
From: Satya Durga Srinivasu Prabhala
Date: Wed, 8 Nov 2017 12:13:47 -0800
Subject: [PATCH] ANDROID: sched/walt: Fix compilation issue for x86_64

Below compilation errors are observed when CONFIG_SCHED_WALT is
enabled and CONFIG_FAIR_GROUP_SCHED is disabled: cumulative_runnable_avg
is declared in struct cfs_rq only inside the CONFIG_FAIR_GROUP_SCHED
section, while walt.c references it unconditionally. Fix it by moving
the field next to the CONFIG_CFS_BANDWIDTH members and compiling the
cfs_rq accessors only when CONFIG_CFS_BANDWIDTH is enabled.

  CC      kernel/sched/walt.o
kernel/sched/walt.c: In function 'walt_inc_cfs_cumulative_runnable_avg':
kernel/sched/walt.c:157:8: error: 'struct cfs_rq' has no member named
'cumulative_runnable_avg'
  cfs_rq->cumulative_runnable_avg += p->ravg.demand;
        ^
kernel/sched/walt.c: In function 'walt_dec_cfs_cumulative_runnable_avg':
kernel/sched/walt.c:163:8: error: 'struct cfs_rq' has no member named
'cumulative_runnable_avg'
  cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
        ^
make[2]: *** [kernel/sched/walt.o] Error 1
make[1]: *** [kernel/sched] Error 2
make: *** [kernel] Error 2

Signed-off-by: Satya Durga Srinivasu Prabhala
---
 kernel/sched/sched.h |  7 +++----
 kernel/sched/walt.c  |  2 ++
 kernel/sched/walt.h  | 21 +++++++++++++--------
 3 files changed, 18 insertions(+), 12 deletions(-)
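Note for reviewers (this section is not part of the commit message):
the failure mode is simply a struct member that exists under one
Kconfig option being touched by code that is always built. The
standalone sketch below reproduces it with hypothetical names
(cfs_rq_demo, demo_inc); CONFIG_FAIR_GROUP_SCHED stands in for the
real option, and guarding the accessor with the same option mirrors
the #ifdef CONFIG_CFS_BANDWIDTH this patch adds to walt.c.

  #include <stdio.h>

  struct cfs_rq_demo {
      int nr_running;
  #ifdef CONFIG_FAIR_GROUP_SCHED
      unsigned long long cumulative_runnable_avg; /* exists only here */
  #endif
  };

  #ifdef CONFIG_FAIR_GROUP_SCHED
  /* Guarded by the same option as the member; dropping this #ifdef
   * while building without -DCONFIG_FAIR_GROUP_SCHED reproduces the
   * "has no member named" error quoted in the commit message. */
  static void demo_inc(struct cfs_rq_demo *cfs_rq,
                       unsigned long long demand)
  {
      cfs_rq->cumulative_runnable_avg += demand;
  }
  #endif

  int main(void)
  {
      struct cfs_rq_demo rq = { 0 };

  #ifdef CONFIG_FAIR_GROUP_SCHED
      demo_inc(&rq, 42);
      printf("avg = %llu\n", rq.cumulative_runnable_avg);
  #else
      printf("nr_running = %d (WALT member compiled out)\n", rq.nr_running);
  #endif
      return 0;
  }

Both configurations should build cleanly, e.g. "gcc demo.c" and
"gcc -DCONFIG_FAIR_GROUP_SCHED demo.c".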
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fc77c45..09d1219 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -447,10 +447,6 @@ struct cfs_rq {
 	struct list_head leaf_cfs_rq_list;
 	struct task_group *tg;	/* group that "owns" this runqueue */
 
-#ifdef CONFIG_SCHED_WALT
-	u64 cumulative_runnable_avg;
-#endif
-
 #ifdef CONFIG_CFS_BANDWIDTH
 	int runtime_enabled;
 	u64 runtime_expires;
@@ -460,6 +456,9 @@ struct cfs_rq {
 	u64 throttled_clock_task_time;
 	int throttled, throttle_count;
 	struct list_head throttled_list;
+#ifdef CONFIG_SCHED_WALT
+	u64 cumulative_runnable_avg;
+#endif /* CONFIG_SCHED_WALT */
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 };
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 343c2c9..0162dde 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -148,6 +148,7 @@ static int __init walt_init_ops(void)
 }
 late_initcall(walt_init_ops);
 
+#ifdef CONFIG_CFS_BANDWIDTH
 void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
 		struct task_struct *p)
 {
@@ -159,6 +160,7 @@ void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *cfs_rq,
 {
 	cfs_rq->cumulative_runnable_avg -= p->ravg.demand;
 }
+#endif
 
 static int exiting_task(struct task_struct *p)
 {
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index de7edac..bd20ed8 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -20,10 +20,7 @@ void walt_update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 		u64 wallclock, u64 irqtime);
 void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
 void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p);
-void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
-		struct task_struct *p);
-void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
-		struct task_struct *p);
+
 void walt_fixup_busy_time(struct task_struct *p, int new_cpu);
 void walt_init_new_task_load(struct task_struct *p);
 void walt_mark_task_starting(struct task_struct *p);
@@ -43,10 +40,6 @@ static inline void walt_update_task_ravg(struct task_struct *p, struct rq *rq,
 		int event, u64 wallclock, u64 irqtime) { }
 static inline void walt_inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
 static inline void walt_dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p) { }
-static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
-		struct task_struct *p) { }
-static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
-		struct task_struct *p) { }
 static inline void walt_fixup_busy_time(struct task_struct *p, int new_cpu) { }
 static inline void walt_init_new_task_load(struct task_struct *p) { }
 static inline void walt_mark_task_starting(struct task_struct *p) { }
@@ -59,6 +52,18 @@ static inline u64 walt_ktime_clock(void) { return 0; }
 
 #endif /* CONFIG_SCHED_WALT */
 
+#if defined(CONFIG_CFS_BANDWIDTH) && defined(CONFIG_SCHED_WALT)
+void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+		struct task_struct *p);
+void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+		struct task_struct *p);
+#else
+static inline void walt_inc_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+		struct task_struct *p) { }
+static inline void walt_dec_cfs_cumulative_runnable_avg(struct cfs_rq *rq,
+		struct task_struct *p) { }
+#endif
+
 extern bool walt_disabled;
 
 #endif
-- 
2.7.4