sched: rt-group: synchronised bandwidth period
kernel/sched_rt.c (platform/kernel/linux-starfive.git)
1 /*
2  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3  * policies)
4  */
5
6 #ifdef CONFIG_SMP
7
8 static inline int rt_overloaded(struct rq *rq)
9 {
10         return atomic_read(&rq->rd->rto_count);
11 }
12
13 static inline void rt_set_overload(struct rq *rq)
14 {
15         cpu_set(rq->cpu, rq->rd->rto_mask);
16         /*
17          * Make sure the mask is visible before we set
18          * the overload count. That is checked to determine
19          * if we should look at the mask. It would be a shame
20          * if we looked at the mask, but the mask was not
21          * updated yet.
22          */
23         wmb();
24         atomic_inc(&rq->rd->rto_count);
25 }
26
27 static inline void rt_clear_overload(struct rq *rq)
28 {
29         /* the order here really doesn't matter */
30         atomic_dec(&rq->rd->rto_count);
31         cpu_clear(rq->cpu, rq->rd->rto_mask);
32 }
33
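/*
 * Keep the runqueue's "RT overloaded" state in sync: a runqueue is
 * overloaded when it has more than one runnable RT task and at least
 * one of them can migrate, in which case it is flagged in the root
 * domain's rto_mask so other CPUs know they may pull from it.
 */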
34 static void update_rt_migration(struct rq *rq)
35 {
36         if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
37                 if (!rq->rt.overloaded) {
38                         rt_set_overload(rq);
39                         rq->rt.overloaded = 1;
40                 }
41         } else if (rq->rt.overloaded) {
42                 rt_clear_overload(rq);
43                 rq->rt.overloaded = 0;
44         }
45 }
46 #endif /* CONFIG_SMP */
47
48 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
49 {
50         return container_of(rt_se, struct task_struct, rt);
51 }
52
53 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
54 {
55         return !list_empty(&rt_se->run_list);
56 }
57
58 #ifdef CONFIG_RT_GROUP_SCHED
59
60 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
61 {
62         if (!rt_rq->tg)
63                 return RUNTIME_INF;
64
65         return rt_rq->tg->rt_bandwidth.rt_runtime;
66 }
67
68 #define for_each_leaf_rt_rq(rt_rq, rq) \
69         list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
70
71 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
72 {
73         return rt_rq->rq;
74 }
75
76 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
77 {
78         return rt_se->rt_rq;
79 }
80
81 #define for_each_sched_rt_entity(rt_se) \
82         for (; rt_se; rt_se = rt_se->parent)
83
84 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
85 {
86         return rt_se->my_q;
87 }
88
89 static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
90 static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
91
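/*
 * (Re-)enqueue a group's sched_rt_entity into its parent queue once the
 * group has runnable tasks again (e.g. after being unthrottled), and
 * preempt the runqueue's current task if the group now holds a higher
 * priority.
 */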
92 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
93 {
94         struct sched_rt_entity *rt_se = rt_rq->rt_se;
95
96         if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
97                 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
98
99                 enqueue_rt_entity(rt_se);
100                 if (rt_rq->highest_prio < curr->prio)
101                         resched_task(curr);
102         }
103 }
104
105 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
106 {
107         struct sched_rt_entity *rt_se = rt_rq->rt_se;
108
109         if (rt_se && on_rt_rq(rt_se))
110                 dequeue_rt_entity(rt_se);
111 }
112
113 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
114 {
115         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
116 }
117
118 static int rt_se_boosted(struct sched_rt_entity *rt_se)
119 {
120         struct rt_rq *rt_rq = group_rt_rq(rt_se);
121         struct task_struct *p;
122
123         if (rt_rq)
124                 return !!rt_rq->rt_nr_boosted;
125
126         p = rt_task_of(rt_se);
127         return p->prio != p->normal_prio;
128 }
129
130 #ifdef CONFIG_SMP
131 static inline cpumask_t sched_rt_period_mask(void)
132 {
133         return cpu_rq(smp_processor_id())->rd->span;
134 }
135 #else
136 static inline cpumask_t sched_rt_period_mask(void)
137 {
138         return cpu_online_map;
139 }
140 #endif
141
142 static inline
143 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
144 {
145         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
146 }
147
148 #else
149
150 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
151 {
152         return def_rt_bandwidth.rt_runtime;
153 }
154
155 #define for_each_leaf_rt_rq(rt_rq, rq) \
156         for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
157
158 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
159 {
160         return container_of(rt_rq, struct rq, rt);
161 }
162
163 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
164 {
165         struct task_struct *p = rt_task_of(rt_se);
166         struct rq *rq = task_rq(p);
167
168         return &rq->rt;
169 }
170
171 #define for_each_sched_rt_entity(rt_se) \
172         for (; rt_se; rt_se = NULL)
173
174 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
175 {
176         return NULL;
177 }
178
179 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
180 {
181 }
182
183 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
184 {
185 }
186
187 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
188 {
189         return rt_rq->rt_throttled;
190 }
191
192 static inline cpumask_t sched_rt_period_mask(void)
193 {
194         return cpu_online_map;
195 }
196
197 static inline
198 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
199 {
200         return &cpu_rq(cpu)->rt;
201 }
202
203 #endif
204
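/*
 * Runs when the rt_bandwidth period timer fires: refund up to 'overrun'
 * periods worth of runtime to every rt_rq in the period mask, unthrottle
 * and re-enqueue queues that dropped back under their runtime limit, and
 * return 1 only when all queues are idle so the caller can stop the timer.
 */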
205 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
206 {
207         int i, idle = 1;
208         cpumask_t span;
209
210         if (rt_b->rt_runtime == RUNTIME_INF)
211                 return 1;
212
213         span = sched_rt_period_mask();
214         for_each_cpu_mask(i, span) {
215                 int enqueue = 0;
216                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
217                 struct rq *rq = rq_of_rt_rq(rt_rq);
218
219                 spin_lock(&rq->lock);
220                 if (rt_rq->rt_time) {
221                         u64 runtime = rt_b->rt_runtime;
222
223                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
224                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
225                                 rt_rq->rt_throttled = 0;
226                                 enqueue = 1;
227                         }
228                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
229                                 idle = 0;
230                 }
231
232                 if (enqueue)
233                         sched_rt_rq_enqueue(rt_rq);
234                 spin_unlock(&rq->lock);
235         }
236
237         return idle;
238 }
239
240 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
241 {
242 #ifdef CONFIG_RT_GROUP_SCHED
243         struct rt_rq *rt_rq = group_rt_rq(rt_se);
244
245         if (rt_rq)
246                 return rt_rq->highest_prio;
247 #endif
248
249         return rt_task_of(rt_se)->prio;
250 }
251
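/*
 * Check whether this rt_rq has consumed more CPU time than its bandwidth
 * allows for the current period.  If so, mark it throttled and dequeue it;
 * queues holding priority-boosted tasks are exempt.  Returns 1 when the
 * current task should be rescheduled because its queue got throttled.
 */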
252 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
253 {
254         u64 runtime = sched_rt_runtime(rt_rq);
255
256         if (runtime == RUNTIME_INF)
257                 return 0;
258
259         if (rt_rq->rt_throttled)
260                 return rt_rq_throttled(rt_rq);
261
262         if (rt_rq->rt_time > runtime) {
263                 rt_rq->rt_throttled = 1;
264                 if (rt_rq_throttled(rt_rq)) {
265                         sched_rt_rq_dequeue(rt_rq);
266                         return 1;
267                 }
268         }
269
270         return 0;
271 }
272
273 /*
274  * Update the current task's runtime statistics. Skip current tasks that
275  * are not in our scheduling class.
276  */
277 static void update_curr_rt(struct rq *rq)
278 {
279         struct task_struct *curr = rq->curr;
280         struct sched_rt_entity *rt_se = &curr->rt;
281         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
282         u64 delta_exec;
283
284         if (!task_has_rt_policy(curr))
285                 return;
286
287         delta_exec = rq->clock - curr->se.exec_start;
288         if (unlikely((s64)delta_exec < 0))
289                 delta_exec = 0;
290
291         schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
292
293         curr->se.sum_exec_runtime += delta_exec;
294         curr->se.exec_start = rq->clock;
295         cpuacct_charge(curr, delta_exec);
296
297         rt_rq->rt_time += delta_exec;
298         if (sched_rt_runtime_exceeded(rt_rq))
299                 resched_task(curr);
300 }
301
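/*
 * Accounting for an rt entity entering an rt_rq: bump the running count,
 * track the highest queued priority, the number of migratable tasks (for
 * overload detection) and the PI-boosted count, and make sure the
 * bandwidth period timer is running.
 */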
302 static inline
303 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
304 {
305         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
306         rt_rq->rt_nr_running++;
307 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
308         if (rt_se_prio(rt_se) < rt_rq->highest_prio)
309                 rt_rq->highest_prio = rt_se_prio(rt_se);
310 #endif
311 #ifdef CONFIG_SMP
312         if (rt_se->nr_cpus_allowed > 1) {
313                 struct rq *rq = rq_of_rt_rq(rt_rq);
314                 rq->rt.rt_nr_migratory++;
315         }
316
317         update_rt_migration(rq_of_rt_rq(rt_rq));
318 #endif
319 #ifdef CONFIG_RT_GROUP_SCHED
320         if (rt_se_boosted(rt_se))
321                 rt_rq->rt_nr_boosted++;
322
323         if (rt_rq->tg)
324                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
325 #else
326         start_rt_bandwidth(&def_rt_bandwidth);
327 #endif
328 }
329
330 static inline
331 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
332 {
333         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
334         WARN_ON(!rt_rq->rt_nr_running);
335         rt_rq->rt_nr_running--;
336 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
337         if (rt_rq->rt_nr_running) {
338                 struct rt_prio_array *array;
339
340                 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
341                 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
342                         /* recalculate */
343                         array = &rt_rq->active;
344                         rt_rq->highest_prio =
345                                 sched_find_first_bit(array->bitmap);
346                 } /* otherwise leave rt_rq->highest_prio alone */
347         } else
348                 rt_rq->highest_prio = MAX_RT_PRIO;
349 #endif
350 #ifdef CONFIG_SMP
351         if (rt_se->nr_cpus_allowed > 1) {
352                 struct rq *rq = rq_of_rt_rq(rt_rq);
353                 rq->rt.rt_nr_migratory--;
354         }
355
356         update_rt_migration(rq_of_rt_rq(rt_rq));
357 #endif /* CONFIG_SMP */
358 #ifdef CONFIG_RT_GROUP_SCHED
359         if (rt_se_boosted(rt_se))
360                 rt_rq->rt_nr_boosted--;
361
362         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
363 #endif
364 }
365
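/*
 * Queue an rt entity at the tail of its priority list, unless the group
 * it represents is currently throttled, and update the rt_rq accounting.
 */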
366 static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
367 {
368         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
369         struct rt_prio_array *array = &rt_rq->active;
370         struct rt_rq *group_rq = group_rt_rq(rt_se);
371
372         if (group_rq && rt_rq_throttled(group_rq))
373                 return;
374
375         list_add_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
376         __set_bit(rt_se_prio(rt_se), array->bitmap);
377
378         inc_rt_tasks(rt_se, rt_rq);
379 }
380
381 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
382 {
383         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
384         struct rt_prio_array *array = &rt_rq->active;
385
386         list_del_init(&rt_se->run_list);
387         if (list_empty(array->queue + rt_se_prio(rt_se)))
388                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
389
390         dec_rt_tasks(rt_se, rt_rq);
391 }
392
393 /*
394  * Because the prio of an upper entry depends on the lower
395  * entries, we must remove entries top - down.
396  *
397  * XXX: O(1/2 h^2) because we can only walk up, not down the chain.
398  *      doesn't matter much for now, as h=2 for GROUP_SCHED.
399  */
400 static void dequeue_rt_stack(struct task_struct *p)
401 {
402         struct sched_rt_entity *rt_se, *top_se;
403
404         /*
405          * dequeue all, top - down.
406          */
407         do {
408                 rt_se = &p->rt;
409                 top_se = NULL;
410                 for_each_sched_rt_entity(rt_se) {
411                         if (on_rt_rq(rt_se))
412                                 top_se = rt_se;
413                 }
414                 if (top_se)
415                         dequeue_rt_entity(top_se);
416         } while (top_se);
417 }
418
419 /*
420  * Adding/removing a task to/from a priority array:
421  */
422 static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
423 {
424         struct sched_rt_entity *rt_se = &p->rt;
425
426         if (wakeup)
427                 rt_se->timeout = 0;
428
429         dequeue_rt_stack(p);
430
431         /*
432          * enqueue everybody, bottom - up.
433          */
434         for_each_sched_rt_entity(rt_se)
435                 enqueue_rt_entity(rt_se);
436 }
437
438 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
439 {
440         struct sched_rt_entity *rt_se = &p->rt;
441         struct rt_rq *rt_rq;
442
443         update_curr_rt(rq);
444
445         dequeue_rt_stack(p);
446
447         /*
448          * re-enqueue all non-empty rt_rq entities.
449          */
450         for_each_sched_rt_entity(rt_se) {
451                 rt_rq = group_rt_rq(rt_se);
452                 if (rt_rq && rt_rq->rt_nr_running)
453                         enqueue_rt_entity(rt_se);
454         }
455 }
456
457 /*
458  * Put the task at the end of the run list without the overhead of dequeue
459  * followed by enqueue.
460  */
461 static
462 void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
463 {
464         struct rt_prio_array *array = &rt_rq->active;
465
466         list_move_tail(&rt_se->run_list, array->queue + rt_se_prio(rt_se));
467 }
468
469 static void requeue_task_rt(struct rq *rq, struct task_struct *p)
470 {
471         struct sched_rt_entity *rt_se = &p->rt;
472         struct rt_rq *rt_rq;
473
474         for_each_sched_rt_entity(rt_se) {
475                 rt_rq = rt_rq_of_se(rt_se);
476                 requeue_rt_entity(rt_rq, rt_se);
477         }
478 }
479
480 static void yield_task_rt(struct rq *rq)
481 {
482         requeue_task_rt(rq, rq->curr);
483 }
484
485 #ifdef CONFIG_SMP
486 static int find_lowest_rq(struct task_struct *task);
487
488 static int select_task_rq_rt(struct task_struct *p, int sync)
489 {
490         struct rq *rq = task_rq(p);
491
492         /*
493          * If the current task is an RT task, then
494          * try to see if we can wake this RT task up on another
495          * runqueue. Otherwise simply start this RT task
496          * on its current runqueue.
497          *
498          * We want to avoid overloading runqueues, even if
499          * the RT task is of higher priority than the current RT task.
500          * RT tasks behave differently than other tasks. If
501          * one gets preempted, we try to push it off to another queue.
502          * So trying to keep a preempting RT task on the same
503          * cache hot CPU will force the running RT task to
504          * a cold CPU. So we would waste all the cache of the lower-
505          * priority RT task in hopes of saving some cache for an RT
506          * task that is just being woken and probably has a cold
507          * cache anyway.
508          */
509         if (unlikely(rt_task(rq->curr)) &&
510             (p->rt.nr_cpus_allowed > 1)) {
511                 int cpu = find_lowest_rq(p);
512
513                 return (cpu == -1) ? task_cpu(p) : cpu;
514         }
515
516         /*
517          * Otherwise, just let it ride on the affined RQ and the
518          * post-schedule router will push the preempted task away
519          */
520         return task_cpu(p);
521 }
522 #endif /* CONFIG_SMP */
523
524 /*
525  * Preempt the current task with a newly woken task if needed:
526  */
527 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
528 {
529         if (p->prio < rq->curr->prio)
530                 resched_task(rq->curr);
531 }
532
533 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
534                                                    struct rt_rq *rt_rq)
535 {
536         struct rt_prio_array *array = &rt_rq->active;
537         struct sched_rt_entity *next = NULL;
538         struct list_head *queue;
539         int idx;
540
541         idx = sched_find_first_bit(array->bitmap);
542         BUG_ON(idx >= MAX_RT_PRIO);
543
544         queue = array->queue + idx;
545         next = list_entry(queue->next, struct sched_rt_entity, run_list);
546
547         return next;
548 }
549
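/*
 * Select the next task to run: starting at the root rt_rq, repeatedly take
 * the first entity on the highest-priority non-empty list and descend into
 * its group until a task-level entity is reached.  Returns NULL when
 * nothing is runnable or the queue is throttled.
 */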
550 static struct task_struct *pick_next_task_rt(struct rq *rq)
551 {
552         struct sched_rt_entity *rt_se;
553         struct task_struct *p;
554         struct rt_rq *rt_rq;
555
556         rt_rq = &rq->rt;
557
558         if (unlikely(!rt_rq->rt_nr_running))
559                 return NULL;
560
561         if (rt_rq_throttled(rt_rq))
562                 return NULL;
563
564         do {
565                 rt_se = pick_next_rt_entity(rq, rt_rq);
566                 BUG_ON(!rt_se);
567                 rt_rq = group_rt_rq(rt_se);
568         } while (rt_rq);
569
570         p = rt_task_of(rt_se);
571         p->se.exec_start = rq->clock;
572         return p;
573 }
574
575 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
576 {
577         update_curr_rt(rq);
578         p->se.exec_start = 0;
579 }
580
581 #ifdef CONFIG_SMP
582
583 /* Only try algorithms three times */
584 #define RT_MAX_TRIES 3
585
586 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
587 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
588
589 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
590 {
591         if (!task_running(rq, p) &&
592             (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
593             (p->rt.nr_cpus_allowed > 1))
594                 return 1;
595         return 0;
596 }
597
598 /* Return the second highest RT task, NULL otherwise */
599 static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
600 {
601         struct task_struct *next = NULL;
602         struct sched_rt_entity *rt_se;
603         struct rt_prio_array *array;
604         struct rt_rq *rt_rq;
605         int idx;
606
607         for_each_leaf_rt_rq(rt_rq, rq) {
608                 array = &rt_rq->active;
609                 idx = sched_find_first_bit(array->bitmap);
610  next_idx:
611                 if (idx >= MAX_RT_PRIO)
612                         continue;
613                 if (next && next->prio < idx)
614                         continue;
615                 list_for_each_entry(rt_se, array->queue + idx, run_list) {
616                         struct task_struct *p = rt_task_of(rt_se);
617                         if (pick_rt_task(rq, p, cpu)) {
618                                 next = p;
619                                 break;
620                         }
621                 }
622                 if (!next) {
623                         idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
624                         goto next_idx;
625                 }
626         }
627
628         return next;
629 }
630
631 static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
632
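/*
 * Build the mask of candidate CPUs for pushing 'task': CPUs within the
 * task's affinity and the root domain whose runqueues either run RT tasks
 * of lower priority than 'task' or no RT tasks at all.  Returns the number
 * of lowest-priority candidates, or 1 as soon as an RT-free CPU is found.
 */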
633 static int find_lowest_cpus(struct task_struct *task, cpumask_t *lowest_mask)
634 {
635         int       lowest_prio = -1;
636         int       lowest_cpu  = -1;
637         int       count       = 0;
638         int       cpu;
639
640         cpus_and(*lowest_mask, task_rq(task)->rd->online, task->cpus_allowed);
641
642         /*
643          * Scan each rq for the lowest prio.
644          */
645         for_each_cpu_mask(cpu, *lowest_mask) {
646                 struct rq *rq = cpu_rq(cpu);
647
648                 /* We look for lowest RT prio or non-rt CPU */
649                 if (rq->rt.highest_prio >= MAX_RT_PRIO) {
650                         /*
651                          * if we already found a low RT queue
652                          * and now we find this non-rt queue,
653                          * clear the mask and set only this CPU's bit.
654                          * Otherwise just return with the mask as is,
655                          * and count==1 will cause the algorithm
656                          * to use the first bit found.
657                          */
658                         if (lowest_cpu != -1) {
659                                 cpus_clear(*lowest_mask);
660                                 cpu_set(rq->cpu, *lowest_mask);
661                         }
662                         return 1;
663                 }
664
665                 /* no locking for now */
666                 if ((rq->rt.highest_prio > task->prio)
667                     && (rq->rt.highest_prio >= lowest_prio)) {
668                         if (rq->rt.highest_prio > lowest_prio) {
669                                 /* new low - clear old data */
670                                 lowest_prio = rq->rt.highest_prio;
671                                 lowest_cpu = cpu;
672                                 count = 0;
673                         }
674                         count++;
675                 } else
676                         cpu_clear(cpu, *lowest_mask);
677         }
678
679         /*
680          * Clear out all the set bits that represent
681          * runqueues that were of higher prio than
682          * the lowest_prio.
683          */
684         if (lowest_cpu > 0) {
685                 /*
686                  * Perhaps we could add another cpumask op to
687                  * zero out bits. Like cpu_zero_bits(cpumask, nrbits);
688                  * Then that could be optimized to use memset and such.
689                  */
690                 for_each_cpu_mask(cpu, *lowest_mask) {
691                         if (cpu >= lowest_cpu)
692                                 break;
693                         cpu_clear(cpu, *lowest_mask);
694                 }
695         }
696
697         return count;
698 }
699
700 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
701 {
702         int first;
703
704         /* "this_cpu" is cheaper to preempt than a remote processor */
705         if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
706                 return this_cpu;
707
708         first = first_cpu(*mask);
709         if (first != NR_CPUS)
710                 return first;
711
712         return -1;
713 }
714
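/*
 * Choose the best CPU to push 'task' to among the lowest-priority
 * candidates: prefer the CPU the task last ran on (likely cache-hot),
 * then a CPU sharing an SD_WAKE_AFFINE domain with it, then anything
 * left in the mask.  Returns -1 if there is no suitable target.
 */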
715 static int find_lowest_rq(struct task_struct *task)
716 {
717         struct sched_domain *sd;
718         cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
719         int this_cpu = smp_processor_id();
720         int cpu      = task_cpu(task);
721         int count    = find_lowest_cpus(task, lowest_mask);
722
723         if (!count)
724                 return -1; /* No targets found */
725
726         /*
727          * There is no sense in performing an optimal search if only one
728          * target is found.
729          */
730         if (count == 1)
731                 return first_cpu(*lowest_mask);
732
733         /*
734          * At this point we have built a mask of cpus representing the
735          * lowest priority tasks in the system.  Now we want to elect
736          * the best one based on our affinity and topology.
737          *
738          * We prioritize the last cpu that the task executed on since
739          * it is most likely cache-hot in that location.
740          */
741         if (cpu_isset(cpu, *lowest_mask))
742                 return cpu;
743
744         /*
745          * Otherwise, we consult the sched_domains span maps to figure
746          * out which cpu is logically closest to our hot cache data.
747          */
748         if (this_cpu == cpu)
749                 this_cpu = -1; /* Skip this_cpu opt if the same */
750
751         for_each_domain(cpu, sd) {
752                 if (sd->flags & SD_WAKE_AFFINE) {
753                         cpumask_t domain_mask;
754                         int       best_cpu;
755
756                         cpus_and(domain_mask, sd->span, *lowest_mask);
757
758                         best_cpu = pick_optimal_cpu(this_cpu,
759                                                     &domain_mask);
760                         if (best_cpu != -1)
761                                 return best_cpu;
762                 }
763         }
764
765         /*
766          * And finally, if there were no matches within the domains
767          * just give the caller *something* to work with from the compatible
768          * locations.
769          */
770         return pick_optimal_cpu(this_cpu, lowest_mask);
771 }
772
773 /* Will lock the rq it finds */
774 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
775 {
776         struct rq *lowest_rq = NULL;
777         int tries;
778         int cpu;
779
780         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
781                 cpu = find_lowest_rq(task);
782
783                 if ((cpu == -1) || (cpu == rq->cpu))
784                         break;
785
786                 lowest_rq = cpu_rq(cpu);
787
788                 /* if the prio of this runqueue changed, try again */
789                 if (double_lock_balance(rq, lowest_rq)) {
790                         /*
791                          * We had to unlock the run queue. In the
792                          * meantime, the task could have
793                          * migrated already or had its affinity changed.
794                          * Also make sure that it wasn't scheduled on its rq.
795                          */
796                         if (unlikely(task_rq(task) != rq ||
797                                      !cpu_isset(lowest_rq->cpu,
798                                                 task->cpus_allowed) ||
799                                      task_running(rq, task) ||
800                                      !task->se.on_rq)) {
801
802                                 spin_unlock(&lowest_rq->lock);
803                                 lowest_rq = NULL;
804                                 break;
805                         }
806                 }
807
808                 /* If this rq is still suitable use it. */
809                 if (lowest_rq->rt.highest_prio > task->prio)
810                         break;
811
812                 /* try again */
813                 spin_unlock(&lowest_rq->lock);
814                 lowest_rq = NULL;
815         }
816
817         return lowest_rq;
818 }
819
820 /*
821  * If the current CPU has more than one RT task, see if the non
822  * running task can migrate over to a CPU that is running a task
823  * of lesser priority.
824  */
825 static int push_rt_task(struct rq *rq)
826 {
827         struct task_struct *next_task;
828         struct rq *lowest_rq;
829         int ret = 0;
830         int paranoid = RT_MAX_TRIES;
831
832         if (!rq->rt.overloaded)
833                 return 0;
834
835         next_task = pick_next_highest_task_rt(rq, -1);
836         if (!next_task)
837                 return 0;
838
839  retry:
840         if (unlikely(next_task == rq->curr)) {
841                 WARN_ON(1);
842                 return 0;
843         }
844
845         /*
846          * It's possible that next_task slipped in with a
847          * higher priority than current. If that's the case,
848          * just reschedule current.
849          */
850         if (unlikely(next_task->prio < rq->curr->prio)) {
851                 resched_task(rq->curr);
852                 return 0;
853         }
854
855         /* We might release rq lock */
856         get_task_struct(next_task);
857
858         /* find_lock_lowest_rq locks the rq if found */
859         lowest_rq = find_lock_lowest_rq(next_task, rq);
860         if (!lowest_rq) {
861                 struct task_struct *task;
862                 /*
863                  * find_lock_lowest_rq releases rq->lock
864                  * so it is possible that next_task has changed.
865                  * If it has, then try again.
866                  */
867                 task = pick_next_highest_task_rt(rq, -1);
868                 if (unlikely(task != next_task) && task && paranoid--) {
869                         put_task_struct(next_task);
870                         next_task = task;
871                         goto retry;
872                 }
873                 goto out;
874         }
875
876         deactivate_task(rq, next_task, 0);
877         set_task_cpu(next_task, lowest_rq->cpu);
878         activate_task(lowest_rq, next_task, 0);
879
880         resched_task(lowest_rq->curr);
881
882         spin_unlock(&lowest_rq->lock);
883
884         ret = 1;
885 out:
886         put_task_struct(next_task);
887
888         return ret;
889 }
890
891 /*
892  * TODO: Currently we just use the second highest prio task on
893  *       the queue, and stop when it can't migrate (or there's
894  *       no more RT tasks).  There may be a case where a lower
895  *       priority RT task has a different affinity than the
896  *       higher RT task. In this case the lower RT task could
897  *       possibly be able to migrate whereas the higher priority
898  *       RT task could not.  We currently ignore this issue.
899  *       Enhancements are welcome!
900  */
901 static void push_rt_tasks(struct rq *rq)
902 {
903         /* push_rt_task will return true if it moved an RT task */
904         while (push_rt_task(rq))
905                 ;
906 }
907
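/*
 * Pull RT tasks towards this runqueue from CPUs marked RT-overloaded in
 * our root domain, but only tasks that would run here ahead of the task
 * we are about to schedule.  Returns 1 if a task was pulled or this
 * runqueue's next task may have changed.
 */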
908 static int pull_rt_task(struct rq *this_rq)
909 {
910         int this_cpu = this_rq->cpu, ret = 0, cpu;
911         struct task_struct *p, *next;
912         struct rq *src_rq;
913
914         if (likely(!rt_overloaded(this_rq)))
915                 return 0;
916
917         next = pick_next_task_rt(this_rq);
918
919         for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
920                 if (this_cpu == cpu)
921                         continue;
922
923                 src_rq = cpu_rq(cpu);
924                 /*
925                  * We can potentially drop this_rq's lock in
926                  * double_lock_balance, and another CPU could
927                  * steal our next task - hence we must cause
928                  * the caller to recalculate the next task
929                  * in that case:
930                  */
931                 if (double_lock_balance(this_rq, src_rq)) {
932                         struct task_struct *old_next = next;
933
934                         next = pick_next_task_rt(this_rq);
935                         if (next != old_next)
936                                 ret = 1;
937                 }
938
939                 /*
940                  * Are there still pullable RT tasks?
941                  */
942                 if (src_rq->rt.rt_nr_running <= 1)
943                         goto skip;
944
945                 p = pick_next_highest_task_rt(src_rq, this_cpu);
946
947                 /*
948                  * Do we have an RT task that preempts
949                  * the to-be-scheduled task?
950                  */
951                 if (p && (!next || (p->prio < next->prio))) {
952                         WARN_ON(p == src_rq->curr);
953                         WARN_ON(!p->se.on_rq);
954
955                         /*
956                          * There's a chance that p is higher in priority
957                          * than what's currently running on its cpu.
958                          * This happens when p is just waking up and hasn't
959                          * had a chance to schedule yet.  We only pull
960                          * p if it is not higher in priority than the
961                          * current task on its run queue, and this_rq's
962                          * next task is not higher in prio than
963                          * that current task either.
964                          */
965                         if (p->prio < src_rq->curr->prio ||
966                             (next && next->prio < src_rq->curr->prio))
967                                 goto skip;
968
969                         ret = 1;
970
971                         deactivate_task(src_rq, p, 0);
972                         set_task_cpu(p, this_cpu);
973                         activate_task(this_rq, p, 0);
974                         /*
975                          * We continue with the search, just in
976                          * case there's an even higher prio task
977                          * in another runqueue. (low likelihood
978                          * but possible)
979                          *
980                          * Update next so that we won't pick a task
981                          * on another cpu with a priority lower (or equal)
982                          * than the one we just picked.
983                          */
984                         next = p;
985
986                 }
987  skip:
988                 spin_unlock(&src_rq->lock);
989         }
990
991         return ret;
992 }
993
994 static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
995 {
996         /* Try to pull RT tasks here if we lower this rq's prio */
997         if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
998                 pull_rt_task(rq);
999 }
1000
1001 static void post_schedule_rt(struct rq *rq)
1002 {
1003         /*
1004          * If we have more than one rt_task queued, then
1005          * see if we can push the other rt_tasks off to other CPUs.
1006          * Note we may release the rq lock, and since
1007          * the lock was owned by prev, we need to release it
1008          * first via finish_lock_switch and then reacquire it here.
1009          */
1010         if (unlikely(rq->rt.overloaded)) {
1011                 spin_lock_irq(&rq->lock);
1012                 push_rt_tasks(rq);
1013                 spin_unlock_irq(&rq->lock);
1014         }
1015 }
1016
1017
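/*
 * A task woke up on this runqueue but will not run here right away (it
 * does not beat the highest queued priority); if the runqueue is
 * overloaded, use the opportunity to push RT tasks elsewhere.
 */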
1018 static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1019 {
1020         if (!task_running(rq, p) &&
1021             (p->prio >= rq->rt.highest_prio) &&
1022             rq->rt.overloaded)
1023                 push_rt_tasks(rq);
1024 }
1025
1026 static unsigned long
1027 load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1028                 unsigned long max_load_move,
1029                 struct sched_domain *sd, enum cpu_idle_type idle,
1030                 int *all_pinned, int *this_best_prio)
1031 {
1032         /* don't touch RT tasks */
1033         return 0;
1034 }
1035
1036 static int
1037 move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1038                  struct sched_domain *sd, enum cpu_idle_type idle)
1039 {
1040         /* don't touch RT tasks */
1041         return 0;
1042 }
1043
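/*
 * An affinity change can turn a pinned RT task into a migratable one (or
 * vice versa), so adjust rt_nr_migratory and re-evaluate the runqueue's
 * overload state before recording the new mask.
 */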
1044 static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
1045 {
1046         int weight = cpus_weight(*new_mask);
1047
1048         BUG_ON(!rt_task(p));
1049
1050         /*
1051          * Update the migration status of the RQ if we have an RT task
1052          * which is running AND changing its weight value.
1053          */
1054         if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1055                 struct rq *rq = task_rq(p);
1056
1057                 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1058                         rq->rt.rt_nr_migratory++;
1059                 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1060                         BUG_ON(!rq->rt.rt_nr_migratory);
1061                         rq->rt.rt_nr_migratory--;
1062                 }
1063
1064                 update_rt_migration(rq);
1065         }
1066
1067         p->cpus_allowed    = *new_mask;
1068         p->rt.nr_cpus_allowed = weight;
1069 }
1070
1071 /* Assumes rq->lock is held */
1072 static void join_domain_rt(struct rq *rq)
1073 {
1074         if (rq->rt.overloaded)
1075                 rt_set_overload(rq);
1076 }
1077
1078 /* Assumes rq->lock is held */
1079 static void leave_domain_rt(struct rq *rq)
1080 {
1081         if (rq->rt.overloaded)
1082                 rt_clear_overload(rq);
1083 }
1084
1085 /*
1086  * When we switch from the rt queue, we may end up in a position
1087  * where we want to pull RT tasks from other runqueues.
1088  */
1089 static void switched_from_rt(struct rq *rq, struct task_struct *p,
1090                            int running)
1091 {
1092         /*
1093          * If there are other RT tasks then we will reschedule
1094          * and the scheduling of the other RT tasks will handle
1095          * the balancing. But if we are the last RT task
1096          * we may need to handle the pulling of RT tasks
1097          * now.
1098          */
1099         if (!rq->rt.rt_nr_running)
1100                 pull_rt_task(rq);
1101 }
1102 #endif /* CONFIG_SMP */
1103
1104 /*
1105  * When switching a task to RT, we may overload the runqueue
1106  * with RT tasks. In this case we try to push them off to
1107  * other runqueues.
1108  */
1109 static void switched_to_rt(struct rq *rq, struct task_struct *p,
1110                            int running)
1111 {
1112         int check_resched = 1;
1113
1114         /*
1115          * If we are already running, then there's nothing
1116          * that needs to be done. But if we are not running
1117          * we may need to preempt the current running task.
1118          * If that current running task is also an RT task
1119          * then see if we can move to another run queue.
1120          */
1121         if (!running) {
1122 #ifdef CONFIG_SMP
1123                 if (rq->rt.overloaded && push_rt_task(rq) &&
1124                     /* Don't resched if we changed runqueues */
1125                     rq != task_rq(p))
1126                         check_resched = 0;
1127 #endif /* CONFIG_SMP */
1128                 if (check_resched && p->prio < rq->curr->prio)
1129                         resched_task(rq->curr);
1130         }
1131 }
1132
1133 /*
1134  * Priority of the task has changed. This may cause
1135  * us to initiate a push or pull.
1136  */
1137 static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1138                             int oldprio, int running)
1139 {
1140         if (running) {
1141 #ifdef CONFIG_SMP
1142                 /*
1143                  * If our priority decreases while running, we
1144                  * may need to pull tasks to this runqueue.
1145                  */
1146                 if (oldprio < p->prio)
1147                         pull_rt_task(rq);
1148                 /*
1149                  * If there's a higher priority task waiting to run
1150                  * then reschedule. Note, the above pull_rt_task
1151                  * can release the rq lock and p could migrate.
1152                  * Only reschedule if p is still on the same runqueue.
1153                  */
1154                 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1155                         resched_task(p);
1156 #else
1157                 /* For UP simply resched on drop of prio */
1158                 if (oldprio < p->prio)
1159                         resched_task(p);
1160 #endif /* CONFIG_SMP */
1161         } else {
1162                  * This task is not running, but if its priority
1163                  * is higher than that of the current running task
1164                  * greater than the current running task
1165                  * then reschedule.
1166                  */
1167                 if (p->prio < rq->curr->prio)
1168                         resched_task(rq->curr);
1169         }
1170 }
1171
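/*
 * RLIMIT_RTTIME watchdog: count the ticks this task has spent running as
 * an RT task and, once the soft limit is exceeded, arm it_sched_expires
 * so the posix-cpu-timers code can deliver the resource-limit signal.
 */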
1172 static void watchdog(struct rq *rq, struct task_struct *p)
1173 {
1174         unsigned long soft, hard;
1175
1176         if (!p->signal)
1177                 return;
1178
1179         soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1180         hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1181
1182         if (soft != RLIM_INFINITY) {
1183                 unsigned long next;
1184
1185                 p->rt.timeout++;
1186                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1187                 if (p->rt.timeout > next)
1188                         p->it_sched_expires = p->se.sum_exec_runtime;
1189         }
1190 }
1191
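/*
 * Per-tick work for an RT task: update runtime accounting, run the
 * RLIMIT_RTTIME watchdog, and for SCHED_RR tasks refill the time slice
 * and requeue to the tail of the list once the slice is used up.
 */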
1192 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1193 {
1194         update_curr_rt(rq);
1195
1196         watchdog(rq, p);
1197
1198         /*
1199          * RR tasks need a special form of timeslice management.
1200          * FIFO tasks have no timeslices.
1201          */
1202         if (p->policy != SCHED_RR)
1203                 return;
1204
1205         if (--p->rt.time_slice)
1206                 return;
1207
1208         p->rt.time_slice = DEF_TIMESLICE;
1209
1210         /*
1211          * Requeue to the end of the queue if we are not the only element
1212          * on the queue:
1213          */
1214         if (p->rt.run_list.prev != p->rt.run_list.next) {
1215                 requeue_task_rt(rq, p);
1216                 set_tsk_need_resched(p);
1217         }
1218 }
1219
1220 static void set_curr_task_rt(struct rq *rq)
1221 {
1222         struct task_struct *p = rq->curr;
1223
1224         p->se.exec_start = rq->clock;
1225 }
1226
1227 const struct sched_class rt_sched_class = {
1228         .next                   = &fair_sched_class,
1229         .enqueue_task           = enqueue_task_rt,
1230         .dequeue_task           = dequeue_task_rt,
1231         .yield_task             = yield_task_rt,
1232 #ifdef CONFIG_SMP
1233         .select_task_rq         = select_task_rq_rt,
1234 #endif /* CONFIG_SMP */
1235
1236         .check_preempt_curr     = check_preempt_curr_rt,
1237
1238         .pick_next_task         = pick_next_task_rt,
1239         .put_prev_task          = put_prev_task_rt,
1240
1241 #ifdef CONFIG_SMP
1242         .load_balance           = load_balance_rt,
1243         .move_one_task          = move_one_task_rt,
1244         .set_cpus_allowed       = set_cpus_allowed_rt,
1245         .join_domain            = join_domain_rt,
1246         .leave_domain           = leave_domain_rt,
1247         .pre_schedule           = pre_schedule_rt,
1248         .post_schedule          = post_schedule_rt,
1249         .task_wake_up           = task_wake_up_rt,
1250         .switched_from          = switched_from_rt,
1251 #endif
1252
1253         .set_curr_task          = set_curr_task_rt,
1254         .task_tick              = task_tick_rt,
1255
1256         .prio_changed           = prio_changed_rt,
1257         .switched_to            = switched_to_rt,
1258 };