ad13e6242481d9acf7319d831a01f2b005a67e39
[uclinux-h8/linux.git] kernel/sched/rt.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4  * policies)
5  */
6 #include "sched.h"
7
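/*
 * sched_rr_timeslice is kept in jiffies; sysctl_sched_rr_timeslice holds the
 * same default converted to milliseconds for the sysctl interface.
 */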
8 int sched_rr_timeslice = RR_TIMESLICE;
9 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
10
11 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
12
13 struct rt_bandwidth def_rt_bandwidth;
14
15 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
16 {
17         struct rt_bandwidth *rt_b =
18                 container_of(timer, struct rt_bandwidth, rt_period_timer);
19         int idle = 0;
20         int overrun;
21
22         raw_spin_lock(&rt_b->rt_runtime_lock);
23         for (;;) {
24                 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
25                 if (!overrun)
26                         break;
27
28                 raw_spin_unlock(&rt_b->rt_runtime_lock);
29                 idle = do_sched_rt_period_timer(rt_b, overrun);
30                 raw_spin_lock(&rt_b->rt_runtime_lock);
31         }
32         if (idle)
33                 rt_b->rt_period_active = 0;
34         raw_spin_unlock(&rt_b->rt_runtime_lock);
35
36         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
37 }
38
39 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
40 {
41         rt_b->rt_period = ns_to_ktime(period);
42         rt_b->rt_runtime = runtime;
43
44         raw_spin_lock_init(&rt_b->rt_runtime_lock);
45
46         hrtimer_init(&rt_b->rt_period_timer,
47                         CLOCK_MONOTONIC, HRTIMER_MODE_REL);
48         rt_b->rt_period_timer.function = sched_rt_period_timer;
49 }
50
51 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
52 {
53         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
54                 return;
55
56         raw_spin_lock(&rt_b->rt_runtime_lock);
57         if (!rt_b->rt_period_active) {
58                 rt_b->rt_period_active = 1;
59                 /*
60                  * SCHED_DEADLINE updates the bandwidth, as a runaway
61                  * RT task with a DL task could hog a CPU. But DL does
62                  * not reset the period. If a deadline task was running
63                  * without an RT task running, it can cause RT tasks to
64                  * throttle when they start up. Kick the timer right away
65                  * to update the period.
66                  */
67                 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
68                 hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
69         }
70         raw_spin_unlock(&rt_b->rt_runtime_lock);
71 }
72
73 void init_rt_rq(struct rt_rq *rt_rq)
74 {
75         struct rt_prio_array *array;
76         int i;
77
78         array = &rt_rq->active;
79         for (i = 0; i < MAX_RT_PRIO; i++) {
80                 INIT_LIST_HEAD(array->queue + i);
81                 __clear_bit(i, array->bitmap);
82         }
83         /* delimiter for bitsearch: */
84         __set_bit(MAX_RT_PRIO, array->bitmap);
85
86 #if defined CONFIG_SMP
87         rt_rq->highest_prio.curr = MAX_RT_PRIO;
88         rt_rq->highest_prio.next = MAX_RT_PRIO;
89         rt_rq->rt_nr_migratory = 0;
90         rt_rq->overloaded = 0;
91         plist_head_init(&rt_rq->pushable_tasks);
92 #endif /* CONFIG_SMP */
93         /* We start in dequeued state, because no RT tasks are queued */
94         rt_rq->rt_queued = 0;
95
96         rt_rq->rt_time = 0;
97         rt_rq->rt_throttled = 0;
98         rt_rq->rt_runtime = 0;
99         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
100 }
101
102 #ifdef CONFIG_RT_GROUP_SCHED
103 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
104 {
105         hrtimer_cancel(&rt_b->rt_period_timer);
106 }
107
108 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
109
110 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
111 {
112 #ifdef CONFIG_SCHED_DEBUG
113         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
114 #endif
115         return container_of(rt_se, struct task_struct, rt);
116 }
117
118 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
119 {
120         return rt_rq->rq;
121 }
122
123 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
124 {
125         return rt_se->rt_rq;
126 }
127
128 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
129 {
130         struct rt_rq *rt_rq = rt_se->rt_rq;
131
132         return rt_rq->rq;
133 }
134
135 void free_rt_sched_group(struct task_group *tg)
136 {
137         int i;
138
139         if (tg->rt_se)
140                 destroy_rt_bandwidth(&tg->rt_bandwidth);
141
142         for_each_possible_cpu(i) {
143                 if (tg->rt_rq)
144                         kfree(tg->rt_rq[i]);
145                 if (tg->rt_se)
146                         kfree(tg->rt_se[i]);
147         }
148
149         kfree(tg->rt_rq);
150         kfree(tg->rt_se);
151 }
152
153 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
154                 struct sched_rt_entity *rt_se, int cpu,
155                 struct sched_rt_entity *parent)
156 {
157         struct rq *rq = cpu_rq(cpu);
158
159         rt_rq->highest_prio.curr = MAX_RT_PRIO;
160         rt_rq->rt_nr_boosted = 0;
161         rt_rq->rq = rq;
162         rt_rq->tg = tg;
163
164         tg->rt_rq[cpu] = rt_rq;
165         tg->rt_se[cpu] = rt_se;
166
167         if (!rt_se)
168                 return;
169
170         if (!parent)
171                 rt_se->rt_rq = &rq->rt;
172         else
173                 rt_se->rt_rq = parent->my_q;
174
175         rt_se->my_q = rt_rq;
176         rt_se->parent = parent;
177         INIT_LIST_HEAD(&rt_se->run_list);
178 }
179
180 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
181 {
182         struct rt_rq *rt_rq;
183         struct sched_rt_entity *rt_se;
184         int i;
185
186         tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
187         if (!tg->rt_rq)
188                 goto err;
189         tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
190         if (!tg->rt_se)
191                 goto err;
192
193         init_rt_bandwidth(&tg->rt_bandwidth,
194                         ktime_to_ns(def_rt_bandwidth.rt_period), 0);
195
196         for_each_possible_cpu(i) {
197                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
198                                      GFP_KERNEL, cpu_to_node(i));
199                 if (!rt_rq)
200                         goto err;
201
202                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
203                                      GFP_KERNEL, cpu_to_node(i));
204                 if (!rt_se)
205                         goto err_free_rq;
206
207                 init_rt_rq(rt_rq);
208                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
209                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
210         }
211
212         return 1;
213
214 err_free_rq:
215         kfree(rt_rq);
216 err:
217         return 0;
218 }
219
220 #else /* CONFIG_RT_GROUP_SCHED */
221
222 #define rt_entity_is_task(rt_se) (1)
223
224 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
225 {
226         return container_of(rt_se, struct task_struct, rt);
227 }
228
229 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
230 {
231         return container_of(rt_rq, struct rq, rt);
232 }
233
234 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
235 {
236         struct task_struct *p = rt_task_of(rt_se);
237
238         return task_rq(p);
239 }
240
241 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
242 {
243         struct rq *rq = rq_of_rt_se(rt_se);
244
245         return &rq->rt;
246 }
247
248 void free_rt_sched_group(struct task_group *tg) { }
249
250 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
251 {
252         return 1;
253 }
254 #endif /* CONFIG_RT_GROUP_SCHED */
255
256 #ifdef CONFIG_SMP
257
258 static void pull_rt_task(struct rq *this_rq);
259
260 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
261 {
262         /* Try to pull RT tasks here if we lower this rq's prio */
263         return rq->rt.highest_prio.curr > prev->prio;
264 }
265
266 static inline int rt_overloaded(struct rq *rq)
267 {
268         return atomic_read(&rq->rd->rto_count);
269 }
270
271 static inline void rt_set_overload(struct rq *rq)
272 {
273         if (!rq->online)
274                 return;
275
276         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
277         /*
278          * Make sure the mask is visible before we set
279          * the overload count. That is checked to determine
280          * if we should look at the mask. It would be a shame
281          * if we looked at the mask, but the mask was not
282          * updated yet.
283          *
284          * Matched by the barrier in pull_rt_task().
285          */
286         smp_wmb();
287         atomic_inc(&rq->rd->rto_count);
288 }
289
290 static inline void rt_clear_overload(struct rq *rq)
291 {
292         if (!rq->online)
293                 return;
294
295         /* the order here really doesn't matter */
296         atomic_dec(&rq->rd->rto_count);
297         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
298 }
299
300 static void update_rt_migration(struct rt_rq *rt_rq)
301 {
302         if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
303                 if (!rt_rq->overloaded) {
304                         rt_set_overload(rq_of_rt_rq(rt_rq));
305                         rt_rq->overloaded = 1;
306                 }
307         } else if (rt_rq->overloaded) {
308                 rt_clear_overload(rq_of_rt_rq(rt_rq));
309                 rt_rq->overloaded = 0;
310         }
311 }
312
313 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
314 {
315         struct task_struct *p;
316
317         if (!rt_entity_is_task(rt_se))
318                 return;
319
320         p = rt_task_of(rt_se);
321         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
322
323         rt_rq->rt_nr_total++;
324         if (p->nr_cpus_allowed > 1)
325                 rt_rq->rt_nr_migratory++;
326
327         update_rt_migration(rt_rq);
328 }
329
330 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
331 {
332         struct task_struct *p;
333
334         if (!rt_entity_is_task(rt_se))
335                 return;
336
337         p = rt_task_of(rt_se);
338         rt_rq = &rq_of_rt_rq(rt_rq)->rt;
339
340         rt_rq->rt_nr_total--;
341         if (p->nr_cpus_allowed > 1)
342                 rt_rq->rt_nr_migratory--;
343
344         update_rt_migration(rt_rq);
345 }
346
347 static inline int has_pushable_tasks(struct rq *rq)
348 {
349         return !plist_head_empty(&rq->rt.pushable_tasks);
350 }
351
352 static DEFINE_PER_CPU(struct callback_head, rt_push_head);
353 static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
354
355 static void push_rt_tasks(struct rq *);
356 static void pull_rt_task(struct rq *);
357
358 static inline void rt_queue_push_tasks(struct rq *rq)
359 {
360         if (!has_pushable_tasks(rq))
361                 return;
362
363         queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
364 }
365
366 static inline void rt_queue_pull_task(struct rq *rq)
367 {
368         queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
369 }
370
371 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
372 {
373         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
374         plist_node_init(&p->pushable_tasks, p->prio);
375         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
376
377         /* Update the highest prio pushable task */
378         if (p->prio < rq->rt.highest_prio.next)
379                 rq->rt.highest_prio.next = p->prio;
380 }
381
382 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
383 {
384         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
385
386         /* Update the new highest prio pushable task */
387         if (has_pushable_tasks(rq)) {
388                 p = plist_first_entry(&rq->rt.pushable_tasks,
389                                       struct task_struct, pushable_tasks);
390                 rq->rt.highest_prio.next = p->prio;
391         } else
392                 rq->rt.highest_prio.next = MAX_RT_PRIO;
393 }
394
395 #else
396
397 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
398 {
399 }
400
401 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
402 {
403 }
404
405 static inline
406 void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
407 {
408 }
409
410 static inline
411 void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
412 {
413 }
414
415 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
416 {
417         return false;
418 }
419
420 static inline void pull_rt_task(struct rq *this_rq)
421 {
422 }
423
424 static inline void rt_queue_push_tasks(struct rq *rq)
425 {
426 }
427 #endif /* CONFIG_SMP */
428
429 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
430 static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
431
432 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
433 {
434         return rt_se->on_rq;
435 }
436
437 #ifdef CONFIG_RT_GROUP_SCHED
438
439 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
440 {
441         if (!rt_rq->tg)
442                 return RUNTIME_INF;
443
444         return rt_rq->rt_runtime;
445 }
446
447 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
448 {
449         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
450 }
451
452 typedef struct task_group *rt_rq_iter_t;
453
454 static inline struct task_group *next_task_group(struct task_group *tg)
455 {
456         do {
457                 tg = list_entry_rcu(tg->list.next,
458                         typeof(struct task_group), list);
459         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
460
461         if (&tg->list == &task_groups)
462                 tg = NULL;
463
464         return tg;
465 }
466
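/*
 * Walk every rt_rq attached to @rq: one per task group, in task_groups list
 * order; next_task_group() skips autogroup entries.
 */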
467 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
468         for (iter = container_of(&task_groups, typeof(*iter), list);    \
469                 (iter = next_task_group(iter)) &&                       \
470                 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
471
472 #define for_each_sched_rt_entity(rt_se) \
473         for (; rt_se; rt_se = rt_se->parent)
474
475 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
476 {
477         return rt_se->my_q;
478 }
479
480 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
481 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
482
483 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
484 {
485         struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
486         struct rq *rq = rq_of_rt_rq(rt_rq);
487         struct sched_rt_entity *rt_se;
488
489         int cpu = cpu_of(rq);
490
491         rt_se = rt_rq->tg->rt_se[cpu];
492
493         if (rt_rq->rt_nr_running) {
494                 if (!rt_se)
495                         enqueue_top_rt_rq(rt_rq);
496                 else if (!on_rt_rq(rt_se))
497                         enqueue_rt_entity(rt_se, 0);
498
499                 if (rt_rq->highest_prio.curr < curr->prio)
500                         resched_curr(rq);
501         }
502 }
503
504 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
505 {
506         struct sched_rt_entity *rt_se;
507         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
508
509         rt_se = rt_rq->tg->rt_se[cpu];
510
511         if (!rt_se)
512                 dequeue_top_rt_rq(rt_rq);
513         else if (on_rt_rq(rt_se))
514                 dequeue_rt_entity(rt_se, 0);
515 }
516
517 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
518 {
519         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
520 }
521
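/*
 * A task entity is boosted when priority inheritance has raised its prio
 * above normal_prio; a group entity is boosted while it contains any
 * boosted task (rt_nr_boosted != 0).
 */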
522 static int rt_se_boosted(struct sched_rt_entity *rt_se)
523 {
524         struct rt_rq *rt_rq = group_rt_rq(rt_se);
525         struct task_struct *p;
526
527         if (rt_rq)
528                 return !!rt_rq->rt_nr_boosted;
529
530         p = rt_task_of(rt_se);
531         return p->prio != p->normal_prio;
532 }
533
534 #ifdef CONFIG_SMP
535 static inline const struct cpumask *sched_rt_period_mask(void)
536 {
537         return this_rq()->rd->span;
538 }
539 #else
540 static inline const struct cpumask *sched_rt_period_mask(void)
541 {
542         return cpu_online_mask;
543 }
544 #endif
545
546 static inline
547 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
548 {
549         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
550 }
551
552 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
553 {
554         return &rt_rq->tg->rt_bandwidth;
555 }
556
557 #else /* !CONFIG_RT_GROUP_SCHED */
558
559 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
560 {
561         return rt_rq->rt_runtime;
562 }
563
564 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
565 {
566         return ktime_to_ns(def_rt_bandwidth.rt_period);
567 }
568
569 typedef struct rt_rq *rt_rq_iter_t;
570
571 #define for_each_rt_rq(rt_rq, iter, rq) \
572         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
573
574 #define for_each_sched_rt_entity(rt_se) \
575         for (; rt_se; rt_se = NULL)
576
577 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
578 {
579         return NULL;
580 }
581
582 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
583 {
584         struct rq *rq = rq_of_rt_rq(rt_rq);
585
586         if (!rt_rq->rt_nr_running)
587                 return;
588
589         enqueue_top_rt_rq(rt_rq);
590         resched_curr(rq);
591 }
592
593 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
594 {
595         dequeue_top_rt_rq(rt_rq);
596 }
597
598 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
599 {
600         return rt_rq->rt_throttled;
601 }
602
603 static inline const struct cpumask *sched_rt_period_mask(void)
604 {
605         return cpu_online_mask;
606 }
607
608 static inline
609 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
610 {
611         return &cpu_rq(cpu)->rt;
612 }
613
614 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
615 {
616         return &def_rt_bandwidth;
617 }
618
619 #endif /* CONFIG_RT_GROUP_SCHED */
620
621 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
622 {
623         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
624
625         return (hrtimer_active(&rt_b->rt_period_timer) ||
626                 rt_rq->rt_time < rt_b->rt_runtime);
627 }
628
629 #ifdef CONFIG_SMP
630 /*
631  * We ran out of runtime, see if we can borrow some from our neighbours.
632  */
633 static void do_balance_runtime(struct rt_rq *rt_rq)
634 {
635         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
636         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
637         int i, weight;
638         u64 rt_period;
639
640         weight = cpumask_weight(rd->span);
641
642         raw_spin_lock(&rt_b->rt_runtime_lock);
643         rt_period = ktime_to_ns(rt_b->rt_period);
644         for_each_cpu(i, rd->span) {
645                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
646                 s64 diff;
647
648                 if (iter == rt_rq)
649                         continue;
650
651                 raw_spin_lock(&iter->rt_runtime_lock);
652                 /*
653                  * Either all rqs have inf runtime and there's nothing to steal,
654                  * or __disable_runtime() below sets a specific rq to inf to
655                  * indicate it's been disabled and disallow stealing.
656                  */
657                 if (iter->rt_runtime == RUNTIME_INF)
658                         goto next;
659
660                 /*
661                  * From runqueues with spare time, take 1/n part of their
662                  * spare time, but no more than our period.
663                  */
664                 diff = iter->rt_runtime - iter->rt_time;
665                 if (diff > 0) {
666                         diff = div_u64((u64)diff, weight);
667                         if (rt_rq->rt_runtime + diff > rt_period)
668                                 diff = rt_period - rt_rq->rt_runtime;
669                         iter->rt_runtime -= diff;
670                         rt_rq->rt_runtime += diff;
671                         if (rt_rq->rt_runtime == rt_period) {
672                                 raw_spin_unlock(&iter->rt_runtime_lock);
673                                 break;
674                         }
675                 }
676 next:
677                 raw_spin_unlock(&iter->rt_runtime_lock);
678         }
679         raw_spin_unlock(&rt_b->rt_runtime_lock);
680 }
681
682 /*
683  * Ensure this RQ takes back all the runtime it lent to its neighbours.
684  */
685 static void __disable_runtime(struct rq *rq)
686 {
687         struct root_domain *rd = rq->rd;
688         rt_rq_iter_t iter;
689         struct rt_rq *rt_rq;
690
691         if (unlikely(!scheduler_running))
692                 return;
693
694         for_each_rt_rq(rt_rq, iter, rq) {
695                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
696                 s64 want;
697                 int i;
698
699                 raw_spin_lock(&rt_b->rt_runtime_lock);
700                 raw_spin_lock(&rt_rq->rt_runtime_lock);
701                 /*
702                  * Either we're all inf and nobody needs to borrow, or we're
703                  * already disabled and thus have nothing to do, or we have
704                  * exactly the right amount of runtime to take out.
705                  */
706                 if (rt_rq->rt_runtime == RUNTIME_INF ||
707                                 rt_rq->rt_runtime == rt_b->rt_runtime)
708                         goto balanced;
709                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
710
711                 /*
712                  * Calculate the difference between what we started out with
713                  * and what we currently have; that's the amount of runtime
714                  * we lent out and now have to reclaim.
715                  */
716                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
717
718                 /*
719                  * Greedy reclaim, take back as much as we can.
720                  */
721                 for_each_cpu(i, rd->span) {
722                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
723                         s64 diff;
724
725                         /*
726                          * Can't reclaim from ourselves or disabled runqueues.
727                          */
728                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
729                                 continue;
730
731                         raw_spin_lock(&iter->rt_runtime_lock);
732                         if (want > 0) {
733                                 diff = min_t(s64, iter->rt_runtime, want);
734                                 iter->rt_runtime -= diff;
735                                 want -= diff;
736                         } else {
737                                 iter->rt_runtime -= want;
738                                 want -= want;
739                         }
740                         raw_spin_unlock(&iter->rt_runtime_lock);
741
742                         if (!want)
743                                 break;
744                 }
745
746                 raw_spin_lock(&rt_rq->rt_runtime_lock);
747                 /*
748                  * We cannot be left wanting - that would mean some runtime
749                  * leaked out of the system.
750                  */
751                 BUG_ON(want);
752 balanced:
753                 /*
754                  * Disable all the borrow logic by pretending we have inf
755                  * runtime - in which case borrowing doesn't make sense.
756                  */
757                 rt_rq->rt_runtime = RUNTIME_INF;
758                 rt_rq->rt_throttled = 0;
759                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
760                 raw_spin_unlock(&rt_b->rt_runtime_lock);
761
762                 /* Make rt_rq available for pick_next_task() */
763                 sched_rt_rq_enqueue(rt_rq);
764         }
765 }
766
767 static void __enable_runtime(struct rq *rq)
768 {
769         rt_rq_iter_t iter;
770         struct rt_rq *rt_rq;
771
772         if (unlikely(!scheduler_running))
773                 return;
774
775         /*
776          * Reset each runqueue's bandwidth settings
777          */
778         for_each_rt_rq(rt_rq, iter, rq) {
779                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
780
781                 raw_spin_lock(&rt_b->rt_runtime_lock);
782                 raw_spin_lock(&rt_rq->rt_runtime_lock);
783                 rt_rq->rt_runtime = rt_b->rt_runtime;
784                 rt_rq->rt_time = 0;
785                 rt_rq->rt_throttled = 0;
786                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
787                 raw_spin_unlock(&rt_b->rt_runtime_lock);
788         }
789 }
790
791 static void balance_runtime(struct rt_rq *rt_rq)
792 {
793         if (!sched_feat(RT_RUNTIME_SHARE))
794                 return;
795
796         if (rt_rq->rt_time > rt_rq->rt_runtime) {
797                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
798                 do_balance_runtime(rt_rq);
799                 raw_spin_lock(&rt_rq->rt_runtime_lock);
800         }
801 }
802 #else /* !CONFIG_SMP */
803 static inline void balance_runtime(struct rt_rq *rt_rq) {}
804 #endif /* CONFIG_SMP */
805
806 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
807 {
808         int i, idle = 1, throttled = 0;
809         const struct cpumask *span;
810
811         span = sched_rt_period_mask();
812 #ifdef CONFIG_RT_GROUP_SCHED
813         /*
814          * FIXME: isolated CPUs should really leave the root task group,
815          * whether they are isolcpus or were isolated via cpusets, lest
816          * the timer run on a CPU which does not service all runqueues,
817          * potentially leaving other CPUs indefinitely throttled.  If
818          * isolation is really required, the user will turn the throttle
819          * off to kill the perturbations it causes anyway.  Meanwhile,
820          * this maintains functionality for boot and/or troubleshooting.
821          */
822         if (rt_b == &root_task_group.rt_bandwidth)
823                 span = cpu_online_mask;
824 #endif
825         for_each_cpu(i, span) {
826                 int enqueue = 0;
827                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
828                 struct rq *rq = rq_of_rt_rq(rt_rq);
829                 int skip;
830
831                 /*
832                  * When span == cpu_online_mask, taking each rq->lock
833                  * can be time-consuming. Try to avoid it when possible.
834                  */
835                 raw_spin_lock(&rt_rq->rt_runtime_lock);
836                 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
837                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
838                 if (skip)
839                         continue;
840
841                 raw_spin_lock(&rq->lock);
842                 update_rq_clock(rq);
843
844                 if (rt_rq->rt_time) {
845                         u64 runtime;
846
847                         raw_spin_lock(&rt_rq->rt_runtime_lock);
848                         if (rt_rq->rt_throttled)
849                                 balance_runtime(rt_rq);
850                         runtime = rt_rq->rt_runtime;
851                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
852                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
853                                 rt_rq->rt_throttled = 0;
854                                 enqueue = 1;
855
856                                 /*
857                                  * When we're idle and a woken (rt) task is
858                                  * throttled check_preempt_curr() will set
859                                  * skip_update and the time between the wakeup
860                                  * and this unthrottle will get accounted as
861                                  * 'runtime'.
862                                  */
863                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
864                                         rq_clock_skip_update(rq, false);
865                         }
866                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
867                                 idle = 0;
868                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
869                 } else if (rt_rq->rt_nr_running) {
870                         idle = 0;
871                         if (!rt_rq_throttled(rt_rq))
872                                 enqueue = 1;
873                 }
874                 if (rt_rq->rt_throttled)
875                         throttled = 1;
876
877                 if (enqueue)
878                         sched_rt_rq_enqueue(rt_rq);
879                 raw_spin_unlock(&rq->lock);
880         }
881
882         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
883                 return 1;
884
885         return idle;
886 }
887
888 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
889 {
890 #ifdef CONFIG_RT_GROUP_SCHED
891         struct rt_rq *rt_rq = group_rt_rq(rt_se);
892
893         if (rt_rq)
894                 return rt_rq->highest_prio.curr;
895 #endif
896
897         return rt_task_of(rt_se)->prio;
898 }
899
900 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
901 {
902         u64 runtime = sched_rt_runtime(rt_rq);
903
904         if (rt_rq->rt_throttled)
905                 return rt_rq_throttled(rt_rq);
906
907         if (runtime >= sched_rt_period(rt_rq))
908                 return 0;
909
910         balance_runtime(rt_rq);
911         runtime = sched_rt_runtime(rt_rq);
912         if (runtime == RUNTIME_INF)
913                 return 0;
914
915         if (rt_rq->rt_time > runtime) {
916                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
917
918                 /*
919                  * Don't actually throttle groups that have no runtime assigned
920                  * but accrue some time due to boosting.
921                  */
922                 if (likely(rt_b->rt_runtime)) {
923                         rt_rq->rt_throttled = 1;
924                         printk_deferred_once("sched: RT throttling activated\n");
925                 } else {
926                         /*
927                          * In case we did anyway, make it go away;
928                          * replenishment is a joke, since it will replenish us
929                          * with exactly 0 ns.
930                          */
931                         rt_rq->rt_time = 0;
932                 }
933
934                 if (rt_rq_throttled(rt_rq)) {
935                         sched_rt_rq_dequeue(rt_rq);
936                         return 1;
937                 }
938         }
939
940         return 0;
941 }
942
943 /*
944  * Update the current task's runtime statistics. Skip current tasks that
945  * are not in our scheduling class.
946  */
947 static void update_curr_rt(struct rq *rq)
948 {
949         struct task_struct *curr = rq->curr;
950         struct sched_rt_entity *rt_se = &curr->rt;
951         u64 delta_exec;
952         u64 now;
953
954         if (curr->sched_class != &rt_sched_class)
955                 return;
956
957         now = rq_clock_task(rq);
958         delta_exec = now - curr->se.exec_start;
959         if (unlikely((s64)delta_exec <= 0))
960                 return;
961
962         schedstat_set(curr->se.statistics.exec_max,
963                       max(curr->se.statistics.exec_max, delta_exec));
964
965         curr->se.sum_exec_runtime += delta_exec;
966         account_group_exec_runtime(curr, delta_exec);
967
968         curr->se.exec_start = now;
969         cgroup_account_cputime(curr, delta_exec);
970
971         sched_rt_avg_update(rq, delta_exec);
972
973         if (!rt_bandwidth_enabled())
974                 return;
975
976         for_each_sched_rt_entity(rt_se) {
977                 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
978
979                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
980                         raw_spin_lock(&rt_rq->rt_runtime_lock);
981                         rt_rq->rt_time += delta_exec;
982                         if (sched_rt_runtime_exceeded(rt_rq))
983                                 resched_curr(rq);
984                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
985                 }
986         }
987 }
988
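/*
 * dequeue_top_rt_rq()/enqueue_top_rt_rq() keep rq->nr_running in sync with
 * the root rt_rq, so that a throttled or emptied rt_rq is no longer counted
 * as runnable by the core scheduler.
 */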
989 static void
990 dequeue_top_rt_rq(struct rt_rq *rt_rq)
991 {
992         struct rq *rq = rq_of_rt_rq(rt_rq);
993
994         BUG_ON(&rq->rt != rt_rq);
995
996         if (!rt_rq->rt_queued)
997                 return;
998
999         BUG_ON(!rq->nr_running);
1000
1001         sub_nr_running(rq, rt_rq->rt_nr_running);
1002         rt_rq->rt_queued = 0;
1003
1004         /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1005         cpufreq_update_util(rq, 0);
1006 }
1007
1008 static void
1009 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1010 {
1011         struct rq *rq = rq_of_rt_rq(rt_rq);
1012
1013         BUG_ON(&rq->rt != rt_rq);
1014
1015         if (rt_rq->rt_queued)
1016                 return;
1017         if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
1018                 return;
1019
1020         add_nr_running(rq, rt_rq->rt_nr_running);
1021         rt_rq->rt_queued = 1;
1022
1023         /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1024         cpufreq_update_util(rq, 0);
1025 }
1026
1027 #if defined CONFIG_SMP
1028
1029 static void
1030 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1031 {
1032         struct rq *rq = rq_of_rt_rq(rt_rq);
1033
1034 #ifdef CONFIG_RT_GROUP_SCHED
1035         /*
1036          * Change rq's cpupri only if rt_rq is the top queue.
1037          */
1038         if (&rq->rt != rt_rq)
1039                 return;
1040 #endif
1041         if (rq->online && prio < prev_prio)
1042                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1043 }
1044
1045 static void
1046 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1047 {
1048         struct rq *rq = rq_of_rt_rq(rt_rq);
1049
1050 #ifdef CONFIG_RT_GROUP_SCHED
1051         /*
1052          * Change rq's cpupri only if rt_rq is the top queue.
1053          */
1054         if (&rq->rt != rt_rq)
1055                 return;
1056 #endif
1057         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1058                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1059 }
1060
1061 #else /* CONFIG_SMP */
1062
1063 static inline
1064 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1065 static inline
1066 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1067
1068 #endif /* CONFIG_SMP */
1069
1070 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1071 static void
1072 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1073 {
1074         int prev_prio = rt_rq->highest_prio.curr;
1075
1076         if (prio < prev_prio)
1077                 rt_rq->highest_prio.curr = prio;
1078
1079         inc_rt_prio_smp(rt_rq, prio, prev_prio);
1080 }
1081
1082 static void
1083 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1084 {
1085         int prev_prio = rt_rq->highest_prio.curr;
1086
1087         if (rt_rq->rt_nr_running) {
1088
1089                 WARN_ON(prio < prev_prio);
1090
1091                 /*
1092                  * This may have been our highest task, and therefore
1093                  * we may have some recomputation to do
1094                  */
1095                 if (prio == prev_prio) {
1096                         struct rt_prio_array *array = &rt_rq->active;
1097
1098                         rt_rq->highest_prio.curr =
1099                                 sched_find_first_bit(array->bitmap);
1100                 }
1101
1102         } else
1103                 rt_rq->highest_prio.curr = MAX_RT_PRIO;
1104
1105         dec_rt_prio_smp(rt_rq, prio, prev_prio);
1106 }
1107
1108 #else
1109
1110 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1111 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1112
1113 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1114
1115 #ifdef CONFIG_RT_GROUP_SCHED
1116
1117 static void
1118 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1119 {
1120         if (rt_se_boosted(rt_se))
1121                 rt_rq->rt_nr_boosted++;
1122
1123         if (rt_rq->tg)
1124                 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1125 }
1126
1127 static void
1128 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1129 {
1130         if (rt_se_boosted(rt_se))
1131                 rt_rq->rt_nr_boosted--;
1132
1133         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1134 }
1135
1136 #else /* CONFIG_RT_GROUP_SCHED */
1137
1138 static void
1139 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1140 {
1141         start_rt_bandwidth(&def_rt_bandwidth);
1142 }
1143
1144 static inline
1145 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1146
1147 #endif /* CONFIG_RT_GROUP_SCHED */
1148
1149 static inline
1150 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1151 {
1152         struct rt_rq *group_rq = group_rt_rq(rt_se);
1153
1154         if (group_rq)
1155                 return group_rq->rt_nr_running;
1156         else
1157                 return 1;
1158 }
1159
1160 static inline
1161 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1162 {
1163         struct rt_rq *group_rq = group_rt_rq(rt_se);
1164         struct task_struct *tsk;
1165
1166         if (group_rq)
1167                 return group_rq->rr_nr_running;
1168
1169         tsk = rt_task_of(rt_se);
1170
1171         return (tsk->policy == SCHED_RR) ? 1 : 0;
1172 }
1173
1174 static inline
1175 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1176 {
1177         int prio = rt_se_prio(rt_se);
1178
1179         WARN_ON(!rt_prio(prio));
1180         rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1181         rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1182
1183         inc_rt_prio(rt_rq, prio);
1184         inc_rt_migration(rt_se, rt_rq);
1185         inc_rt_group(rt_se, rt_rq);
1186 }
1187
1188 static inline
1189 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1190 {
1191         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1192         WARN_ON(!rt_rq->rt_nr_running);
1193         rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1194         rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1195
1196         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1197         dec_rt_migration(rt_se, rt_rq);
1198         dec_rt_group(rt_se, rt_rq);
1199 }
1200
1201 /*
1202  * Change rt_se->run_list location unless SAVE && !MOVE
1203  *
1204  * assumes ENQUEUE/DEQUEUE flags match
1205  */
1206 static inline bool move_entity(unsigned int flags)
1207 {
1208         if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1209                 return false;
1210
1211         return true;
1212 }
1213
1214 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1215 {
1216         list_del_init(&rt_se->run_list);
1217
1218         if (list_empty(array->queue + rt_se_prio(rt_se)))
1219                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1220
1221         rt_se->on_list = 0;
1222 }
1223
1224 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1225 {
1226         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1227         struct rt_prio_array *array = &rt_rq->active;
1228         struct rt_rq *group_rq = group_rt_rq(rt_se);
1229         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1230
1231         /*
1232          * Don't enqueue the group if it's throttled, or when empty.
1233          * The latter is a consequence of the former when a child group
1234          * gets throttled and the current group doesn't have any other
1235          * active members.
1236          */
1237         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1238                 if (rt_se->on_list)
1239                         __delist_rt_entity(rt_se, array);
1240                 return;
1241         }
1242
1243         if (move_entity(flags)) {
1244                 WARN_ON_ONCE(rt_se->on_list);
1245                 if (flags & ENQUEUE_HEAD)
1246                         list_add(&rt_se->run_list, queue);
1247                 else
1248                         list_add_tail(&rt_se->run_list, queue);
1249
1250                 __set_bit(rt_se_prio(rt_se), array->bitmap);
1251                 rt_se->on_list = 1;
1252         }
1253         rt_se->on_rq = 1;
1254
1255         inc_rt_tasks(rt_se, rt_rq);
1256 }
1257
1258 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1259 {
1260         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1261         struct rt_prio_array *array = &rt_rq->active;
1262
1263         if (move_entity(flags)) {
1264                 WARN_ON_ONCE(!rt_se->on_list);
1265                 __delist_rt_entity(rt_se, array);
1266         }
1267         rt_se->on_rq = 0;
1268
1269         dec_rt_tasks(rt_se, rt_rq);
1270 }
1271
1272 /*
1273  * Because the prio of an upper entry depends on the lower
1274  * entries, we must remove entries top-down.
1275  */
1276 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1277 {
1278         struct sched_rt_entity *back = NULL;
1279
1280         for_each_sched_rt_entity(rt_se) {
1281                 rt_se->back = back;
1282                 back = rt_se;
1283         }
1284
1285         dequeue_top_rt_rq(rt_rq_of_se(back));
1286
1287         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1288                 if (on_rt_rq(rt_se))
1289                         __dequeue_rt_entity(rt_se, flags);
1290         }
1291 }
1292
1293 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1294 {
1295         struct rq *rq = rq_of_rt_se(rt_se);
1296
1297         dequeue_rt_stack(rt_se, flags);
1298         for_each_sched_rt_entity(rt_se)
1299                 __enqueue_rt_entity(rt_se, flags);
1300         enqueue_top_rt_rq(&rq->rt);
1301 }
1302
1303 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1304 {
1305         struct rq *rq = rq_of_rt_se(rt_se);
1306
1307         dequeue_rt_stack(rt_se, flags);
1308
1309         for_each_sched_rt_entity(rt_se) {
1310                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1311
1312                 if (rt_rq && rt_rq->rt_nr_running)
1313                         __enqueue_rt_entity(rt_se, flags);
1314         }
1315         enqueue_top_rt_rq(&rq->rt);
1316 }
1317
1318 /*
1319  * Adding/removing a task to/from a priority array:
1320  */
1321 static void
1322 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1323 {
1324         struct sched_rt_entity *rt_se = &p->rt;
1325
1326         if (flags & ENQUEUE_WAKEUP)
1327                 rt_se->timeout = 0;
1328
1329         enqueue_rt_entity(rt_se, flags);
1330
1331         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1332                 enqueue_pushable_task(rq, p);
1333 }
1334
1335 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1336 {
1337         struct sched_rt_entity *rt_se = &p->rt;
1338
1339         update_curr_rt(rq);
1340         dequeue_rt_entity(rt_se, flags);
1341
1342         dequeue_pushable_task(rq, p);
1343 }
1344
1345 /*
1346  * Put the task at the head or the tail of the run list without the overhead of
1347  * dequeue followed by enqueue.
1348  */
1349 static void
1350 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1351 {
1352         if (on_rt_rq(rt_se)) {
1353                 struct rt_prio_array *array = &rt_rq->active;
1354                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1355
1356                 if (head)
1357                         list_move(&rt_se->run_list, queue);
1358                 else
1359                         list_move_tail(&rt_se->run_list, queue);
1360         }
1361 }
1362
1363 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1364 {
1365         struct sched_rt_entity *rt_se = &p->rt;
1366         struct rt_rq *rt_rq;
1367
1368         for_each_sched_rt_entity(rt_se) {
1369                 rt_rq = rt_rq_of_se(rt_se);
1370                 requeue_rt_entity(rt_rq, rt_se, head);
1371         }
1372 }
1373
1374 static void yield_task_rt(struct rq *rq)
1375 {
1376         requeue_task_rt(rq, rq->curr, 0);
1377 }
1378
1379 #ifdef CONFIG_SMP
1380 static int find_lowest_rq(struct task_struct *task);
1381
1382 static int
1383 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
1384 {
1385         struct task_struct *curr;
1386         struct rq *rq;
1387
1388         /* For anything but wake ups, just return the task_cpu */
1389         if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1390                 goto out;
1391
1392         rq = cpu_rq(cpu);
1393
1394         rcu_read_lock();
1395         curr = READ_ONCE(rq->curr); /* unlocked access */
1396
1397         /*
1398          * If the current task on @p's runqueue is an RT task, then
1399          * try to see if we can wake this RT task up on another
1400          * runqueue. Otherwise simply start this RT task
1401          * on its current runqueue.
1402          *
1403          * We want to avoid overloading runqueues. If the woken
1404          * task is a higher priority, then it will stay on this CPU
1405          * and the lower prio task should be moved to another CPU.
1406          * Even though this will probably make the lower prio task
1407          * lose its cache, we do not want to bounce a higher task
1408          * around just because it gave up its CPU, perhaps for a
1409          * lock?
1410          *
1411          * For equal prio tasks, we just let the scheduler sort it out.
1412          *
1413          * Otherwise, just let it ride on the affined RQ and the
1414          * post-schedule router will push the preempted task away
1415          *
1416          * This test is optimistic, if we get it wrong the load-balancer
1417          * will have to sort it out.
1418          */
1419         if (curr && unlikely(rt_task(curr)) &&
1420             (curr->nr_cpus_allowed < 2 ||
1421              curr->prio <= p->prio)) {
1422                 int target = find_lowest_rq(p);
1423
1424                 /*
1425                  * Don't bother moving it if the destination CPU is
1426                  * not running a lower priority task.
1427                  */
1428                 if (target != -1 &&
1429                     p->prio < cpu_rq(target)->rt.highest_prio.curr)
1430                         cpu = target;
1431         }
1432         rcu_read_unlock();
1433
1434 out:
1435         return cpu;
1436 }
1437
1438 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1439 {
1440         /*
1441          * Current can't be migrated, useless to reschedule,
1442          * let's hope p can move out.
1443          */
1444         if (rq->curr->nr_cpus_allowed == 1 ||
1445             !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1446                 return;
1447
1448         /*
1449          * p is migratable, so let's not schedule it and
1450          * see if it is pushed or pulled somewhere else.
1451          */
1452         if (p->nr_cpus_allowed != 1
1453             && cpupri_find(&rq->rd->cpupri, p, NULL))
1454                 return;
1455
1456         /*
1457          * There appear to be other CPUs that can accept
1458          * the current task but none can run 'p', so let's reschedule
1459          * to try and push the current task away:
1460          */
1461         requeue_task_rt(rq, p, 1);
1462         resched_curr(rq);
1463 }
1464
1465 #endif /* CONFIG_SMP */
1466
1467 /*
1468  * Preempt the current task with a newly woken task if needed:
1469  */
1470 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1471 {
1472         if (p->prio < rq->curr->prio) {
1473                 resched_curr(rq);
1474                 return;
1475         }
1476
1477 #ifdef CONFIG_SMP
1478         /*
1479          * If:
1480          *
1481          * - the newly woken task is of equal priority to the current task
1482          * - the newly woken task is non-migratable while current is migratable
1483          * - current will be preempted on the next reschedule
1484          *
1485          * we should check to see if current can readily move to a different
1486          * cpu.  If so, we will reschedule to allow the push logic to try
1487          * to move current somewhere else, making room for our non-migratable
1488          * task.
1489          */
1490         if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1491                 check_preempt_equal_prio(rq, p);
1492 #endif
1493 }
1494
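/*
 * Return the first entity queued at the highest set priority of @rt_rq;
 * callers guarantee the runqueue is not empty.
 */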
1495 static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1496                                                    struct rt_rq *rt_rq)
1497 {
1498         struct rt_prio_array *array = &rt_rq->active;
1499         struct sched_rt_entity *next = NULL;
1500         struct list_head *queue;
1501         int idx;
1502
1503         idx = sched_find_first_bit(array->bitmap);
1504         BUG_ON(idx >= MAX_RT_PRIO);
1505
1506         queue = array->queue + idx;
1507         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1508
1509         return next;
1510 }
1511
1512 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1513 {
1514         struct sched_rt_entity *rt_se;
1515         struct task_struct *p;
1516         struct rt_rq *rt_rq  = &rq->rt;
1517
1518         do {
1519                 rt_se = pick_next_rt_entity(rq, rt_rq);
1520                 BUG_ON(!rt_se);
1521                 rt_rq = group_rt_rq(rt_se);
1522         } while (rt_rq);
1523
1524         p = rt_task_of(rt_se);
1525         p->se.exec_start = rq_clock_task(rq);
1526
1527         return p;
1528 }
1529
1530 static struct task_struct *
1531 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
1532 {
1533         struct task_struct *p;
1534         struct rt_rq *rt_rq = &rq->rt;
1535
1536         if (need_pull_rt_task(rq, prev)) {
1537                 /*
1538                  * This is OK, because current is on_cpu, which avoids it being
1539                  * picked for load-balance; preemption/IRQs are still
1540                  * disabled, avoiding further scheduler activity on it; and we're
1541                  * being very careful to re-start the picking loop.
1542                  */
1543                 rq_unpin_lock(rq, rf);
1544                 pull_rt_task(rq);
1545                 rq_repin_lock(rq, rf);
1546                 /*
1547                  * pull_rt_task() can drop (and re-acquire) rq->lock; this
1548                  * means a dl or stop task can slip in, in which case we need
1549                  * to re-start task selection.
1550                  */
1551                 if (unlikely((rq->stop && task_on_rq_queued(rq->stop)) ||
1552                              rq->dl.dl_nr_running))
1553                         return RETRY_TASK;
1554         }
1555
1556         /*
1557          * We may dequeue prev's rt_rq in put_prev_task().
1558          * So, we update time before rt_nr_running check.
1559          */
1560         if (prev->sched_class == &rt_sched_class)
1561                 update_curr_rt(rq);
1562
1563         if (!rt_rq->rt_queued)
1564                 return NULL;
1565
1566         put_prev_task(rq, prev);
1567
1568         p = _pick_next_task_rt(rq);
1569
1570         /* The running task is never eligible for pushing */
1571         dequeue_pushable_task(rq, p);
1572
1573         rt_queue_push_tasks(rq);
1574
1575         return p;
1576 }
1577
1578 static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1579 {
1580         update_curr_rt(rq);
1581
1582         /*
1583          * The previous task needs to be made eligible for pushing
1584          * if it is still active
1585          */
1586         if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1587                 enqueue_pushable_task(rq, p);
1588 }
1589
1590 #ifdef CONFIG_SMP
1591
1592 /* Only try algorithms three times */
1593 #define RT_MAX_TRIES 3
1594
1595 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1596 {
1597         if (!task_running(rq, p) &&
1598             cpumask_test_cpu(cpu, &p->cpus_allowed))
1599                 return 1;
1600
1601         return 0;
1602 }
1603
1604 /*
1605  * Return the highest-priority pushable task of this rq that is suitable
1606  * to be executed on the given CPU, or NULL if there is none.
1607  */
1608 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1609 {
1610         struct plist_head *head = &rq->rt.pushable_tasks;
1611         struct task_struct *p;
1612
1613         if (!has_pushable_tasks(rq))
1614                 return NULL;
1615
1616         plist_for_each_entry(p, head, pushable_tasks) {
1617                 if (pick_rt_task(rq, p, cpu))
1618                         return p;
1619         }
1620
1621         return NULL;
1622 }
1623
1624 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1625
1626 static int find_lowest_rq(struct task_struct *task)
1627 {
1628         struct sched_domain *sd;
1629         struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1630         int this_cpu = smp_processor_id();
1631         int cpu      = task_cpu(task);
1632
1633         /* Make sure the mask is initialized first */
1634         if (unlikely(!lowest_mask))
1635                 return -1;
1636
1637         if (task->nr_cpus_allowed == 1)
1638                 return -1; /* No other targets possible */
1639
1640         if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1641                 return -1; /* No targets found */
1642
1643         /*
1644          * At this point we have built a mask of CPUs representing the
1645          * lowest priority tasks in the system.  Now we want to elect
1646          * the best one based on our affinity and topology.
1647          *
1648          * We prioritize the last CPU that the task executed on since
1649          * it is most likely cache-hot in that location.
1650          */
1651         if (cpumask_test_cpu(cpu, lowest_mask))
1652                 return cpu;
1653
1654         /*
1655          * Otherwise, we consult the sched_domains span maps to figure
1656          * out which CPU is logically closest to our hot cache data.
1657          */
1658         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1659                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1660
1661         rcu_read_lock();
1662         for_each_domain(cpu, sd) {
1663                 if (sd->flags & SD_WAKE_AFFINE) {
1664                         int best_cpu;
1665
1666                         /*
1667                          * "this_cpu" is cheaper to preempt than a
1668                          * remote processor.
1669                          */
1670                         if (this_cpu != -1 &&
1671                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1672                                 rcu_read_unlock();
1673                                 return this_cpu;
1674                         }
1675
1676                         best_cpu = cpumask_first_and(lowest_mask,
1677                                                      sched_domain_span(sd));
1678                         if (best_cpu < nr_cpu_ids) {
1679                                 rcu_read_unlock();
1680                                 return best_cpu;
1681                         }
1682                 }
1683         }
1684         rcu_read_unlock();
1685
1686         /*
1687          * And finally, if there were no matches within the domains
1688          * just give the caller *something* to work with from the compatible
1689          * locations.
1690          */
1691         if (this_cpu != -1)
1692                 return this_cpu;
1693
1694         cpu = cpumask_any(lowest_mask);
1695         if (cpu < nr_cpu_ids)
1696                 return cpu;
1697
1698         return -1;
1699 }
1700
1701 /* Will lock the rq it finds */
1702 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1703 {
1704         struct rq *lowest_rq = NULL;
1705         int tries;
1706         int cpu;
1707
1708         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1709                 cpu = find_lowest_rq(task);
1710
1711                 if ((cpu == -1) || (cpu == rq->cpu))
1712                         break;
1713
1714                 lowest_rq = cpu_rq(cpu);
1715
1716                 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1717                         /*
1718                          * Target rq has tasks of equal or higher priority,
1719                          * retrying does not release any lock and is unlikely
1720                          * to yield a different result.
1721                          */
1722                         lowest_rq = NULL;
1723                         break;
1724                 }
1725
1726                 /* if the prio of this runqueue changed, try again */
1727                 if (double_lock_balance(rq, lowest_rq)) {
1728                         /*
1729                          * We had to unlock the run queue. In the
1730                          * meantime, the task could have migrated
1731                          * already or had its affinity changed.
1732                          * Also make sure that it wasn't scheduled on its rq.
1733                          */
1734                         if (unlikely(task_rq(task) != rq ||
1735                                      !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
1736                                      task_running(rq, task) ||
1737                                      !rt_task(task) ||
1738                                      !task_on_rq_queued(task))) {
1739
1740                                 double_unlock_balance(rq, lowest_rq);
1741                                 lowest_rq = NULL;
1742                                 break;
1743                         }
1744                 }
1745
1746                 /* If this rq is still suitable use it. */
1747                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1748                         break;
1749
1750                 /* try again */
1751                 double_unlock_balance(rq, lowest_rq);
1752                 lowest_rq = NULL;
1753         }
1754
1755         return lowest_rq;
1756 }
1757
1758 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1759 {
1760         struct task_struct *p;
1761
1762         if (!has_pushable_tasks(rq))
1763                 return NULL;
1764
1765         p = plist_first_entry(&rq->rt.pushable_tasks,
1766                               struct task_struct, pushable_tasks);
1767
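        /*
         * Sanity checks: anything on the pushable list must belong to this
         * rq, must not be the task currently running here, and must be a
         * queued, migratable RT task.
         */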
1768         BUG_ON(rq->cpu != task_cpu(p));
1769         BUG_ON(task_current(rq, p));
1770         BUG_ON(p->nr_cpus_allowed <= 1);
1771
1772         BUG_ON(!task_on_rq_queued(p));
1773         BUG_ON(!rt_task(p));
1774
1775         return p;
1776 }
1777
1778 /*
1779  * If the current CPU has more than one RT task, see if the
1780  * non-running task can migrate over to a CPU that is running
1781  * a task of lesser priority.
1782  */
1783 static int push_rt_task(struct rq *rq)
1784 {
1785         struct task_struct *next_task;
1786         struct rq *lowest_rq;
1787         int ret = 0;
1788
1789         if (!rq->rt.overloaded)
1790                 return 0;
1791
1792         next_task = pick_next_pushable_task(rq);
1793         if (!next_task)
1794                 return 0;
1795
1796 retry:
1797         if (unlikely(next_task == rq->curr)) {
1798                 WARN_ON(1);
1799                 return 0;
1800         }
1801
1802         /*
1803          * It's possible that the next_task slipped in with a
1804          * higher priority than current. If that's the case,
1805          * just reschedule current.
1806          */
1807         if (unlikely(next_task->prio < rq->curr->prio)) {
1808                 resched_curr(rq);
1809                 return 0;
1810         }
1811
1812         /* We might release rq lock */
1813         get_task_struct(next_task);
1814
1815         /* find_lock_lowest_rq locks the rq if found */
1816         lowest_rq = find_lock_lowest_rq(next_task, rq);
1817         if (!lowest_rq) {
1818                 struct task_struct *task;
1819                 /*
1820                  * find_lock_lowest_rq releases rq->lock
1821                  * so it is possible that next_task has migrated.
1822                  *
1823                  * We need to make sure that the task is still on the same
1824                  * run-queue and is also still the next task eligible for
1825                  * pushing.
1826                  */
1827                 task = pick_next_pushable_task(rq);
1828                 if (task == next_task) {
1829                         /*
1830                          * The task hasn't migrated, and is still the next
1831                          * eligible task, but we failed to find a run-queue
1832                          * to push it to.  Do not retry in this case, since
1833                          * other CPUs will pull from us when ready.
1834                          */
1835                         goto out;
1836                 }
1837
1838                 if (!task)
1839                         /* No more tasks, just exit */
1840                         goto out;
1841
1842                 /*
1843                  * Something has shifted, try again.
1844                  */
1845                 put_task_struct(next_task);
1846                 next_task = task;
1847                 goto retry;
1848         }
1849
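        /*
         * At this point both rq->lock and lowest_rq->lock are held: move
         * next_task over to the lower-priority runqueue and preempt
         * whatever is running there.
         */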
1850         deactivate_task(rq, next_task, 0);
1851         set_task_cpu(next_task, lowest_rq->cpu);
1852         activate_task(lowest_rq, next_task, 0);
1853         ret = 1;
1854
1855         resched_curr(lowest_rq);
1856
1857         double_unlock_balance(rq, lowest_rq);
1858
1859 out:
1860         put_task_struct(next_task);
1861
1862         return ret;
1863 }
1864
1865 static void push_rt_tasks(struct rq *rq)
1866 {
1867         /* push_rt_task will return true if it moved an RT task */
1868         while (push_rt_task(rq))
1869                 ;
1870 }
1871
1872 #ifdef HAVE_RT_PUSH_IPI
1873
1874 /*
1875  * When a high priority task schedules out from a CPU and a lower priority
1876  * task is scheduled in, a check is made to see if there's any RT tasks
1877  * on other CPUs that are waiting to run because a higher priority RT task
1878  * is currently running on its CPU. In this case, the CPU with multiple RT
1879  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1880  * up that may be able to run one of its non-running queued RT tasks.
1881  *
1882  * All CPUs with overloaded RT tasks need to be notified, as there is currently
1883  * no way to know which of these CPUs has the highest priority task waiting
1884  * to run. Instead of trying to take a spinlock on each of these CPUs,
1885  * which has been shown to cause large latency when done on machines with many
1886  * CPUs, an IPI is sent to the CPUs to have them push off the overloaded
1887  * RT tasks waiting to run.
1888  *
1889  * Just sending an IPI to each of the CPUs is also an issue, as on machines
1890  * with a large CPU count this can cause an IPI storm on a CPU, especially
1891  * if it's the only CPU with multiple RT tasks queued, and a large number
1892  * of CPUs are scheduling a lower priority task at the same time.
1893  *
1894  * Each root domain has its own irq work function that can iterate over
1895  * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
1896  * tasks must be checked whether one or many CPUs are lowering their
1897  * priority, there's a single irq work iterator that will try to
1898  * push off the RT tasks that are waiting to run.
1899  *
1900  * When a CPU schedules a lower priority task, it will kick off the
1901  * irq work iterator that will jump to each CPU with overloaded RT tasks.
1902  * As it only takes the first CPU that schedules a lower priority task
1903  * to start the process, the rto_loop_start variable is claimed atomically,
1904  * and only the CPU that succeeds will go on to take the rto_lock.
1905  * This prevents high contention on the lock as the process handles all
1906  * CPUs scheduling lower priority tasks.
1907  *
1908  * All CPUs that are scheduling a lower priority task will increment the
1909  * rto_loop_next variable. This will make sure that the irq work iterator
1910  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
1911  * priority task, even if the iterator is in the middle of a scan. Incrementing
1912  * rto_loop_next will cause the iterator to perform another scan.
1913  *
1914  */
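/*
 * In short: rto_loop_start serializes which CPU kicks off the IPI loop,
 * rto_loop_next versions the scan so that it is restarted whenever another
 * CPU lowers its priority mid-scan, and rto_cpu tracks the iterator's
 * current position in rto_mask.
 */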
1915 static int rto_next_cpu(struct root_domain *rd)
1916 {
1917         int next;
1918         int cpu;
1919
1920         /*
1921          * When starting the IPI RT pushing, rto_cpu is set to -1, and
1922          * rto_next_cpu() will simply return the first CPU found in
1923          * the rto_mask.
1924          *
1925          * If rto_next_cpu() is called with rto_cpu set to a valid CPU, it
1926          * will return the next CPU found in the rto_mask.
1927          *
1928          * If there are no more CPUs left in the rto_mask, then a check is made
1929          * against rto_loop and rto_loop_next. rto_loop is only updated with
1930          * the rto_lock held, but any CPU may increment the rto_loop_next
1931          * without any locking.
1932          */
1933         for (;;) {
1934
1935                 /* When rto_cpu is -1 this acts like cpumask_first() */
1936                 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
1937
1938                 rd->rto_cpu = cpu;
1939
1940                 if (cpu < nr_cpu_ids)
1941                         return cpu;
1942
1943                 rd->rto_cpu = -1;
1944
1945                 /*
1946                  * ACQUIRE ensures we see the @rto_mask changes
1947                  * made prior to the @next value observed.
1948                  *
1949                  * Matches WMB in rt_set_overload().
1950                  */
1951                 next = atomic_read_acquire(&rd->rto_loop_next);
1952
1953                 if (rd->rto_loop == next)
1954                         break;
1955
1956                 rd->rto_loop = next;
1957         }
1958
1959         return -1;
1960 }
1961
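/*
 * rto_loop_start is used as a simple try-lock: the ACQUIRE cmpxchg below
 * pairs with the RELEASE in rto_start_unlock(), so only one CPU at a time
 * can start the push loop.
 */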
1962 static inline bool rto_start_trylock(atomic_t *v)
1963 {
1964         return !atomic_cmpxchg_acquire(v, 0, 1);
1965 }
1966
1967 static inline void rto_start_unlock(atomic_t *v)
1968 {
1969         atomic_set_release(v, 0);
1970 }
1971
1972 static void tell_cpu_to_push(struct rq *rq)
1973 {
1974         int cpu = -1;
1975
1976         /* Keep the loop going if the IPI is currently active */
1977         atomic_inc(&rq->rd->rto_loop_next);
1978
1979         /* Only one CPU can initiate a loop at a time */
1980         if (!rto_start_trylock(&rq->rd->rto_loop_start))
1981                 return;
1982
1983         raw_spin_lock(&rq->rd->rto_lock);
1984
1985         /*
1986          * The rto_cpu is updated under the lock. If it holds a valid CPU,
1987          * then the IPI is still running and will continue due to the
1988          * update to rto_loop_next, and nothing needs to be done here.
1989          * Otherwise it is finishing up and an IPI needs to be sent.
1990          */
1991         if (rq->rd->rto_cpu < 0)
1992                 cpu = rto_next_cpu(rq->rd);
1993
1994         raw_spin_unlock(&rq->rd->rto_lock);
1995
1996         rto_start_unlock(&rq->rd->rto_loop_start);
1997
1998         if (cpu >= 0) {
1999                 /* Make sure the rd does not get freed while pushing */
2000                 sched_get_rd(rq->rd);
2001                 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2002         }
2003 }
2004
2005 /* Called from hardirq context */
2006 void rto_push_irq_work_func(struct irq_work *work)
2007 {
2008         struct root_domain *rd =
2009                 container_of(work, struct root_domain, rto_push_work);
2010         struct rq *rq;
2011         int cpu;
2012
2013         rq = this_rq();
2014
2015         /*
2016          * We do not need to grab the lock to check has_pushable_tasks():
2017          * when the list gets updated, a check is made whether a push is possible.
2018          */
2019         if (has_pushable_tasks(rq)) {
2020                 raw_spin_lock(&rq->lock);
2021                 push_rt_tasks(rq);
2022                 raw_spin_unlock(&rq->lock);
2023         }
2024
2025         raw_spin_lock(&rd->rto_lock);
2026
2027         /* Pass the IPI to the next rt overloaded queue */
2028         cpu = rto_next_cpu(rd);
2029
2030         raw_spin_unlock(&rd->rto_lock);
2031
2032         if (cpu < 0) {
2033                 sched_put_rd(rd);
2034                 return;
2035         }
2036
2037         /* Try the next RT overloaded CPU */
2038         irq_work_queue_on(&rd->rto_push_work, cpu);
2039 }
2040 #endif /* HAVE_RT_PUSH_IPI */
2041
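/*
 * Try to pull a pushable RT task that is of higher priority than whatever
 * is about to run here from one of the RT-overloaded CPUs onto this runqueue.
 */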
2042 static void pull_rt_task(struct rq *this_rq)
2043 {
2044         int this_cpu = this_rq->cpu, cpu;
2045         bool resched = false;
2046         struct task_struct *p;
2047         struct rq *src_rq;
2048         int rt_overload_count = rt_overloaded(this_rq);
2049
2050         if (likely(!rt_overload_count))
2051                 return;
2052
2053         /*
2054          * Match the barrier from rt_set_overload(); this guarantees that if we
2055          * see overloaded we must also see the rto_mask bit.
2056          */
2057         smp_rmb();
2058
2059         /* If we are the only overloaded CPU do nothing */
2060         if (rt_overload_count == 1 &&
2061             cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2062                 return;
2063
2064 #ifdef HAVE_RT_PUSH_IPI
2065         if (sched_feat(RT_PUSH_IPI)) {
2066                 tell_cpu_to_push(this_rq);
2067                 return;
2068         }
2069 #endif
2070
2071         for_each_cpu(cpu, this_rq->rd->rto_mask) {
2072                 if (this_cpu == cpu)
2073                         continue;
2074
2075                 src_rq = cpu_rq(cpu);
2076
2077                 /*
2078                  * Don't bother taking the src_rq->lock if the next highest
2079                  * task is known to be lower-priority than our current task.
2080                  * This may look racy, but if this value is about to go
2081                  * logically higher, the src_rq will push this task away.
2082          * And if it's going logically lower, we do not care.
2083                  */
2084                 if (src_rq->rt.highest_prio.next >=
2085                     this_rq->rt.highest_prio.curr)
2086                         continue;
2087
2088                 /*
2089                  * We can potentially drop this_rq's lock in
2090                  * double_lock_balance, and another CPU could
2091                  * alter this_rq
2092                  */
2093                 double_lock_balance(this_rq, src_rq);
2094
2095                 /*
2096                  * We can only pull a task that is pushable
2097                  * on its rq, and no others.
2098                  */
2099                 p = pick_highest_pushable_task(src_rq, this_cpu);
2100
2101                 /*
2102                  * Do we have an RT task that preempts
2103                  * the to-be-scheduled task?
2104                  */
2105                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2106                         WARN_ON(p == src_rq->curr);
2107                         WARN_ON(!task_on_rq_queued(p));
2108
2109                         /*
2110                          * There's a chance that p is higher in priority
2111                          * than what's currently running on its CPU.
2112                          * This is just that p is waking up and hasn't
2113                          * had a chance to schedule. We only pull
2114                          * p if it is lower in priority than the
2115                          * current task on the run queue.
2116                          */
2117                         if (p->prio < src_rq->curr->prio)
2118                                 goto skip;
2119
2120                         resched = true;
2121
2122                         deactivate_task(src_rq, p, 0);
2123                         set_task_cpu(p, this_cpu);
2124                         activate_task(this_rq, p, 0);
2125                         /*
2126                          * We continue with the search, just in
2127                          * case there's an even higher prio task
2128                          * in another runqueue. (low likelihood
2129                          * but possible)
2130                          */
2131                 }
2132 skip:
2133                 double_unlock_balance(this_rq, src_rq);
2134         }
2135
2136         if (resched)
2137                 resched_curr(this_rq);
2138 }
2139
2140 /*
2141  * If we are not running and we are not going to reschedule soon, we should
2142  * try to push tasks away now
2143  */
2144 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2145 {
2146         if (!task_running(rq, p) &&
2147             !test_tsk_need_resched(rq->curr) &&
2148             p->nr_cpus_allowed > 1 &&
2149             (dl_task(rq->curr) || rt_task(rq->curr)) &&
2150             (rq->curr->nr_cpus_allowed < 2 ||
2151              rq->curr->prio <= p->prio))
2152                 push_rt_tasks(rq);
2153 }
2154
2155 /* Assumes rq->lock is held */
2156 static void rq_online_rt(struct rq *rq)
2157 {
2158         if (rq->rt.overloaded)
2159                 rt_set_overload(rq);
2160
2161         __enable_runtime(rq);
2162
2163         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2164 }
2165
2166 /* Assumes rq->lock is held */
2167 static void rq_offline_rt(struct rq *rq)
2168 {
2169         if (rq->rt.overloaded)
2170                 rt_clear_overload(rq);
2171
2172         __disable_runtime(rq);
2173
2174         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2175 }
2176
2177 /*
2178  * When switching from the rt queue, we bring ourselves to a position
2179  * where we might want to pull RT tasks from other runqueues.
2180  */
2181 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2182 {
2183         /*
2184          * If there are other RT tasks then we will reschedule
2185          * and the scheduling of the other RT tasks will handle
2186          * the balancing. But if we are the last RT task
2187          * we may need to handle the pulling of RT tasks
2188          * now.
2189          */
2190         if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2191                 return;
2192
2193         rt_queue_pull_task(rq);
2194 }
2195
2196 void __init init_sched_rt_class(void)
2197 {
2198         unsigned int i;
2199
2200         for_each_possible_cpu(i) {
2201                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2202                                         GFP_KERNEL, cpu_to_node(i));
2203         }
2204 }
2205 #endif /* CONFIG_SMP */
2206
2207 /*
2208  * When switching a task to RT, we may overload the runqueue
2209  * with RT tasks. In this case we try to push them off to
2210  * other runqueues.
2211  */
2212 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2213 {
2214         /*
2215          * If we are already running, then there's nothing
2216          * that needs to be done. But if we are not running
2217          * we may need to preempt the current running task.
2218          * If that current running task is also an RT task
2219          * then see if we can move to another run queue.
2220          */
2221         if (task_on_rq_queued(p) && rq->curr != p) {
2222 #ifdef CONFIG_SMP
2223                 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2224                         rt_queue_push_tasks(rq);
2225 #endif /* CONFIG_SMP */
2226                 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
2227                         resched_curr(rq);
2228         }
2229 }
2230
2231 /*
2232  * Priority of the task has changed. This may cause
2233  * us to initiate a push or pull.
2234  */
2235 static void
2236 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2237 {
2238         if (!task_on_rq_queued(p))
2239                 return;
2240
2241         if (rq->curr == p) {
2242 #ifdef CONFIG_SMP
2243                 /*
2244                  * If our priority decreases while running, we
2245                  * may need to pull tasks to this runqueue.
2246                  */
2247                 if (oldprio < p->prio)
2248                         rt_queue_pull_task(rq);
2249
2250                 /*
2251                  * If there's a higher priority task waiting to run
2252                  * then reschedule.
2253                  */
2254                 if (p->prio > rq->rt.highest_prio.curr)
2255                         resched_curr(rq);
2256 #else
2257                 /* For UP simply resched on drop of prio */
2258                 if (oldprio < p->prio)
2259                         resched_curr(rq);
2260 #endif /* CONFIG_SMP */
2261         } else {
2262                 /*
2263                  * This task is not running, but if its priority
2264                  * is higher than that of the currently running
2265                  * task, then reschedule.
2266                  */
2267                 if (p->prio < rq->curr->prio)
2268                         resched_curr(rq);
2269         }
2270 }
2271
2272 #ifdef CONFIG_POSIX_TIMERS
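/*
 * Watchdog for RLIMIT_RTTIME: count the ticks this RT task has been running
 * and, once the limit (given in microseconds) is exceeded, arm a posix
 * CPU-timer expiry so that the limit can be enforced.
 */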
2273 static void watchdog(struct rq *rq, struct task_struct *p)
2274 {
2275         unsigned long soft, hard;
2276
2277         /* hard may change after soft was read; this will be fixed next tick */
2278         soft = task_rlimit(p, RLIMIT_RTTIME);
2279         hard = task_rlimit_max(p, RLIMIT_RTTIME);
2280
2281         if (soft != RLIM_INFINITY) {
2282                 unsigned long next;
2283
2284                 if (p->rt.watchdog_stamp != jiffies) {
2285                         p->rt.timeout++;
2286                         p->rt.watchdog_stamp = jiffies;
2287                 }
2288
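                /* Convert the rlimit from microseconds into scheduler ticks. */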
2289                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2290                 if (p->rt.timeout > next)
2291                         p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
2292         }
2293 }
2294 #else
2295 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2296 #endif
2297
2298 /*
2299  * scheduler tick hitting a task of our scheduling class.
2300  *
2301  * NOTE: This function can be called remotely by the tick offload that
2302  * goes along full dynticks. Therefore no local assumption can be made
2303  * and everything must be accessed through the @rq and @curr passed in
2304  * parameters.
2305  */
2306 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2307 {
2308         struct sched_rt_entity *rt_se = &p->rt;
2309
2310         update_curr_rt(rq);
2311
2312         watchdog(rq, p);
2313
2314         /*
2315          * RR tasks need a special form of timeslice management.
2316          * FIFO tasks have no timeslices.
2317          */
2318         if (p->policy != SCHED_RR)
2319                 return;
2320
2321         if (--p->rt.time_slice)
2322                 return;
2323
2324         p->rt.time_slice = sched_rr_timeslice;
2325
2326         /*
2327          * Requeue to the end of the queue if we (and all of our ancestors) are not
2328          * the only element on the queue.
2329          */
2330         for_each_sched_rt_entity(rt_se) {
2331                 if (rt_se->run_list.prev != rt_se->run_list.next) {
2332                         requeue_task_rt(rq, p, 0);
2333                         resched_curr(rq);
2334                         return;
2335                 }
2336         }
2337 }
2338
2339 static void set_curr_task_rt(struct rq *rq)
2340 {
2341         struct task_struct *p = rq->curr;
2342
2343         p->se.exec_start = rq_clock_task(rq);
2344
2345         /* The running task is never eligible for pushing */
2346         dequeue_pushable_task(rq, p);
2347 }
2348
2349 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2350 {
2351         /*
2352          * Time slice is 0 for SCHED_FIFO tasks
2353          */
2354         if (task->policy == SCHED_RR)
2355                 return sched_rr_timeslice;
2356         else
2357                 return 0;
2358 }
2359
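/*
 * The methods the core scheduler uses to drive SCHED_FIFO/SCHED_RR tasks.
 */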
2360 const struct sched_class rt_sched_class = {
2361         .next                   = &fair_sched_class,
2362         .enqueue_task           = enqueue_task_rt,
2363         .dequeue_task           = dequeue_task_rt,
2364         .yield_task             = yield_task_rt,
2365
2366         .check_preempt_curr     = check_preempt_curr_rt,
2367
2368         .pick_next_task         = pick_next_task_rt,
2369         .put_prev_task          = put_prev_task_rt,
2370
2371 #ifdef CONFIG_SMP
2372         .select_task_rq         = select_task_rq_rt,
2373
2374         .set_cpus_allowed       = set_cpus_allowed_common,
2375         .rq_online              = rq_online_rt,
2376         .rq_offline             = rq_offline_rt,
2377         .task_woken             = task_woken_rt,
2378         .switched_from          = switched_from_rt,
2379 #endif
2380
2381         .set_curr_task          = set_curr_task_rt,
2382         .task_tick              = task_tick_rt,
2383
2384         .get_rr_interval        = get_rr_interval_rt,
2385
2386         .prio_changed           = prio_changed_rt,
2387         .switched_to            = switched_to_rt,
2388
2389         .update_curr            = update_curr_rt,
2390 };
2391
2392 #ifdef CONFIG_RT_GROUP_SCHED
2393 /*
2394  * Ensure that the real-time constraints are schedulable.
2395  */
2396 static DEFINE_MUTEX(rt_constraints_mutex);
2397
2398 /* Must be called with tasklist_lock held */
2399 static inline int tg_has_rt_tasks(struct task_group *tg)
2400 {
2401         struct task_struct *g, *p;
2402
2403         /*
2404          * Autogroups do not have RT tasks; see autogroup_create().
2405          */
2406         if (task_group_is_autogroup(tg))
2407                 return 0;
2408
2409         for_each_process_thread(g, p) {
2410                 if (rt_task(p) && task_group(p) == tg)
2411                         return 1;
2412         }
2413
2414         return 0;
2415 }
2416
2417 struct rt_schedulable_data {
2418         struct task_group *tg;
2419         u64 rt_period;
2420         u64 rt_runtime;
2421 };
2422
2423 static int tg_rt_schedulable(struct task_group *tg, void *data)
2424 {
2425         struct rt_schedulable_data *d = data;
2426         struct task_group *child;
2427         unsigned long total, sum = 0;
2428         u64 period, runtime;
2429
2430         period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2431         runtime = tg->rt_bandwidth.rt_runtime;
2432
2433         if (tg == d->tg) {
2434                 period = d->rt_period;
2435                 runtime = d->rt_runtime;
2436         }
2437
2438         /*
2439          * Cannot have more runtime than the period.
2440          */
2441         if (runtime > period && runtime != RUNTIME_INF)
2442                 return -EINVAL;
2443
2444         /*
2445          * Ensure we don't starve existing RT tasks.
2446          */
2447         if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
2448                 return -EBUSY;
2449
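        /*
         * to_ratio() converts runtime/period into a fixed-point utilization
         * so that allocations can be compared and summed across groups.
         */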
2450         total = to_ratio(period, runtime);
2451
2452         /*
2453          * Nobody can have more than the global setting allows.
2454          */
2455         if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2456                 return -EINVAL;
2457
2458         /*
2459          * The sum of our children's runtime should not exceed our own.
2460          */
2461         list_for_each_entry_rcu(child, &tg->children, siblings) {
2462                 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2463                 runtime = child->rt_bandwidth.rt_runtime;
2464
2465                 if (child == d->tg) {
2466                         period = d->rt_period;
2467                         runtime = d->rt_runtime;
2468                 }
2469
2470                 sum += to_ratio(period, runtime);
2471         }
2472
2473         if (sum > total)
2474                 return -EINVAL;
2475
2476         return 0;
2477 }
2478
2479 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2480 {
2481         int ret;
2482
2483         struct rt_schedulable_data data = {
2484                 .tg = tg,
2485                 .rt_period = period,
2486                 .rt_runtime = runtime,
2487         };
2488
2489         rcu_read_lock();
2490         ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2491         rcu_read_unlock();
2492
2493         return ret;
2494 }
2495
2496 static int tg_set_rt_bandwidth(struct task_group *tg,
2497                 u64 rt_period, u64 rt_runtime)
2498 {
2499         int i, err = 0;
2500
2501         /*
2502          * Disallowing the root group RT runtime is BAD; it would prevent the
2503          * kernel from creating (and/or operating) RT threads.
2504          */
2505         if (tg == &root_task_group && rt_runtime == 0)
2506                 return -EINVAL;
2507
2508         /* A zero period doesn't make any sense. */
2509         if (rt_period == 0)
2510                 return -EINVAL;
2511
2512         mutex_lock(&rt_constraints_mutex);
2513         read_lock(&tasklist_lock);
2514         err = __rt_schedulable(tg, rt_period, rt_runtime);
2515         if (err)
2516                 goto unlock;
2517
2518         raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2519         tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2520         tg->rt_bandwidth.rt_runtime = rt_runtime;
2521
2522         for_each_possible_cpu(i) {
2523                 struct rt_rq *rt_rq = tg->rt_rq[i];
2524
2525                 raw_spin_lock(&rt_rq->rt_runtime_lock);
2526                 rt_rq->rt_runtime = rt_runtime;
2527                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2528         }
2529         raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2530 unlock:
2531         read_unlock(&tasklist_lock);
2532         mutex_unlock(&rt_constraints_mutex);
2533
2534         return err;
2535 }
2536
2537 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2538 {
2539         u64 rt_runtime, rt_period;
2540
2541         rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2542         rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2543         if (rt_runtime_us < 0)
2544                 rt_runtime = RUNTIME_INF;
2545
2546         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2547 }
2548
2549 long sched_group_rt_runtime(struct task_group *tg)
2550 {
2551         u64 rt_runtime_us;
2552
2553         if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2554                 return -1;
2555
2556         rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2557         do_div(rt_runtime_us, NSEC_PER_USEC);
2558         return rt_runtime_us;
2559 }
2560
2561 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2562 {
2563         u64 rt_runtime, rt_period;
2564
2565         rt_period = rt_period_us * NSEC_PER_USEC;
2566         rt_runtime = tg->rt_bandwidth.rt_runtime;
2567
2568         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2569 }
2570
2571 long sched_group_rt_period(struct task_group *tg)
2572 {
2573         u64 rt_period_us;
2574
2575         rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2576         do_div(rt_period_us, NSEC_PER_USEC);
2577         return rt_period_us;
2578 }
2579
2580 static int sched_rt_global_constraints(void)
2581 {
2582         int ret = 0;
2583
2584         mutex_lock(&rt_constraints_mutex);
2585         read_lock(&tasklist_lock);
2586         ret = __rt_schedulable(NULL, 0, 0);
2587         read_unlock(&tasklist_lock);
2588         mutex_unlock(&rt_constraints_mutex);
2589
2590         return ret;
2591 }
2592
2593 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2594 {
2595         /* Don't accept realtime tasks when there is no way for them to run */
2596         if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2597                 return 0;
2598
2599         return 1;
2600 }
2601
2602 #else /* !CONFIG_RT_GROUP_SCHED */
2603 static int sched_rt_global_constraints(void)
2604 {
2605         unsigned long flags;
2606         int i;
2607
2608         raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
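        /* Without group scheduling, just propagate the global runtime limit to every CPU's rt_rq. */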
2609         for_each_possible_cpu(i) {
2610                 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2611
2612                 raw_spin_lock(&rt_rq->rt_runtime_lock);
2613                 rt_rq->rt_runtime = global_rt_runtime();
2614                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2615         }
2616         raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2617
2618         return 0;
2619 }
2620 #endif /* CONFIG_RT_GROUP_SCHED */
2621
2622 static int sched_rt_global_validate(void)
2623 {
2624         if (sysctl_sched_rt_period <= 0)
2625                 return -EINVAL;
2626
2627         if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2628                 (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
2629                 return -EINVAL;
2630
2631         return 0;
2632 }
2633
2634 static void sched_rt_do_global(void)
2635 {
2636         def_rt_bandwidth.rt_runtime = global_rt_runtime();
2637         def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2638 }
2639
2640 int sched_rt_handler(struct ctl_table *table, int write,
2641                 void __user *buffer, size_t *lenp,
2642                 loff_t *ppos)
2643 {
2644         int old_period, old_runtime;
2645         static DEFINE_MUTEX(mutex);
2646         int ret;
2647
2648         mutex_lock(&mutex);
2649         old_period = sysctl_sched_rt_period;
2650         old_runtime = sysctl_sched_rt_runtime;
2651
2652         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2653
2654         if (!ret && write) {
2655                 ret = sched_rt_global_validate();
2656                 if (ret)
2657                         goto undo;
2658
2659                 ret = sched_dl_global_validate();
2660                 if (ret)
2661                         goto undo;
2662
2663                 ret = sched_rt_global_constraints();
2664                 if (ret)
2665                         goto undo;
2666
2667                 sched_rt_do_global();
2668                 sched_dl_do_global();
2669         }
2670         if (0) {
2671 undo:
2672                 sysctl_sched_rt_period = old_period;
2673                 sysctl_sched_rt_runtime = old_runtime;
2674         }
2675         mutex_unlock(&mutex);
2676
2677         return ret;
2678 }
2679
2680 int sched_rr_handler(struct ctl_table *table, int write,
2681                 void __user *buffer, size_t *lenp,
2682                 loff_t *ppos)
2683 {
2684         int ret;
2685         static DEFINE_MUTEX(mutex);
2686
2687         mutex_lock(&mutex);
2688         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2689         /*
2690          * Make sure that internally we keep jiffies.
2691          * Also, writing zero (or any negative value) resets the timeslice to the default.
2692          */
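        /*
         * For example, with HZ=250 a write of 100 (ms) is stored as
         * msecs_to_jiffies(100) == 25 jiffies in sched_rr_timeslice.
         */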
2693         if (!ret && write) {
2694                 sched_rr_timeslice =
2695                         sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2696                         msecs_to_jiffies(sysctl_sched_rr_timeslice);
2697         }
2698         mutex_unlock(&mutex);
2699
2700         return ret;
2701 }
2702
2703 #ifdef CONFIG_SCHED_DEBUG
2704 extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2705
2706 void print_rt_stats(struct seq_file *m, int cpu)
2707 {
2708         rt_rq_iter_t iter;
2709         struct rt_rq *rt_rq;
2710
2711         rcu_read_lock();
2712         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2713                 print_rt_rq(m, cpu, rt_rq);
2714         rcu_read_unlock();
2715 }
2716 #endif /* CONFIG_SCHED_DEBUG */