kernel/sched_rt.c

1/*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6#ifdef CONFIG_RT_GROUP_SCHED
7
8#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
9
10static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
11{
12#ifdef CONFIG_SCHED_DEBUG
13    WARN_ON_ONCE(!rt_entity_is_task(rt_se));
14#endif
15    return container_of(rt_se, struct task_struct, rt);
16}
17
18static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
19{
20    return rt_rq->rq;
21}
22
23static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
24{
25    return rt_se->rt_rq;
26}
27
28#else /* CONFIG_RT_GROUP_SCHED */
29
30#define rt_entity_is_task(rt_se) (1)
31
32static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
33{
34    return container_of(rt_se, struct task_struct, rt);
35}
36
37static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
38{
39    return container_of(rt_rq, struct rq, rt);
40}
41
42static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
43{
44    struct task_struct *p = rt_task_of(rt_se);
45    struct rq *rq = task_rq(p);
46
47    return &rq->rt;
48}
49
50#endif /* CONFIG_RT_GROUP_SCHED */
51
52#ifdef CONFIG_SMP
53
54static inline int rt_overloaded(struct rq *rq)
55{
56    return atomic_read(&rq->rd->rto_count);
57}
58
59static inline void rt_set_overload(struct rq *rq)
60{
61    if (!rq->online)
62        return;
63
64    cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
65    /*
66     * Make sure the mask is visible before we set
67     * the overload count. That is checked to determine
68     * if we should look at the mask. It would be a shame
69     * if we looked at the mask, but the mask was not
70     * updated yet.
71     */
72    wmb();
73    atomic_inc(&rq->rd->rto_count);
74}
75
76static inline void rt_clear_overload(struct rq *rq)
77{
78    if (!rq->online)
79        return;
80
81    /* the order here really doesn't matter */
82    atomic_dec(&rq->rd->rto_count);
83    cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
84}
85
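/*
 * Track whether this runqueue is "RT overloaded": it is flagged in the
 * root domain when it holds more than one RT task and at least one of
 * them may migrate, and unflagged otherwise, so other CPUs know whether
 * pulling from here can be worthwhile.
 */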
86static void update_rt_migration(struct rt_rq *rt_rq)
87{
88    if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
89        if (!rt_rq->overloaded) {
90            rt_set_overload(rq_of_rt_rq(rt_rq));
91            rt_rq->overloaded = 1;
92        }
93    } else if (rt_rq->overloaded) {
94        rt_clear_overload(rq_of_rt_rq(rt_rq));
95        rt_rq->overloaded = 0;
96    }
97}
98
99static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
100{
101    if (!rt_entity_is_task(rt_se))
102        return;
103
104    rt_rq = &rq_of_rt_rq(rt_rq)->rt;
105
106    rt_rq->rt_nr_total++;
107    if (rt_se->nr_cpus_allowed > 1)
108        rt_rq->rt_nr_migratory++;
109
110    update_rt_migration(rt_rq);
111}
112
113static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
114{
115    if (!rt_entity_is_task(rt_se))
116        return;
117
118    rt_rq = &rq_of_rt_rq(rt_rq)->rt;
119
120    rt_rq->rt_nr_total--;
121    if (rt_se->nr_cpus_allowed > 1)
122        rt_rq->rt_nr_migratory--;
123
124    update_rt_migration(rt_rq);
125}
126
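/*
 * pushable_tasks is a priority-sorted plist of the runnable RT tasks on
 * this runqueue that are allowed to run on more than one CPU; the push
 * logic picks its candidates from here instead of scanning the whole
 * priority array.
 */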
127static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
128{
129    plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
130    plist_node_init(&p->pushable_tasks, p->prio);
131    plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
132}
133
134static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
135{
136    plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
137}
138
139static inline int has_pushable_tasks(struct rq *rq)
140{
141    return !plist_head_empty(&rq->rt.pushable_tasks);
142}
143
144#else
145
146static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
147{
148}
149
150static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
151{
152}
153
154static inline
155void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
156{
157}
158
159static inline
160void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
161{
162}
163
164#endif /* CONFIG_SMP */
165
166static inline int on_rt_rq(struct sched_rt_entity *rt_se)
167{
168    return !list_empty(&rt_se->run_list);
169}
170
171#ifdef CONFIG_RT_GROUP_SCHED
172
173static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
174{
175    if (!rt_rq->tg)
176        return RUNTIME_INF;
177
178    return rt_rq->rt_runtime;
179}
180
181static inline u64 sched_rt_period(struct rt_rq *rt_rq)
182{
183    return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
184}
185
186typedef struct task_group *rt_rq_iter_t;
187
188#define for_each_rt_rq(rt_rq, iter, rq) \
189    for (iter = list_entry_rcu(task_groups.next, typeof(*iter), list); \
190         (&iter->list != &task_groups) && \
191         (rt_rq = iter->rt_rq[cpu_of(rq)]); \
192         iter = list_entry_rcu(iter->list.next, typeof(*iter), list))
193
194static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
195{
196    list_add_rcu(&rt_rq->leaf_rt_rq_list,
197            &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
198}
199
200static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
201{
202    list_del_rcu(&rt_rq->leaf_rt_rq_list);
203}
204
205#define for_each_leaf_rt_rq(rt_rq, rq) \
206    list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
207
208#define for_each_sched_rt_entity(rt_se) \
209    for (; rt_se; rt_se = rt_se->parent)
210
211static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
212{
213    return rt_se->my_q;
214}
215
216static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
217static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
218
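/*
 * (Re)activate a group's per-CPU scheduling entity once its rt_rq has
 * runnable tasks again (e.g. after unthrottling), and preempt the
 * current task if the group now carries a higher-priority task.
 */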
219static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
220{
221    struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
222    struct sched_rt_entity *rt_se;
223
224    int cpu = cpu_of(rq_of_rt_rq(rt_rq));
225
226    rt_se = rt_rq->tg->rt_se[cpu];
227
228    if (rt_rq->rt_nr_running) {
229        if (rt_se && !on_rt_rq(rt_se))
230            enqueue_rt_entity(rt_se, false);
231        if (rt_rq->highest_prio.curr < curr->prio)
232            resched_task(curr);
233    }
234}
235
236static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
237{
238    struct sched_rt_entity *rt_se;
239    int cpu = cpu_of(rq_of_rt_rq(rt_rq));
240
241    rt_se = rt_rq->tg->rt_se[cpu];
242
243    if (rt_se && on_rt_rq(rt_se))
244        dequeue_rt_entity(rt_se);
245}
246
247static inline int rt_rq_throttled(struct rt_rq *rt_rq)
248{
249    return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
250}
251
252static int rt_se_boosted(struct sched_rt_entity *rt_se)
253{
254    struct rt_rq *rt_rq = group_rt_rq(rt_se);
255    struct task_struct *p;
256
257    if (rt_rq)
258        return !!rt_rq->rt_nr_boosted;
259
260    p = rt_task_of(rt_se);
261    return p->prio != p->normal_prio;
262}
263
264#ifdef CONFIG_SMP
265static inline const struct cpumask *sched_rt_period_mask(void)
266{
267    return cpu_rq(smp_processor_id())->rd->span;
268}
269#else
270static inline const struct cpumask *sched_rt_period_mask(void)
271{
272    return cpu_online_mask;
273}
274#endif
275
276static inline
277struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
278{
279    return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
280}
281
282static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
283{
284    return &rt_rq->tg->rt_bandwidth;
285}
286
287#else /* !CONFIG_RT_GROUP_SCHED */
288
289static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
290{
291    return rt_rq->rt_runtime;
292}
293
294static inline u64 sched_rt_period(struct rt_rq *rt_rq)
295{
296    return ktime_to_ns(def_rt_bandwidth.rt_period);
297}
298
299typedef struct rt_rq *rt_rq_iter_t;
300
301#define for_each_rt_rq(rt_rq, iter, rq) \
302    for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
303
304static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
305{
306}
307
308static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
309{
310}
311
312#define for_each_leaf_rt_rq(rt_rq, rq) \
313    for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
314
315#define for_each_sched_rt_entity(rt_se) \
316    for (; rt_se; rt_se = NULL)
317
318static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
319{
320    return NULL;
321}
322
323static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
324{
325    if (rt_rq->rt_nr_running)
326        resched_task(rq_of_rt_rq(rt_rq)->curr);
327}
328
329static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
330{
331}
332
333static inline int rt_rq_throttled(struct rt_rq *rt_rq)
334{
335    return rt_rq->rt_throttled;
336}
337
338static inline const struct cpumask *sched_rt_period_mask(void)
339{
340    return cpu_online_mask;
341}
342
343static inline
344struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
345{
346    return &cpu_rq(cpu)->rt;
347}
348
349static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
350{
351    return &def_rt_bandwidth;
352}
353
354#endif /* CONFIG_RT_GROUP_SCHED */
355
356#ifdef CONFIG_SMP
357/*
358 * We ran out of runtime, see if we can borrow some from our neighbours.
359 */
360static int do_balance_runtime(struct rt_rq *rt_rq)
361{
362    struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
363    struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
364    int i, weight, more = 0;
365    u64 rt_period;
366
367    weight = cpumask_weight(rd->span);
368
369    raw_spin_lock(&rt_b->rt_runtime_lock);
370    rt_period = ktime_to_ns(rt_b->rt_period);
371    for_each_cpu(i, rd->span) {
372        struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
373        s64 diff;
374
375        if (iter == rt_rq)
376            continue;
377
378        raw_spin_lock(&iter->rt_runtime_lock);
379        /*
380         * Either all rqs have inf runtime and there's nothing to steal
381         * or __disable_runtime() below sets a specific rq to inf to
382         * indicate it's been disabled and disallow stealing.
383         */
384        if (iter->rt_runtime == RUNTIME_INF)
385            goto next;
386
387        /*
388         * From runqueues with spare time, take 1/n part of their
389         * spare time, but no more than our period.
390         */
391        diff = iter->rt_runtime - iter->rt_time;
392        if (diff > 0) {
393            diff = div_u64((u64)diff, weight);
394            if (rt_rq->rt_runtime + diff > rt_period)
395                diff = rt_period - rt_rq->rt_runtime;
396            iter->rt_runtime -= diff;
397            rt_rq->rt_runtime += diff;
398            more = 1;
399            if (rt_rq->rt_runtime == rt_period) {
400                raw_spin_unlock(&iter->rt_runtime_lock);
401                break;
402            }
403        }
404next:
405        raw_spin_unlock(&iter->rt_runtime_lock);
406    }
407    raw_spin_unlock(&rt_b->rt_runtime_lock);
408
409    return more;
410}
411
412/*
413 * Ensure this RQ takes back all the runtime it lent to its neighbours.
414 */
415static void __disable_runtime(struct rq *rq)
416{
417    struct root_domain *rd = rq->rd;
418    rt_rq_iter_t iter;
419    struct rt_rq *rt_rq;
420
421    if (unlikely(!scheduler_running))
422        return;
423
424    for_each_rt_rq(rt_rq, iter, rq) {
425        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
426        s64 want;
427        int i;
428
429        raw_spin_lock(&rt_b->rt_runtime_lock);
430        raw_spin_lock(&rt_rq->rt_runtime_lock);
431        /*
432         * Either we're all inf and nobody needs to borrow, or we're
433         * already disabled and thus have nothing to do, or we have
434         * exactly the right amount of runtime to take out.
435         */
436        if (rt_rq->rt_runtime == RUNTIME_INF ||
437                rt_rq->rt_runtime == rt_b->rt_runtime)
438            goto balanced;
439        raw_spin_unlock(&rt_rq->rt_runtime_lock);
440
441        /*
442         * Calculate the difference between what we started out with
443         * and what we currently have; that's the amount of runtime
444         * we lent out and now have to reclaim.
445         */
446        want = rt_b->rt_runtime - rt_rq->rt_runtime;
447
448        /*
449         * Greedy reclaim, take back as much as we can.
450         */
451        for_each_cpu(i, rd->span) {
452            struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
453            s64 diff;
454
455            /*
456             * Can't reclaim from ourselves or disabled runqueues.
457             */
458            if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
459                continue;
460
461            raw_spin_lock(&iter->rt_runtime_lock);
462            if (want > 0) {
463                diff = min_t(s64, iter->rt_runtime, want);
464                iter->rt_runtime -= diff;
465                want -= diff;
466            } else {
467                iter->rt_runtime -= want;
468                want -= want;
469            }
470            raw_spin_unlock(&iter->rt_runtime_lock);
471
472            if (!want)
473                break;
474        }
475
476        raw_spin_lock(&rt_rq->rt_runtime_lock);
477        /*
478         * We cannot be left wanting - that would mean some runtime
479         * leaked out of the system.
480         */
481        BUG_ON(want);
482balanced:
483        /*
484         * Disable all the borrow logic by pretending we have inf
485         * runtime - in which case borrowing doesn't make sense.
486         */
487        rt_rq->rt_runtime = RUNTIME_INF;
488        raw_spin_unlock(&rt_rq->rt_runtime_lock);
489        raw_spin_unlock(&rt_b->rt_runtime_lock);
490    }
491}
492
493static void disable_runtime(struct rq *rq)
494{
495    unsigned long flags;
496
497    raw_spin_lock_irqsave(&rq->lock, flags);
498    __disable_runtime(rq);
499    raw_spin_unlock_irqrestore(&rq->lock, flags);
500}
501
502static void __enable_runtime(struct rq *rq)
503{
504    rt_rq_iter_t iter;
505    struct rt_rq *rt_rq;
506
507    if (unlikely(!scheduler_running))
508        return;
509
510    /*
511     * Reset each runqueue's bandwidth settings
512     */
513    for_each_rt_rq(rt_rq, iter, rq) {
514        struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
515
516        raw_spin_lock(&rt_b->rt_runtime_lock);
517        raw_spin_lock(&rt_rq->rt_runtime_lock);
518        rt_rq->rt_runtime = rt_b->rt_runtime;
519        rt_rq->rt_time = 0;
520        rt_rq->rt_throttled = 0;
521        raw_spin_unlock(&rt_rq->rt_runtime_lock);
522        raw_spin_unlock(&rt_b->rt_runtime_lock);
523    }
524}
525
526static void enable_runtime(struct rq *rq)
527{
528    unsigned long flags;
529
530    raw_spin_lock_irqsave(&rq->lock, flags);
531    __enable_runtime(rq);
532    raw_spin_unlock_irqrestore(&rq->lock, flags);
533}
534
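/*
 * Only go borrowing when we have actually overrun our own allotment.
 * rt_runtime_lock is dropped around do_balance_runtime(), which takes
 * the bandwidth lock before the per-rq runtime locks.
 */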
535static int balance_runtime(struct rt_rq *rt_rq)
536{
537    int more = 0;
538
539    if (rt_rq->rt_time > rt_rq->rt_runtime) {
540        raw_spin_unlock(&rt_rq->rt_runtime_lock);
541        more = do_balance_runtime(rt_rq);
542        raw_spin_lock(&rt_rq->rt_runtime_lock);
543    }
544
545    return more;
546}
547#else /* !CONFIG_SMP */
548static inline int balance_runtime(struct rt_rq *rt_rq)
549{
550    return 0;
551}
552#endif /* CONFIG_SMP */
553
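/*
 * Runs from the rt_bandwidth period timer: for every rt_rq covered by
 * the period mask, drop the runtime consumed during the elapsed
 * period(s) from rt_time and unthrottle/re-enqueue the rt_rq once it
 * fits within its runtime again.  Returns 1 when every rt_rq is idle.
 */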
554static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
555{
556    int i, idle = 1;
557    const struct cpumask *span;
558
559    if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
560        return 1;
561
562    span = sched_rt_period_mask();
563    for_each_cpu(i, span) {
564        int enqueue = 0;
565        struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
566        struct rq *rq = rq_of_rt_rq(rt_rq);
567
568        raw_spin_lock(&rq->lock);
569        if (rt_rq->rt_time) {
570            u64 runtime;
571
572            raw_spin_lock(&rt_rq->rt_runtime_lock);
573            if (rt_rq->rt_throttled)
574                balance_runtime(rt_rq);
575            runtime = rt_rq->rt_runtime;
576            rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
577            if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
578                rt_rq->rt_throttled = 0;
579                enqueue = 1;
580
581                /*
582                 * Force a clock update if the CPU was idle,
583                 * lest wakeup -> unthrottle time accumulate.
584                 */
585                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
586                    rq->skip_clock_update = -1;
587            }
588            if (rt_rq->rt_time || rt_rq->rt_nr_running)
589                idle = 0;
590            raw_spin_unlock(&rt_rq->rt_runtime_lock);
591        } else if (rt_rq->rt_nr_running) {
592            idle = 0;
593            if (!rt_rq_throttled(rt_rq))
594                enqueue = 1;
595        }
596
597        if (enqueue)
598            sched_rt_rq_enqueue(rt_rq);
599        raw_spin_unlock(&rq->lock);
600    }
601
602    return idle;
603}
604
605static inline int rt_se_prio(struct sched_rt_entity *rt_se)
606{
607#ifdef CONFIG_RT_GROUP_SCHED
608    struct rt_rq *rt_rq = group_rt_rq(rt_se);
609
610    if (rt_rq)
611        return rt_rq->highest_prio.curr;
612#endif
613
614    return rt_task_of(rt_se)->prio;
615}
616
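/*
 * Check whether this rt_rq has used up its runtime for the current
 * period; try to borrow from other CPUs first, and throttle (dequeue)
 * the rt_rq if it is still over its allotment.  Returns 1 when the
 * rt_rq just got throttled, so the caller can reschedule.
 */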
617static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
618{
619    u64 runtime = sched_rt_runtime(rt_rq);
620
621    if (rt_rq->rt_throttled)
622        return rt_rq_throttled(rt_rq);
623
624    if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
625        return 0;
626
627    balance_runtime(rt_rq);
628    runtime = sched_rt_runtime(rt_rq);
629    if (runtime == RUNTIME_INF)
630        return 0;
631
632    if (rt_rq->rt_time > runtime) {
633        rt_rq->rt_throttled = 1;
634        if (rt_rq_throttled(rt_rq)) {
635            sched_rt_rq_dequeue(rt_rq);
636            return 1;
637        }
638    }
639
640    return 0;
641}
642
643/*
644 * Update the current task's runtime statistics. Skip current tasks that
645 * are not in our scheduling class.
646 */
647static void update_curr_rt(struct rq *rq)
648{
649    struct task_struct *curr = rq->curr;
650    struct sched_rt_entity *rt_se = &curr->rt;
651    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
652    u64 delta_exec;
653
654    if (curr->sched_class != &rt_sched_class)
655        return;
656
657    delta_exec = rq->clock_task - curr->se.exec_start;
658    if (unlikely((s64)delta_exec < 0))
659        delta_exec = 0;
660
661    schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
662
663    curr->se.sum_exec_runtime += delta_exec;
664    account_group_exec_runtime(curr, delta_exec);
665
666    curr->se.exec_start = rq->clock_task;
667    cpuacct_charge(curr, delta_exec);
668
669    sched_rt_avg_update(rq, delta_exec);
670
671    if (!rt_bandwidth_enabled())
672        return;
673
674    for_each_sched_rt_entity(rt_se) {
675        rt_rq = rt_rq_of_se(rt_se);
676
677        if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
678            raw_spin_lock(&rt_rq->rt_runtime_lock);
679            rt_rq->rt_time += delta_exec;
680            if (sched_rt_runtime_exceeded(rt_rq))
681                resched_task(curr);
682            raw_spin_unlock(&rt_rq->rt_runtime_lock);
683        }
684    }
685}
686
687#if defined CONFIG_SMP
688
689static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
690
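/*
 * Priority of the next-highest queued RT task on this runqueue, or
 * MAX_RT_PRIO when there is none; used to refresh highest_prio.next.
 */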
691static inline int next_prio(struct rq *rq)
692{
693    struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
694
695    if (next && rt_prio(next->prio))
696        return next->prio;
697    else
698        return MAX_RT_PRIO;
699}
700
701static void
702inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
703{
704    struct rq *rq = rq_of_rt_rq(rt_rq);
705
706    if (prio < prev_prio) {
707
708        /*
709         * If the new task is higher in priority than anything on the
710         * run-queue, we know that the previous high becomes our
711         * next-highest.
712         */
713        rt_rq->highest_prio.next = prev_prio;
714
715        if (rq->online)
716            cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
717
718    } else if (prio == rt_rq->highest_prio.curr)
719        /*
720         * If the next task is equal in priority to the highest on
721         * the run-queue, then we implicitly know that the next highest
722         * task cannot be any lower than current
723         */
724        rt_rq->highest_prio.next = prio;
725    else if (prio < rt_rq->highest_prio.next)
726        /*
727         * Otherwise, we need to recompute next-highest
728         */
729        rt_rq->highest_prio.next = next_prio(rq);
730}
731
732static void
733dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
734{
735    struct rq *rq = rq_of_rt_rq(rt_rq);
736
737    if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
738        rt_rq->highest_prio.next = next_prio(rq);
739
740    if (rq->online && rt_rq->highest_prio.curr != prev_prio)
741        cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
742}
743
744#else /* CONFIG_SMP */
745
746static inline
747void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
748static inline
749void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
750
751#endif /* CONFIG_SMP */
752
753#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
754static void
755inc_rt_prio(struct rt_rq *rt_rq, int prio)
756{
757    int prev_prio = rt_rq->highest_prio.curr;
758
759    if (prio < prev_prio)
760        rt_rq->highest_prio.curr = prio;
761
762    inc_rt_prio_smp(rt_rq, prio, prev_prio);
763}
764
765static void
766dec_rt_prio(struct rt_rq *rt_rq, int prio)
767{
768    int prev_prio = rt_rq->highest_prio.curr;
769
770    if (rt_rq->rt_nr_running) {
771
772        WARN_ON(prio < prev_prio);
773
774        /*
775         * This may have been our highest task, and therefore
776         * we may have some recomputation to do
777         */
778        if (prio == prev_prio) {
779            struct rt_prio_array *array = &rt_rq->active;
780
781            rt_rq->highest_prio.curr =
782                sched_find_first_bit(array->bitmap);
783        }
784
785    } else
786        rt_rq->highest_prio.curr = MAX_RT_PRIO;
787
788    dec_rt_prio_smp(rt_rq, prio, prev_prio);
789}
790
791#else
792
793static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
794static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
795
796#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
797
798#ifdef CONFIG_RT_GROUP_SCHED
799
800static void
801inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
802{
803    if (rt_se_boosted(rt_se))
804        rt_rq->rt_nr_boosted++;
805
806    if (rt_rq->tg)
807        start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
808}
809
810static void
811dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
812{
813    if (rt_se_boosted(rt_se))
814        rt_rq->rt_nr_boosted--;
815
816    WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
817}
818
819#else /* CONFIG_RT_GROUP_SCHED */
820
821static void
822inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
823{
824    start_rt_bandwidth(&def_rt_bandwidth);
825}
826
827static inline
828void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
829
830#endif /* CONFIG_RT_GROUP_SCHED */
831
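/*
 * Bookkeeping when an entity enters or leaves an rt_rq: the running
 * count, the cached highest priorities, the migration counters and the
 * group/bandwidth state are all kept in sync here.
 */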
832static inline
833void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
834{
835    int prio = rt_se_prio(rt_se);
836
837    WARN_ON(!rt_prio(prio));
838    rt_rq->rt_nr_running++;
839
840    inc_rt_prio(rt_rq, prio);
841    inc_rt_migration(rt_se, rt_rq);
842    inc_rt_group(rt_se, rt_rq);
843}
844
845static inline
846void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
847{
848    WARN_ON(!rt_prio(rt_se_prio(rt_se)));
849    WARN_ON(!rt_rq->rt_nr_running);
850    rt_rq->rt_nr_running--;
851
852    dec_rt_prio(rt_rq, rt_se_prio(rt_se));
853    dec_rt_migration(rt_se, rt_rq);
854    dec_rt_group(rt_se, rt_rq);
855}
856
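/*
 * Put the entity on the list for its priority and mark that priority in
 * the bitmap; throttled or empty child groups are not enqueued at all.
 */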
857static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
858{
859    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
860    struct rt_prio_array *array = &rt_rq->active;
861    struct rt_rq *group_rq = group_rt_rq(rt_se);
862    struct list_head *queue = array->queue + rt_se_prio(rt_se);
863
864    /*
865     * Don't enqueue the group if it's throttled, or when empty.
866     * The latter is a consequence of the former when a child group
867     * gets throttled and the current group doesn't have any other
868     * active members.
869     */
870    if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
871        return;
872
873    if (!rt_rq->rt_nr_running)
874        list_add_leaf_rt_rq(rt_rq);
875
876    if (head)
877        list_add(&rt_se->run_list, queue);
878    else
879        list_add_tail(&rt_se->run_list, queue);
880    __set_bit(rt_se_prio(rt_se), array->bitmap);
881
882    inc_rt_tasks(rt_se, rt_rq);
883}
884
885static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
886{
887    struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
888    struct rt_prio_array *array = &rt_rq->active;
889
890    list_del_init(&rt_se->run_list);
891    if (list_empty(array->queue + rt_se_prio(rt_se)))
892        __clear_bit(rt_se_prio(rt_se), array->bitmap);
893
894    dec_rt_tasks(rt_se, rt_rq);
895    if (!rt_rq->rt_nr_running)
896        list_del_leaf_rt_rq(rt_rq);
897}
898
899/*
900 * Because the prio of an upper entry depends on the lower
901 * entries, we must remove entries top-down.
902 */
903static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
904{
905    struct sched_rt_entity *back = NULL;
906
907    for_each_sched_rt_entity(rt_se) {
908        rt_se->back = back;
909        back = rt_se;
910    }
911
912    for (rt_se = back; rt_se; rt_se = rt_se->back) {
913        if (on_rt_rq(rt_se))
914            __dequeue_rt_entity(rt_se);
915    }
916}
917
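/*
 * Enqueue/dequeue walk the group hierarchy: the whole stack is removed
 * top-down first and then re-added bottom-up, so every parent sees the
 * updated priority of its children.
 */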
918static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
919{
920    dequeue_rt_stack(rt_se);
921    for_each_sched_rt_entity(rt_se)
922        __enqueue_rt_entity(rt_se, head);
923}
924
925static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
926{
927    dequeue_rt_stack(rt_se);
928
929    for_each_sched_rt_entity(rt_se) {
930        struct rt_rq *rt_rq = group_rt_rq(rt_se);
931
932        if (rt_rq && rt_rq->rt_nr_running)
933            __enqueue_rt_entity(rt_se, false);
934    }
935}
936
937/*
938 * Adding/removing a task to/from a priority array:
939 */
940static void
941enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
942{
943    struct sched_rt_entity *rt_se = &p->rt;
944
945    if (flags & ENQUEUE_WAKEUP)
946        rt_se->timeout = 0;
947
948    enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
949
950    if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
951        enqueue_pushable_task(rq, p);
952}
953
954static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
955{
956    struct sched_rt_entity *rt_se = &p->rt;
957
958    update_curr_rt(rq);
959    dequeue_rt_entity(rt_se);
960
961    dequeue_pushable_task(rq, p);
962}
963
964/*
965 * Put task to the end of the run list without the overhead of dequeue
966 * followed by enqueue.
967 */
968static void
969requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
970{
971    if (on_rt_rq(rt_se)) {
972        struct rt_prio_array *array = &rt_rq->active;
973        struct list_head *queue = array->queue + rt_se_prio(rt_se);
974
975        if (head)
976            list_move(&rt_se->run_list, queue);
977        else
978            list_move_tail(&rt_se->run_list, queue);
979    }
980}
981
982static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
983{
984    struct sched_rt_entity *rt_se = &p->rt;
985    struct rt_rq *rt_rq;
986
987    for_each_sched_rt_entity(rt_se) {
988        rt_rq = rt_rq_of_se(rt_se);
989        requeue_rt_entity(rt_rq, rt_se, head);
990    }
991}
992
993static void yield_task_rt(struct rq *rq)
994{
995    requeue_task_rt(rq, rq->curr, 0);
996}
997
998#ifdef CONFIG_SMP
999static int find_lowest_rq(struct task_struct *task);
1000
1001static int
1002select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1003{
1004    struct task_struct *curr;
1005    struct rq *rq;
1006    int cpu;
1007
1008    if (sd_flag != SD_BALANCE_WAKE)
1009        return smp_processor_id();
1010
1011    cpu = task_cpu(p);
1012    rq = cpu_rq(cpu);
1013
1014    rcu_read_lock();
1015    curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1016
1017    /*
1018     * If the current task on @p's runqueue is an RT task, then
1019     * try to see if we can wake this RT task up on another
1020     * runqueue. Otherwise simply start this RT task
1021     * on its current runqueue.
1022     *
1023     * We want to avoid overloading runqueues. If the woken
1024     * task is a higher priority, then it will stay on this CPU
1025     * and the lower prio task should be moved to another CPU.
1026     * Even though this will probably make the lower prio task
1027     * lose its cache, we do not want to bounce a higher task
1028     * around just because it gave up its CPU, perhaps for a
1029     * lock?
1030     *
1031     * For equal prio tasks, we just let the scheduler sort it out.
1032     *
1033     * Otherwise, just let it ride on the affined RQ and the
1034     * post-schedule router will push the preempted task away
1035     *
1036     * This test is optimistic, if we get it wrong the load-balancer
1037     * will have to sort it out.
1038     */
1039    if (curr && unlikely(rt_task(curr)) &&
1040        (curr->rt.nr_cpus_allowed < 2 ||
1041         curr->prio < p->prio) &&
1042        (p->rt.nr_cpus_allowed > 1)) {
1043        int target = find_lowest_rq(p);
1044
1045        if (target != -1)
1046            cpu = target;
1047    }
1048    rcu_read_unlock();
1049
1050    return cpu;
1051}
1052
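/*
 * Equal-priority wakeup: if current could run on some other CPU but the
 * woken task cannot, requeue the woken task at the head and reschedule
 * so the push logic gets a chance to move current away.
 */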
1053static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1054{
1055    if (rq->curr->rt.nr_cpus_allowed == 1)
1056        return;
1057
1058    if (p->rt.nr_cpus_allowed != 1
1059        && cpupri_find(&rq->rd->cpupri, p, NULL))
1060        return;
1061
1062    if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1063        return;
1064
1065    /*
1066     * There appear to be other CPUs that can accept
1067     * current and none to run 'p', so let's reschedule
1068     * to try and push current away:
1069     */
1070    requeue_task_rt(rq, p, 1);
1071    resched_task(rq->curr);
1072}
1073
1074#endif /* CONFIG_SMP */
1075
1076/*
1077 * Preempt the current task with a newly woken task if needed:
1078 */
1079static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1080{
1081    if (p->prio < rq->curr->prio) {
1082        resched_task(rq->curr);
1083        return;
1084    }
1085
1086#ifdef CONFIG_SMP
1087    /*
1088     * If:
1089     *
1090     * - the newly woken task is of equal priority to the current task
1091     * - the newly woken task is non-migratable while current is migratable
1092     * - current will be preempted on the next reschedule
1093     *
1094     * we should check to see if current can readily move to a different
1095     * cpu. If so, we will reschedule to allow the push logic to try
1096     * to move current somewhere else, making room for our non-migratable
1097     * task.
1098     */
1099    if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1100        check_preempt_equal_prio(rq, p);
1101#endif
1102}
1103
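/*
 * O(1) pick: find the first set bit in the priority bitmap and take the
 * first entity queued at that priority.
 */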
1104static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1105                           struct rt_rq *rt_rq)
1106{
1107    struct rt_prio_array *array = &rt_rq->active;
1108    struct sched_rt_entity *next = NULL;
1109    struct list_head *queue;
1110    int idx;
1111
1112    idx = sched_find_first_bit(array->bitmap);
1113    BUG_ON(idx >= MAX_RT_PRIO);
1114
1115    queue = array->queue + idx;
1116    next = list_entry(queue->next, struct sched_rt_entity, run_list);
1117
1118    return next;
1119}
1120
1121static struct task_struct *_pick_next_task_rt(struct rq *rq)
1122{
1123    struct sched_rt_entity *rt_se;
1124    struct task_struct *p;
1125    struct rt_rq *rt_rq;
1126
1127    rt_rq = &rq->rt;
1128
1129    if (unlikely(!rt_rq->rt_nr_running))
1130        return NULL;
1131
1132    if (rt_rq_throttled(rt_rq))
1133        return NULL;
1134
1135    do {
1136        rt_se = pick_next_rt_entity(rq, rt_rq);
1137        BUG_ON(!rt_se);
1138        rt_rq = group_rt_rq(rt_se);
1139    } while (rt_rq);
1140
1141    p = rt_task_of(rt_se);
1142    p->se.exec_start = rq->clock_task;
1143
1144    return p;
1145}
1146
1147static struct task_struct *pick_next_task_rt(struct rq *rq)
1148{
1149    struct task_struct *p = _pick_next_task_rt(rq);
1150
1151    /* The running task is never eligible for pushing */
1152    if (p)
1153        dequeue_pushable_task(rq, p);
1154
1155#ifdef CONFIG_SMP
1156    /*
1157     * We detect this state here so that we can avoid taking the RQ
1158     * lock again later if there is no need to push
1159     */
1160    rq->post_schedule = has_pushable_tasks(rq);
1161#endif
1162
1163    return p;
1164}
1165
1166static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1167{
1168    update_curr_rt(rq);
1169    p->se.exec_start = 0;
1170
1171    /*
1172     * The previous task needs to be made eligible for pushing
1173     * if it is still active
1174     */
1175    if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
1176        enqueue_pushable_task(rq, p);
1177}
1178
1179#ifdef CONFIG_SMP
1180
1181/* Only try algorithms three times */
1182#define RT_MAX_TRIES 3
1183
1184static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
1185
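/*
 * A task is a candidate for migration if it is not currently running,
 * may run on the given CPU (or on any CPU when cpu < 0) and is allowed
 * on more than one CPU.
 */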
1186static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1187{
1188    if (!task_running(rq, p) &&
1189        (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
1190        (p->rt.nr_cpus_allowed > 1))
1191        return 1;
1192    return 0;
1193}
1194
1195/* Return the second highest RT task, NULL otherwise */
1196static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1197{
1198    struct task_struct *next = NULL;
1199    struct sched_rt_entity *rt_se;
1200    struct rt_prio_array *array;
1201    struct rt_rq *rt_rq;
1202    int idx;
1203
1204    for_each_leaf_rt_rq(rt_rq, rq) {
1205        array = &rt_rq->active;
1206        idx = sched_find_first_bit(array->bitmap);
1207next_idx:
1208        if (idx >= MAX_RT_PRIO)
1209            continue;
1210        if (next && next->prio < idx)
1211            continue;
1212        list_for_each_entry(rt_se, array->queue + idx, run_list) {
1213            struct task_struct *p;
1214
1215            if (!rt_entity_is_task(rt_se))
1216                continue;
1217
1218            p = rt_task_of(rt_se);
1219            if (pick_rt_task(rq, p, cpu)) {
1220                next = p;
1221                break;
1222            }
1223        }
1224        if (!next) {
1225            idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1226            goto next_idx;
1227        }
1228    }
1229
1230    return next;
1231}
1232
1233static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1234
1235static int find_lowest_rq(struct task_struct *task)
1236{
1237    struct sched_domain *sd;
1238    struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1239    int this_cpu = smp_processor_id();
1240    int cpu = task_cpu(task);
1241
1242    /* Make sure the mask is initialized first */
1243    if (unlikely(!lowest_mask))
1244        return -1;
1245
1246    if (task->rt.nr_cpus_allowed == 1)
1247        return -1; /* No other targets possible */
1248
1249    if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1250        return -1; /* No targets found */
1251
1252    /*
1253     * At this point we have built a mask of cpus representing the
1254     * lowest priority tasks in the system. Now we want to elect
1255     * the best one based on our affinity and topology.
1256     *
1257     * We prioritize the last cpu that the task executed on since
1258     * it is most likely cache-hot in that location.
1259     */
1260    if (cpumask_test_cpu(cpu, lowest_mask))
1261        return cpu;
1262
1263    /*
1264     * Otherwise, we consult the sched_domains span maps to figure
1265     * out which cpu is logically closest to our hot cache data.
1266     */
1267    if (!cpumask_test_cpu(this_cpu, lowest_mask))
1268        this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1269
1270    rcu_read_lock();
1271    for_each_domain(cpu, sd) {
1272        if (sd->flags & SD_WAKE_AFFINE) {
1273            int best_cpu;
1274
1275            /*
1276             * "this_cpu" is cheaper to preempt than a
1277             * remote processor.
1278             */
1279            if (this_cpu != -1 &&
1280                cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1281                rcu_read_unlock();
1282                return this_cpu;
1283            }
1284
1285            best_cpu = cpumask_first_and(lowest_mask,
1286                             sched_domain_span(sd));
1287            if (best_cpu < nr_cpu_ids) {
1288                rcu_read_unlock();
1289                return best_cpu;
1290            }
1291        }
1292    }
1293    rcu_read_unlock();
1294
1295    /*
1296     * And finally, if there were no matches within the domains
1297     * just give the caller *something* to work with from the compatible
1298     * locations.
1299     */
1300    if (this_cpu != -1)
1301        return this_cpu;
1302
1303    cpu = cpumask_any(lowest_mask);
1304    if (cpu < nr_cpu_ids)
1305        return cpu;
1306    return -1;
1307}
1308
1309/* Will lock the rq it finds */
1310static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1311{
1312    struct rq *lowest_rq = NULL;
1313    int tries;
1314    int cpu;
1315
1316    for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1317        cpu = find_lowest_rq(task);
1318
1319        if ((cpu == -1) || (cpu == rq->cpu))
1320            break;
1321
1322        lowest_rq = cpu_rq(cpu);
1323
1324        /* if the prio of this runqueue changed, try again */
1325        if (double_lock_balance(rq, lowest_rq)) {
1326            /*
1327             * We had to unlock the run queue. In
1328             * the meantime, the task could have
1329             * migrated already or had its affinity changed.
1330             * Also make sure that it wasn't scheduled on its rq.
1331             */
1332            if (unlikely(task_rq(task) != rq ||
1333                     !cpumask_test_cpu(lowest_rq->cpu,
1334                               &task->cpus_allowed) ||
1335                     task_running(rq, task) ||
1336                     !task->on_rq)) {
1337
1338                raw_spin_unlock(&lowest_rq->lock);
1339                lowest_rq = NULL;
1340                break;
1341            }
1342        }
1343
1344        /* If this rq is still suitable use it. */
1345        if (lowest_rq->rt.highest_prio.curr > task->prio)
1346            break;
1347
1348        /* try again */
1349        double_unlock_balance(rq, lowest_rq);
1350        lowest_rq = NULL;
1351    }
1352
1353    return lowest_rq;
1354}
1355
1356static struct task_struct *pick_next_pushable_task(struct rq *rq)
1357{
1358    struct task_struct *p;
1359
1360    if (!has_pushable_tasks(rq))
1361        return NULL;
1362
1363    p = plist_first_entry(&rq->rt.pushable_tasks,
1364                  struct task_struct, pushable_tasks);
1365
1366    BUG_ON(rq->cpu != task_cpu(p));
1367    BUG_ON(task_current(rq, p));
1368    BUG_ON(p->rt.nr_cpus_allowed <= 1);
1369
1370    BUG_ON(!p->on_rq);
1371    BUG_ON(!rt_task(p));
1372
1373    return p;
1374}
1375
1376/*
1377 * If the current CPU has more than one RT task, see if the non
1378 * running task can migrate over to a CPU that is running a task
1379 * of lesser priority.
1380 */
1381static int push_rt_task(struct rq *rq)
1382{
1383    struct task_struct *next_task;
1384    struct rq *lowest_rq;
1385
1386    if (!rq->rt.overloaded)
1387        return 0;
1388
1389    next_task = pick_next_pushable_task(rq);
1390    if (!next_task)
1391        return 0;
1392
1393retry:
1394    if (unlikely(next_task == rq->curr)) {
1395        WARN_ON(1);
1396        return 0;
1397    }
1398
1399    /*
1400     * It's possible that the next_task slipped in with a
1401     * higher priority than current. If that's the case,
1402     * just reschedule current.
1403     */
1404    if (unlikely(next_task->prio < rq->curr->prio)) {
1405        resched_task(rq->curr);
1406        return 0;
1407    }
1408
1409    /* We might release rq lock */
1410    get_task_struct(next_task);
1411
1412    /* find_lock_lowest_rq locks the rq if found */
1413    lowest_rq = find_lock_lowest_rq(next_task, rq);
1414    if (!lowest_rq) {
1415        struct task_struct *task;
1416        /*
1417         * find_lock_lowest_rq() releases rq->lock
1418         * so it is possible that next_task has migrated.
1419         *
1420         * We need to make sure that the task is still on the same
1421         * run-queue and is also still the next task eligible for
1422         * pushing.
1423         */
1424        task = pick_next_pushable_task(rq);
1425        if (task_cpu(next_task) == rq->cpu && task == next_task) {
1426            /*
1427             * If we get here, the task hasn't moved at all, but
1428             * it has failed to push. We will not try again,
1429             * since the other cpus will pull from us when they
1430             * are ready.
1431             */
1432            dequeue_pushable_task(rq, next_task);
1433            goto out;
1434        }
1435
1436        if (!task)
1437            /* No more tasks, just exit */
1438            goto out;
1439
1440        /*
1441         * Something has shifted, try again.
1442         */
1443        put_task_struct(next_task);
1444        next_task = task;
1445        goto retry;
1446    }
1447
1448    deactivate_task(rq, next_task, 0);
1449    set_task_cpu(next_task, lowest_rq->cpu);
1450    activate_task(lowest_rq, next_task, 0);
1451
1452    resched_task(lowest_rq->curr);
1453
1454    double_unlock_balance(rq, lowest_rq);
1455
1456out:
1457    put_task_struct(next_task);
1458
1459    return 1;
1460}
1461
1462static void push_rt_tasks(struct rq *rq)
1463{
1464    /* push_rt_task will return true if it moved an RT */
1465    while (push_rt_task(rq))
1466        ;
1467}
1468
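/*
 * Scan the CPUs marked RT-overloaded in our root domain and pull over a
 * queued-but-not-running RT task that would preempt whatever this
 * runqueue is about to run.  Returns 1 if anything was pulled.
 */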
1469static int pull_rt_task(struct rq *this_rq)
1470{
1471    int this_cpu = this_rq->cpu, ret = 0, cpu;
1472    struct task_struct *p;
1473    struct rq *src_rq;
1474
1475    if (likely(!rt_overloaded(this_rq)))
1476        return 0;
1477
1478    for_each_cpu(cpu, this_rq->rd->rto_mask) {
1479        if (this_cpu == cpu)
1480            continue;
1481
1482        src_rq = cpu_rq(cpu);
1483
1484        /*
1485         * Don't bother taking the src_rq->lock if the next highest
1486         * task is known to be lower-priority than our current task.
1487         * This may look racy, but if this value is about to go
1488         * logically higher, the src_rq will push this task away.
1489         * And if it's going logically lower, we do not care.
1490         */
1491        if (src_rq->rt.highest_prio.next >=
1492            this_rq->rt.highest_prio.curr)
1493            continue;
1494
1495        /*
1496         * We can potentially drop this_rq's lock in
1497         * double_lock_balance, and another CPU could
1498         * alter this_rq
1499         */
1500        double_lock_balance(this_rq, src_rq);
1501
1502        /*
1503         * Are there still pullable RT tasks?
1504         */
1505        if (src_rq->rt.rt_nr_running <= 1)
1506            goto skip;
1507
1508        p = pick_next_highest_task_rt(src_rq, this_cpu);
1509
1510        /*
1511         * Do we have an RT task that preempts
1512         * the to-be-scheduled task?
1513         */
1514        if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1515            WARN_ON(p == src_rq->curr);
1516            WARN_ON(!p->on_rq);
1517
1518            /*
1519             * There's a chance that p is higher in priority
1520             * than what's currently running on its cpu.
1521             * This is just that p is waking up and hasn't
1522             * had a chance to schedule. We only pull
1523             * p if it is lower in priority than the
1524             * current task on the run queue
1525             */
1526            if (p->prio < src_rq->curr->prio)
1527                goto skip;
1528
1529            ret = 1;
1530
1531            deactivate_task(src_rq, p, 0);
1532            set_task_cpu(p, this_cpu);
1533            activate_task(this_rq, p, 0);
1534            /*
1535             * We continue with the search, just in
1536             * case there's an even higher prio task
1537             * in another runqueue. (low likelihood
1538             * but possible)
1539             */
1540        }
1541skip:
1542        double_unlock_balance(this_rq, src_rq);
1543    }
1544
1545    return ret;
1546}
1547
1548static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1549{
1550    /* Try to pull RT tasks here if we lower this rq's prio */
1551    if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1552        pull_rt_task(rq);
1553}
1554
1555static void post_schedule_rt(struct rq *rq)
1556{
1557    push_rt_tasks(rq);
1558}
1559
1560/*
1561 * If we are not running and we are not going to reschedule soon, we should
1562 * try to push tasks away now
1563 */
1564static void task_woken_rt(struct rq *rq, struct task_struct *p)
1565{
1566    if (!task_running(rq, p) &&
1567        !test_tsk_need_resched(rq->curr) &&
1568        has_pushable_tasks(rq) &&
1569        p->rt.nr_cpus_allowed > 1 &&
1570        rt_task(rq->curr) &&
1571        (rq->curr->rt.nr_cpus_allowed < 2 ||
1572         rq->curr->prio < p->prio))
1573        push_rt_tasks(rq);
1574}
1575
1576static void set_cpus_allowed_rt(struct task_struct *p,
1577                const struct cpumask *new_mask)
1578{
1579    int weight = cpumask_weight(new_mask);
1580
1581    BUG_ON(!rt_task(p));
1582
1583    /*
1584     * Update the migration status of the RQ if we have an RT task
1585     * which is running AND changing its weight value.
1586     */
1587    if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
1588        struct rq *rq = task_rq(p);
1589
1590        if (!task_current(rq, p)) {
1591            /*
1592             * Make sure we dequeue this task from the pushable list
1593             * before going further. It will either remain off of
1594             * the list because we are no longer pushable, or it
1595             * will be requeued.
1596             */
1597            if (p->rt.nr_cpus_allowed > 1)
1598                dequeue_pushable_task(rq, p);
1599
1600            /*
1601             * Requeue if our weight is changing and still > 1
1602             */
1603            if (weight > 1)
1604                enqueue_pushable_task(rq, p);
1605
1606        }
1607
1608        if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1609            rq->rt.rt_nr_migratory++;
1610        } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1611            BUG_ON(!rq->rt.rt_nr_migratory);
1612            rq->rt.rt_nr_migratory--;
1613        }
1614
1615        update_rt_migration(&rq->rt);
1616    }
1617
1618    cpumask_copy(&p->cpus_allowed, new_mask);
1619    p->rt.nr_cpus_allowed = weight;
1620}
1621
1622/* Assumes rq->lock is held */
1623static void rq_online_rt(struct rq *rq)
1624{
1625    if (rq->rt.overloaded)
1626        rt_set_overload(rq);
1627
1628    __enable_runtime(rq);
1629
1630    cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1631}
1632
1633/* Assumes rq->lock is held */
1634static void rq_offline_rt(struct rq *rq)
1635{
1636    if (rq->rt.overloaded)
1637        rt_clear_overload(rq);
1638
1639    __disable_runtime(rq);
1640
1641    cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1642}
1643
1644/*
1645 * When switching from the rt queue, we bring ourselves to a position
1646 * where we might want to pull RT tasks from other runqueues.
1647 */
1648static void switched_from_rt(struct rq *rq, struct task_struct *p)
1649{
1650    /*
1651     * If there are other RT tasks then we will reschedule
1652     * and the scheduling of the other RT tasks will handle
1653     * the balancing. But if we are the last RT task
1654     * we may need to handle the pulling of RT tasks
1655     * now.
1656     */
1657    if (p->on_rq && !rq->rt.rt_nr_running)
1658        pull_rt_task(rq);
1659}
1660
1661static inline void init_sched_rt_class(void)
1662{
1663    unsigned int i;
1664
1665    for_each_possible_cpu(i)
1666        zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1667                    GFP_KERNEL, cpu_to_node(i));
1668}
1669#endif /* CONFIG_SMP */
1670
1671/*
1672 * When switching a task to RT, we may overload the runqueue
1673 * with RT tasks. In this case we try to push them off to
1674 * other runqueues.
1675 */
1676static void switched_to_rt(struct rq *rq, struct task_struct *p)
1677{
1678    int check_resched = 1;
1679
1680    /*
1681     * If we are already running, then there's nothing
1682     * that needs to be done. But if we are not running
1683     * we may need to preempt the current running task.
1684     * If that current running task is also an RT task
1685     * then see if we can move to another run queue.
1686     */
1687    if (p->on_rq && rq->curr != p) {
1688#ifdef CONFIG_SMP
1689        if (rq->rt.overloaded && push_rt_task(rq) &&
1690            /* Don't resched if we changed runqueues */
1691            rq != task_rq(p))
1692            check_resched = 0;
1693#endif /* CONFIG_SMP */
1694        if (check_resched && p->prio < rq->curr->prio)
1695            resched_task(rq->curr);
1696    }
1697}
1698
1699/*
1700 * Priority of the task has changed. This may cause
1701 * us to initiate a push or pull.
1702 */
1703static void
1704prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1705{
1706    if (!p->on_rq)
1707        return;
1708
1709    if (rq->curr == p) {
1710#ifdef CONFIG_SMP
1711        /*
1712         * If our priority decreases while running, we
1713         * may need to pull tasks to this runqueue.
1714         */
1715        if (oldprio < p->prio)
1716            pull_rt_task(rq);
1717        /*
1718         * If there's a higher priority task waiting to run
1719         * then reschedule. Note, the above pull_rt_task
1720         * can release the rq lock and p could migrate.
1721         * Only reschedule if p is still on the same runqueue.
1722         */
1723        if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1724            resched_task(p);
1725#else
1726        /* For UP simply resched on drop of prio */
1727        if (oldprio < p->prio)
1728            resched_task(p);
1729#endif /* CONFIG_SMP */
1730    } else {
1731        /*
1732         * This task is not running, but if it is
1733         * greater than the current running task
1734         * then reschedule.
1735         */
1736        if (p->prio < rq->curr->prio)
1737            resched_task(rq->curr);
1738    }
1739}
1740
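/*
 * RLIMIT_RTTIME enforcement: count the ticks this task has run since it
 * last woke up and, once the soft limit is exceeded, flag it through
 * cputime_expires.sched_exp for the posix cpu-timer code to act on.
 */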
1741static void watchdog(struct rq *rq, struct task_struct *p)
1742{
1743    unsigned long soft, hard;
1744
1745    /* max may change after cur was read, this will be fixed next tick */
1746    soft = task_rlimit(p, RLIMIT_RTTIME);
1747    hard = task_rlimit_max(p, RLIMIT_RTTIME);
1748
1749    if (soft != RLIM_INFINITY) {
1750        unsigned long next;
1751
1752        p->rt.timeout++;
1753        next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1754        if (p->rt.timeout > next)
1755            p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1756    }
1757}
1758
1759static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1760{
1761    update_curr_rt(rq);
1762
1763    watchdog(rq, p);
1764
1765    /*
1766     * RR tasks need a special form of timeslice management.
1767     * FIFO tasks have no timeslices.
1768     */
1769    if (p->policy != SCHED_RR)
1770        return;
1771
1772    if (--p->rt.time_slice)
1773        return;
1774
1775    p->rt.time_slice = DEF_TIMESLICE;
1776
1777    /*
1778     * Requeue to the end of the queue if we are not the only element
1779     * on the queue:
1780     */
1781    if (p->rt.run_list.prev != p->rt.run_list.next) {
1782        requeue_task_rt(rq, p, 0);
1783        set_tsk_need_resched(p);
1784    }
1785}
1786
1787static void set_curr_task_rt(struct rq *rq)
1788{
1789    struct task_struct *p = rq->curr;
1790
1791    p->se.exec_start = rq->clock_task;
1792
1793    /* The running task is never eligible for pushing */
1794    dequeue_pushable_task(rq, p);
1795}
1796
1797static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
1798{
1799    /*
1800     * Time slice is 0 for SCHED_FIFO tasks
1801     */
1802    if (task->policy == SCHED_RR)
1803        return DEF_TIMESLICE;
1804    else
1805        return 0;
1806}
1807
1808static const struct sched_class rt_sched_class = {
1809    .next = &fair_sched_class,
1810    .enqueue_task = enqueue_task_rt,
1811    .dequeue_task = dequeue_task_rt,
1812    .yield_task = yield_task_rt,
1813
1814    .check_preempt_curr = check_preempt_curr_rt,
1815
1816    .pick_next_task = pick_next_task_rt,
1817    .put_prev_task = put_prev_task_rt,
1818
1819#ifdef CONFIG_SMP
1820    .select_task_rq = select_task_rq_rt,
1821
1822    .set_cpus_allowed = set_cpus_allowed_rt,
1823    .rq_online = rq_online_rt,
1824    .rq_offline = rq_offline_rt,
1825    .pre_schedule = pre_schedule_rt,
1826    .post_schedule = post_schedule_rt,
1827    .task_woken = task_woken_rt,
1828    .switched_from = switched_from_rt,
1829#endif
1830
1831    .set_curr_task = set_curr_task_rt,
1832    .task_tick = task_tick_rt,
1833
1834    .get_rr_interval = get_rr_interval_rt,
1835
1836    .prio_changed = prio_changed_rt,
1837    .switched_to = switched_to_rt,
1838};
1839
1840#ifdef CONFIG_SCHED_DEBUG
1841extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1842
1843static void print_rt_stats(struct seq_file *m, int cpu)
1844{
1845    rt_rq_iter_t iter;
1846    struct rt_rq *rt_rq;
1847
1848    rcu_read_lock();
1849    for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
1850        print_rt_rq(m, cpu, rt_rq);
1851    rcu_read_unlock();
1852}
1853#endif /* CONFIG_SCHED_DEBUG */
1854
1855
