Root/kernel/sched_fair.c

1/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21 */
22
23#include <linux/latencytop.h>
24#include <linux/sched.h>
25#include <linux/cpumask.h>
26
27/*
28 * Targeted preemption latency for CPU-bound tasks:
29 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
30 *
31 * NOTE: this latency value is not the same as the concept of
32 * 'timeslice length' - timeslices in CFS are of variable length
33 * and have no persistent notion like in traditional, time-slice
34 * based scheduling concepts.
35 *
36 * (to see the precise effective timeslice length of your workload,
37 * run vmstat and monitor the context-switches (cs) field)
38 */
39unsigned int sysctl_sched_latency = 6000000ULL;
40unsigned int normalized_sysctl_sched_latency = 6000000ULL;
41
42/*
43 * The initial- and re-scaling of tunables is configurable
44 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
45 *
46 * Options are:
47 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
48 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
49 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
50 */
51enum sched_tunable_scaling sysctl_sched_tunable_scaling
52    = SCHED_TUNABLESCALING_LOG;
53
54/*
55 * Minimal preemption granularity for CPU-bound tasks:
56 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
57 */
58unsigned int sysctl_sched_min_granularity = 750000ULL;
59unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
60
61/*
62 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
63 */
64static unsigned int sched_nr_latency = 8;
65
66/*
67 * After fork, child runs first. If set to 0 (default) then
68 * parent will (try to) run first.
69 */
70unsigned int sysctl_sched_child_runs_first __read_mostly;
71
72/*
73 * SCHED_OTHER wake-up granularity.
74 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
75 *
76 * This option delays the preemption effects of decoupled workloads
77 * and reduces their over-scheduling. Synchronous workloads will still
78 * have immediate wakeup/sleep latencies.
79 */
80unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
81unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
82
83const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
84
85/*
86 * The exponential sliding window over which load is averaged for shares
87 * distribution.
88 * (default: 10msec)
89 */
90unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
91
92static const struct sched_class fair_sched_class;
93
94/**************************************************************
95 * CFS operations on generic schedulable entities:
96 */
97
98#ifdef CONFIG_FAIR_GROUP_SCHED
99
100/* cpu runqueue to which this cfs_rq is attached */
101static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
102{
103    return cfs_rq->rq;
104}
105
106/* An entity is a task if it doesn't "own" a runqueue */
107#define entity_is_task(se) (!se->my_q)
108
109static inline struct task_struct *task_of(struct sched_entity *se)
110{
111#ifdef CONFIG_SCHED_DEBUG
112    WARN_ON_ONCE(!entity_is_task(se));
113#endif
114    return container_of(se, struct task_struct, se);
115}
116
117/* Walk up scheduling entities hierarchy */
118#define for_each_sched_entity(se) \
119        for (; se; se = se->parent)
120
121static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
122{
123    return p->se.cfs_rq;
124}
125
126/* runqueue on which this entity is (to be) queued */
127static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
128{
129    return se->cfs_rq;
130}
131
132/* runqueue "owned" by this group */
133static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
134{
135    return grp->my_q;
136}
137
138/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
139 * another cpu ('this_cpu')
140 */
141static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
142{
143    return cfs_rq->tg->cfs_rq[this_cpu];
144}
145
146static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
147{
148    if (!cfs_rq->on_list) {
149        /*
150         * Ensure we either appear before our parent (if already
151         * enqueued) or force our parent to appear after us when it is
152         * enqueued. The fact that we always enqueue bottom-up
153         * reduces this to two cases.
154         */
155        if (cfs_rq->tg->parent &&
156            cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
157            list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
158                &rq_of(cfs_rq)->leaf_cfs_rq_list);
159        } else {
160            list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
161                &rq_of(cfs_rq)->leaf_cfs_rq_list);
162        }
163
164        cfs_rq->on_list = 1;
165    }
166}
167
168static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
169{
170    if (cfs_rq->on_list) {
171        list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
172        cfs_rq->on_list = 0;
173    }
174}
175
176/* Iterate thr' all leaf cfs_rq's on a runqueue */
177#define for_each_leaf_cfs_rq(rq, cfs_rq) \
178    list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
179
180/* Do the two (enqueued) entities belong to the same group ? */
181static inline int
182is_same_group(struct sched_entity *se, struct sched_entity *pse)
183{
184    if (se->cfs_rq == pse->cfs_rq)
185        return 1;
186
187    return 0;
188}
189
190static inline struct sched_entity *parent_entity(struct sched_entity *se)
191{
192    return se->parent;
193}
194
195/* return depth at which a sched entity is present in the hierarchy */
196static inline int depth_se(struct sched_entity *se)
197{
198    int depth = 0;
199
200    for_each_sched_entity(se)
201        depth++;
202
203    return depth;
204}
205
206static void
207find_matching_se(struct sched_entity **se, struct sched_entity **pse)
208{
209    int se_depth, pse_depth;
210
211    /*
212     * preemption test can be made between sibling entities who are in the
213     * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
214     * both tasks until we find their ancestors who are siblings of common
215     * parent.
216     */
217
218    /* First walk up until both entities are at same depth */
219    se_depth = depth_se(*se);
220    pse_depth = depth_se(*pse);
221
222    while (se_depth > pse_depth) {
223        se_depth--;
224        *se = parent_entity(*se);
225    }
226
227    while (pse_depth > se_depth) {
228        pse_depth--;
229        *pse = parent_entity(*pse);
230    }
231
232    while (!is_same_group(*se, *pse)) {
233        *se = parent_entity(*se);
234        *pse = parent_entity(*pse);
235    }
236}
237
238#else /* !CONFIG_FAIR_GROUP_SCHED */
239
240static inline struct task_struct *task_of(struct sched_entity *se)
241{
242    return container_of(se, struct task_struct, se);
243}
244
245static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
246{
247    return container_of(cfs_rq, struct rq, cfs);
248}
249
250#define entity_is_task(se) 1
251
252#define for_each_sched_entity(se) \
253        for (; se; se = NULL)
254
255static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
256{
257    return &task_rq(p)->cfs;
258}
259
260static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
261{
262    struct task_struct *p = task_of(se);
263    struct rq *rq = task_rq(p);
264
265    return &rq->cfs;
266}
267
268/* runqueue "owned" by this group */
269static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
270{
271    return NULL;
272}
273
274static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
275{
276    return &cpu_rq(this_cpu)->cfs;
277}
278
279static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
280{
281}
282
283static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
284{
285}
286
287#define for_each_leaf_cfs_rq(rq, cfs_rq) \
288        for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
289
290static inline int
291is_same_group(struct sched_entity *se, struct sched_entity *pse)
292{
293    return 1;
294}
295
296static inline struct sched_entity *parent_entity(struct sched_entity *se)
297{
298    return NULL;
299}
300
301static inline void
302find_matching_se(struct sched_entity **se, struct sched_entity **pse)
303{
304}
305
306#endif /* CONFIG_FAIR_GROUP_SCHED */
307
308
309/**************************************************************
310 * Scheduling class tree data structure manipulation methods:
311 */
312
313static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
314{
315    s64 delta = (s64)(vruntime - min_vruntime);
316    if (delta > 0)
317        min_vruntime = vruntime;
318
319    return min_vruntime;
320}
321
322static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
323{
324    s64 delta = (s64)(vruntime - min_vruntime);
325    if (delta < 0)
326        min_vruntime = vruntime;
327
328    return min_vruntime;
329}
330
331static inline int entity_before(struct sched_entity *a,
332                struct sched_entity *b)
333{
334    return (s64)(a->vruntime - b->vruntime) < 0;
335}
336
337static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
338{
339    return se->vruntime - cfs_rq->min_vruntime;
340}
341
342static void update_min_vruntime(struct cfs_rq *cfs_rq)
343{
344    u64 vruntime = cfs_rq->min_vruntime;
345
346    if (cfs_rq->curr)
347        vruntime = cfs_rq->curr->vruntime;
348
349    if (cfs_rq->rb_leftmost) {
350        struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
351                           struct sched_entity,
352                           run_node);
353
354        if (!cfs_rq->curr)
355            vruntime = se->vruntime;
356        else
357            vruntime = min_vruntime(vruntime, se->vruntime);
358    }
359
360    cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
361#ifndef CONFIG_64BIT
362    smp_wmb();
363    cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
364#endif
365}
366
367/*
368 * Enqueue an entity into the rb-tree:
369 */
370static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
371{
372    struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
373    struct rb_node *parent = NULL;
374    struct sched_entity *entry;
375    s64 key = entity_key(cfs_rq, se);
376    int leftmost = 1;
377
378    /*
379     * Find the right place in the rbtree:
380     */
381    while (*link) {
382        parent = *link;
383        entry = rb_entry(parent, struct sched_entity, run_node);
384        /*
385         * We dont care about collisions. Nodes with
386         * the same key stay together.
387         */
388        if (key < entity_key(cfs_rq, entry)) {
389            link = &parent->rb_left;
390        } else {
391            link = &parent->rb_right;
392            leftmost = 0;
393        }
394    }
395
396    /*
397     * Maintain a cache of leftmost tree entries (it is frequently
398     * used):
399     */
400    if (leftmost)
401        cfs_rq->rb_leftmost = &se->run_node;
402
403    rb_link_node(&se->run_node, parent, link);
404    rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
405}
406
407static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
408{
409    if (cfs_rq->rb_leftmost == &se->run_node) {
410        struct rb_node *next_node;
411
412        next_node = rb_next(&se->run_node);
413        cfs_rq->rb_leftmost = next_node;
414    }
415
416    rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
417}
418
419static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
420{
421    struct rb_node *left = cfs_rq->rb_leftmost;
422
423    if (!left)
424        return NULL;
425
426    return rb_entry(left, struct sched_entity, run_node);
427}
428
429static struct sched_entity *__pick_next_entity(struct sched_entity *se)
430{
431    struct rb_node *next = rb_next(&se->run_node);
432
433    if (!next)
434        return NULL;
435
436    return rb_entry(next, struct sched_entity, run_node);
437}
438
439#ifdef CONFIG_SCHED_DEBUG
440static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
441{
442    struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
443
444    if (!last)
445        return NULL;
446
447    return rb_entry(last, struct sched_entity, run_node);
448}
449
450/**************************************************************
451 * Scheduling class statistics methods:
452 */
453
454int sched_proc_update_handler(struct ctl_table *table, int write,
455        void __user *buffer, size_t *lenp,
456        loff_t *ppos)
457{
458    int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
459    int factor = get_update_sysctl_factor();
460
461    if (ret || !write)
462        return ret;
463
464    sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
465                    sysctl_sched_min_granularity);
466
467#define WRT_SYSCTL(name) \
468    (normalized_sysctl_##name = sysctl_##name / (factor))
469    WRT_SYSCTL(sched_min_granularity);
470    WRT_SYSCTL(sched_latency);
471    WRT_SYSCTL(sched_wakeup_granularity);
472#undef WRT_SYSCTL
473
474    return 0;
475}
476#endif
477
478/*
479 * delta /= w
480 */
481static inline unsigned long
482calc_delta_fair(unsigned long delta, struct sched_entity *se)
483{
484    if (unlikely(se->load.weight != NICE_0_LOAD))
485        delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
486
487    return delta;
488}
489
490/*
491 * The idea is to set a period in which each task runs once.
492 *
493 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
494 * this period because otherwise the slices get too small.
495 *
496 * p = (nr <= nl) ? l : l*nr/nl
497 */
498static u64 __sched_period(unsigned long nr_running)
499{
500    u64 period = sysctl_sched_latency;
501    unsigned long nr_latency = sched_nr_latency;
502
503    if (unlikely(nr_running > nr_latency)) {
504        period = sysctl_sched_min_granularity;
505        period *= nr_running;
506    }
507
508    return period;
509}
510
511/*
512 * We calculate the wall-time slice from the period by taking a part
513 * proportional to the weight.
514 *
515 * s = p*P[w/rw]
516 */
517static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
518{
519    u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
520
521    for_each_sched_entity(se) {
522        struct load_weight *load;
523        struct load_weight lw;
524
525        cfs_rq = cfs_rq_of(se);
526        load = &cfs_rq->load;
527
528        if (unlikely(!se->on_rq)) {
529            lw = cfs_rq->load;
530
531            update_load_add(&lw, se->load.weight);
532            load = &lw;
533        }
534        slice = calc_delta_mine(slice, se->load.weight, load);
535    }
536    return slice;
537}
538
539/*
540 * We calculate the vruntime slice of a to be inserted task
541 *
542 * vs = s/w
543 */
544static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
545{
546    return calc_delta_fair(sched_slice(cfs_rq, se), se);
547}
548
549static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
550static void update_cfs_shares(struct cfs_rq *cfs_rq);
551
552/*
553 * Update the current task's runtime statistics. Skip current tasks that
554 * are not in our scheduling class.
555 */
556static inline void
557__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
558          unsigned long delta_exec)
559{
560    unsigned long delta_exec_weighted;
561
562    schedstat_set(curr->statistics.exec_max,
563              max((u64)delta_exec, curr->statistics.exec_max));
564
565    curr->sum_exec_runtime += delta_exec;
566    schedstat_add(cfs_rq, exec_clock, delta_exec);
567    delta_exec_weighted = calc_delta_fair(delta_exec, curr);
568
569    curr->vruntime += delta_exec_weighted;
570    update_min_vruntime(cfs_rq);
571
572#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
573    cfs_rq->load_unacc_exec_time += delta_exec;
574#endif
575}
576
577static void update_curr(struct cfs_rq *cfs_rq)
578{
579    struct sched_entity *curr = cfs_rq->curr;
580    u64 now = rq_of(cfs_rq)->clock_task;
581    unsigned long delta_exec;
582
583    if (unlikely(!curr))
584        return;
585
586    /*
587     * Get the amount of time the current task was running
588     * since the last time we changed load (this cannot
589     * overflow on 32 bits):
590     */
591    delta_exec = (unsigned long)(now - curr->exec_start);
592    if (!delta_exec)
593        return;
594
595    __update_curr(cfs_rq, curr, delta_exec);
596    curr->exec_start = now;
597
598    if (entity_is_task(curr)) {
599        struct task_struct *curtask = task_of(curr);
600
601        trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
602        cpuacct_charge(curtask, delta_exec);
603        account_group_exec_runtime(curtask, delta_exec);
604    }
605}
606
607static inline void
608update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
609{
610    schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
611}
612
613/*
614 * Task is being enqueued - update stats:
615 */
616static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
617{
618    /*
619     * Are we enqueueing a waiting task? (for current tasks
620     * a dequeue/enqueue event is a NOP)
621     */
622    if (se != cfs_rq->curr)
623        update_stats_wait_start(cfs_rq, se);
624}
625
626static void
627update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
628{
629    schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
630            rq_of(cfs_rq)->clock - se->statistics.wait_start));
631    schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
632    schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
633            rq_of(cfs_rq)->clock - se->statistics.wait_start);
634#ifdef CONFIG_SCHEDSTATS
635    if (entity_is_task(se)) {
636        trace_sched_stat_wait(task_of(se),
637            rq_of(cfs_rq)->clock - se->statistics.wait_start);
638    }
639#endif
640    schedstat_set(se->statistics.wait_start, 0);
641}
642
643static inline void
644update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
645{
646    /*
647     * Mark the end of the wait period if dequeueing a
648     * waiting task:
649     */
650    if (se != cfs_rq->curr)
651        update_stats_wait_end(cfs_rq, se);
652}
653
654/*
655 * We are picking a new current task - update its stats:
656 */
657static inline void
658update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
659{
660    /*
661     * We are starting a new run period:
662     */
663    se->exec_start = rq_of(cfs_rq)->clock_task;
664}
665
666/**************************************************
667 * Scheduling class queueing methods:
668 */
669
670#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
671static void
672add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
673{
674    cfs_rq->task_weight += weight;
675}
676#else
677static inline void
678add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
679{
680}
681#endif
682
683static void
684account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
685{
686    update_load_add(&cfs_rq->load, se->load.weight);
687    if (!parent_entity(se))
688        inc_cpu_load(rq_of(cfs_rq), se->load.weight);
689    if (entity_is_task(se)) {
690        add_cfs_task_weight(cfs_rq, se->load.weight);
691        list_add(&se->group_node, &cfs_rq->tasks);
692    }
693    cfs_rq->nr_running++;
694}
695
696static void
697account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
698{
699    update_load_sub(&cfs_rq->load, se->load.weight);
700    if (!parent_entity(se))
701        dec_cpu_load(rq_of(cfs_rq), se->load.weight);
702    if (entity_is_task(se)) {
703        add_cfs_task_weight(cfs_rq, -se->load.weight);
704        list_del_init(&se->group_node);
705    }
706    cfs_rq->nr_running--;
707}
708
709#ifdef CONFIG_FAIR_GROUP_SCHED
710# ifdef CONFIG_SMP
711static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
712                        int global_update)
713{
714    struct task_group *tg = cfs_rq->tg;
715    long load_avg;
716
717    load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
718    load_avg -= cfs_rq->load_contribution;
719
720    if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
721        atomic_add(load_avg, &tg->load_weight);
722        cfs_rq->load_contribution += load_avg;
723    }
724}
725
726static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
727{
728    u64 period = sysctl_sched_shares_window;
729    u64 now, delta;
730    unsigned long load = cfs_rq->load.weight;
731
732    if (cfs_rq->tg == &root_task_group)
733        return;
734
735    now = rq_of(cfs_rq)->clock_task;
736    delta = now - cfs_rq->load_stamp;
737
738    /* truncate load history at 4 idle periods */
739    if (cfs_rq->load_stamp > cfs_rq->load_last &&
740        now - cfs_rq->load_last > 4 * period) {
741        cfs_rq->load_period = 0;
742        cfs_rq->load_avg = 0;
743        delta = period - 1;
744    }
745
746    cfs_rq->load_stamp = now;
747    cfs_rq->load_unacc_exec_time = 0;
748    cfs_rq->load_period += delta;
749    if (load) {
750        cfs_rq->load_last = now;
751        cfs_rq->load_avg += delta * load;
752    }
753
754    /* consider updating load contribution on each fold or truncate */
755    if (global_update || cfs_rq->load_period > period
756        || !cfs_rq->load_period)
757        update_cfs_rq_load_contribution(cfs_rq, global_update);
758
759    while (cfs_rq->load_period > period) {
760        /*
761         * Inline assembly required to prevent the compiler
762         * optimising this loop into a divmod call.
763         * See __iter_div_u64_rem() for another example of this.
764         */
765        asm("" : "+rm" (cfs_rq->load_period));
766        cfs_rq->load_period /= 2;
767        cfs_rq->load_avg /= 2;
768    }
769
770    if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
771        list_del_leaf_cfs_rq(cfs_rq);
772}
773
774static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
775{
776    long load_weight, load, shares;
777
778    load = cfs_rq->load.weight;
779
780    load_weight = atomic_read(&tg->load_weight);
781    load_weight += load;
782    load_weight -= cfs_rq->load_contribution;
783
784    shares = (tg->shares * load);
785    if (load_weight)
786        shares /= load_weight;
787
788    if (shares < MIN_SHARES)
789        shares = MIN_SHARES;
790    if (shares > tg->shares)
791        shares = tg->shares;
792
793    return shares;
794}
795
796static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
797{
798    if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
799        update_cfs_load(cfs_rq, 0);
800        update_cfs_shares(cfs_rq);
801    }
802}
803# else /* CONFIG_SMP */
804static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
805{
806}
807
808static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
809{
810    return tg->shares;
811}
812
813static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
814{
815}
816# endif /* CONFIG_SMP */
817static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
818                unsigned long weight)
819{
820    if (se->on_rq) {
821        /* commit outstanding execution time */
822        if (cfs_rq->curr == se)
823            update_curr(cfs_rq);
824        account_entity_dequeue(cfs_rq, se);
825    }
826
827    update_load_set(&se->load, weight);
828
829    if (se->on_rq)
830        account_entity_enqueue(cfs_rq, se);
831}
832
833static void update_cfs_shares(struct cfs_rq *cfs_rq)
834{
835    struct task_group *tg;
836    struct sched_entity *se;
837    long shares;
838
839    tg = cfs_rq->tg;
840    se = tg->se[cpu_of(rq_of(cfs_rq))];
841    if (!se)
842        return;
843#ifndef CONFIG_SMP
844    if (likely(se->load.weight == tg->shares))
845        return;
846#endif
847    shares = calc_cfs_shares(cfs_rq, tg);
848
849    reweight_entity(cfs_rq_of(se), se, shares);
850}
851#else /* CONFIG_FAIR_GROUP_SCHED */
852static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
853{
854}
855
856static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
857{
858}
859
860static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
861{
862}
863#endif /* CONFIG_FAIR_GROUP_SCHED */
864
865static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
866{
867#ifdef CONFIG_SCHEDSTATS
868    struct task_struct *tsk = NULL;
869
870    if (entity_is_task(se))
871        tsk = task_of(se);
872
873    if (se->statistics.sleep_start) {
874        u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
875
876        if ((s64)delta < 0)
877            delta = 0;
878
879        if (unlikely(delta > se->statistics.sleep_max))
880            se->statistics.sleep_max = delta;
881
882        se->statistics.sleep_start = 0;
883        se->statistics.sum_sleep_runtime += delta;
884
885        if (tsk) {
886            account_scheduler_latency(tsk, delta >> 10, 1);
887            trace_sched_stat_sleep(tsk, delta);
888        }
889    }
890    if (se->statistics.block_start) {
891        u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
892
893        if ((s64)delta < 0)
894            delta = 0;
895
896        if (unlikely(delta > se->statistics.block_max))
897            se->statistics.block_max = delta;
898
899        se->statistics.block_start = 0;
900        se->statistics.sum_sleep_runtime += delta;
901
902        if (tsk) {
903            if (tsk->in_iowait) {
904                se->statistics.iowait_sum += delta;
905                se->statistics.iowait_count++;
906                trace_sched_stat_iowait(tsk, delta);
907            }
908
909            /*
910             * Blocking time is in units of nanosecs, so shift by
911             * 20 to get a milliseconds-range estimation of the
912             * amount of time that the task spent sleeping:
913             */
914            if (unlikely(prof_on == SLEEP_PROFILING)) {
915                profile_hits(SLEEP_PROFILING,
916                        (void *)get_wchan(tsk),
917                        delta >> 20);
918            }
919            account_scheduler_latency(tsk, delta >> 10, 0);
920        }
921    }
922#endif
923}
924
925static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
926{
927#ifdef CONFIG_SCHED_DEBUG
928    s64 d = se->vruntime - cfs_rq->min_vruntime;
929
930    if (d < 0)
931        d = -d;
932
933    if (d > 3*sysctl_sched_latency)
934        schedstat_inc(cfs_rq, nr_spread_over);
935#endif
936}
937
938static void
939place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
940{
941    u64 vruntime = cfs_rq->min_vruntime;
942
943    /*
944     * The 'current' period is already promised to the current tasks,
945     * however the extra weight of the new task will slow them down a
946     * little, place the new task so that it fits in the slot that
947     * stays open at the end.
948     */
949    if (initial && sched_feat(START_DEBIT))
950        vruntime += sched_vslice(cfs_rq, se);
951
952    /* sleeps up to a single latency don't count. */
953    if (!initial) {
954        unsigned long thresh = sysctl_sched_latency;
955
956        /*
957         * Halve their sleep time's effect, to allow
958         * for a gentler effect of sleepers:
959         */
960        if (sched_feat(GENTLE_FAIR_SLEEPERS))
961            thresh >>= 1;
962
963        vruntime -= thresh;
964    }
965
966    /* ensure we never gain time by being placed backwards. */
967    vruntime = max_vruntime(se->vruntime, vruntime);
968
969    se->vruntime = vruntime;
970}
971
972static void
973enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
974{
975    /*
976     * Update the normalized vruntime before updating min_vruntime
977     * through callig update_curr().
978     */
979    if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
980        se->vruntime += cfs_rq->min_vruntime;
981
982    /*
983     * Update run-time statistics of the 'current'.
984     */
985    update_curr(cfs_rq);
986    update_cfs_load(cfs_rq, 0);
987    account_entity_enqueue(cfs_rq, se);
988    update_cfs_shares(cfs_rq);
989
990    if (flags & ENQUEUE_WAKEUP) {
991        place_entity(cfs_rq, se, 0);
992        enqueue_sleeper(cfs_rq, se);
993    }
994
995    update_stats_enqueue(cfs_rq, se);
996    check_spread(cfs_rq, se);
997    if (se != cfs_rq->curr)
998        __enqueue_entity(cfs_rq, se);
999    se->on_rq = 1;
1000
1001    if (cfs_rq->nr_running == 1)
1002        list_add_leaf_cfs_rq(cfs_rq);
1003}
1004
1005static void __clear_buddies_last(struct sched_entity *se)
1006{
1007    for_each_sched_entity(se) {
1008        struct cfs_rq *cfs_rq = cfs_rq_of(se);
1009        if (cfs_rq->last == se)
1010            cfs_rq->last = NULL;
1011        else
1012            break;
1013    }
1014}
1015
1016static void __clear_buddies_next(struct sched_entity *se)
1017{
1018    for_each_sched_entity(se) {
1019        struct cfs_rq *cfs_rq = cfs_rq_of(se);
1020        if (cfs_rq->next == se)
1021            cfs_rq->next = NULL;
1022        else
1023            break;
1024    }
1025}
1026
1027static void __clear_buddies_skip(struct sched_entity *se)
1028{
1029    for_each_sched_entity(se) {
1030        struct cfs_rq *cfs_rq = cfs_rq_of(se);
1031        if (cfs_rq->skip == se)
1032            cfs_rq->skip = NULL;
1033        else
1034            break;
1035    }
1036}
1037
1038static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1039{
1040    if (cfs_rq->last == se)
1041        __clear_buddies_last(se);
1042
1043    if (cfs_rq->next == se)
1044        __clear_buddies_next(se);
1045
1046    if (cfs_rq->skip == se)
1047        __clear_buddies_skip(se);
1048}
1049
1050static void
1051dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1052{
1053    /*
1054     * Update run-time statistics of the 'current'.
1055     */
1056    update_curr(cfs_rq);
1057
1058    update_stats_dequeue(cfs_rq, se);
1059    if (flags & DEQUEUE_SLEEP) {
1060#ifdef CONFIG_SCHEDSTATS
1061        if (entity_is_task(se)) {
1062            struct task_struct *tsk = task_of(se);
1063
1064            if (tsk->state & TASK_INTERRUPTIBLE)
1065                se->statistics.sleep_start = rq_of(cfs_rq)->clock;
1066            if (tsk->state & TASK_UNINTERRUPTIBLE)
1067                se->statistics.block_start = rq_of(cfs_rq)->clock;
1068        }
1069#endif
1070    }
1071
1072    clear_buddies(cfs_rq, se);
1073
1074    if (se != cfs_rq->curr)
1075        __dequeue_entity(cfs_rq, se);
1076    se->on_rq = 0;
1077    update_cfs_load(cfs_rq, 0);
1078    account_entity_dequeue(cfs_rq, se);
1079
1080    /*
1081     * Normalize the entity after updating the min_vruntime because the
1082     * update can refer to the ->curr item and we need to reflect this
1083     * movement in our normalized position.
1084     */
1085    if (!(flags & DEQUEUE_SLEEP))
1086        se->vruntime -= cfs_rq->min_vruntime;
1087
1088    update_min_vruntime(cfs_rq);
1089    update_cfs_shares(cfs_rq);
1090}
1091
1092/*
1093 * Preempt the current task with a newly woken task if needed:
1094 */
1095static void
1096check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
1097{
1098    unsigned long ideal_runtime, delta_exec;
1099
1100    ideal_runtime = sched_slice(cfs_rq, curr);
1101    delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1102    if (delta_exec > ideal_runtime) {
1103        resched_task(rq_of(cfs_rq)->curr);
1104        /*
1105         * The current task ran long enough, ensure it doesn't get
1106         * re-elected due to buddy favours.
1107         */
1108        clear_buddies(cfs_rq, curr);
1109        return;
1110    }
1111
1112    /*
1113     * Ensure that a task that missed wakeup preemption by a
1114     * narrow margin doesn't have to wait for a full slice.
1115     * This also mitigates buddy induced latencies under load.
1116     */
1117    if (!sched_feat(WAKEUP_PREEMPT))
1118        return;
1119
1120    if (delta_exec < sysctl_sched_min_granularity)
1121        return;
1122
1123    if (cfs_rq->nr_running > 1) {
1124        struct sched_entity *se = __pick_first_entity(cfs_rq);
1125        s64 delta = curr->vruntime - se->vruntime;
1126
1127        if (delta < 0)
1128            return;
1129
1130        if (delta > ideal_runtime)
1131            resched_task(rq_of(cfs_rq)->curr);
1132    }
1133}
1134
1135static void
1136set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1137{
1138    /* 'current' is not kept within the tree. */
1139    if (se->on_rq) {
1140        /*
1141         * Any task has to be enqueued before it get to execute on
1142         * a CPU. So account for the time it spent waiting on the
1143         * runqueue.
1144         */
1145        update_stats_wait_end(cfs_rq, se);
1146        __dequeue_entity(cfs_rq, se);
1147    }
1148
1149    update_stats_curr_start(cfs_rq, se);
1150    cfs_rq->curr = se;
1151#ifdef CONFIG_SCHEDSTATS
1152    /*
1153     * Track our maximum slice length, if the CPU's load is at
1154     * least twice that of our own weight (i.e. dont track it
1155     * when there are only lesser-weight tasks around):
1156     */
1157    if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1158        se->statistics.slice_max = max(se->statistics.slice_max,
1159            se->sum_exec_runtime - se->prev_sum_exec_runtime);
1160    }
1161#endif
1162    se->prev_sum_exec_runtime = se->sum_exec_runtime;
1163}
1164
1165static int
1166wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1167
1168/*
1169 * Pick the next process, keeping these things in mind, in this order:
1170 * 1) keep things fair between processes/task groups
1171 * 2) pick the "next" process, since someone really wants that to run
1172 * 3) pick the "last" process, for cache locality
1173 * 4) do not run the "skip" process, if something else is available
1174 */
1175static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1176{
1177    struct sched_entity *se = __pick_first_entity(cfs_rq);
1178    struct sched_entity *left = se;
1179
1180    /*
1181     * Avoid running the skip buddy, if running something else can
1182     * be done without getting too unfair.
1183     */
1184    if (cfs_rq->skip == se) {
1185        struct sched_entity *second = __pick_next_entity(se);
1186        if (second && wakeup_preempt_entity(second, left) < 1)
1187            se = second;
1188    }
1189
1190    /*
1191     * Prefer last buddy, try to return the CPU to a preempted task.
1192     */
1193    if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1194        se = cfs_rq->last;
1195
1196    /*
1197     * Someone really wants this to run. If it's not unfair, run it.
1198     */
1199    if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1200        se = cfs_rq->next;
1201
1202    clear_buddies(cfs_rq, se);
1203
1204    return se;
1205}
1206
1207static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
1208{
1209    /*
1210     * If still on the runqueue then deactivate_task()
1211     * was not called and update_curr() has to be done:
1212     */
1213    if (prev->on_rq)
1214        update_curr(cfs_rq);
1215
1216    check_spread(cfs_rq, prev);
1217    if (prev->on_rq) {
1218        update_stats_wait_start(cfs_rq, prev);
1219        /* Put 'current' back into the tree. */
1220        __enqueue_entity(cfs_rq, prev);
1221    }
1222    cfs_rq->curr = NULL;
1223}
1224
1225static void
1226entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
1227{
1228    /*
1229     * Update run-time statistics of the 'current'.
1230     */
1231    update_curr(cfs_rq);
1232
1233    /*
1234     * Update share accounting for long-running entities.
1235     */
1236    update_entity_shares_tick(cfs_rq);
1237
1238#ifdef CONFIG_SCHED_HRTICK
1239    /*
1240     * queued ticks are scheduled to match the slice, so don't bother
1241     * validating it and just reschedule.
1242     */
1243    if (queued) {
1244        resched_task(rq_of(cfs_rq)->curr);
1245        return;
1246    }
1247    /*
1248     * don't let the period tick interfere with the hrtick preemption
1249     */
1250    if (!sched_feat(DOUBLE_TICK) &&
1251            hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
1252        return;
1253#endif
1254
1255    if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
1256        check_preempt_tick(cfs_rq, curr);
1257}
1258
1259/**************************************************
1260 * CFS operations on tasks:
1261 */
1262
1263#ifdef CONFIG_SCHED_HRTICK
1264static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
1265{
1266    struct sched_entity *se = &p->se;
1267    struct cfs_rq *cfs_rq = cfs_rq_of(se);
1268
1269    WARN_ON(task_rq(p) != rq);
1270
1271    if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
1272        u64 slice = sched_slice(cfs_rq, se);
1273        u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
1274        s64 delta = slice - ran;
1275
1276        if (delta < 0) {
1277            if (rq->curr == p)
1278                resched_task(p);
1279            return;
1280        }
1281
1282        /*
1283         * Don't schedule slices shorter than 10000ns, that just
1284         * doesn't make sense. Rely on vruntime for fairness.
1285         */
1286        if (rq->curr != p)
1287            delta = max_t(s64, 10000LL, delta);
1288
1289        hrtick_start(rq, delta);
1290    }
1291}
1292
1293/*
1294 * called from enqueue/dequeue and updates the hrtick when the
1295 * current task is from our class and nr_running is low enough
1296 * to matter.
1297 */
1298static void hrtick_update(struct rq *rq)
1299{
1300    struct task_struct *curr = rq->curr;
1301
1302    if (curr->sched_class != &fair_sched_class)
1303        return;
1304
1305    if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
1306        hrtick_start_fair(rq, curr);
1307}
1308#else /* !CONFIG_SCHED_HRTICK */
1309static inline void
1310hrtick_start_fair(struct rq *rq, struct task_struct *p)
1311{
1312}
1313
1314static inline void hrtick_update(struct rq *rq)
1315{
1316}
1317#endif
1318
1319/*
1320 * The enqueue_task method is called before nr_running is
1321 * increased. Here we update the fair scheduling stats and
1322 * then put the task into the rbtree:
1323 */
1324static void
1325enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
1326{
1327    struct cfs_rq *cfs_rq;
1328    struct sched_entity *se = &p->se;
1329
1330    for_each_sched_entity(se) {
1331        if (se->on_rq)
1332            break;
1333        cfs_rq = cfs_rq_of(se);
1334        enqueue_entity(cfs_rq, se, flags);
1335        flags = ENQUEUE_WAKEUP;
1336    }
1337
1338    for_each_sched_entity(se) {
1339        struct cfs_rq *cfs_rq = cfs_rq_of(se);
1340
1341        update_cfs_load(cfs_rq, 0);
1342        update_cfs_shares(cfs_rq);
1343    }
1344
1345    hrtick_update(rq);
1346}
1347
1348static void set_next_buddy(struct sched_entity *se);
1349
1350/*
1351 * The dequeue_task method is called before nr_running is
1352 * decreased. We remove the task from the rbtree and
1353 * update the fair scheduling stats:
1354 */
1355static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
1356{
1357    struct cfs_rq *cfs_rq;
1358    struct sched_entity *se = &p->se;
1359    int task_sleep = flags & DEQUEUE_SLEEP;
1360
1361    for_each_sched_entity(se) {
1362        cfs_rq = cfs_rq_of(se);
1363        dequeue_entity(cfs_rq, se, flags);
1364
1365        /* Don't dequeue parent if it has other entities besides us */
1366        if (cfs_rq->load.weight) {
1367            /*
1368             * Bias pick_next to pick a task from this cfs_rq, as
1369             * p is sleeping when it is within its sched_slice.
1370             */
1371            if (task_sleep && parent_entity(se))
1372                set_next_buddy(parent_entity(se));
1373            break;
1374        }
1375        flags |= DEQUEUE_SLEEP;
1376    }
1377
1378    for_each_sched_entity(se) {
1379        struct cfs_rq *cfs_rq = cfs_rq_of(se);
1380
1381        update_cfs_load(cfs_rq, 0);
1382        update_cfs_shares(cfs_rq);
1383    }
1384
1385    hrtick_update(rq);
1386}
1387
1388#ifdef CONFIG_SMP
1389
1390static void task_waking_fair(struct task_struct *p)
1391{
1392    struct sched_entity *se = &p->se;
1393    struct cfs_rq *cfs_rq = cfs_rq_of(se);
1394    u64 min_vruntime;
1395
1396#ifndef CONFIG_64BIT
1397    u64 min_vruntime_copy;
1398
1399    do {
1400        min_vruntime_copy = cfs_rq->min_vruntime_copy;
1401        smp_rmb();
1402        min_vruntime = cfs_rq->min_vruntime;
1403    } while (min_vruntime != min_vruntime_copy);
1404#else
1405    min_vruntime = cfs_rq->min_vruntime;
1406#endif
1407
1408    se->vruntime -= min_vruntime;
1409}
1410
1411#ifdef CONFIG_FAIR_GROUP_SCHED
1412/*
1413 * effective_load() calculates the load change as seen from the root_task_group
1414 *
1415 * Adding load to a group doesn't make a group heavier, but can cause movement
1416 * of group shares between cpus. Assuming the shares were perfectly aligned one
1417 * can calculate the shift in shares.
1418 */
1419static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
1420{
1421    struct sched_entity *se = tg->se[cpu];
1422
1423    if (!tg->parent)
1424        return wl;
1425
1426    for_each_sched_entity(se) {
1427        long lw, w;
1428
1429        tg = se->my_q->tg;
1430        w = se->my_q->load.weight;
1431
1432        /* use this cpu's instantaneous contribution */
1433        lw = atomic_read(&tg->load_weight);
1434        lw -= se->my_q->load_contribution;
1435        lw += w + wg;
1436
1437        wl += w;
1438
1439        if (lw > 0 && wl < lw)
1440            wl = (wl * tg->shares) / lw;
1441        else
1442            wl = tg->shares;
1443
1444        /* zero point is MIN_SHARES */
1445        if (wl < MIN_SHARES)
1446            wl = MIN_SHARES;
1447        wl -= se->load.weight;
1448        wg = 0;
1449    }
1450
1451    return wl;
1452}
1453
1454#else
1455
1456static inline unsigned long effective_load(struct task_group *tg, int cpu,
1457        unsigned long wl, unsigned long wg)
1458{
1459    return wl;
1460}
1461
1462#endif
1463
1464static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1465{
1466    s64 this_load, load;
1467    int idx, this_cpu, prev_cpu;
1468    unsigned long tl_per_task;
1469    struct task_group *tg;
1470    unsigned long weight;
1471    int balanced;
1472
1473    idx = sd->wake_idx;
1474    this_cpu = smp_processor_id();
1475    prev_cpu = task_cpu(p);
1476    load = source_load(prev_cpu, idx);
1477    this_load = target_load(this_cpu, idx);
1478
1479    /*
1480     * If sync wakeup then subtract the (maximum possible)
1481     * effect of the currently running task from the load
1482     * of the current CPU:
1483     */
1484    rcu_read_lock();
1485    if (sync) {
1486        tg = task_group(current);
1487        weight = current->se.load.weight;
1488
1489        this_load += effective_load(tg, this_cpu, -weight, -weight);
1490        load += effective_load(tg, prev_cpu, 0, -weight);
1491    }
1492
1493    tg = task_group(p);
1494    weight = p->se.load.weight;
1495
1496    /*
1497     * In low-load situations, where prev_cpu is idle and this_cpu is idle
1498     * due to the sync cause above having dropped this_load to 0, we'll
1499     * always have an imbalance, but there's really nothing you can do
1500     * about that, so that's good too.
1501     *
1502     * Otherwise check if either cpus are near enough in load to allow this
1503     * task to be woken on this_cpu.
1504     */
1505    if (this_load > 0) {
1506        s64 this_eff_load, prev_eff_load;
1507
1508        this_eff_load = 100;
1509        this_eff_load *= power_of(prev_cpu);
1510        this_eff_load *= this_load +
1511            effective_load(tg, this_cpu, weight, weight);
1512
1513        prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
1514        prev_eff_load *= power_of(this_cpu);
1515        prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
1516
1517        balanced = this_eff_load <= prev_eff_load;
1518    } else
1519        balanced = true;
1520    rcu_read_unlock();
1521
1522    /*
1523     * If the currently running task will sleep within
1524     * a reasonable amount of time then attract this newly
1525     * woken task:
1526     */
1527    if (sync && balanced)
1528        return 1;
1529
1530    schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
1531    tl_per_task = cpu_avg_load_per_task(this_cpu);
1532
1533    if (balanced ||
1534        (this_load <= load &&
1535         this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
1536        /*
1537         * This domain has SD_WAKE_AFFINE and
1538         * p is cache cold in this domain, and
1539         * there is no bad imbalance.
1540         */
1541        schedstat_inc(sd, ttwu_move_affine);
1542        schedstat_inc(p, se.statistics.nr_wakeups_affine);
1543
1544        return 1;
1545    }
1546    return 0;
1547}
1548
1549/*
1550 * find_idlest_group finds and returns the least busy CPU group within the
1551 * domain.
1552 */
1553static struct sched_group *
1554find_idlest_group(struct sched_domain *sd, struct task_struct *p,
1555          int this_cpu, int load_idx)
1556{
1557    struct sched_group *idlest = NULL, *group = sd->groups;
1558    unsigned long min_load = ULONG_MAX, this_load = 0;
1559    int imbalance = 100 + (sd->imbalance_pct-100)/2;
1560
1561    do {
1562        unsigned long load, avg_load;
1563        int local_group;
1564        int i;
1565
1566        /* Skip over this group if it has no CPUs allowed */
1567        if (!cpumask_intersects(sched_group_cpus(group),
1568                    &p->cpus_allowed))
1569            continue;
1570
1571        local_group = cpumask_test_cpu(this_cpu,
1572                           sched_group_cpus(group));
1573
1574        /* Tally up the load of all CPUs in the group */
1575        avg_load = 0;
1576
1577        for_each_cpu(i, sched_group_cpus(group)) {
1578            /* Bias balancing toward cpus of our domain */
1579            if (local_group)
1580                load = source_load(i, load_idx);
1581            else
1582                load = target_load(i, load_idx);
1583
1584            avg_load += load;
1585        }
1586
1587        /* Adjust by relative CPU power of the group */
1588        avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
1589
1590        if (local_group) {
1591            this_load = avg_load;
1592        } else if (avg_load < min_load) {
1593            min_load = avg_load;
1594            idlest = group;
1595        }
1596    } while (group = group->next, group != sd->groups);
1597
1598    if (!idlest || 100*this_load < imbalance*min_load)
1599        return NULL;
1600    return idlest;
1601}
1602
1603/*
1604 * find_idlest_cpu - find the idlest cpu among the cpus in group.
1605 */
1606static int
1607find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1608{
1609    unsigned long load, min_load = ULONG_MAX;
1610    int idlest = -1;
1611    int i;
1612
1613    /* Traverse only the allowed CPUs */
1614    for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
1615        load = weighted_cpuload(i);
1616
1617        if (load < min_load || (load == min_load && i == this_cpu)) {
1618            min_load = load;
1619            idlest = i;
1620        }
1621    }
1622
1623    return idlest;
1624}
1625
1626/*
1627 * Try and locate an idle CPU in the sched_domain.
1628 */
1629static int select_idle_sibling(struct task_struct *p, int target)
1630{
1631    int cpu = smp_processor_id();
1632    int prev_cpu = task_cpu(p);
1633    struct sched_domain *sd;
1634    int i;
1635
1636    /*
1637     * If the task is going to be woken-up on this cpu and if it is
1638     * already idle, then it is the right target.
1639     */
1640    if (target == cpu && idle_cpu(cpu))
1641        return cpu;
1642
1643    /*
1644     * If the task is going to be woken-up on the cpu where it previously
1645     * ran and if it is currently idle, then it the right target.
1646     */
1647    if (target == prev_cpu && idle_cpu(prev_cpu))
1648        return prev_cpu;
1649
1650    /*
1651     * Otherwise, iterate the domains and find an elegible idle cpu.
1652     */
1653    rcu_read_lock();
1654    for_each_domain(target, sd) {
1655        if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
1656            break;
1657
1658        for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
1659            if (idle_cpu(i)) {
1660                target = i;
1661                break;
1662            }
1663        }
1664
1665        /*
1666         * Lets stop looking for an idle sibling when we reached
1667         * the domain that spans the current cpu and prev_cpu.
1668         */
1669        if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
1670            cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
1671            break;
1672    }
1673    rcu_read_unlock();
1674
1675    return target;
1676}
1677
1678/*
1679 * sched_balance_self: balance the current task (running on cpu) in domains
1680 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
1681 * SD_BALANCE_EXEC.
1682 *
1683 * Balance, ie. select the least loaded group.
1684 *
1685 * Returns the target CPU number, or the same CPU if no balancing is needed.
1686 *
1687 * preempt must be disabled.
1688 */
1689static int
1690select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
1691{
1692    struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
1693    int cpu = smp_processor_id();
1694    int prev_cpu = task_cpu(p);
1695    int new_cpu = cpu;
1696    int want_affine = 0;
1697    int want_sd = 1;
1698    int sync = wake_flags & WF_SYNC;
1699
1700    if (sd_flag & SD_BALANCE_WAKE) {
1701        if (cpumask_test_cpu(cpu, &p->cpus_allowed))
1702            want_affine = 1;
1703        new_cpu = prev_cpu;
1704    }
1705
1706    rcu_read_lock();
1707    for_each_domain(cpu, tmp) {
1708        if (!(tmp->flags & SD_LOAD_BALANCE))
1709            continue;
1710
1711        /*
1712         * If power savings logic is enabled for a domain, see if we
1713         * are not overloaded, if so, don't balance wider.
1714         */
1715        if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
1716            unsigned long power = 0;
1717            unsigned long nr_running = 0;
1718            unsigned long capacity;
1719            int i;
1720
1721            for_each_cpu(i, sched_domain_span(tmp)) {
1722                power += power_of(i);
1723                nr_running += cpu_rq(i)->cfs.nr_running;
1724            }
1725
1726            capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
1727
1728            if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1729                nr_running /= 2;
1730
1731            if (nr_running < capacity)
1732                want_sd = 0;
1733        }
1734
1735        /*
1736         * If both cpu and prev_cpu are part of this domain,
1737         * cpu is a valid SD_WAKE_AFFINE target.
1738         */
1739        if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
1740            cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
1741            affine_sd = tmp;
1742            want_affine = 0;
1743        }
1744
1745        if (!want_sd && !want_affine)
1746            break;
1747
1748        if (!(tmp->flags & sd_flag))
1749            continue;
1750
1751        if (want_sd)
1752            sd = tmp;
1753    }
1754
1755    if (affine_sd) {
1756        if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
1757            prev_cpu = cpu;
1758
1759        new_cpu = select_idle_sibling(p, prev_cpu);
1760        goto unlock;
1761    }
1762
1763    while (sd) {
1764        int load_idx = sd->forkexec_idx;
1765        struct sched_group *group;
1766        int weight;
1767
1768        if (!(sd->flags & sd_flag)) {
1769            sd = sd->child;
1770            continue;
1771        }
1772
1773        if (sd_flag & SD_BALANCE_WAKE)
1774            load_idx = sd->wake_idx;
1775
1776        group = find_idlest_group(sd, p, cpu, load_idx);
1777        if (!group) {
1778            sd = sd->child;
1779            continue;
1780        }
1781
1782        new_cpu = find_idlest_cpu(group, p, cpu);
1783        if (new_cpu == -1 || new_cpu == cpu) {
1784            /* Now try balancing at a lower domain level of cpu */
1785            sd = sd->child;
1786            continue;
1787        }
1788
1789        /* Now try balancing at a lower domain level of new_cpu */
1790        cpu = new_cpu;
1791        weight = sd->span_weight;
1792        sd = NULL;
1793        for_each_domain(cpu, tmp) {
1794            if (weight <= tmp->span_weight)
1795                break;
1796            if (tmp->flags & sd_flag)
1797                sd = tmp;
1798        }
1799        /* while loop will break here if sd == NULL */
1800    }
1801unlock:
1802    rcu_read_unlock();
1803
1804    return new_cpu;
1805}
1806#endif /* CONFIG_SMP */
1807
1808static unsigned long
1809wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1810{
1811    unsigned long gran = sysctl_sched_wakeup_granularity;
1812
1813    /*
1814     * Since its curr running now, convert the gran from real-time
1815     * to virtual-time in his units.
1816     *
1817     * By using 'se' instead of 'curr' we penalize light tasks, so
1818     * they get preempted easier. That is, if 'se' < 'curr' then
1819     * the resulting gran will be larger, therefore penalizing the
1820     * lighter, if otoh 'se' > 'curr' then the resulting gran will
1821     * be smaller, again penalizing the lighter task.
1822     *
1823     * This is especially important for buddies when the leftmost
1824     * task is higher priority than the buddy.
1825     */
1826    return calc_delta_fair(gran, se);
1827}
1828
1829/*
1830 * Should 'se' preempt 'curr'.
1831 *
1832 * |s1
1833 * |s2
1834 * |s3
1835 * g
1836 * |<--->|c
1837 *
1838 * w(c, s1) = -1
1839 * w(c, s2) = 0
1840 * w(c, s3) = 1
1841 *
1842 */
1843static int
1844wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1845{
1846    s64 gran, vdiff = curr->vruntime - se->vruntime;
1847
1848    if (vdiff <= 0)
1849        return -1;
1850
1851    gran = wakeup_gran(curr, se);
1852    if (vdiff > gran)
1853        return 1;
1854
1855    return 0;
1856}
1857
1858static void set_last_buddy(struct sched_entity *se)
1859{
1860    if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
1861        return;
1862
1863    for_each_sched_entity(se)
1864        cfs_rq_of(se)->last = se;
1865}
1866
1867static void set_next_buddy(struct sched_entity *se)
1868{
1869    if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
1870        return;
1871
1872    for_each_sched_entity(se)
1873        cfs_rq_of(se)->next = se;
1874}
1875
1876static void set_skip_buddy(struct sched_entity *se)
1877{
1878    for_each_sched_entity(se)
1879        cfs_rq_of(se)->skip = se;
1880}
1881
1882/*
1883 * Preempt the current task with a newly woken task if needed:
1884 */
1885static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1886{
1887    struct task_struct *curr = rq->curr;
1888    struct sched_entity *se = &curr->se, *pse = &p->se;
1889    struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1890    int scale = cfs_rq->nr_running >= sched_nr_latency;
1891    int next_buddy_marked = 0;
1892
1893    if (unlikely(se == pse))
1894        return;
1895
1896    if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
1897        set_next_buddy(pse);
1898        next_buddy_marked = 1;
1899    }
1900
1901    /*
1902     * We can come here with TIF_NEED_RESCHED already set from new task
1903     * wake up path.
1904     */
1905    if (test_tsk_need_resched(curr))
1906        return;
1907
1908    /* Idle tasks are by definition preempted by non-idle tasks. */
1909    if (unlikely(curr->policy == SCHED_IDLE) &&
1910        likely(p->policy != SCHED_IDLE))
1911        goto preempt;
1912
1913    /*
1914     * Batch and idle tasks do not preempt non-idle tasks (their preemption
1915     * is driven by the tick):
1916     */
1917    if (unlikely(p->policy != SCHED_NORMAL))
1918        return;
1919
1920
1921    if (!sched_feat(WAKEUP_PREEMPT))
1922        return;
1923
1924    update_curr(cfs_rq);
1925    find_matching_se(&se, &pse);
1926    BUG_ON(!pse);
1927    if (wakeup_preempt_entity(se, pse) == 1) {
1928        /*
1929         * Bias pick_next to pick the sched entity that is
1930         * triggering this preemption.
1931         */
1932        if (!next_buddy_marked)
1933            set_next_buddy(pse);
1934        goto preempt;
1935    }
1936
1937    return;
1938
1939preempt:
1940    resched_task(curr);
1941    /*
1942     * Only set the backward buddy when the current task is still
1943     * on the rq. This can happen when a wakeup gets interleaved
1944     * with schedule on the ->pre_schedule() or idle_balance()
1945     * point, either of which can * drop the rq lock.
1946     *
1947     * Also, during early boot the idle thread is in the fair class,
1948     * for obvious reasons its a bad idea to schedule back to it.
1949     */
1950    if (unlikely(!se->on_rq || curr == rq->idle))
1951        return;
1952
1953    if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1954        set_last_buddy(se);
1955}
1956
1957static struct task_struct *pick_next_task_fair(struct rq *rq)
1958{
1959    struct task_struct *p;
1960    struct cfs_rq *cfs_rq = &rq->cfs;
1961    struct sched_entity *se;
1962
1963    if (!cfs_rq->nr_running)
1964        return NULL;
1965
1966    do {
1967        se = pick_next_entity(cfs_rq);
1968        set_next_entity(cfs_rq, se);
1969        cfs_rq = group_cfs_rq(se);
1970    } while (cfs_rq);
1971
1972    p = task_of(se);
1973    hrtick_start_fair(rq, p);
1974
1975    return p;
1976}
1977
1978/*
1979 * Account for a descheduled task:
1980 */
1981static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
1982{
1983    struct sched_entity *se = &prev->se;
1984    struct cfs_rq *cfs_rq;
1985
1986    for_each_sched_entity(se) {
1987        cfs_rq = cfs_rq_of(se);
1988        put_prev_entity(cfs_rq, se);
1989    }
1990}
1991
1992/*
1993 * sched_yield() is very simple
1994 *
1995 * The magic of dealing with the ->skip buddy is in pick_next_entity.
1996 */
1997static void yield_task_fair(struct rq *rq)
1998{
1999    struct task_struct *curr = rq->curr;
2000    struct cfs_rq *cfs_rq = task_cfs_rq(curr);
2001    struct sched_entity *se = &curr->se;
2002
2003    /*
2004     * Are we the only task in the tree?
2005     */
2006    if (unlikely(rq->nr_running == 1))
2007        return;
2008
2009    clear_buddies(cfs_rq, se);
2010
2011    if (curr->policy != SCHED_BATCH) {
2012        update_rq_clock(rq);
2013        /*
2014         * Update run-time statistics of the 'current'.
2015         */
2016        update_curr(cfs_rq);
2017    }
2018
2019    set_skip_buddy(se);
2020}
2021
2022static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
2023{
2024    struct sched_entity *se = &p->se;
2025
2026    if (!se->on_rq)
2027        return false;
2028
2029    /* Tell the scheduler that we'd really like pse to run next. */
2030    set_next_buddy(se);
2031
2032    yield_task_fair(rq);
2033
2034    return true;
2035}
2036
2037#ifdef CONFIG_SMP
2038/**************************************************
2039 * Fair scheduling class load-balancing methods:
2040 */
2041
2042/*
2043 * pull_task - move a task from a remote runqueue to the local runqueue.
2044 * Both runqueues must be locked.
2045 */
2046static void pull_task(struct rq *src_rq, struct task_struct *p,
2047              struct rq *this_rq, int this_cpu)
2048{
2049    deactivate_task(src_rq, p, 0);
2050    set_task_cpu(p, this_cpu);
2051    activate_task(this_rq, p, 0);
2052    check_preempt_curr(this_rq, p, 0);
2053}
2054
2055/*
2056 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
2057 */
2058static
2059int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2060             struct sched_domain *sd, enum cpu_idle_type idle,
2061             int *all_pinned)
2062{
2063    int tsk_cache_hot = 0;
2064    /*
2065     * We do not migrate tasks that are:
2066     * 1) running (obviously), or
2067     * 2) cannot be migrated to this CPU due to cpus_allowed, or
2068     * 3) are cache-hot on their current CPU.
2069     */
2070    if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
2071        schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
2072        return 0;
2073    }
2074    *all_pinned = 0;
2075
2076    if (task_running(rq, p)) {
2077        schedstat_inc(p, se.statistics.nr_failed_migrations_running);
2078        return 0;
2079    }
2080
2081    /*
2082     * Aggressive migration if:
2083     * 1) task is cache cold, or
2084     * 2) too many balance attempts have failed.
2085     */
2086
2087    tsk_cache_hot = task_hot(p, rq->clock_task, sd);
2088    if (!tsk_cache_hot ||
2089        sd->nr_balance_failed > sd->cache_nice_tries) {
2090#ifdef CONFIG_SCHEDSTATS
2091        if (tsk_cache_hot) {
2092            schedstat_inc(sd, lb_hot_gained[idle]);
2093            schedstat_inc(p, se.statistics.nr_forced_migrations);
2094        }
2095#endif
2096        return 1;
2097    }
2098
2099    if (tsk_cache_hot) {
2100        schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
2101        return 0;
2102    }
2103    return 1;
2104}
2105
2106/*
2107 * move_one_task tries to move exactly one task from busiest to this_rq, as
2108 * part of active balancing operations within "domain".
2109 * Returns 1 if successful and 0 otherwise.
2110 *
2111 * Called with both runqueues locked.
2112 */
2113static int
2114move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2115          struct sched_domain *sd, enum cpu_idle_type idle)
2116{
2117    struct task_struct *p, *n;
2118    struct cfs_rq *cfs_rq;
2119    int pinned = 0;
2120
2121    for_each_leaf_cfs_rq(busiest, cfs_rq) {
2122        list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
2123
2124            if (!can_migrate_task(p, busiest, this_cpu,
2125                        sd, idle, &pinned))
2126                continue;
2127
2128            pull_task(busiest, p, this_rq, this_cpu);
2129            /*
2130             * Right now, this is only the second place pull_task()
2131             * is called, so we can safely collect pull_task()
2132             * stats here rather than inside pull_task().
2133             */
2134            schedstat_inc(sd, lb_gained[idle]);
2135            return 1;
2136        }
2137    }
2138
2139    return 0;
2140}
2141
2142static unsigned long
2143balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2144          unsigned long max_load_move, struct sched_domain *sd,
2145          enum cpu_idle_type idle, int *all_pinned,
2146          struct cfs_rq *busiest_cfs_rq)
2147{
2148    int loops = 0, pulled = 0;
2149    long rem_load_move = max_load_move;
2150    struct task_struct *p, *n;
2151
2152    if (max_load_move == 0)
2153        goto out;
2154
2155    list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
2156        if (loops++ > sysctl_sched_nr_migrate)
2157            break;
2158
2159        if ((p->se.load.weight >> 1) > rem_load_move ||
2160            !can_migrate_task(p, busiest, this_cpu, sd, idle,
2161                      all_pinned))
2162            continue;
2163
2164        pull_task(busiest, p, this_rq, this_cpu);
2165        pulled++;
2166        rem_load_move -= p->se.load.weight;
2167
2168#ifdef CONFIG_PREEMPT
2169        /*
2170         * NEWIDLE balancing is a source of latency, so preemptible
2171         * kernels will stop after the first task is pulled to minimize
2172         * the critical section.
2173         */
2174        if (idle == CPU_NEWLY_IDLE)
2175            break;
2176#endif
2177
2178        /*
2179         * We only want to steal up to the prescribed amount of
2180         * weighted load.
2181         */
2182        if (rem_load_move <= 0)
2183            break;
2184    }
2185out:
2186    /*
2187     * Right now, this is one of only two places pull_task() is called,
2188     * so we can safely collect pull_task() stats here rather than
2189     * inside pull_task().
2190     */
2191    schedstat_add(sd, lb_gained[idle], pulled);
2192
2193    return max_load_move - rem_load_move;
2194}
2195
2196#ifdef CONFIG_FAIR_GROUP_SCHED
2197/*
2198 * update tg->load_weight by folding this cpu's load_avg
2199 */
2200static int update_shares_cpu(struct task_group *tg, int cpu)
2201{
2202    struct cfs_rq *cfs_rq;
2203    unsigned long flags;
2204    struct rq *rq;
2205
2206    if (!tg->se[cpu])
2207        return 0;
2208
2209    rq = cpu_rq(cpu);
2210    cfs_rq = tg->cfs_rq[cpu];
2211
2212    raw_spin_lock_irqsave(&rq->lock, flags);
2213
2214    update_rq_clock(rq);
2215    update_cfs_load(cfs_rq, 1);
2216
2217    /*
2218     * We need to update shares after updating tg->load_weight in
2219     * order to adjust the weight of groups with long running tasks.
2220     */
2221    update_cfs_shares(cfs_rq);
2222
2223    raw_spin_unlock_irqrestore(&rq->lock, flags);
2224
2225    return 0;
2226}
2227
2228static void update_shares(int cpu)
2229{
2230    struct cfs_rq *cfs_rq;
2231    struct rq *rq = cpu_rq(cpu);
2232
2233    rcu_read_lock();
2234    for_each_leaf_cfs_rq(rq, cfs_rq)
2235        update_shares_cpu(cfs_rq->tg, cpu);
2236    rcu_read_unlock();
2237}
2238
2239static unsigned long
2240load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
2241          unsigned long max_load_move,
2242          struct sched_domain *sd, enum cpu_idle_type idle,
2243          int *all_pinned)
2244{
2245    long rem_load_move = max_load_move;
2246    int busiest_cpu = cpu_of(busiest);
2247    struct task_group *tg;
2248
2249    rcu_read_lock();
2250    update_h_load(busiest_cpu);
2251
2252    list_for_each_entry_rcu(tg, &task_groups, list) {
2253        struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
2254        unsigned long busiest_h_load = busiest_cfs_rq->h_load;
2255        unsigned long busiest_weight = busiest_cfs_rq->load.weight;
2256        u64 rem_load, moved_load;
2257
2258        /*
2259         * empty group
2260         */
2261        if (!busiest_cfs_rq->task_weight)
2262            continue;
2263
2264        rem_load = (u64)rem_load_move * busiest_weight;
2265        rem_load = div_u64(rem_load, busiest_h_load + 1);
2266
2267        moved_load = balance_tasks(this_rq, this_cpu, busiest,
2268                rem_load, sd, idle, all_pinned,
2269                busiest_cfs_rq);
2270
2271        if (!moved_load)
2272            continue;
2273
2274        moved_load *= busiest_h_load;
2275        moved_load = div_u64(moved_load, busiest_weight + 1);
2276
2277        rem_load_move -= moved_load;
2278        if (rem_load_move < 0)
2279            break;
2280    }
2281    rcu_read_unlock();
2282
2283    return max_load_move - rem_load_move;
2284}
2285#else
2286static inline void update_shares(int cpu)
2287{
2288}
2289
2290static unsigned long
2291load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
2292          unsigned long max_load_move,
2293          struct sched_domain *sd, enum cpu_idle_type idle,
2294          int *all_pinned)
2295{
2296    return balance_tasks(this_rq, this_cpu, busiest,
2297            max_load_move, sd, idle, all_pinned,
2298            &busiest->cfs);
2299}
2300#endif
2301
2302/*
2303 * move_tasks tries to move up to max_load_move weighted load from busiest to
2304 * this_rq, as part of a balancing operation within domain "sd".
2305 * Returns 1 if successful and 0 otherwise.
2306 *
2307 * Called with both runqueues locked.
2308 */
2309static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2310              unsigned long max_load_move,
2311              struct sched_domain *sd, enum cpu_idle_type idle,
2312              int *all_pinned)
2313{
2314    unsigned long total_load_moved = 0, load_moved;
2315
2316    do {
2317        load_moved = load_balance_fair(this_rq, this_cpu, busiest,
2318                max_load_move - total_load_moved,
2319                sd, idle, all_pinned);
2320
2321        total_load_moved += load_moved;
2322
2323#ifdef CONFIG_PREEMPT
2324        /*
2325         * NEWIDLE balancing is a source of latency, so preemptible
2326         * kernels will stop after the first task is pulled to minimize
2327         * the critical section.
2328         */
2329        if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
2330            break;
2331
2332        if (raw_spin_is_contended(&this_rq->lock) ||
2333                raw_spin_is_contended(&busiest->lock))
2334            break;
2335#endif
2336    } while (load_moved && max_load_move > total_load_moved);
2337
2338    return total_load_moved > 0;
2339}
2340
2341/********** Helpers for find_busiest_group ************************/
2342/*
2343 * sd_lb_stats - Structure to store the statistics of a sched_domain
2344 * during load balancing.
2345 */
2346struct sd_lb_stats {
2347    struct sched_group *busiest; /* Busiest group in this sd */
2348    struct sched_group *this; /* Local group in this sd */
2349    unsigned long total_load; /* Total load of all groups in sd */
2350    unsigned long total_pwr; /* Total power of all groups in sd */
2351    unsigned long avg_load; /* Average load across all groups in sd */
2352
2353    /** Statistics of this group */
2354    unsigned long this_load;
2355    unsigned long this_load_per_task;
2356    unsigned long this_nr_running;
2357    unsigned long this_has_capacity;
2358    unsigned int this_idle_cpus;
2359
2360    /* Statistics of the busiest group */
2361    unsigned int busiest_idle_cpus;
2362    unsigned long max_load;
2363    unsigned long busiest_load_per_task;
2364    unsigned long busiest_nr_running;
2365    unsigned long busiest_group_capacity;
2366    unsigned long busiest_has_capacity;
2367    unsigned int busiest_group_weight;
2368
2369    int group_imb; /* Is there imbalance in this sd */
2370#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2371    int power_savings_balance; /* Is powersave balance needed for this sd */
2372    struct sched_group *group_min; /* Least loaded group in sd */
2373    struct sched_group *group_leader; /* Group which relieves group_min */
2374    unsigned long min_load_per_task; /* load_per_task in group_min */
2375    unsigned long leader_nr_running; /* Nr running of group_leader */
2376    unsigned long min_nr_running; /* Nr running of group_min */
2377#endif
2378};
2379
2380/*
2381 * sg_lb_stats - stats of a sched_group required for load_balancing
2382 */
2383struct sg_lb_stats {
2384    unsigned long avg_load; /*Avg load across the CPUs of the group */
2385    unsigned long group_load; /* Total load over the CPUs of the group */
2386    unsigned long sum_nr_running; /* Nr tasks running in the group */
2387    unsigned long sum_weighted_load; /* Weighted load of group's tasks */
2388    unsigned long group_capacity;
2389    unsigned long idle_cpus;
2390    unsigned long group_weight;
2391    int group_imb; /* Is there an imbalance in the group ? */
2392    int group_has_capacity; /* Is there extra capacity in the group? */
2393};
2394
2395/**
2396 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
2397 * @group: The group whose first cpu is to be returned.
2398 */
2399static inline unsigned int group_first_cpu(struct sched_group *group)
2400{
2401    return cpumask_first(sched_group_cpus(group));
2402}
2403
2404/**
2405 * get_sd_load_idx - Obtain the load index for a given sched domain.
2406 * @sd: The sched_domain whose load_idx is to be obtained.
2407 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
2408 */
2409static inline int get_sd_load_idx(struct sched_domain *sd,
2410                    enum cpu_idle_type idle)
2411{
2412    int load_idx;
2413
2414    switch (idle) {
2415    case CPU_NOT_IDLE:
2416        load_idx = sd->busy_idx;
2417        break;
2418
2419    case CPU_NEWLY_IDLE:
2420        load_idx = sd->newidle_idx;
2421        break;
2422    default:
2423        load_idx = sd->idle_idx;
2424        break;
2425    }
2426
2427    return load_idx;
2428}
2429
2430
2431#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2432/**
2433 * init_sd_power_savings_stats - Initialize power savings statistics for
2434 * the given sched_domain, during load balancing.
2435 *
2436 * @sd: Sched domain whose power-savings statistics are to be initialized.
2437 * @sds: Variable containing the statistics for sd.
2438 * @idle: Idle status of the CPU at which we're performing load-balancing.
2439 */
2440static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2441    struct sd_lb_stats *sds, enum cpu_idle_type idle)
2442{
2443    /*
2444     * Busy processors will not participate in power savings
2445     * balance.
2446     */
2447    if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2448        sds->power_savings_balance = 0;
2449    else {
2450        sds->power_savings_balance = 1;
2451        sds->min_nr_running = ULONG_MAX;
2452        sds->leader_nr_running = 0;
2453    }
2454}
2455
2456/**
2457 * update_sd_power_savings_stats - Update the power saving stats for a
2458 * sched_domain while performing load balancing.
2459 *
2460 * @group: sched_group belonging to the sched_domain under consideration.
2461 * @sds: Variable containing the statistics of the sched_domain
2462 * @local_group: Does group contain the CPU for which we're performing
2463 * load balancing ?
2464 * @sgs: Variable containing the statistics of the group.
2465 */
2466static inline void update_sd_power_savings_stats(struct sched_group *group,
2467    struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2468{
2469
2470    if (!sds->power_savings_balance)
2471        return;
2472
2473    /*
2474     * If the local group is idle or completely loaded
2475     * no need to do power savings balance at this domain
2476     */
2477    if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
2478                !sds->this_nr_running))
2479        sds->power_savings_balance = 0;
2480
2481    /*
2482     * If a group is already running at full capacity or idle,
2483     * don't include that group in power savings calculations
2484     */
2485    if (!sds->power_savings_balance ||
2486        sgs->sum_nr_running >= sgs->group_capacity ||
2487        !sgs->sum_nr_running)
2488        return;
2489
2490    /*
2491     * Calculate the group which has the least non-idle load.
2492     * This is the group from where we need to pick up the load
2493     * for saving power
2494     */
2495    if ((sgs->sum_nr_running < sds->min_nr_running) ||
2496        (sgs->sum_nr_running == sds->min_nr_running &&
2497         group_first_cpu(group) > group_first_cpu(sds->group_min))) {
2498        sds->group_min = group;
2499        sds->min_nr_running = sgs->sum_nr_running;
2500        sds->min_load_per_task = sgs->sum_weighted_load /
2501                        sgs->sum_nr_running;
2502    }
2503
2504    /*
2505     * Calculate the group which is almost near its
2506     * capacity but still has some space to pick up some load
2507     * from other group and save more power
2508     */
2509    if (sgs->sum_nr_running + 1 > sgs->group_capacity)
2510        return;
2511
2512    if (sgs->sum_nr_running > sds->leader_nr_running ||
2513        (sgs->sum_nr_running == sds->leader_nr_running &&
2514         group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
2515        sds->group_leader = group;
2516        sds->leader_nr_running = sgs->sum_nr_running;
2517    }
2518}
2519
2520/**
2521 * check_power_save_busiest_group - see if there is potential for some power-savings balance
2522 * @sds: Variable containing the statistics of the sched_domain
2523 * under consideration.
2524 * @this_cpu: Cpu at which we're currently performing load-balancing.
2525 * @imbalance: Variable to store the imbalance.
2526 *
2527 * Description:
2528 * Check if we have potential to perform some power-savings balance.
2529 * If yes, set the busiest group to be the least loaded group in the
2530 * sched_domain, so that it's CPUs can be put to idle.
2531 *
2532 * Returns 1 if there is potential to perform power-savings balance.
2533 * Else returns 0.
2534 */
2535static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2536                    int this_cpu, unsigned long *imbalance)
2537{
2538    if (!sds->power_savings_balance)
2539        return 0;
2540
2541    if (sds->this != sds->group_leader ||
2542            sds->group_leader == sds->group_min)
2543        return 0;
2544
2545    *imbalance = sds->min_load_per_task;
2546    sds->busiest = sds->group_min;
2547
2548    return 1;
2549
2550}
2551#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2552static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2553    struct sd_lb_stats *sds, enum cpu_idle_type idle)
2554{
2555    return;
2556}
2557
2558static inline void update_sd_power_savings_stats(struct sched_group *group,
2559    struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2560{
2561    return;
2562}
2563
2564static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2565                    int this_cpu, unsigned long *imbalance)
2566{
2567    return 0;
2568}
2569#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2570
2571
2572unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
2573{
2574    return SCHED_POWER_SCALE;
2575}
2576
2577unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
2578{
2579    return default_scale_freq_power(sd, cpu);
2580}
2581
2582unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
2583{
2584    unsigned long weight = sd->span_weight;
2585    unsigned long smt_gain = sd->smt_gain;
2586
2587    smt_gain /= weight;
2588
2589    return smt_gain;
2590}
2591
2592unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
2593{
2594    return default_scale_smt_power(sd, cpu);
2595}
2596
2597unsigned long scale_rt_power(int cpu)
2598{
2599    struct rq *rq = cpu_rq(cpu);
2600    u64 total, available;
2601
2602    total = sched_avg_period() + (rq->clock - rq->age_stamp);
2603
2604    if (unlikely(total < rq->rt_avg)) {
2605        /* Ensures that power won't end up being negative */
2606        available = 0;
2607    } else {
2608        available = total - rq->rt_avg;
2609    }
2610
2611    if (unlikely((s64)total < SCHED_POWER_SCALE))
2612        total = SCHED_POWER_SCALE;
2613
2614    total >>= SCHED_POWER_SHIFT;
2615
2616    return div_u64(available, total);
2617}
2618
2619static void update_cpu_power(struct sched_domain *sd, int cpu)
2620{
2621    unsigned long weight = sd->span_weight;
2622    unsigned long power = SCHED_POWER_SCALE;
2623    struct sched_group *sdg = sd->groups;
2624
2625    if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
2626        if (sched_feat(ARCH_POWER))
2627            power *= arch_scale_smt_power(sd, cpu);
2628        else
2629            power *= default_scale_smt_power(sd, cpu);
2630
2631        power >>= SCHED_POWER_SHIFT;
2632    }
2633
2634    sdg->sgp->power_orig = power;
2635
2636    if (sched_feat(ARCH_POWER))
2637        power *= arch_scale_freq_power(sd, cpu);
2638    else
2639        power *= default_scale_freq_power(sd, cpu);
2640
2641    power >>= SCHED_POWER_SHIFT;
2642
2643    power *= scale_rt_power(cpu);
2644    power >>= SCHED_POWER_SHIFT;
2645
2646    if (!power)
2647        power = 1;
2648
2649    cpu_rq(cpu)->cpu_power = power;
2650    sdg->sgp->power = power;
2651}
2652
2653static void update_group_power(struct sched_domain *sd, int cpu)
2654{
2655    struct sched_domain *child = sd->child;
2656    struct sched_group *group, *sdg = sd->groups;
2657    unsigned long power;
2658
2659    if (!child) {
2660        update_cpu_power(sd, cpu);
2661        return;
2662    }
2663
2664    power = 0;
2665
2666    group = child->groups;
2667    do {
2668        power += group->sgp->power;
2669        group = group->next;
2670    } while (group != child->groups);
2671
2672    sdg->sgp->power = power;
2673}
2674
2675/*
2676 * Try and fix up capacity for tiny siblings, this is needed when
2677 * things like SD_ASYM_PACKING need f_b_g to select another sibling
2678 * which on its own isn't powerful enough.
2679 *
2680 * See update_sd_pick_busiest() and check_asym_packing().
2681 */
2682static inline int
2683fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
2684{
2685    /*
2686     * Only siblings can have significantly less than SCHED_POWER_SCALE
2687     */
2688    if (!(sd->flags & SD_SHARE_CPUPOWER))
2689        return 0;
2690
2691    /*
2692     * If ~90% of the cpu_power is still there, we're good.
2693     */
2694    if (group->sgp->power * 32 > group->sgp->power_orig * 29)
2695        return 1;
2696
2697    return 0;
2698}
2699
2700/**
2701 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
2702 * @sd: The sched_domain whose statistics are to be updated.
2703 * @group: sched_group whose statistics are to be updated.
2704 * @this_cpu: Cpu for which load balance is currently performed.
2705 * @idle: Idle status of this_cpu
2706 * @load_idx: Load index of sched_domain of this_cpu for load calc.
2707 * @local_group: Does group contain this_cpu.
2708 * @cpus: Set of cpus considered for load balancing.
2709 * @balance: Should we balance.
2710 * @sgs: variable to hold the statistics for this group.
2711 */
2712static inline void update_sg_lb_stats(struct sched_domain *sd,
2713            struct sched_group *group, int this_cpu,
2714            enum cpu_idle_type idle, int load_idx,
2715            int local_group, const struct cpumask *cpus,
2716            int *balance, struct sg_lb_stats *sgs)
2717{
2718    unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
2719    int i;
2720    unsigned int balance_cpu = -1, first_idle_cpu = 0;
2721    unsigned long avg_load_per_task = 0;
2722
2723    if (local_group)
2724        balance_cpu = group_first_cpu(group);
2725
2726    /* Tally up the load of all CPUs in the group */
2727    max_cpu_load = 0;
2728    min_cpu_load = ~0UL;
2729    max_nr_running = 0;
2730
2731    for_each_cpu_and(i, sched_group_cpus(group), cpus) {
2732        struct rq *rq = cpu_rq(i);
2733
2734        /* Bias balancing toward cpus of our domain */
2735        if (local_group) {
2736            if (idle_cpu(i) && !first_idle_cpu) {
2737                first_idle_cpu = 1;
2738                balance_cpu = i;
2739            }
2740
2741            load = target_load(i, load_idx);
2742        } else {
2743            load = source_load(i, load_idx);
2744            if (load > max_cpu_load) {
2745                max_cpu_load = load;
2746                max_nr_running = rq->nr_running;
2747            }
2748            if (min_cpu_load > load)
2749                min_cpu_load = load;
2750        }
2751
2752        sgs->group_load += load;
2753        sgs->sum_nr_running += rq->nr_running;
2754        sgs->sum_weighted_load += weighted_cpuload(i);
2755        if (idle_cpu(i))
2756            sgs->idle_cpus++;
2757    }
2758
2759    /*
2760     * First idle cpu or the first cpu(busiest) in this sched group
2761     * is eligible for doing load balancing at this and above
2762     * domains. In the newly idle case, we will allow all the cpu's
2763     * to do the newly idle load balance.
2764     */
2765    if (idle != CPU_NEWLY_IDLE && local_group) {
2766        if (balance_cpu != this_cpu) {
2767            *balance = 0;
2768            return;
2769        }
2770        update_group_power(sd, this_cpu);
2771    }
2772
2773    /* Adjust by relative CPU power of the group */
2774    sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
2775
2776    /*
2777     * Consider the group unbalanced when the imbalance is larger
2778     * than the average weight of a task.
2779     *
2780     * APZ: with cgroup the avg task weight can vary wildly and
2781     * might not be a suitable number - should we keep a
2782     * normalized nr_running number somewhere that negates
2783     * the hierarchy?
2784     */
2785    if (sgs->sum_nr_running)
2786        avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
2787
2788    if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
2789        sgs->group_imb = 1;
2790
2791    sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
2792                        SCHED_POWER_SCALE);
2793    if (!sgs->group_capacity)
2794        sgs->group_capacity = fix_small_capacity(sd, group);
2795    sgs->group_weight = group->group_weight;
2796
2797    if (sgs->group_capacity > sgs->sum_nr_running)
2798        sgs->group_has_capacity = 1;
2799}
2800
2801/**
2802 * update_sd_pick_busiest - return 1 on busiest group
2803 * @sd: sched_domain whose statistics are to be checked
2804 * @sds: sched_domain statistics
2805 * @sg: sched_group candidate to be checked for being the busiest
2806 * @sgs: sched_group statistics
2807 * @this_cpu: the current cpu
2808 *
2809 * Determine if @sg is a busier group than the previously selected
2810 * busiest group.
2811 */
2812static bool update_sd_pick_busiest(struct sched_domain *sd,
2813                   struct sd_lb_stats *sds,
2814                   struct sched_group *sg,
2815                   struct sg_lb_stats *sgs,
2816                   int this_cpu)
2817{
2818    if (sgs->avg_load <= sds->max_load)
2819        return false;
2820
2821    if (sgs->sum_nr_running > sgs->group_capacity)
2822        return true;
2823
2824    if (sgs->group_imb)
2825        return true;
2826
2827    /*
2828     * ASYM_PACKING needs to move all the work to the lowest
2829     * numbered CPUs in the group, therefore mark all groups
2830     * higher than ourself as busy.
2831     */
2832    if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
2833        this_cpu < group_first_cpu(sg)) {
2834        if (!sds->busiest)
2835            return true;
2836
2837        if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
2838            return true;
2839    }
2840
2841    return false;
2842}
2843
2844/**
2845 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
2846 * @sd: sched_domain whose statistics are to be updated.
2847 * @this_cpu: Cpu for which load balance is currently performed.
2848 * @idle: Idle status of this_cpu
2849 * @cpus: Set of cpus considered for load balancing.
2850 * @balance: Should we balance.
2851 * @sds: variable to hold the statistics for this sched_domain.
2852 */
2853static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
2854            enum cpu_idle_type idle, const struct cpumask *cpus,
2855            int *balance, struct sd_lb_stats *sds)
2856{
2857    struct sched_domain *child = sd->child;
2858    struct sched_group *sg = sd->groups;
2859    struct sg_lb_stats sgs;
2860    int load_idx, prefer_sibling = 0;
2861
2862    if (child && child->flags & SD_PREFER_SIBLING)
2863        prefer_sibling = 1;
2864
2865    init_sd_power_savings_stats(sd, sds, idle);
2866    load_idx = get_sd_load_idx(sd, idle);
2867
2868    do {
2869        int local_group;
2870
2871        local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
2872        memset(&sgs, 0, sizeof(sgs));
2873        update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
2874                local_group, cpus, balance, &sgs);
2875
2876        if (local_group && !(*balance))
2877            return;
2878
2879        sds->total_load += sgs.group_load;
2880        sds->total_pwr += sg->sgp->power;
2881
2882        /*
2883         * In case the child domain prefers tasks go to siblings
2884         * first, lower the sg capacity to one so that we'll try
2885         * and move all the excess tasks away. We lower the capacity
2886         * of a group only if the local group has the capacity to fit
2887         * these excess tasks, i.e. nr_running < group_capacity. The
2888         * extra check prevents the case where you always pull from the
2889         * heaviest group when it is already under-utilized (possible
2890         * with a large weight task outweighs the tasks on the system).
2891         */
2892        if (prefer_sibling && !local_group && sds->this_has_capacity)
2893            sgs.group_capacity = min(sgs.group_capacity, 1UL);
2894
2895        if (local_group) {
2896            sds->this_load = sgs.avg_load;
2897            sds->this = sg;
2898            sds->this_nr_running = sgs.sum_nr_running;
2899            sds->this_load_per_task = sgs.sum_weighted_load;
2900            sds->this_has_capacity = sgs.group_has_capacity;
2901            sds->this_idle_cpus = sgs.idle_cpus;
2902        } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
2903            sds->max_load = sgs.avg_load;
2904            sds->busiest = sg;
2905            sds->busiest_nr_running = sgs.sum_nr_running;
2906            sds->busiest_idle_cpus = sgs.idle_cpus;
2907            sds->busiest_group_capacity = sgs.group_capacity;
2908            sds->busiest_load_per_task = sgs.sum_weighted_load;
2909            sds->busiest_has_capacity = sgs.group_has_capacity;
2910            sds->busiest_group_weight = sgs.group_weight;
2911            sds->group_imb = sgs.group_imb;
2912        }
2913
2914        update_sd_power_savings_stats(sg, sds, local_group, &sgs);
2915        sg = sg->next;
2916    } while (sg != sd->groups);
2917}
2918
2919int __weak arch_sd_sibling_asym_packing(void)
2920{
2921       return 0*SD_ASYM_PACKING;
2922}
2923
2924/**
2925 * check_asym_packing - Check to see if the group is packed into the
2926 * sched doman.
2927 *
2928 * This is primarily intended to used at the sibling level. Some
2929 * cores like POWER7 prefer to use lower numbered SMT threads. In the
2930 * case of POWER7, it can move to lower SMT modes only when higher
2931 * threads are idle. When in lower SMT modes, the threads will
2932 * perform better since they share less core resources. Hence when we
2933 * have idle threads, we want them to be the higher ones.
2934 *
2935 * This packing function is run on idle threads. It checks to see if
2936 * the busiest CPU in this domain (core in the P7 case) has a higher
2937 * CPU number than the packing function is being run on. Here we are
2938 * assuming lower CPU number will be equivalent to lower a SMT thread
2939 * number.
2940 *
2941 * Returns 1 when packing is required and a task should be moved to
2942 * this CPU. The amount of the imbalance is returned in *imbalance.
2943 *
2944 * @sd: The sched_domain whose packing is to be checked.
2945 * @sds: Statistics of the sched_domain which is to be packed
2946 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2947 * @imbalance: returns amount of imbalanced due to packing.
2948 */
2949static int check_asym_packing(struct sched_domain *sd,
2950                  struct sd_lb_stats *sds,
2951                  int this_cpu, unsigned long *imbalance)
2952{
2953    int busiest_cpu;
2954
2955    if (!(sd->flags & SD_ASYM_PACKING))
2956        return 0;
2957
2958    if (!sds->busiest)
2959        return 0;
2960
2961    busiest_cpu = group_first_cpu(sds->busiest);
2962    if (this_cpu > busiest_cpu)
2963        return 0;
2964
2965    *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
2966                       SCHED_POWER_SCALE);
2967    return 1;
2968}
2969
2970/**
2971 * fix_small_imbalance - Calculate the minor imbalance that exists
2972 * amongst the groups of a sched_domain, during
2973 * load balancing.
2974 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
2975 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2976 * @imbalance: Variable to store the imbalance.
2977 */
2978static inline void fix_small_imbalance(struct sd_lb_stats *sds,
2979                int this_cpu, unsigned long *imbalance)
2980{
2981    unsigned long tmp, pwr_now = 0, pwr_move = 0;
2982    unsigned int imbn = 2;
2983    unsigned long scaled_busy_load_per_task;
2984
2985    if (sds->this_nr_running) {
2986        sds->this_load_per_task /= sds->this_nr_running;
2987        if (sds->busiest_load_per_task >
2988                sds->this_load_per_task)
2989            imbn = 1;
2990    } else
2991        sds->this_load_per_task =
2992            cpu_avg_load_per_task(this_cpu);
2993
2994    scaled_busy_load_per_task = sds->busiest_load_per_task
2995                     * SCHED_POWER_SCALE;
2996    scaled_busy_load_per_task /= sds->busiest->sgp->power;
2997
2998    if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
2999            (scaled_busy_load_per_task * imbn)) {
3000        *imbalance = sds->busiest_load_per_task;
3001        return;
3002    }
3003
3004    /*
3005     * OK, we don't have enough imbalance to justify moving tasks,
3006     * however we may be able to increase total CPU power used by
3007     * moving them.
3008     */
3009
3010    pwr_now += sds->busiest->sgp->power *
3011            min(sds->busiest_load_per_task, sds->max_load);
3012    pwr_now += sds->this->sgp->power *
3013            min(sds->this_load_per_task, sds->this_load);
3014    pwr_now /= SCHED_POWER_SCALE;
3015
3016    /* Amount of load we'd subtract */
3017    tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
3018        sds->busiest->sgp->power;
3019    if (sds->max_load > tmp)
3020        pwr_move += sds->busiest->sgp->power *
3021            min(sds->busiest_load_per_task, sds->max_load - tmp);
3022
3023    /* Amount of load we'd add */
3024    if (sds->max_load * sds->busiest->sgp->power <
3025        sds->busiest_load_per_task * SCHED_POWER_SCALE)
3026        tmp = (sds->max_load * sds->busiest->sgp->power) /
3027            sds->this->sgp->power;
3028    else
3029        tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
3030            sds->this->sgp->power;
3031    pwr_move += sds->this->sgp->power *
3032            min(sds->this_load_per_task, sds->this_load + tmp);
3033    pwr_move /= SCHED_POWER_SCALE;
3034
3035    /* Move if we gain throughput */
3036    if (pwr_move > pwr_now)
3037        *imbalance = sds->busiest_load_per_task;
3038}
3039
3040/**
3041 * calculate_imbalance - Calculate the amount of imbalance present within the
3042 * groups of a given sched_domain during load balance.
3043 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3044 * @this_cpu: Cpu for which currently load balance is being performed.
3045 * @imbalance: The variable to store the imbalance.
3046 */
3047static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3048        unsigned long *imbalance)
3049{
3050    unsigned long max_pull, load_above_capacity = ~0UL;
3051
3052    sds->busiest_load_per_task /= sds->busiest_nr_running;
3053    if (sds->group_imb) {
3054        sds->busiest_load_per_task =
3055            min(sds->busiest_load_per_task, sds->avg_load);
3056    }
3057
3058    /*
3059     * In the presence of smp nice balancing, certain scenarios can have
3060     * max load less than avg load(as we skip the groups at or below
3061     * its cpu_power, while calculating max_load..)
3062     */
3063    if (sds->max_load < sds->avg_load) {
3064        *imbalance = 0;
3065        return fix_small_imbalance(sds, this_cpu, imbalance);
3066    }
3067
3068    if (!sds->group_imb) {
3069        /*
3070         * Don't want to pull so many tasks that a group would go idle.
3071         */
3072        load_above_capacity = (sds->busiest_nr_running -
3073                        sds->busiest_group_capacity);
3074
3075        load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
3076
3077        load_above_capacity /= sds->busiest->sgp->power;
3078    }
3079
3080    /*
3081     * We're trying to get all the cpus to the average_load, so we don't
3082     * want to push ourselves above the average load, nor do we wish to
3083     * reduce the max loaded cpu below the average load. At the same time,
3084     * we also don't want to reduce the group load below the group capacity
3085     * (so that we can implement power-savings policies etc). Thus we look
3086     * for the minimum possible imbalance.
3087     * Be careful of negative numbers as they'll appear as very large values
3088     * with unsigned longs.
3089     */
3090    max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
3091
3092    /* How much load to actually move to equalise the imbalance */
3093    *imbalance = min(max_pull * sds->busiest->sgp->power,
3094        (sds->avg_load - sds->this_load) * sds->this->sgp->power)
3095            / SCHED_POWER_SCALE;
3096
3097    /*
3098     * if *imbalance is less than the average load per runnable task
3099     * there is no guarantee that any tasks will be moved so we'll have
3100     * a think about bumping its value to force at least one task to be
3101     * moved
3102     */
3103    if (*imbalance < sds->busiest_load_per_task)
3104        return fix_small_imbalance(sds, this_cpu, imbalance);
3105
3106}
3107
3108/******* find_busiest_group() helpers end here *********************/
3109
3110/**
3111 * find_busiest_group - Returns the busiest group within the sched_domain
3112 * if there is an imbalance. If there isn't an imbalance, and
3113 * the user has opted for power-savings, it returns a group whose
3114 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3115 * such a group exists.
3116 *
3117 * Also calculates the amount of weighted load which should be moved
3118 * to restore balance.
3119 *
3120 * @sd: The sched_domain whose busiest group is to be returned.
3121 * @this_cpu: The cpu for which load balancing is currently being performed.
3122 * @imbalance: Variable which stores amount of weighted load which should
3123 * be moved to restore balance/put a group to idle.
3124 * @idle: The idle status of this_cpu.
3125 * @cpus: The set of CPUs under consideration for load-balancing.
3126 * @balance: Pointer to a variable indicating if this_cpu
3127 * is the appropriate cpu to perform load balancing at this_level.
3128 *
3129 * Returns: - the busiest group if imbalance exists.
3130 * - If no imbalance and user has opted for power-savings balance,
3131 * return the least loaded group whose CPUs can be
3132 * put to idle by rebalancing its tasks onto our group.
3133 */
3134static struct sched_group *
3135find_busiest_group(struct sched_domain *sd, int this_cpu,
3136           unsigned long *imbalance, enum cpu_idle_type idle,
3137           const struct cpumask *cpus, int *balance)
3138{
3139    struct sd_lb_stats sds;
3140
3141    memset(&sds, 0, sizeof(sds));
3142
3143    /*
3144     * Compute the various statistics relavent for load balancing at
3145     * this level.
3146     */
3147    update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
3148
3149    /*
3150     * this_cpu is not the appropriate cpu to perform load balancing at
3151     * this level.
3152     */
3153    if (!(*balance))
3154        goto ret;
3155
3156    if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
3157        check_asym_packing(sd, &sds, this_cpu, imbalance))
3158        return sds.busiest;
3159
3160    /* There is no busy sibling group to pull tasks from */
3161    if (!sds.busiest || sds.busiest_nr_running == 0)
3162        goto out_balanced;
3163
3164    sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
3165
3166    /*
3167     * If the busiest group is imbalanced the below checks don't
3168     * work because they assumes all things are equal, which typically
3169     * isn't true due to cpus_allowed constraints and the like.
3170     */
3171    if (sds.group_imb)
3172        goto force_balance;
3173
3174    /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
3175    if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
3176            !sds.busiest_has_capacity)
3177        goto force_balance;
3178
3179    /*
3180     * If the local group is more busy than the selected busiest group
3181     * don't try and pull any tasks.
3182     */
3183    if (sds.this_load >= sds.max_load)
3184        goto out_balanced;
3185
3186    /*
3187     * Don't pull any tasks if this group is already above the domain
3188     * average load.
3189     */
3190    if (sds.this_load >= sds.avg_load)
3191        goto out_balanced;
3192
3193    if (idle == CPU_IDLE) {
3194        /*
3195         * This cpu is idle. If the busiest group load doesn't
3196         * have more tasks than the number of available cpu's and
3197         * there is no imbalance between this and busiest group
3198         * wrt to idle cpu's, it is balanced.
3199         */
3200        if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
3201            sds.busiest_nr_running <= sds.busiest_group_weight)
3202            goto out_balanced;
3203    } else {
3204        /*
3205         * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
3206         * imbalance_pct to be conservative.
3207         */
3208        if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3209            goto out_balanced;
3210    }
3211
3212force_balance:
3213    /* Looks like there is an imbalance. Compute it */
3214    calculate_imbalance(&sds, this_cpu, imbalance);
3215    return sds.busiest;
3216
3217out_balanced:
3218    /*
3219     * There is no obvious imbalance. But check if we can do some balancing
3220     * to save power.
3221     */
3222    if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
3223        return sds.busiest;
3224ret:
3225    *imbalance = 0;
3226    return NULL;
3227}
3228
3229/*
3230 * find_busiest_queue - find the busiest runqueue among the cpus in group.
3231 */
3232static struct rq *
3233find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
3234           enum cpu_idle_type idle, unsigned long imbalance,
3235           const struct cpumask *cpus)
3236{
3237    struct rq *busiest = NULL, *rq;
3238    unsigned long max_load = 0;
3239    int i;
3240
3241    for_each_cpu(i, sched_group_cpus(group)) {
3242        unsigned long power = power_of(i);
3243        unsigned long capacity = DIV_ROUND_CLOSEST(power,
3244                               SCHED_POWER_SCALE);
3245        unsigned long wl;
3246
3247        if (!capacity)
3248            capacity = fix_small_capacity(sd, group);
3249
3250        if (!cpumask_test_cpu(i, cpus))
3251            continue;
3252
3253        rq = cpu_rq(i);
3254        wl = weighted_cpuload(i);
3255
3256        /*
3257         * When comparing with imbalance, use weighted_cpuload()
3258         * which is not scaled with the cpu power.
3259         */
3260        if (capacity && rq->nr_running == 1 && wl > imbalance)
3261            continue;
3262
3263        /*
3264         * For the load comparisons with the other cpu's, consider
3265         * the weighted_cpuload() scaled with the cpu power, so that
3266         * the load can be moved away from the cpu that is potentially
3267         * running at a lower capacity.
3268         */
3269        wl = (wl * SCHED_POWER_SCALE) / power;
3270
3271        if (wl > max_load) {
3272            max_load = wl;
3273            busiest = rq;
3274        }
3275    }
3276
3277    return busiest;
3278}
3279
3280/*
3281 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
3282 * so long as it is large enough.
3283 */
3284#define MAX_PINNED_INTERVAL 512
3285
3286/* Working cpumask for load_balance and load_balance_newidle. */
3287static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
3288
3289static int need_active_balance(struct sched_domain *sd, int idle,
3290                   int busiest_cpu, int this_cpu)
3291{
3292    if (idle == CPU_NEWLY_IDLE) {
3293
3294        /*
3295         * ASYM_PACKING needs to force migrate tasks from busy but
3296         * higher numbered CPUs in order to pack all tasks in the
3297         * lowest numbered CPUs.
3298         */
3299        if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
3300            return 1;
3301
3302        /*
3303         * The only task running in a non-idle cpu can be moved to this
3304         * cpu in an attempt to completely freeup the other CPU
3305         * package.
3306         *
3307         * The package power saving logic comes from
3308         * find_busiest_group(). If there are no imbalance, then
3309         * f_b_g() will return NULL. However when sched_mc={1,2} then
3310         * f_b_g() will select a group from which a running task may be
3311         * pulled to this cpu in order to make the other package idle.
3312         * If there is no opportunity to make a package idle and if
3313         * there are no imbalance, then f_b_g() will return NULL and no
3314         * action will be taken in load_balance_newidle().
3315         *
3316         * Under normal task pull operation due to imbalance, there
3317         * will be more than one task in the source run queue and
3318         * move_tasks() will succeed. ld_moved will be true and this
3319         * active balance code will not be triggered.
3320         */
3321        if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
3322            return 0;
3323    }
3324
3325    return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
3326}
3327
3328static int active_load_balance_cpu_stop(void *data);
3329
3330/*
3331 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3332 * tasks if there is an imbalance.
3333 */
3334static int load_balance(int this_cpu, struct rq *this_rq,
3335            struct sched_domain *sd, enum cpu_idle_type idle,
3336            int *balance)
3337{
3338    int ld_moved, all_pinned = 0, active_balance = 0;
3339    struct sched_group *group;
3340    unsigned long imbalance;
3341    struct rq *busiest;
3342    unsigned long flags;
3343    struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
3344
3345    cpumask_copy(cpus, cpu_active_mask);
3346
3347    schedstat_inc(sd, lb_count[idle]);
3348
3349redo:
3350    group = find_busiest_group(sd, this_cpu, &imbalance, idle,
3351                   cpus, balance);
3352
3353    if (*balance == 0)
3354        goto out_balanced;
3355
3356    if (!group) {
3357        schedstat_inc(sd, lb_nobusyg[idle]);
3358        goto out_balanced;
3359    }
3360
3361    busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
3362    if (!busiest) {
3363        schedstat_inc(sd, lb_nobusyq[idle]);
3364        goto out_balanced;
3365    }
3366
3367    BUG_ON(busiest == this_rq);
3368
3369    schedstat_add(sd, lb_imbalance[idle], imbalance);
3370
3371    ld_moved = 0;
3372    if (busiest->nr_running > 1) {
3373        /*
3374         * Attempt to move tasks. If find_busiest_group has found
3375         * an imbalance but busiest->nr_running <= 1, the group is
3376         * still unbalanced. ld_moved simply stays zero, so it is
3377         * correctly treated as an imbalance.
3378         */
3379        all_pinned = 1;
3380        local_irq_save(flags);
3381        double_rq_lock(this_rq, busiest);
3382        ld_moved = move_tasks(this_rq, this_cpu, busiest,
3383                      imbalance, sd, idle, &all_pinned);
3384        double_rq_unlock(this_rq, busiest);
3385        local_irq_restore(flags);
3386
3387        /*
3388         * some other cpu did the load balance for us.
3389         */
3390        if (ld_moved && this_cpu != smp_processor_id())
3391            resched_cpu(this_cpu);
3392
3393        /* All tasks on this runqueue were pinned by CPU affinity */
3394        if (unlikely(all_pinned)) {
3395            cpumask_clear_cpu(cpu_of(busiest), cpus);
3396            if (!cpumask_empty(cpus))
3397                goto redo;
3398            goto out_balanced;
3399        }
3400    }
3401
3402    if (!ld_moved) {
3403        schedstat_inc(sd, lb_failed[idle]);
3404        /*
3405         * Increment the failure counter only on periodic balance.
3406         * We do not want newidle balance, which can be very
3407         * frequent, pollute the failure counter causing
3408         * excessive cache_hot migrations and active balances.
3409         */
3410        if (idle != CPU_NEWLY_IDLE)
3411            sd->nr_balance_failed++;
3412
3413        if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
3414            raw_spin_lock_irqsave(&busiest->lock, flags);
3415
3416            /* don't kick the active_load_balance_cpu_stop,
3417             * if the curr task on busiest cpu can't be
3418             * moved to this_cpu
3419             */
3420            if (!cpumask_test_cpu(this_cpu,
3421                          &busiest->curr->cpus_allowed)) {
3422                raw_spin_unlock_irqrestore(&busiest->lock,
3423                                flags);
3424                all_pinned = 1;
3425                goto out_one_pinned;
3426            }
3427
3428            /*
3429             * ->active_balance synchronizes accesses to
3430             * ->active_balance_work. Once set, it's cleared
3431             * only after active load balance is finished.
3432             */
3433            if (!busiest->active_balance) {
3434                busiest->active_balance = 1;
3435                busiest->push_cpu = this_cpu;
3436                active_balance = 1;
3437            }
3438            raw_spin_unlock_irqrestore(&busiest->lock, flags);
3439
3440            if (active_balance)
3441                stop_one_cpu_nowait(cpu_of(busiest),
3442                    active_load_balance_cpu_stop, busiest,
3443                    &busiest->active_balance_work);
3444
3445            /*
3446             * We've kicked active balancing, reset the failure
3447             * counter.
3448             */
3449            sd->nr_balance_failed = sd->cache_nice_tries+1;
3450        }
3451    } else
3452        sd->nr_balance_failed = 0;
3453
3454    if (likely(!active_balance)) {
3455        /* We were unbalanced, so reset the balancing interval */
3456        sd->balance_interval = sd->min_interval;
3457    } else {
3458        /*
3459         * If we've begun active balancing, start to back off. This
3460         * case may not be covered by the all_pinned logic if there
3461         * is only 1 task on the busy runqueue (because we don't call
3462         * move_tasks).
3463         */
3464        if (sd->balance_interval < sd->max_interval)
3465            sd->balance_interval *= 2;
3466    }
3467
3468    goto out;
3469
3470out_balanced:
3471    schedstat_inc(sd, lb_balanced[idle]);
3472
3473    sd->nr_balance_failed = 0;
3474
3475out_one_pinned:
3476    /* tune up the balancing interval */
3477    if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3478            (sd->balance_interval < sd->max_interval))
3479        sd->balance_interval *= 2;
3480
3481    ld_moved = 0;
3482out:
3483    return ld_moved;
3484}
3485
3486/*
3487 * idle_balance is called by schedule() if this_cpu is about to become
3488 * idle. Attempts to pull tasks from other CPUs.
3489 */
3490static void idle_balance(int this_cpu, struct rq *this_rq)
3491{
3492    struct sched_domain *sd;
3493    int pulled_task = 0;
3494    unsigned long next_balance = jiffies + HZ;
3495
3496    this_rq->idle_stamp = this_rq->clock;
3497
3498    if (this_rq->avg_idle < sysctl_sched_migration_cost)
3499        return;
3500
3501    /*
3502     * Drop the rq->lock, but keep IRQ/preempt disabled.
3503     */
3504    raw_spin_unlock(&this_rq->lock);
3505
3506    update_shares(this_cpu);
3507    rcu_read_lock();
3508    for_each_domain(this_cpu, sd) {
3509        unsigned long interval;
3510        int balance = 1;
3511
3512        if (!(sd->flags & SD_LOAD_BALANCE))
3513            continue;
3514
3515        if (sd->flags & SD_BALANCE_NEWIDLE) {
3516            /* If we've pulled tasks over stop searching: */
3517            pulled_task = load_balance(this_cpu, this_rq,
3518                           sd, CPU_NEWLY_IDLE, &balance);
3519        }
3520
3521        interval = msecs_to_jiffies(sd->balance_interval);
3522        if (time_after(next_balance, sd->last_balance + interval))
3523            next_balance = sd->last_balance + interval;
3524        if (pulled_task) {
3525            this_rq->idle_stamp = 0;
3526            break;
3527        }
3528    }
3529    rcu_read_unlock();
3530
3531    raw_spin_lock(&this_rq->lock);
3532
3533    if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3534        /*
3535         * We are going idle. next_balance may be set based on
3536         * a busy processor. So reset next_balance.
3537         */
3538        this_rq->next_balance = next_balance;
3539    }
3540}
3541
3542/*
3543 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
3544 * running tasks off the busiest CPU onto idle CPUs. It requires at
3545 * least 1 task to be running on each physical CPU where possible, and
3546 * avoids physical / logical imbalances.
3547 */
3548static int active_load_balance_cpu_stop(void *data)
3549{
3550    struct rq *busiest_rq = data;
3551    int busiest_cpu = cpu_of(busiest_rq);
3552    int target_cpu = busiest_rq->push_cpu;
3553    struct rq *target_rq = cpu_rq(target_cpu);
3554    struct sched_domain *sd;
3555
3556    raw_spin_lock_irq(&busiest_rq->lock);
3557
3558    /* make sure the requested cpu hasn't gone down in the meantime */
3559    if (unlikely(busiest_cpu != smp_processor_id() ||
3560             !busiest_rq->active_balance))
3561        goto out_unlock;
3562
3563    /* Is there any task to move? */
3564    if (busiest_rq->nr_running <= 1)
3565        goto out_unlock;
3566
3567    /*
3568     * This condition is "impossible", if it occurs
3569     * we need to fix it. Originally reported by
3570     * Bjorn Helgaas on a 128-cpu setup.
3571     */
3572    BUG_ON(busiest_rq == target_rq);
3573
3574    /* move a task from busiest_rq to target_rq */
3575    double_lock_balance(busiest_rq, target_rq);
3576
3577    /* Search for an sd spanning us and the target CPU. */
3578    rcu_read_lock();
3579    for_each_domain(target_cpu, sd) {
3580        if ((sd->flags & SD_LOAD_BALANCE) &&
3581            cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3582                break;
3583    }
3584
3585    if (likely(sd)) {
3586        schedstat_inc(sd, alb_count);
3587
3588        if (move_one_task(target_rq, target_cpu, busiest_rq,
3589                  sd, CPU_IDLE))
3590            schedstat_inc(sd, alb_pushed);
3591        else
3592            schedstat_inc(sd, alb_failed);
3593    }
3594    rcu_read_unlock();
3595    double_unlock_balance(busiest_rq, target_rq);
3596out_unlock:
3597    busiest_rq->active_balance = 0;
3598    raw_spin_unlock_irq(&busiest_rq->lock);
3599    return 0;
3600}
3601
3602#ifdef CONFIG_NO_HZ
3603
3604static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
3605
3606static void trigger_sched_softirq(void *data)
3607{
3608    raise_softirq_irqoff(SCHED_SOFTIRQ);
3609}
3610
3611static inline void init_sched_softirq_csd(struct call_single_data *csd)
3612{
3613    csd->func = trigger_sched_softirq;
3614    csd->info = NULL;
3615    csd->flags = 0;
3616    csd->priv = 0;
3617}
3618
3619/*
3620 * idle load balancing details
3621 * - One of the idle CPUs nominates itself as idle load_balancer, while
3622 * entering idle.
3623 * - This idle load balancer CPU will also go into tickless mode when
3624 * it is idle, just like all other idle CPUs
3625 * - When one of the busy CPUs notice that there may be an idle rebalancing
3626 * needed, they will kick the idle load balancer, which then does idle
3627 * load balancing for all the idle CPUs.
3628 */
3629static struct {
3630    atomic_t load_balancer;
3631    atomic_t first_pick_cpu;
3632    atomic_t second_pick_cpu;
3633    cpumask_var_t idle_cpus_mask;
3634    cpumask_var_t grp_idle_mask;
3635    unsigned long next_balance; /* in jiffy units */
3636} nohz ____cacheline_aligned;
3637
3638int get_nohz_load_balancer(void)
3639{
3640    return atomic_read(&nohz.load_balancer);
3641}
3642
3643#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3644/**
3645 * lowest_flag_domain - Return lowest sched_domain containing flag.
3646 * @cpu: The cpu whose lowest level of sched domain is to
3647 * be returned.
3648 * @flag: The flag to check for the lowest sched_domain
3649 * for the given cpu.
3650 *
3651 * Returns the lowest sched_domain of a cpu which contains the given flag.
3652 */
3653static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
3654{
3655    struct sched_domain *sd;
3656
3657    for_each_domain(cpu, sd)
3658        if (sd && (sd->flags & flag))
3659            break;
3660
3661    return sd;
3662}
3663
3664/**
3665 * for_each_flag_domain - Iterates over sched_domains containing the flag.
3666 * @cpu: The cpu whose domains we're iterating over.
3667 * @sd: variable holding the value of the power_savings_sd
3668 * for cpu.
3669 * @flag: The flag to filter the sched_domains to be iterated.
3670 *
3671 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
3672 * set, starting from the lowest sched_domain to the highest.
3673 */
3674#define for_each_flag_domain(cpu, sd, flag) \
3675    for (sd = lowest_flag_domain(cpu, flag); \
3676        (sd && (sd->flags & flag)); sd = sd->parent)
3677
3678/**
3679 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
3680 * @ilb_group: group to be checked for semi-idleness
3681 *
3682 * Returns: 1 if the group is semi-idle. 0 otherwise.
3683 *
3684 * We define a sched_group to be semi idle if it has atleast one idle-CPU
3685 * and atleast one non-idle CPU. This helper function checks if the given
3686 * sched_group is semi-idle or not.
3687 */
3688static inline int is_semi_idle_group(struct sched_group *ilb_group)
3689{
3690    cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
3691                    sched_group_cpus(ilb_group));
3692
3693    /*
3694     * A sched_group is semi-idle when it has atleast one busy cpu
3695     * and atleast one idle cpu.
3696     */
3697    if (cpumask_empty(nohz.grp_idle_mask))
3698        return 0;
3699
3700    if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
3701        return 0;
3702
3703    return 1;
3704}
3705/**
3706 * find_new_ilb - Finds the optimum idle load balancer for nomination.
3707 * @cpu: The cpu which is nominating a new idle_load_balancer.
3708 *
3709 * Returns: Returns the id of the idle load balancer if it exists,
3710 * Else, returns >= nr_cpu_ids.
3711 *
3712 * This algorithm picks the idle load balancer such that it belongs to a
3713 * semi-idle powersavings sched_domain. The idea is to try and avoid
3714 * completely idle packages/cores just for the purpose of idle load balancing
3715 * when there are other idle cpu's which are better suited for that job.
3716 */
3717static int find_new_ilb(int cpu)
3718{
3719    struct sched_domain *sd;
3720    struct sched_group *ilb_group;
3721    int ilb = nr_cpu_ids;
3722
3723    /*
3724     * Have idle load balancer selection from semi-idle packages only
3725     * when power-aware load balancing is enabled
3726     */
3727    if (!(sched_smt_power_savings || sched_mc_power_savings))
3728        goto out_done;
3729
3730    /*
3731     * Optimize for the case when we have no idle CPUs or only one
3732     * idle CPU. Don't walk the sched_domain hierarchy in such cases
3733     */
3734    if (cpumask_weight(nohz.idle_cpus_mask) < 2)
3735        goto out_done;
3736
3737    rcu_read_lock();
3738    for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
3739        ilb_group = sd->groups;
3740
3741        do {
3742            if (is_semi_idle_group(ilb_group)) {
3743                ilb = cpumask_first(nohz.grp_idle_mask);
3744                goto unlock;
3745            }
3746
3747            ilb_group = ilb_group->next;
3748
3749        } while (ilb_group != sd->groups);
3750    }
3751unlock:
3752    rcu_read_unlock();
3753
3754out_done:
3755    return ilb;
3756}
3757#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
3758static inline int find_new_ilb(int call_cpu)
3759{
3760    return nr_cpu_ids;
3761}
3762#endif
3763
3764/*
3765 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
3766 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
3767 * CPU (if there is one).
3768 */
3769static void nohz_balancer_kick(int cpu)
3770{
3771    int ilb_cpu;
3772
3773    nohz.next_balance++;
3774
3775    ilb_cpu = get_nohz_load_balancer();
3776
3777    if (ilb_cpu >= nr_cpu_ids) {
3778        ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
3779        if (ilb_cpu >= nr_cpu_ids)
3780            return;
3781    }
3782
3783    if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
3784        struct call_single_data *cp;
3785
3786        cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
3787        cp = &per_cpu(remote_sched_softirq_cb, cpu);
3788        __smp_call_function_single(ilb_cpu, cp, 0);
3789    }
3790    return;
3791}
3792
3793/*
3794 * This routine will try to nominate the ilb (idle load balancing)
3795 * owner among the cpus whose ticks are stopped. ilb owner will do the idle
3796 * load balancing on behalf of all those cpus.
3797 *
3798 * When the ilb owner becomes busy, we will not have new ilb owner until some
3799 * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
3800 * idle load balancing by kicking one of the idle CPUs.
3801 *
3802 * Ticks are stopped for the ilb owner as well, with busy CPU kicking this
3803 * ilb owner CPU in future (when there is a need for idle load balancing on
3804 * behalf of all idle CPUs).
3805 */
3806void select_nohz_load_balancer(int stop_tick)
3807{
3808    int cpu = smp_processor_id();
3809
3810    if (stop_tick) {
3811        if (!cpu_active(cpu)) {
3812            if (atomic_read(&nohz.load_balancer) != cpu)
3813                return;
3814
3815            /*
3816             * If we are going offline and still the leader,
3817             * give up!
3818             */
3819            if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3820                       nr_cpu_ids) != cpu)
3821                BUG();
3822
3823            return;
3824        }
3825
3826        cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
3827
3828        if (atomic_read(&nohz.first_pick_cpu) == cpu)
3829            atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
3830        if (atomic_read(&nohz.second_pick_cpu) == cpu)
3831            atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
3832
3833        if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
3834            int new_ilb;
3835
3836            /* make me the ilb owner */
3837            if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
3838                       cpu) != nr_cpu_ids)
3839                return;
3840
3841            /*
3842             * Check to see if there is a more power-efficient
3843             * ilb.
3844             */
3845            new_ilb = find_new_ilb(cpu);
3846            if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
3847                atomic_set(&nohz.load_balancer, nr_cpu_ids);
3848                resched_cpu(new_ilb);
3849                return;
3850            }
3851            return;
3852        }
3853    } else {
3854        if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
3855            return;
3856
3857        cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
3858
3859        if (atomic_read(&nohz.load_balancer) == cpu)
3860            if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3861                       nr_cpu_ids) != cpu)
3862                BUG();
3863    }
3864    return;
3865}
3866#endif
3867
3868static DEFINE_SPINLOCK(balancing);
3869
3870static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3871
3872/*
3873 * Scale the max load_balance interval with the number of CPUs in the system.
3874 * This trades load-balance latency on larger machines for less cross talk.
3875 */
3876static void update_max_interval(void)
3877{
3878    max_load_balance_interval = HZ*num_online_cpus()/10;
3879}
3880
3881/*
3882 * It checks each scheduling domain to see if it is due to be balanced,
3883 * and initiates a balancing operation if so.
3884 *
3885 * Balancing parameters are set up in arch_init_sched_domains.
3886 */
3887static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3888{
3889    int balance = 1;
3890    struct rq *rq = cpu_rq(cpu);
3891    unsigned long interval;
3892    struct sched_domain *sd;
3893    /* Earliest time when we have to do rebalance again */
3894    unsigned long next_balance = jiffies + 60*HZ;
3895    int update_next_balance = 0;
3896    int need_serialize;
3897
3898    update_shares(cpu);
3899
3900    rcu_read_lock();
3901    for_each_domain(cpu, sd) {
3902        if (!(sd->flags & SD_LOAD_BALANCE))
3903            continue;
3904
3905        interval = sd->balance_interval;
3906        if (idle != CPU_IDLE)
3907            interval *= sd->busy_factor;
3908
3909        /* scale ms to jiffies */
3910        interval = msecs_to_jiffies(interval);
3911        interval = clamp(interval, 1UL, max_load_balance_interval);
3912
3913        need_serialize = sd->flags & SD_SERIALIZE;
3914
3915        if (need_serialize) {
3916            if (!spin_trylock(&balancing))
3917                goto out;
3918        }
3919
3920        if (time_after_eq(jiffies, sd->last_balance + interval)) {
3921            if (load_balance(cpu, rq, sd, idle, &balance)) {
3922                /*
3923                 * We've pulled tasks over so either we're no
3924                 * longer idle.
3925                 */
3926                idle = CPU_NOT_IDLE;
3927            }
3928            sd->last_balance = jiffies;
3929        }
3930        if (need_serialize)
3931            spin_unlock(&balancing);
3932out:
3933        if (time_after(next_balance, sd->last_balance + interval)) {
3934            next_balance = sd->last_balance + interval;
3935            update_next_balance = 1;
3936        }
3937
3938        /*
3939         * Stop the load balance at this level. There is another
3940         * CPU in our sched group which is doing load balancing more
3941         * actively.
3942         */
3943        if (!balance)
3944            break;
3945    }
3946    rcu_read_unlock();
3947
3948    /*
3949     * next_balance will be updated only when there is a need.
3950     * When the cpu is attached to null domain for ex, it will not be
3951     * updated.
3952     */
3953    if (likely(update_next_balance))
3954        rq->next_balance = next_balance;
3955}
3956
3957#ifdef CONFIG_NO_HZ
3958/*
3959 * In CONFIG_NO_HZ case, the idle balance kickee will do the
3960 * rebalancing for all the cpus for whom scheduler ticks are stopped.
3961 */
3962static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
3963{
3964    struct rq *this_rq = cpu_rq(this_cpu);
3965    struct rq *rq;
3966    int balance_cpu;
3967
3968    if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
3969        return;
3970
3971    for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
3972        if (balance_cpu == this_cpu)
3973            continue;
3974
3975        /*
3976         * If this cpu gets work to do, stop the load balancing
3977         * work being done for other cpus. Next load
3978         * balancing owner will pick it up.
3979         */
3980        if (need_resched()) {
3981            this_rq->nohz_balance_kick = 0;
3982            break;
3983        }
3984
3985        raw_spin_lock_irq(&this_rq->lock);
3986        update_rq_clock(this_rq);
3987        update_cpu_load(this_rq);
3988        raw_spin_unlock_irq(&this_rq->lock);
3989
3990        rebalance_domains(balance_cpu, CPU_IDLE);
3991
3992        rq = cpu_rq(balance_cpu);
3993        if (time_after(this_rq->next_balance, rq->next_balance))
3994            this_rq->next_balance = rq->next_balance;
3995    }
3996    nohz.next_balance = this_rq->next_balance;
3997    this_rq->nohz_balance_kick = 0;
3998}
3999
4000/*
4001 * Current heuristic for kicking the idle load balancer
4002 * - first_pick_cpu is the one of the busy CPUs. It will kick
4003 * idle load balancer when it has more than one process active. This
4004 * eliminates the need for idle load balancing altogether when we have
4005 * only one running process in the system (common case).
4006 * - If there are more than one busy CPU, idle load balancer may have
4007 * to run for active_load_balance to happen (i.e., two busy CPUs are
4008 * SMT or core siblings and can run better if they move to different
4009 * physical CPUs). So, second_pick_cpu is the second of the busy CPUs
4010 * which will kick idle load balancer as soon as it has any load.
4011 */
4012static inline int nohz_kick_needed(struct rq *rq, int cpu)
4013{
4014    unsigned long now = jiffies;
4015    int ret;
4016    int first_pick_cpu, second_pick_cpu;
4017
4018    if (time_before(now, nohz.next_balance))
4019        return 0;
4020
4021    if (rq->idle_at_tick)
4022        return 0;
4023
4024    first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
4025    second_pick_cpu = atomic_read(&nohz.second_pick_cpu);
4026
4027    if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
4028        second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
4029        return 0;
4030
4031    ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
4032    if (ret == nr_cpu_ids || ret == cpu) {
4033        atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
4034        if (rq->nr_running > 1)
4035            return 1;
4036    } else {
4037        ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
4038        if (ret == nr_cpu_ids || ret == cpu) {
4039            if (rq->nr_running)
4040                return 1;
4041        }
4042    }
4043    return 0;
4044}
4045#else
4046static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
4047#endif
4048
4049/*
4050 * run_rebalance_domains is triggered when needed from the scheduler tick.
4051 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
4052 */
4053static void run_rebalance_domains(struct softirq_action *h)
4054{
4055    int this_cpu = smp_processor_id();
4056    struct rq *this_rq = cpu_rq(this_cpu);
4057    enum cpu_idle_type idle = this_rq->idle_at_tick ?
4058                        CPU_IDLE : CPU_NOT_IDLE;
4059
4060    rebalance_domains(this_cpu, idle);
4061
4062    /*
4063     * If this cpu has a pending nohz_balance_kick, then do the
4064     * balancing on behalf of the other idle cpus whose ticks are
4065     * stopped.
4066     */
4067    nohz_idle_balance(this_cpu, idle);
4068}
4069
4070static inline int on_null_domain(int cpu)
4071{
4072    return !rcu_dereference_sched(cpu_rq(cpu)->sd);
4073}
4074
4075/*
4076 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4077 */
4078static inline void trigger_load_balance(struct rq *rq, int cpu)
4079{
4080    /* Don't need to rebalance while attached to NULL domain */
4081    if (time_after_eq(jiffies, rq->next_balance) &&
4082        likely(!on_null_domain(cpu)))
4083        raise_softirq(SCHED_SOFTIRQ);
4084#ifdef CONFIG_NO_HZ
4085    else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
4086        nohz_balancer_kick(cpu);
4087#endif
4088}
4089
4090static void rq_online_fair(struct rq *rq)
4091{
4092    update_sysctl();
4093}
4094
4095static void rq_offline_fair(struct rq *rq)
4096{
4097    update_sysctl();
4098}
4099
4100#else /* CONFIG_SMP */
4101
4102/*
4103 * on UP we do not need to balance between CPUs:
4104 */
4105static inline void idle_balance(int cpu, struct rq *rq)
4106{
4107}
4108
4109#endif /* CONFIG_SMP */
4110
4111/*
4112 * scheduler tick hitting a task of our scheduling class:
4113 */
4114static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
4115{
4116    struct cfs_rq *cfs_rq;
4117    struct sched_entity *se = &curr->se;
4118
4119    for_each_sched_entity(se) {
4120        cfs_rq = cfs_rq_of(se);
4121        entity_tick(cfs_rq, se, queued);
4122    }
4123}
4124
4125/*
4126 * called on fork with the child task as argument from the parent's context
4127 * - child not yet on the tasklist
4128 * - preemption disabled
4129 */
4130static void task_fork_fair(struct task_struct *p)
4131{
4132    struct cfs_rq *cfs_rq = task_cfs_rq(current);
4133    struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
4134    int this_cpu = smp_processor_id();
4135    struct rq *rq = this_rq();
4136    unsigned long flags;
4137
4138    raw_spin_lock_irqsave(&rq->lock, flags);
4139
4140    update_rq_clock(rq);
4141
4142    if (unlikely(task_cpu(p) != this_cpu)) {
4143        rcu_read_lock();
4144        __set_task_cpu(p, this_cpu);
4145        rcu_read_unlock();
4146    }
4147
4148    update_curr(cfs_rq);
4149
4150    if (curr)
4151        se->vruntime = curr->vruntime;
4152    place_entity(cfs_rq, se, 1);
4153
4154    if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
4155        /*
4156         * Upon rescheduling, sched_class::put_prev_task() will place
4157         * 'current' within the tree based on its new key value.
4158         */
4159        swap(curr->vruntime, se->vruntime);
4160        resched_task(rq->curr);
4161    }
4162
4163    se->vruntime -= cfs_rq->min_vruntime;
4164
4165    raw_spin_unlock_irqrestore(&rq->lock, flags);
4166}
4167
4168/*
4169 * Priority of the task has changed. Check to see if we preempt
4170 * the current task.
4171 */
4172static void
4173prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
4174{
4175    if (!p->se.on_rq)
4176        return;
4177
4178    /*
4179     * Reschedule if we are currently running on this runqueue and
4180     * our priority decreased, or if we are not currently running on
4181     * this runqueue and our priority is higher than the current's
4182     */
4183    if (rq->curr == p) {
4184        if (p->prio > oldprio)
4185            resched_task(rq->curr);
4186    } else
4187        check_preempt_curr(rq, p, 0);
4188}
4189
4190static void switched_from_fair(struct rq *rq, struct task_struct *p)
4191{
4192    struct sched_entity *se = &p->se;
4193    struct cfs_rq *cfs_rq = cfs_rq_of(se);
4194
4195    /*
4196     * Ensure the task's vruntime is normalized, so that when its
4197     * switched back to the fair class the enqueue_entity(.flags=0) will
4198     * do the right thing.
4199     *
4200     * If it was on_rq, then the dequeue_entity(.flags=0) will already
4201     * have normalized the vruntime, if it was !on_rq, then only when
4202     * the task is sleeping will it still have non-normalized vruntime.
4203     */
4204    if (!se->on_rq && p->state != TASK_RUNNING) {
4205        /*
4206         * Fix up our vruntime so that the current sleep doesn't
4207         * cause 'unlimited' sleep bonus.
4208         */
4209        place_entity(cfs_rq, se, 0);
4210        se->vruntime -= cfs_rq->min_vruntime;
4211    }
4212}
4213
4214/*
4215 * We switched to the sched_fair class.
4216 */
4217static void switched_to_fair(struct rq *rq, struct task_struct *p)
4218{
4219    if (!p->se.on_rq)
4220        return;
4221
4222    /*
4223     * We were most likely switched from sched_rt, so
4224     * kick off the schedule if running, otherwise just see
4225     * if we can still preempt the current task.
4226     */
4227    if (rq->curr == p)
4228        resched_task(rq->curr);
4229    else
4230        check_preempt_curr(rq, p, 0);
4231}
4232
4233/* Account for a task changing its policy or group.
4234 *
4235 * This routine is mostly called to set cfs_rq->curr field when a task
4236 * migrates between groups/classes.
4237 */
4238static void set_curr_task_fair(struct rq *rq)
4239{
4240    struct sched_entity *se = &rq->curr->se;
4241
4242    for_each_sched_entity(se)
4243        set_next_entity(cfs_rq_of(se), se);
4244}
4245
4246#ifdef CONFIG_FAIR_GROUP_SCHED
4247static void task_move_group_fair(struct task_struct *p, int on_rq)
4248{
4249    /*
4250     * If the task was not on the rq at the time of this cgroup movement
4251     * it must have been asleep, sleeping tasks keep their ->vruntime
4252     * absolute on their old rq until wakeup (needed for the fair sleeper
4253     * bonus in place_entity()).
4254     *
4255     * If it was on the rq, we've just 'preempted' it, which does convert
4256     * ->vruntime to a relative base.
4257     *
4258     * Make sure both cases convert their relative position when migrating
4259     * to another cgroup's rq. This does somewhat interfere with the
4260     * fair sleeper stuff for the first placement, but who cares.
4261     */
4262    if (!on_rq)
4263        p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
4264    set_task_rq(p, task_cpu(p));
4265    if (!on_rq)
4266        p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
4267}
4268#endif
4269
4270static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
4271{
4272    struct sched_entity *se = &task->se;
4273    unsigned int rr_interval = 0;
4274
4275    /*
4276     * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
4277     * idle runqueue:
4278     */
4279    if (rq->cfs.load.weight)
4280        rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
4281
4282    return rr_interval;
4283}
4284
4285/*
4286 * All the scheduling class methods:
4287 */
4288static const struct sched_class fair_sched_class = {
4289    .next = &idle_sched_class,
4290    .enqueue_task = enqueue_task_fair,
4291    .dequeue_task = dequeue_task_fair,
4292    .yield_task = yield_task_fair,
4293    .yield_to_task = yield_to_task_fair,
4294
4295    .check_preempt_curr = check_preempt_wakeup,
4296
4297    .pick_next_task = pick_next_task_fair,
4298    .put_prev_task = put_prev_task_fair,
4299
4300#ifdef CONFIG_SMP
4301    .select_task_rq = select_task_rq_fair,
4302
4303    .rq_online = rq_online_fair,
4304    .rq_offline = rq_offline_fair,
4305
4306    .task_waking = task_waking_fair,
4307#endif
4308
4309    .set_curr_task = set_curr_task_fair,
4310    .task_tick = task_tick_fair,
4311    .task_fork = task_fork_fair,
4312
4313    .prio_changed = prio_changed_fair,
4314    .switched_from = switched_from_fair,
4315    .switched_to = switched_to_fair,
4316
4317    .get_rr_interval = get_rr_interval_fair,
4318
4319#ifdef CONFIG_FAIR_GROUP_SCHED
4320    .task_move_group = task_move_group_fair,
4321#endif
4322};
4323
4324#ifdef CONFIG_SCHED_DEBUG
4325static void print_cfs_stats(struct seq_file *m, int cpu)
4326{
4327    struct cfs_rq *cfs_rq;
4328
4329    rcu_read_lock();
4330    for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
4331        print_cfs_rq(m, cpu, cfs_rq);
4332    rcu_read_unlock();
4333}
4334#endif
4335

Archive Download this file



interactive