
/*
 * kernel/sched.c
 *
 * Kernel scheduler and related syscalls
 *
 * Copyright (C) 1991-2002 Linus Torvalds
 *
 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
 * make semaphores SMP safe
 * 1998-11-19 Implemented schedule_timeout() and related stuff
 * by Andrea Arcangeli
 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
 * hybrid priority-list and round-robin design with
 * an array-switch method of distributing timeslices
 * and per-CPU runqueues. Cleanups and useful suggestions
 * by Davide Libenzi, preemptible kernel bits by Robert Love.
 * 2003-09-03 Interactivity tuning by Con Kolivas.
 * 2004-04-02 Scheduler domains code by Nick Piggin
 * 2007-04-15 Work begun on replacing all interactivity tuning with a
 * fair scheduling design by Con Kolivas.
 * 2007-05-05 Load balancing (smp-nice) and other improvements
 * by Peter Williams
 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
 * Thomas Gleixner, Mike Kravetz
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>
#include <asm/mutex.h>

#include "sched_cpupri.h"
#include "workqueue_sched.h"
#include "sched_autogroup.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
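
/*
 * Illustrative arithmetic (added commentary), assuming the default
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140:
 *
 *   NICE_TO_PRIO(0)   == 100 + 0 + 20  == 120
 *   NICE_TO_PRIO(-20) == 100 - 20 + 20 == 100  (== MAX_RT_PRIO)
 *   NICE_TO_PRIO(19)  == 100 + 19 + 20 == 139  (== MAX_PRIO - 1)
 *   TASK_USER_PRIO(p) for a nice-0 task == 120 - 100 == 20
 *   MAX_USER_PRIO     == 140 - 100 == 40
 */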

/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))

#define NICE_0_LOAD SCHED_LOAD_SCALE
#define NICE_0_SHIFT SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE (100 * HZ / 1000)
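
/*
 * Worked example (added commentary): with HZ == 1000 this is 100
 * jiffies; with HZ == 250 it is 25 jiffies - in both cases 100ms of
 * wall time.
 */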

/*
 * single value that denotes runtime == period, i.e. unlimited time.
 */
#define RUNTIME_INF ((u64)~0ULL)

static inline int rt_policy(int policy)
{
    if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
        return 1;
    return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
    return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
    DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
    struct list_head queue[MAX_RT_PRIO];
};
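
/*
 * Illustrative sketch (added commentary, mirroring what the RT class
 * in sched_rt.c does): the highest-priority non-empty queue is found
 * via the bitmap, roughly:
 *
 *   idx = sched_find_first_bit(array->bitmap);
 *   next = list_first_entry(&array->queue[idx],
 *                           struct sched_rt_entity, run_list);
 */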

struct rt_bandwidth {
    /* nests inside the rq lock: */
    raw_spinlock_t rt_runtime_lock;
    ktime_t rt_period;
    u64 rt_runtime;
    struct hrtimer rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
    struct rt_bandwidth *rt_b =
        container_of(timer, struct rt_bandwidth, rt_period_timer);
    ktime_t now;
    int overrun;
    int idle = 0;

    for (;;) {
        now = hrtimer_cb_get_time(timer);
        overrun = hrtimer_forward(timer, now, rt_b->rt_period);

        if (!overrun)
            break;

        idle = do_sched_rt_period_timer(rt_b, overrun);
    }

    return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
    rt_b->rt_period = ns_to_ktime(period);
    rt_b->rt_runtime = runtime;

    raw_spin_lock_init(&rt_b->rt_runtime_lock);

    hrtimer_init(&rt_b->rt_period_timer,
            CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    rt_b->rt_period_timer.function = sched_rt_period_timer;
}
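
/*
 * Illustrative usage (a sketch, added commentary; the actual call site
 * is in sched_init() further down this file):
 *
 *   init_rt_bandwidth(&def_rt_bandwidth,
 *                     global_rt_period(), global_rt_runtime());
 */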

static inline int rt_bandwidth_enabled(void)
{
    return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
    ktime_t now;

    if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
        return;

    if (hrtimer_active(&rt_b->rt_period_timer))
        return;

    raw_spin_lock(&rt_b->rt_runtime_lock);
    for (;;) {
        unsigned long delta;
        ktime_t soft, hard;

        if (hrtimer_active(&rt_b->rt_period_timer))
            break;

        now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
        hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

        soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
        hard = hrtimer_get_expires(&rt_b->rt_period_timer);
        delta = ktime_to_ns(ktime_sub(hard, soft));
        __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                HRTIMER_MODE_ABS_PINNED, 0);
    }
    raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
    hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to arch_init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
    struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
    /* schedulable entities of this group on each cpu */
    struct sched_entity **se;
    /* runqueue "owned" by this group on each cpu */
    struct cfs_rq **cfs_rq;
    unsigned long shares;

    atomic_t load_weight;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
    struct sched_rt_entity **rt_se;
    struct rt_rq **rt_rq;

    struct rt_bandwidth rt_bandwidth;
#endif

    struct rcu_head rcu;
    struct list_head list;

    struct task_group *parent;
    struct list_head siblings;
    struct list_head children;

#ifdef CONFIG_SCHED_AUTOGROUP
    struct autogroup *autogroup;
#endif
};

/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

# define ROOT_TASK_GROUP_LOAD NICE_0_LOAD

/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of a single entity should not be too
 * large; the same goes for a task group's shares value.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES 2
#define MAX_SHARES (1UL << 18)

static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
#endif

/*
 * Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group root_task_group;

#endif /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
    struct load_weight load;
    unsigned long nr_running;

    u64 exec_clock;
    u64 min_vruntime;

    struct rb_root tasks_timeline;
    struct rb_node *rb_leftmost;

    struct list_head tasks;
    struct list_head *balance_iterator;

    /*
     * 'curr' points to the currently running entity on this cfs_rq.
     * It is set to NULL otherwise (i.e. when none are currently running).
     */
    struct sched_entity *curr, *next, *last;

    unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
    struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */

    /*
     * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
     * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
     * (like users, containers etc.)
     *
     * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
     * list is used during load balance.
     */
    int on_list;
    struct list_head leaf_cfs_rq_list;
    struct task_group *tg; /* group that "owns" this runqueue */

#ifdef CONFIG_SMP
    /*
     * the part of load.weight contributed by tasks
     */
    unsigned long task_weight;

    /*
     * h_load = weight * f(tg)
     *
     * Where f(tg) is the recursive weight fraction assigned to
     * this group.
     */
    unsigned long h_load;

    /*
     * Maintaining per-cpu shares distribution for group scheduling
     *
     * load_stamp is the last time we updated the load average
     * load_last is the last time we updated the load average and saw load
     * load_unacc_exec_time is currently unaccounted execution time
     */
    u64 load_avg;
    u64 load_period;
    u64 load_stamp, load_last, load_unacc_exec_time;

    unsigned long load_contribution;
#endif
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
    struct rt_prio_array active;
    unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
    struct {
        int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
        int next; /* next highest */
#endif
    } highest_prio;
#endif
#ifdef CONFIG_SMP
    unsigned long rt_nr_migratory;
    unsigned long rt_nr_total;
    int overloaded;
    struct plist_head pushable_tasks;
#endif
    int rt_throttled;
    u64 rt_time;
    u64 rt_runtime;
    /* Nests inside the rq lock: */
    raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
    unsigned long rt_nr_boosted;

    struct rq *rq;
    struct list_head leaf_rt_rq_list;
    struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
    atomic_t refcount;
    cpumask_var_t span;
    cpumask_var_t online;

    /*
     * The "RT overload" flag: it gets set if a CPU has more than
     * one runnable RT task.
     */
    cpumask_var_t rto_mask;
    atomic_t rto_count;
    struct cpupri cpupri;
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif /* CONFIG_SMP */

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code) must
 * acquire the locks in ascending &runqueue (address) order.
 */
struct rq {
    /* runqueue lock: */
    raw_spinlock_t lock;

    /*
     * nr_running and cpu_load should be in the same cacheline because
     * remote CPUs use both these fields when doing load calculation.
     */
    unsigned long nr_running;
    #define CPU_LOAD_IDX_MAX 5
    unsigned long cpu_load[CPU_LOAD_IDX_MAX];
    unsigned long last_load_update_tick;
#ifdef CONFIG_NO_HZ
    u64 nohz_stamp;
    unsigned char nohz_balance_kick;
#endif
    unsigned int skip_clock_update;

    /* capture load from *all* tasks on this cpu: */
    struct load_weight load;
    unsigned long nr_load_updates;
    u64 nr_switches;

    struct cfs_rq cfs;
    struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
    /* list of leaf cfs_rq on this cpu: */
    struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
    struct list_head leaf_rt_rq_list;
#endif

    /*
     * This is part of a global counter where only the total sum
     * over all CPUs matters. A task can increase this counter on
     * one CPU and if it got migrated afterwards it may decrease
     * it on another CPU. Always updated under the runqueue lock:
     */
    unsigned long nr_uninterruptible;

    struct task_struct *curr, *idle, *stop;
    unsigned long next_balance;
    struct mm_struct *prev_mm;

    u64 clock;
    u64 clock_task;

    atomic_t nr_iowait;

#ifdef CONFIG_SMP
    struct root_domain *rd;
    struct sched_domain *sd;

    unsigned long cpu_power;

    unsigned char idle_at_tick;
    /* For active balancing */
    int post_schedule;
    int active_balance;
    int push_cpu;
    struct cpu_stop_work active_balance_work;
    /* cpu of this runqueue: */
    int cpu;
    int online;

    unsigned long avg_load_per_task;

    u64 rt_avg;
    u64 age_stamp;
    u64 idle_stamp;
    u64 avg_idle;
#endif

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
    u64 prev_irq_time;
#endif

    /* calc_load related fields */
    unsigned long calc_load_update;
    long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
    int hrtick_csd_pending;
    struct call_single_data hrtick_csd;
#endif
    struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
    /* latency stats */
    struct sched_info rq_sched_info;
    unsigned long long rq_cpu_time;
    /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

    /* sys_sched_yield() stats */
    unsigned int yld_count;

    /* schedule() stats */
    unsigned int sched_switch;
    unsigned int sched_count;
    unsigned int sched_goidle;

    /* try_to_wake_up() stats */
    unsigned int ttwu_count;
    unsigned int ttwu_local;
#endif
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);


static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
    return rq->cpu;
#else
    return 0;
#endif
}

#define rcu_dereference_check_sched_domain(p) \
    rcu_dereference_check((p), \
                  rcu_read_lock_sched_held() || \
                  lockdep_is_held(&sched_domains_mutex))

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
    for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() (&__get_cpu_var(runqueues))
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
#define raw_rq() (&__raw_get_cpu_var(runqueues))

#ifdef CONFIG_CGROUP_SCHED

/*
 * Return the group to which this task belongs.
 *
 * We use task_subsys_state_check() and extend the RCU verification
 * with lockdep_is_held(&task_rq(p)->lock) because cpu_cgroup_attach()
 * holds that lock for each task it moves into the cgroup. Therefore
 * by holding that lock, we pin the task to the current cgroup.
 */
static inline struct task_group *task_group(struct task_struct *p)
{
    struct task_group *tg;
    struct cgroup_subsys_state *css;

    if (p->flags & PF_EXITING)
        return &root_task_group;

    css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
            lockdep_is_held(&task_rq(p)->lock));
    tg = container_of(css, struct task_group, css);

    return autogroup_task_group(p, tg);
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
    p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
    p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
    p->rt.rt_rq = task_group(p)->rt_rq[cpu];
    p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else /* CONFIG_CGROUP_SCHED */

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
    return NULL;
}

#endif /* CONFIG_CGROUP_SCHED */

static void update_rq_clock_task(struct rq *rq, s64 delta);

static void update_rq_clock(struct rq *rq)
{
    s64 delta;

    if (rq->skip_clock_update)
        return;

    delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
    rq->clock += delta;
    update_rq_clock_task(rq, delta);
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked
 * @cpu: the processor in question.
 *
 * Returns true if the current cpu runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
    return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled) \
    __SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled) \
    (1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
    0;

#undef SCHED_FEAT
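
/*
 * Illustrative expansion (added commentary; assumes sched_features.h
 * contains entries such as SCHED_FEAT(HRTICK, 0)): the first include
 * above then yields an enum constant __SCHED_FEAT_HRTICK, while the
 * second include ORs (1UL << __SCHED_FEAT_HRTICK) * 0 into
 * sysctl_sched_features, i.e. each feature's default-enabled bit.
 */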

#ifdef CONFIG_SCHED_DEBUG
#define SCHED_FEAT(name, enabled) \
    #name ,

static __read_mostly char *sched_feat_names[] = {
#include "sched_features.h"
    NULL
};

#undef SCHED_FEAT

static int sched_feat_show(struct seq_file *m, void *v)
{
    int i;

    for (i = 0; sched_feat_names[i]; i++) {
        if (!(sysctl_sched_features & (1UL << i)))
            seq_puts(m, "NO_");
        seq_printf(m, "%s ", sched_feat_names[i]);
    }
    seq_puts(m, "\n");

    return 0;
}

static ssize_t
sched_feat_write(struct file *filp, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
    char buf[64];
    char *cmp;
    int neg = 0;
    int i;

    if (cnt > 63)
        cnt = 63;

    if (copy_from_user(&buf, ubuf, cnt))
        return -EFAULT;

    buf[cnt] = 0;
    cmp = strstrip(buf);

    if (strncmp(cmp, "NO_", 3) == 0) {
        neg = 1;
        cmp += 3;
    }

    for (i = 0; sched_feat_names[i]; i++) {
        if (strcmp(cmp, sched_feat_names[i]) == 0) {
            if (neg)
                sysctl_sched_features &= ~(1UL << i);
            else
                sysctl_sched_features |= (1UL << i);
            break;
        }
    }

    if (!sched_feat_names[i])
        return -EINVAL;

    *ppos += cnt;

    return cnt;
}

static int sched_feat_open(struct inode *inode, struct file *filp)
{
    return single_open(filp, sched_feat_show, NULL);
}

static const struct file_operations sched_feat_fops = {
    .open = sched_feat_open,
    .write = sched_feat_write,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
};

static __init int sched_init_debug(void)
{
    debugfs_create_file("sched_features", 0644, NULL, NULL,
            &sched_feat_fops);

    return 0;
}
late_initcall(sched_init_debug);

#endif

#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
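
/*
 * Example query (illustrative, added commentary): a test such as
 *
 *   if (sched_feat(HRTICK))
 *       ...;
 *
 * compiles down to a single bit test against sysctl_sched_features
 * (which is constant when CONFIG_SCHED_DEBUG is off).
 */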

/*
 * Number of tasks to iterate in a single balance run.
 * Limited because this is done with IRQs disabled.
 */
const_debug unsigned int sysctl_sched_nr_migrate = 32;

/*
 * period over which we average the RT time consumption, measured
 * in ms.
 *
 * default: 1s
 */
const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;

/*
 * period over which we measure -rt task cpu usage in us.
 * default: 1s
 */
unsigned int sysctl_sched_rt_period = 1000000;

static __read_mostly int scheduler_running;

/*
 * part of the period that we allow rt tasks to run in us.
 * default: 0.95s
 */
int sysctl_sched_rt_runtime = 950000;

static inline u64 global_rt_period(void)
{
    return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
    if (sysctl_sched_rt_runtime < 0)
        return RUNTIME_INF;

    return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}
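
/*
 * Worked example (added commentary): with the defaults above,
 * global_rt_period() == 1000000 * NSEC_PER_USEC == 1e9 ns (1s) and
 * global_rt_runtime() == 950000 * NSEC_PER_USEC == 0.95e9 ns, i.e.
 * realtime tasks may consume at most 95% of each period.
 */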

#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev) do { } while (0)
#endif

static inline int task_current(struct rq *rq, struct task_struct *p)
{
    return rq->curr == p;
}

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
    return task_current(rq, p);
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
    /* this is a valid case when another task releases the spinlock */
    rq->lock.owner = current;
#endif
    /*
     * If we are tracking spinlock dependencies then we have to
     * fix up the runqueue lock - which gets 'carried over' from
     * prev into current:
     */
    spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

    raw_spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
    return p->oncpu;
#else
    return task_current(rq, p);
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
    /*
     * We can optimise this out completely for !SMP, because the
     * SMP rebalancing from interrupt is the only thing that cares
     * here.
     */
    next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
    raw_spin_unlock_irq(&rq->lock);
#else
    raw_spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
    /*
     * After ->oncpu is cleared, the task can be moved to a different CPU.
     * We must ensure this doesn't happen until the switch is completely
     * finished.
     */
    smp_wmb();
    prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
    local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * Check whether the task is waking; we use this to synchronize
 * ->cpus_allowed against ttwu().
 */
static inline int task_is_waking(struct task_struct *p)
{
    return unlikely(p->state == TASK_WAKING);
}

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
    __acquires(rq->lock)
{
    struct rq *rq;

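    /*
     * Note (added commentary): between reading task_rq(p) and taking
     * rq->lock the task may migrate to another CPU, so the pair is
     * re-checked under the lock and the loop retries until it is
     * stable.
     */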
    for (;;) {
        rq = task_rq(p);
        raw_spin_lock(&rq->lock);
        if (likely(rq == task_rq(p)))
            return rq;
        raw_spin_unlock(&rq->lock);
    }
}

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
    __acquires(rq->lock)
{
    struct rq *rq;

    for (;;) {
        local_irq_save(*flags);
        rq = task_rq(p);
        raw_spin_lock(&rq->lock);
        if (likely(rq == task_rq(p)))
            return rq;
        raw_spin_unlock_irqrestore(&rq->lock, *flags);
    }
}

static void __task_rq_unlock(struct rq *rq)
    __releases(rq->lock)
{
    raw_spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
    __releases(rq->lock)
{
    raw_spin_unlock_irqrestore(&rq->lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static struct rq *this_rq_lock(void)
    __acquires(rq->lock)
{
    struct rq *rq;

    local_irq_disable();
    rq = this_rq();
    raw_spin_lock(&rq->lock);

    return rq;
}

#ifdef CONFIG_SCHED_HRTICK
/*
 * Use HR-timers to deliver accurate preemption points.
 *
 * It's all a bit involved since we cannot program an hrtimer while
 * holding the rq->lock. So what we do is store a state in rq->hrtick_*
 * and ask for a reschedule event.
 *
 * When we get rescheduled we reprogram the hrtick_timer outside of the
 * rq->lock.
 */

/*
 * Use hrtick when:
 * - enabled by features
 * - hrtimer is actually high res
 */
static inline int hrtick_enabled(struct rq *rq)
{
    if (!sched_feat(HRTICK))
        return 0;
    if (!cpu_active(cpu_of(rq)))
        return 0;
    return hrtimer_is_hres_active(&rq->hrtick_timer);
}

static void hrtick_clear(struct rq *rq)
{
    if (hrtimer_active(&rq->hrtick_timer))
        hrtimer_cancel(&rq->hrtick_timer);
}

/*
 * High-resolution timer tick.
 * Runs from hardirq context with interrupts disabled.
 */
static enum hrtimer_restart hrtick(struct hrtimer *timer)
{
    struct rq *rq = container_of(timer, struct rq, hrtick_timer);

    WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());

    raw_spin_lock(&rq->lock);
    update_rq_clock(rq);
    rq->curr->sched_class->task_tick(rq, rq->curr, 1);
    raw_spin_unlock(&rq->lock);

    return HRTIMER_NORESTART;
}

#ifdef CONFIG_SMP
/*
 * called from hardirq (IPI) context
 */
static void __hrtick_start(void *arg)
{
    struct rq *rq = arg;

    raw_spin_lock(&rq->lock);
    hrtimer_restart(&rq->hrtick_timer);
    rq->hrtick_csd_pending = 0;
    raw_spin_unlock(&rq->lock);
}

/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
    struct hrtimer *timer = &rq->hrtick_timer;
    ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

    hrtimer_set_expires(timer, time);

    if (rq == this_rq()) {
        hrtimer_restart(timer);
    } else if (!rq->hrtick_csd_pending) {
        __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
        rq->hrtick_csd_pending = 1;
    }
}

static int
hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
    int cpu = (int)(long)hcpu;

    switch (action) {
    case CPU_UP_CANCELED:
    case CPU_UP_CANCELED_FROZEN:
    case CPU_DOWN_PREPARE:
    case CPU_DOWN_PREPARE_FROZEN:
    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
        hrtick_clear(cpu_rq(cpu));
        return NOTIFY_OK;
    }

    return NOTIFY_DONE;
}

static __init void init_hrtick(void)
{
    hotcpu_notifier(hotplug_hrtick, 0);
}
#else
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
static void hrtick_start(struct rq *rq, u64 delay)
{
    __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
            HRTIMER_MODE_REL_PINNED, 0);
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SMP */

static void init_rq_hrtick(struct rq *rq)
{
#ifdef CONFIG_SMP
    rq->hrtick_csd_pending = 0;

    rq->hrtick_csd.flags = 0;
    rq->hrtick_csd.func = __hrtick_start;
    rq->hrtick_csd.info = rq;
#endif

    hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
    rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
static inline void hrtick_clear(struct rq *rq)
{
}

static inline void init_rq_hrtick(struct rq *rq)
{
}

static inline void init_hrtick(void)
{
}
#endif /* CONFIG_SCHED_HRTICK */

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
    int cpu;

    assert_raw_spin_locked(&task_rq(p)->lock);

    if (test_tsk_need_resched(p))
        return;

    set_tsk_need_resched(p);

    cpu = task_cpu(p);
    if (cpu == smp_processor_id())
        return;

    /* NEED_RESCHED must be visible before we test polling */
    smp_mb();
    if (!tsk_is_polling(p))
        smp_send_reschedule(cpu);
}

static void resched_cpu(int cpu)
{
    struct rq *rq = cpu_rq(cpu);
    unsigned long flags;

    if (!raw_spin_trylock_irqsave(&rq->lock, flags))
        return;
    resched_task(cpu_curr(cpu));
    raw_spin_unlock_irqrestore(&rq->lock, flags);
}

#ifdef CONFIG_NO_HZ
/*
 * In the semi idle case, use the nearest busy cpu for migrating timers
 * from an idle cpu. This is good for power-savings.
 *
 * We don't do a similar optimization for a completely idle system, as
 * selecting an idle cpu would add more delay to the timers than intended
 * (as that cpu's timer base may not be up to date wrt jiffies etc).
 */
int get_nohz_timer_target(void)
{
    int cpu = smp_processor_id();
    int i;
    struct sched_domain *sd;

    for_each_domain(cpu, sd) {
        for_each_cpu(i, sched_domain_span(sd))
            if (!idle_cpu(i))
                return i;
    }
    return cpu;
}

/*
 * When add_timer_on() enqueues a timer into the timer wheel of an
 * idle CPU then this timer might expire before the next timer event
 * which is scheduled to wake up that CPU. In case of a completely
 * idle system the next event might even be infinite time into the
 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
 * leaves the inner idle loop so the newly added timer is taken into
 * account when the CPU goes back to idle and evaluates the timer
 * wheel for the next timer event.
 */
void wake_up_idle_cpu(int cpu)
{
    struct rq *rq = cpu_rq(cpu);

    if (cpu == smp_processor_id())
        return;

    /*
     * This is safe, as this function is called with the timer
     * wheel base lock of (cpu) held. When the CPU is on the way
     * to idle and has not yet set rq->curr to idle then it will
     * be serialized on the timer wheel base lock and take the new
     * timer into account automatically.
     */
    if (rq->curr != rq->idle)
        return;

    /*
     * We can set TIF_RESCHED on the idle task of the other CPU
     * lockless. The worst case is that the other CPU runs the
     * idle task through an additional NOOP schedule()
     */
    set_tsk_need_resched(rq->idle);

    /* NEED_RESCHED must be visible before we test polling */
    smp_mb();
    if (!tsk_is_polling(rq->idle))
        smp_send_reschedule(cpu);
}

#endif /* CONFIG_NO_HZ */

static u64 sched_avg_period(void)
{
    return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}

static void sched_avg_update(struct rq *rq)
{
    s64 period = sched_avg_period();

    while ((s64)(rq->clock - rq->age_stamp) > period) {
        /*
         * Inline assembly required to prevent the compiler
         * optimising this loop into a divmod call.
         * See __iter_div_u64_rem() for another example of this.
         */
        asm("" : "+rm" (rq->age_stamp));
        rq->age_stamp += period;
        rq->rt_avg /= 2;
    }
}
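
/*
 * Illustrative decay (added commentary): with the default
 * sysctl_sched_time_avg of 1000ms, sched_avg_period() is 0.5s of
 * rq->clock time; each elapsed period halves rq->rt_avg, so RT load
 * older than a few periods contributes almost nothing to the average.
 */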

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
    rq->rt_avg += rt_delta;
    sched_avg_update(rq);
}

#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
    assert_raw_spin_locked(&task_rq(p)->lock);
    set_tsk_need_resched(p);
}

static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
}

static void sched_avg_update(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

#if BITS_PER_LONG == 32
# define WMULT_CONST (~0UL)
#else
# define WMULT_CONST (1UL << 32)
#endif

#define WMULT_SHIFT 32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
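
/*
 * Worked example (added commentary): SRR(7, 2) == (7 + 2) >> 2 == 2,
 * i.e. 7/4 == 1.75 rounded to the nearest integer.
 */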

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
        struct load_weight *lw)
{
    u64 tmp;

    if (!lw->inv_weight) {
        if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
            lw->inv_weight = 1;
        else
            lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
                / (lw->weight+1);
    }

    tmp = (u64)delta_exec * weight;
    /*
     * Check whether we'd overflow the 64-bit multiplication:
     */
    if (unlikely(tmp > WMULT_CONST))
        tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
            WMULT_SHIFT/2);
    else
        tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

    return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
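
/*
 * Worked example (added commentary): for a nice-0 entity (weight 1024)
 * on a queue with total weight lw->weight == 2048, inv_weight is about
 * 2^32/2048, so calc_delta_mine(1000000, 1024, lw) returns roughly
 * 1000000 * 1024 / 2048 == 500000: the entity is credited with half of
 * the elapsed time.
 */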

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
    lw->weight += inc;
    lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
    lw->weight -= dec;
    lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
    lw->weight = w;
    lw->inv_weight = 0;
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs, the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

#define WEIGHT_IDLEPRIO 3
#define WMULT_IDLEPRIO 1431655765

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
 /* -20 */ 88761, 71755, 56483, 46273, 36291,
 /* -15 */ 29154, 23254, 18705, 14949, 11916,
 /* -10 */ 9548, 7620, 6100, 4904, 3906,
 /* -5 */ 3121, 2501, 1991, 1586, 1277,
 /* 0 */ 1024, 820, 655, 526, 423,
 /* 5 */ 335, 272, 215, 172, 137,
 /* 10 */ 110, 87, 70, 56, 45,
 /* 15 */ 36, 29, 23, 18, 15,
};
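
/*
 * Worked example (added commentary): two CPU-bound tasks at nice 0 and
 * nice 1 have weights 1024 and 820, so they receive 1024/1844 ~= 55%
 * and 820/1844 ~= 45% of the CPU - the ~10% step described above.
 */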

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetics by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
 /* -20 */ 48388, 59856, 76040, 92818, 118348,
 /* -15 */ 147320, 184698, 229616, 287308, 360437,
 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
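
/*
 * Illustrative check (added commentary): prio_to_wmult[20] == 4194304
 * == 2^32 / 1024, the inverse of the nice-0 weight; likewise
 * WMULT_IDLEPRIO == 1431655765 ~= 2^32 / 3 matches WEIGHT_IDLEPRIO.
 */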

/* Time spent by the tasks of the cpu accounting group executing in ... */
enum cpuacct_stat_index {
    CPUACCT_STAT_USER, /* ... user mode */
    CPUACCT_STAT_SYSTEM, /* ... kernel mode */

    CPUACCT_STAT_NSTATS,
};

#ifdef CONFIG_CGROUP_CPUACCT
static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
static void cpuacct_update_stats(struct task_struct *tsk,
        enum cpuacct_stat_index idx, cputime_t val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_update_stats(struct task_struct *tsk,
        enum cpuacct_stat_index idx, cputime_t val) {}
#endif

static inline void inc_cpu_load(struct rq *rq, unsigned long load)
{
    update_load_add(&rq->load, load);
}

static inline void dec_cpu_load(struct rq *rq, unsigned long load)
{
    update_load_sub(&rq->load, load);
}

#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
typedef int (*tg_visitor)(struct task_group *, void *);

/*
 * Iterate the full tree, calling @down when first entering a node and @up when
 * leaving it for the final time.
 */
static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
{
    struct task_group *parent, *child;
    int ret;

    rcu_read_lock();
    parent = &root_task_group;
down:
    ret = (*down)(parent, data);
    if (ret)
        goto out_unlock;
    list_for_each_entry_rcu(child, &parent->children, siblings) {
        parent = child;
        goto down;

up:
        continue;
    }
    ret = (*up)(parent, data);
    if (ret)
        goto out_unlock;

    child = parent;
    parent = parent->parent;
    if (parent)
        goto up;
out_unlock:
    rcu_read_unlock();

    return ret;
}

static int tg_nop(struct task_group *tg, void *data)
{
    return 0;
}
#endif

#ifdef CONFIG_SMP
/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
    return cpu_rq(cpu)->load.weight;
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static unsigned long source_load(int cpu, int type)
{
    struct rq *rq = cpu_rq(cpu);
    unsigned long total = weighted_cpuload(cpu);

    if (type == 0 || !sched_feat(LB_BIAS))
        return total;

    return min(rq->cpu_load[type-1], total);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static unsigned long target_load(int cpu, int type)
{
    struct rq *rq = cpu_rq(cpu);
    unsigned long total = weighted_cpuload(cpu);

    if (type == 0 || !sched_feat(LB_BIAS))
        return total;

    return max(rq->cpu_load[type-1], total);
}

static unsigned long power_of(int cpu)
{
    return cpu_rq(cpu)->cpu_power;
}

static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);

static unsigned long cpu_avg_load_per_task(int cpu)
{
    struct rq *rq = cpu_rq(cpu);
    unsigned long nr_running = ACCESS_ONCE(rq->nr_running);

    if (nr_running)
        rq->avg_load_per_task = rq->load.weight / nr_running;
    else
        rq->avg_load_per_task = 0;

    return rq->avg_load_per_task;
}

#ifdef CONFIG_FAIR_GROUP_SCHED

/*
 * Compute the cpu's hierarchical load factor for each task group.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parent's load.
 */
static int tg_load_down(struct task_group *tg, void *data)
{
    unsigned long load;
    long cpu = (long)data;

    if (!tg->parent) {
        load = cpu_rq(cpu)->load.weight;
    } else {
        load = tg->parent->cfs_rq[cpu]->h_load;
        load *= tg->se[cpu]->load.weight;
        load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
    }

    tg->cfs_rq[cpu]->h_load = load;

    return 0;
}

static void update_h_load(long cpu)
{
    walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}

#endif

#ifdef CONFIG_PREEMPT

static void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations. This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below. However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
    __releases(this_rq->lock)
    __acquires(busiest->lock)
    __acquires(this_rq->lock)
{
    raw_spin_unlock(&this_rq->lock);
    double_rq_lock(this_rq, busiest);

    return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry. This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
    __releases(this_rq->lock)
    __acquires(busiest->lock)
    __acquires(this_rq->lock)
{
    int ret = 0;

    if (unlikely(!raw_spin_trylock(&busiest->lock))) {
        if (busiest < this_rq) {
            raw_spin_unlock(&this_rq->lock);
            raw_spin_lock(&busiest->lock);
            raw_spin_lock_nested(&this_rq->lock,
                          SINGLE_DEPTH_NESTING);
            ret = 1;
        } else
            raw_spin_lock_nested(&busiest->lock,
                          SINGLE_DEPTH_NESTING);
    }
    return ret;
}

#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
    if (unlikely(!irqs_disabled())) {
        /* printk() doesn't work well under rq->lock */
        raw_spin_unlock(&this_rq->lock);
        BUG_ON(1);
    }

    return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
    __releases(busiest->lock)
{
    raw_spin_unlock(&busiest->lock);
    lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static void double_rq_lock(struct rq *rq1, struct rq *rq2)
    __acquires(rq1->lock)
    __acquires(rq2->lock)
{
    BUG_ON(!irqs_disabled());
    if (rq1 == rq2) {
        raw_spin_lock(&rq1->lock);
        __acquire(rq2->lock); /* Fake it out ;) */
    } else {
        if (rq1 < rq2) {
            raw_spin_lock(&rq1->lock);
            raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
        } else {
            raw_spin_lock(&rq2->lock);
            raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
        }
    }
}
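
/*
 * Illustrative usage (a sketch, added commentary):
 *
 *   local_irq_save(flags);
 *   double_rq_lock(src_rq, dst_rq);
 *   ... move tasks between the runqueues ...
 *   double_rq_unlock(src_rq, dst_rq);
 *   local_irq_restore(flags);
 *
 * Always taking the lower-addressed lock first keeps the ordering
 * consistent across callers and avoids ABBA deadlocks.
 */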

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
    __releases(rq1->lock)
    __releases(rq2->lock)
{
    raw_spin_unlock(&rq1->lock);
    if (rq1 != rq2)
        raw_spin_unlock(&rq2->lock);
    else
        __release(rq2->lock);
}

#endif

static void calc_load_account_idle(struct rq *this_rq);
static void update_sysctl(void);
static int get_update_sysctl_factor(void);
static void update_cpu_load(struct rq *this_rq);

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
    set_task_rq(p, cpu);
#ifdef CONFIG_SMP
    /*
     * After ->cpu is set to a new value, task_rq_lock(p, ...) can be
     * successfully executed on another CPU. We must ensure that updates of
     * per-task data have been completed by this moment.
     */
    smp_wmb();
    task_thread_info(p)->cpu = cpu;
#endif
}

static const struct sched_class rt_sched_class;

#define sched_class_highest (&stop_sched_class)
#define for_each_class(class) \
   for (class = sched_class_highest; class; class = class->next)

#include "sched_stats.h"

static void inc_nr_running(struct rq *rq)
{
    rq->nr_running++;
}

static void dec_nr_running(struct rq *rq)
{
    rq->nr_running--;
}

static void set_load_weight(struct task_struct *p)
{
    /*
     * SCHED_IDLE tasks get minimal weight:
     */
    if (p->policy == SCHED_IDLE) {
        p->se.load.weight = WEIGHT_IDLEPRIO;
        p->se.load.inv_weight = WMULT_IDLEPRIO;
        return;
    }

    p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
    p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
}

static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
{
    update_rq_clock(rq);
    sched_info_queued(p);
    p->sched_class->enqueue_task(rq, p, flags);
    p->se.on_rq = 1;
}

static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
    update_rq_clock(rq);
    sched_info_dequeued(p);
    p->sched_class->dequeue_task(rq, p, flags);
    p->se.on_rq = 0;
}

/*
 * activate_task - move a task to the runqueue.
 */
static void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
    if (task_contributes_to_load(p))
        rq->nr_uninterruptible--;

    enqueue_task(rq, p, flags);
    inc_nr_running(rq);
}

/*
 * deactivate_task - remove a task from the runqueue.
 */
static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
    if (task_contributes_to_load(p))
        rq->nr_uninterruptible++;

    dequeue_task(rq, p, flags);
    dec_nr_running(rq);
}

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in account_system_vtime, on the corresponding CPU
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and racing
 * with irq/account_system_vtime on this CPU. We would either get the old
 * or the new value, with a side effect of accounting a slice of irq time
 * to the wrong task when an irq is in progress while we read rq->clock.
 * That is a worthy compromise in place of having locks on each irq in
 * account_system_time.
 */
static DEFINE_PER_CPU(u64, cpu_hardirq_time);
static DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
    sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
    sched_clock_irqtime = 0;
}

#ifndef CONFIG_64BIT
static DEFINE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
    __this_cpu_inc(irq_time_seq.sequence);
    smp_wmb();
}

static inline void irq_time_write_end(void)
{
    smp_wmb();
    __this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
    u64 irq_time;
    unsigned seq;

    do {
        seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
        irq_time = per_cpu(cpu_softirq_time, cpu) +
               per_cpu(cpu_hardirq_time, cpu);
    } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

    return irq_time;
}
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
    return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void account_system_vtime(struct task_struct *curr)
{
    unsigned long flags;
    s64 delta;
    int cpu;

    if (!sched_clock_irqtime)
        return;

    local_irq_save(flags);

    cpu = smp_processor_id();
    delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
    __this_cpu_add(irq_start_time, delta);

    irq_time_write_begin();
    /*
     * We do not account for softirq time from ksoftirqd here.
     * We want to continue accounting softirq time to the ksoftirqd thread
     * in that case, so as not to confuse the scheduler with a special task
     * that does not consume any time but still wants to run.
     */
    if (hardirq_count())
        __this_cpu_add(cpu_hardirq_time, delta);
    else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
        __this_cpu_add(cpu_softirq_time, delta);

    irq_time_write_end();
    local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(account_system_vtime);

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
    s64 irq_delta;

    irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;

    /*
     * Since irq_time is only updated on {soft,}irq_exit, we might run into
     * this case when a previous update_rq_clock() happened inside a
     * {soft,}irq region.
     *
     * When this happens, we stop ->clock_task and only update the
     * prev_irq_time stamp to account for the part that fit, so that a next
     * update will consume the rest. This ensures ->clock_task is
     * monotonic.
     *
     * It does however cause some slight misattribution of {soft,}irq
     * time; a more accurate solution would be to update the irq_time using
     * the current rq->clock timestamp, except that would require using
     * atomic ops.
     */
    if (irq_delta > delta)
        irq_delta = delta;

    rq->prev_irq_time += irq_delta;
    delta -= irq_delta;
    rq->clock_task += delta;

    if (irq_delta && sched_feat(NONIRQ_POWER))
        sched_rt_avg_update(rq, irq_delta);
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

static void update_rq_clock_task(struct rq *rq, s64 delta)
{
    rq->clock_task += delta;
}

#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
#include "sched_autogroup.c"
#include "sched_stoptask.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif

void sched_set_stop_task(int cpu, struct task_struct *stop)
{
    struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
    struct task_struct *old_stop = cpu_rq(cpu)->stop;

    if (stop) {
        /*
         * Make it appear like a SCHED_FIFO task; that is something
         * userspace knows about and won't get confused about.
         *
         * Also, it will make PI more or less work without too
         * much confusion -- but then, stop work should not
         * rely on PI working anyway.
         */
1955        sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
1956
1957        stop->sched_class = &stop_sched_class;
1958    }
1959
1960    cpu_rq(cpu)->stop = stop;
1961
1962    if (old_stop) {
1963        /*
1964         * Reset it back to a normal scheduling class so that
1965         * it can die in pieces.
1966         */
1967        old_stop->sched_class = &rt_sched_class;
1968    }
1969}
1970
1971/*
1972 * __normal_prio - return the priority that is based on the static prio
1973 */
1974static inline int __normal_prio(struct task_struct *p)
1975{
1976    return p->static_prio;
1977}
1978
1979/*
1980 * Calculate the expected normal priority: i.e. priority
1981 * without taking RT-inheritance into account. Might be
1982 * boosted by interactivity modifiers. Changes upon fork,
1983 * setprio syscalls, and whenever the interactivity
1984 * estimator recalculates.
1985 */
1986static inline int normal_prio(struct task_struct *p)
1987{
1988    int prio;
1989
1990    if (task_has_rt_policy(p))
1991        prio = MAX_RT_PRIO-1 - p->rt_priority;
1992    else
1993        prio = __normal_prio(p);
1994    return prio;
1995}
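
/*
 * Example of the mapping above, assuming the usual MAX_RT_PRIO = 100
 * and MAX_PRIO = 140: an RT task with rt_priority 1 gets prio 98 and
 * rt_priority 99 gets prio 0 (the highest), while a SCHED_NORMAL task
 * keeps its static_prio, e.g. 120 for nice 0. Lower prio always means
 * more important to the scheduler.
 */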
1996
1997/*
1998 * Calculate the current priority, i.e. the priority
1999 * taken into account by the scheduler. This value might
2000 * be boosted by RT tasks, or might be boosted by
2001 * interactivity modifiers. Will be RT if the task got
2002 * RT-boosted. If not then it returns p->normal_prio.
2003 */
2004static int effective_prio(struct task_struct *p)
2005{
2006    p->normal_prio = normal_prio(p);
2007    /*
2008     * If we are RT tasks or we were boosted to RT priority,
2009     * keep the priority unchanged. Otherwise, update priority
2010     * to the normal priority:
2011     */
2012    if (!rt_prio(p->prio))
2013        return p->normal_prio;
2014    return p->prio;
2015}
2016
2017/**
2018 * task_curr - is this task currently executing on a CPU?
2019 * @p: the task in question.
2020 */
2021inline int task_curr(const struct task_struct *p)
2022{
2023    return cpu_curr(task_cpu(p)) == p;
2024}
2025
2026static inline void check_class_changed(struct rq *rq, struct task_struct *p,
2027                       const struct sched_class *prev_class,
2028                       int oldprio, int running)
2029{
2030    if (prev_class != p->sched_class) {
2031        if (prev_class->switched_from)
2032            prev_class->switched_from(rq, p, running);
2033        p->sched_class->switched_to(rq, p, running);
2034    } else
2035        p->sched_class->prio_changed(rq, p, oldprio, running);
2036}
2037
2038static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
2039{
2040    const struct sched_class *class;
2041
2042    if (p->sched_class == rq->curr->sched_class) {
2043        rq->curr->sched_class->check_preempt_curr(rq, p, flags);
2044    } else {
2045        for_each_class(class) {
2046            if (class == rq->curr->sched_class)
2047                break;
2048            if (class == p->sched_class) {
2049                resched_task(rq->curr);
2050                break;
2051            }
2052        }
2053    }
2054
2055    /*
2056     * A queue event has occurred, and we're going to schedule. In
2057     * this case, we can save a useless back-to-back clock update.
2058     */
2059    if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr))
2060        rq->skip_clock_update = 1;
2061}
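
/*
 * Concretely, for_each_class() walks the classes from highest to
 * lowest (stop, rt, fair, idle in this kernel): if a CFS task is
 * running and the woken @p is SCHED_FIFO, the loop above reaches p's
 * class first and reschedules; if both are CFS, the class's own
 * check_preempt_curr() decides; if @p belongs to a lower class than
 * the running task, nothing happens.
 */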
2062
2063#ifdef CONFIG_SMP
2064/*
2065 * Is this task likely cache-hot:
2066 */
2067static int
2068task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
2069{
2070    s64 delta;
2071
2072    if (p->sched_class != &fair_sched_class)
2073        return 0;
2074
2075    if (unlikely(p->policy == SCHED_IDLE))
2076        return 0;
2077
2078    /*
2079     * Buddy candidates are cache hot:
2080     */
2081    if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
2082            (&p->se == cfs_rq_of(&p->se)->next ||
2083             &p->se == cfs_rq_of(&p->se)->last))
2084        return 1;
2085
2086    if (sysctl_sched_migration_cost == -1)
2087        return 1;
2088    if (sysctl_sched_migration_cost == 0)
2089        return 0;
2090
2091    delta = now - p->se.exec_start;
2092
2093    return delta < (s64)sysctl_sched_migration_cost;
2094}
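
/*
 * As a rough example, with the default sysctl_sched_migration_cost of
 * 500000ns (0.5ms): a fair task that last executed 0.2ms ago is still
 * considered cache hot and the load balancer will tend to leave it
 * where it is, while one idle for 2ms is deemed cold and cheap to
 * migrate. A value of -1 reports every fair task hot, 0 none (though
 * the buddy check above can still say hot).
 */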
2095
2096void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2097{
2098#ifdef CONFIG_SCHED_DEBUG
2099    /*
2100     * We should never call set_task_cpu() on a blocked task,
2101     * ttwu() will sort out the placement.
2102     */
2103    WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
2104            !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2105#endif
2106
2107    trace_sched_migrate_task(p, new_cpu);
2108
2109    if (task_cpu(p) != new_cpu) {
2110        p->se.nr_migrations++;
2111        perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
2112    }
2113
2114    __set_task_cpu(p, new_cpu);
2115}
2116
2117struct migration_arg {
2118    struct task_struct *task;
2119    int dest_cpu;
2120};
2121
2122static int migration_cpu_stop(void *data);
2123
2124/*
2125 * The task's runqueue lock must be held.
2126 * Returns true if you have to wait for the migration thread.
2127 */
2128static bool migrate_task(struct task_struct *p, struct rq *rq)
2129{
2130    /*
2131     * If the task is not on a runqueue (and not running), then
2132     * the next wake-up will properly place the task.
2133     */
2134    return p->se.on_rq || task_running(rq, p);
2135}
2136
2137/*
2138 * wait_task_inactive - wait for a thread to unschedule.
2139 *
2140 * If @match_state is nonzero, it's the @p->state value just checked and
2141 * not expected to change. If it changes, i.e. @p might have woken up,
2142 * then return zero. When we succeed in waiting for @p to be off its CPU,
2143 * we return a positive number (its total switch count). If a second call
2144 * a short while later returns the same number, the caller can be sure that
2145 * @p has remained unscheduled the whole time.
2146 *
2147 * The caller must ensure that the task *will* unschedule sometime soon,
2148 * else this function might spin for a *long* time. This function can't
2149 * be called with interrupts off, or it may introduce deadlock with
2150 * smp_call_function() if an IPI is sent by the same process we are
2151 * waiting to become inactive.
2152 */
2153unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2154{
2155    unsigned long flags;
2156    int running, on_rq;
2157    unsigned long ncsw;
2158    struct rq *rq;
2159
2160    for (;;) {
2161        /*
2162         * We do the initial early heuristics without holding
2163         * any task-queue locks at all. We'll only try to get
2164         * the runqueue lock when things look like they will
2165         * work out!
2166         */
2167        rq = task_rq(p);
2168
2169        /*
2170         * If the task is actively running on another CPU
2171         * still, just relax and busy-wait without holding
2172         * any locks.
2173         *
2174         * NOTE! Since we don't hold any locks, we can't even
2175         * be sure that "rq" stays the right runqueue!
2176         * But we don't care, since "task_running()" will
2177         * return false if the runqueue has changed and p
2178         * is actually now running somewhere else!
2179         */
2180        while (task_running(rq, p)) {
2181            if (match_state && unlikely(p->state != match_state))
2182                return 0;
2183            cpu_relax();
2184        }
2185
2186        /*
2187         * Ok, time to look more closely! We need the rq
2188         * lock now, to be *sure*. If we're wrong, we'll
2189         * just go back and repeat.
2190         */
2191        rq = task_rq_lock(p, &flags);
2192        trace_sched_wait_task(p);
2193        running = task_running(rq, p);
2194        on_rq = p->se.on_rq;
2195        ncsw = 0;
2196        if (!match_state || p->state == match_state)
2197            ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
2198        task_rq_unlock(rq, &flags);
2199
2200        /*
2201         * If it changed from the expected state, bail out now.
2202         */
2203        if (unlikely(!ncsw))
2204            break;
2205
2206        /*
2207         * Was it really running after all now that we
2208         * checked with the proper locks actually held?
2209         *
2210         * Oops. Go back and try again..
2211         */
2212        if (unlikely(running)) {
2213            cpu_relax();
2214            continue;
2215        }
2216
2217        /*
2218         * It's not enough that it's not actively running,
2219         * it must be off the runqueue _entirely_, and not
2220         * preempted!
2221         *
2222         * So if it was still runnable (but just not actively
2223         * running right now), it's preempted, and we should
2224         * yield - it could be a while.
2225         */
2226        if (unlikely(on_rq)) {
2227            schedule_timeout_uninterruptible(1);
2228            continue;
2229        }
2230
2231        /*
2232         * Ahh, all good. It wasn't running, and it wasn't
2233         * runnable, which means that it will never become
2234         * running in the future either. We're all done!
2235         */
2236        break;
2237    }
2238
2239    return ncsw;
2240}
2241
2242/**
2243 * kick_process - kick a running thread to enter/exit the kernel
2244 * @p: the to-be-kicked thread
2245 *
2246 * Cause a process which is running on another CPU to enter
2247 * kernel-mode, without any delay. (to get signals handled.)
2248 *
2249 * NOTE: this function doesn't have to take the runqueue lock,
2250 * because all it wants to ensure is that the remote task enters
2251 * the kernel. If the IPI races and the task has been migrated
2252 * to another CPU then no harm is done and the purpose has been
2253 * achieved as well.
2254 */
2255void kick_process(struct task_struct *p)
2256{
2257    int cpu;
2258
2259    preempt_disable();
2260    cpu = task_cpu(p);
2261    if ((cpu != smp_processor_id()) && task_curr(p))
2262        smp_send_reschedule(cpu);
2263    preempt_enable();
2264}
2265EXPORT_SYMBOL_GPL(kick_process);
2266#endif /* CONFIG_SMP */
2267
2268/**
2269 * task_oncpu_function_call - call a function on the cpu on which a task runs
2270 * @p: the task to evaluate
2271 * @func: the function to be called
2272 * @info: the function call argument
2273 *
2274 * Calls the function @func when the task is currently running. This might
2275 * be on the current CPU, in which case the function is called directly.
2276 */
2277void task_oncpu_function_call(struct task_struct *p,
2278                  void (*func) (void *info), void *info)
2279{
2280    int cpu;
2281
2282    preempt_disable();
2283    cpu = task_cpu(p);
2284    if (task_curr(p))
2285        smp_call_function_single(cpu, func, info, 1);
2286    preempt_enable();
2287}
2288
2289#ifdef CONFIG_SMP
2290/*
2291 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock being held.
2292 */
2293static int select_fallback_rq(int cpu, struct task_struct *p)
2294{
2295    int dest_cpu;
2296    const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
2297
2298    /* Look for allowed, online CPU in same node. */
2299    for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
2300        if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
2301            return dest_cpu;
2302
2303    /* Any allowed, online CPU? */
2304    dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
2305    if (dest_cpu < nr_cpu_ids)
2306        return dest_cpu;
2307
2308    /* No more Mr. Nice Guy. */
2309    dest_cpu = cpuset_cpus_allowed_fallback(p);
2310    /*
2311     * Don't tell them about moving exiting tasks or
2312     * kernel threads (both mm NULL), since they never
2313     * leave the kernel.
2314     */
2315    if (p->mm && printk_ratelimit()) {
2316        printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
2317                task_pid_nr(p), p->comm, cpu);
2318    }
2319
2320    return dest_cpu;
2321}
2322
2323/*
2324 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
2325 */
2326static inline
2327int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
2328{
2329    int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
2330
2331    /*
2332     * In order not to call set_task_cpu() on a blocking task we need
2333     * to rely on ttwu() to place the task on a valid ->cpus_allowed
2334     * cpu.
2335     *
2336     * Since this is common to all placement strategies, this lives here.
2337     *
2338     * [ this allows ->select_task_rq() to simply return task_cpu(p) and
2339     * not worry about this generic constraint ]
2340     */
2341    if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
2342             !cpu_online(cpu)))
2343        cpu = select_fallback_rq(task_cpu(p), p);
2344
2345    return cpu;
2346}
2347
2348static void update_avg(u64 *avg, u64 sample)
2349{
2350    s64 diff = sample - *avg;
2351    *avg += diff >> 3;
2352}
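
/*
 * update_avg() is a cheap exponentially weighted average giving the
 * new sample a 1/8 weight. E.g. with *avg == 1000us and sample ==
 * 1800us, diff is 800 and the average moves to 1100us; repeated
 * identical samples converge on the sample value geometrically.
 */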
2353#endif
2354
2355static inline void ttwu_activate(struct task_struct *p, struct rq *rq,
2356                 bool is_sync, bool is_migrate, bool is_local,
2357                 unsigned long en_flags)
2358{
2359    schedstat_inc(p, se.statistics.nr_wakeups);
2360    if (is_sync)
2361        schedstat_inc(p, se.statistics.nr_wakeups_sync);
2362    if (is_migrate)
2363        schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2364    if (is_local)
2365        schedstat_inc(p, se.statistics.nr_wakeups_local);
2366    else
2367        schedstat_inc(p, se.statistics.nr_wakeups_remote);
2368
2369    activate_task(rq, p, en_flags);
2370}
2371
2372static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq,
2373                    int wake_flags, bool success)
2374{
2375    trace_sched_wakeup(p, success);
2376    check_preempt_curr(rq, p, wake_flags);
2377
2378    p->state = TASK_RUNNING;
2379#ifdef CONFIG_SMP
2380    if (p->sched_class->task_woken)
2381        p->sched_class->task_woken(rq, p);
2382
2383    if (unlikely(rq->idle_stamp)) {
2384        u64 delta = rq->clock - rq->idle_stamp;
2385        u64 max = 2*sysctl_sched_migration_cost;
2386
2387        if (delta > max)
2388            rq->avg_idle = max;
2389        else
2390            update_avg(&rq->avg_idle, delta);
2391        rq->idle_stamp = 0;
2392    }
2393#endif
2394    /* if a worker is waking up, notify workqueue */
2395    if ((p->flags & PF_WQ_WORKER) && success)
2396        wq_worker_waking_up(p, cpu_of(rq));
2397}
2398
2399/**
2400 * try_to_wake_up - wake up a thread
2401 * @p: the thread to be awakened
2402 * @state: the mask of task states that can be woken
2403 * @wake_flags: wake modifier flags (WF_*)
2404 *
2405 * Put it on the run-queue if it's not already there. The "current"
2406 * thread is always on the run-queue (except when the actual
2407 * re-schedule is in progress), and as such you're allowed to do
2408 * the simpler "current->state = TASK_RUNNING" to mark yourself
2409 * runnable without the overhead of this.
2410 *
2411 * Returns %true if @p was woken up, %false if it was already running
2412 * or @state didn't match @p's state.
2413 */
2414static int try_to_wake_up(struct task_struct *p, unsigned int state,
2415              int wake_flags)
2416{
2417    int cpu, orig_cpu, this_cpu, success = 0;
2418    unsigned long flags;
2419    unsigned long en_flags = ENQUEUE_WAKEUP;
2420    struct rq *rq;
2421
2422    this_cpu = get_cpu();
2423
2424    smp_wmb();
2425    rq = task_rq_lock(p, &flags);
2426    if (!(p->state & state))
2427        goto out;
2428
2429    if (p->se.on_rq)
2430        goto out_running;
2431
2432    cpu = task_cpu(p);
2433    orig_cpu = cpu;
2434
2435#ifdef CONFIG_SMP
2436    if (unlikely(task_running(rq, p)))
2437        goto out_activate;
2438
2439    /*
2440     * In order to handle concurrent wakeups and release the rq->lock
2441     * we put the task in TASK_WAKING state.
2442     *
2443     * First fix up the nr_uninterruptible count:
2444     */
2445    if (task_contributes_to_load(p)) {
2446        if (likely(cpu_online(orig_cpu)))
2447            rq->nr_uninterruptible--;
2448        else
2449            this_rq()->nr_uninterruptible--;
2450    }
2451    p->state = TASK_WAKING;
2452
2453    if (p->sched_class->task_waking) {
2454        p->sched_class->task_waking(rq, p);
2455        en_flags |= ENQUEUE_WAKING;
2456    }
2457
2458    cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2459    if (cpu != orig_cpu)
2460        set_task_cpu(p, cpu);
2461    __task_rq_unlock(rq);
2462
2463    rq = cpu_rq(cpu);
2464    raw_spin_lock(&rq->lock);
2465
2466    /*
2467     * We migrated the task without holding either rq->lock, however
2468     * since the task is not on the task list itself, nobody else
2469     * will try to migrate the task, hence the rq should match the
2470     * cpu we just moved it to.
2471     */
2472    WARN_ON(task_cpu(p) != cpu);
2473    WARN_ON(p->state != TASK_WAKING);
2474
2475#ifdef CONFIG_SCHEDSTATS
2476    schedstat_inc(rq, ttwu_count);
2477    if (cpu == this_cpu)
2478        schedstat_inc(rq, ttwu_local);
2479    else {
2480        struct sched_domain *sd;
2481        for_each_domain(this_cpu, sd) {
2482            if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
2483                schedstat_inc(sd, ttwu_wake_remote);
2484                break;
2485            }
2486        }
2487    }
2488#endif /* CONFIG_SCHEDSTATS */
2489
2490out_activate:
2491#endif /* CONFIG_SMP */
2492    ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu,
2493              cpu == this_cpu, en_flags);
2494    success = 1;
2495out_running:
2496    ttwu_post_activation(p, rq, wake_flags, success);
2497out:
2498    task_rq_unlock(rq, &flags);
2499    put_cpu();
2500
2501    return success;
2502}
2503
2504/**
2505 * try_to_wake_up_local - try to wake up a local task with rq lock held
2506 * @p: the thread to be awakened
2507 *
2508 * Put @p on the run-queue if it's not already there. The caller must
2509 * ensure that this_rq() is locked, @p is bound to this_rq() and not
2510 * the current task. this_rq() stays locked over invocation.
2511 */
2512static void try_to_wake_up_local(struct task_struct *p)
2513{
2514    struct rq *rq = task_rq(p);
2515    bool success = false;
2516
2517    BUG_ON(rq != this_rq());
2518    BUG_ON(p == current);
2519    lockdep_assert_held(&rq->lock);
2520
2521    if (!(p->state & TASK_NORMAL))
2522        return;
2523
2524    if (!p->se.on_rq) {
2525        if (likely(!task_running(rq, p))) {
2526            schedstat_inc(rq, ttwu_count);
2527            schedstat_inc(rq, ttwu_local);
2528        }
2529        ttwu_activate(p, rq, false, false, true, ENQUEUE_WAKEUP);
2530        success = true;
2531    }
2532    ttwu_post_activation(p, rq, 0, success);
2533}
2534
2535/**
2536 * wake_up_process - Wake up a specific process
2537 * @p: The process to be woken up.
2538 *
2539 * Attempt to wake up the nominated process and move it to the set of runnable
2540 * processes. Returns 1 if the process was woken up, 0 if it was already
2541 * running.
2542 *
2543 * It may be assumed that this function implies a write memory barrier before
2544 * changing the task state if and only if any tasks are woken up.
2545 */
2546int wake_up_process(struct task_struct *p)
2547{
2548    return try_to_wake_up(p, TASK_ALL, 0);
2549}
2550EXPORT_SYMBOL(wake_up_process);
2551
2552int wake_up_state(struct task_struct *p, unsigned int state)
2553{
2554    return try_to_wake_up(p, state, 0);
2555}
2556
2557/*
2558 * Perform scheduler related setup for a newly forked process p.
2559 * p is forked by current.
2560 *
2561 * __sched_fork() is basic setup used by init_idle() too:
2562 */
2563static void __sched_fork(struct task_struct *p)
2564{
2565    p->se.exec_start = 0;
2566    p->se.sum_exec_runtime = 0;
2567    p->se.prev_sum_exec_runtime = 0;
2568    p->se.nr_migrations = 0;
2569
2570#ifdef CONFIG_SCHEDSTATS
2571    memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2572#endif
2573
2574    INIT_LIST_HEAD(&p->rt.run_list);
2575    p->se.on_rq = 0;
2576    INIT_LIST_HEAD(&p->se.group_node);
2577
2578#ifdef CONFIG_PREEMPT_NOTIFIERS
2579    INIT_HLIST_HEAD(&p->preempt_notifiers);
2580#endif
2581}
2582
2583/*
2584 * fork()/clone()-time setup:
2585 */
2586void sched_fork(struct task_struct *p, int clone_flags)
2587{
2588    int cpu = get_cpu();
2589
2590    __sched_fork(p);
2591    /*
2592     * We mark the process as TASK_RUNNING here. This guarantees that
2593     * nobody else will actually run it yet, and that a signal or other
2594     * external event cannot wake it up and insert it on the runqueue.
2595     */
2596    p->state = TASK_RUNNING;
2597
2598    /*
2599     * Revert to default priority/policy on fork if requested.
2600     */
2601    if (unlikely(p->sched_reset_on_fork)) {
2602        if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
2603            p->policy = SCHED_NORMAL;
2604            p->normal_prio = p->static_prio;
2605        }
2606
2607        if (PRIO_TO_NICE(p->static_prio) < 0) {
2608            p->static_prio = NICE_TO_PRIO(0);
2609            p->normal_prio = p->static_prio;
2610            set_load_weight(p);
2611        }
2612
2613        /*
2614         * We don't need the reset flag anymore after the fork. It has
2615         * fulfilled its duty:
2616         */
2617        p->sched_reset_on_fork = 0;
2618    }
2619
2620    /*
2621     * Make sure we do not leak PI boosting priority to the child.
2622     */
2623    p->prio = current->normal_prio;
2624
2625    if (!rt_prio(p->prio))
2626        p->sched_class = &fair_sched_class;
2627
2628    if (p->sched_class->task_fork)
2629        p->sched_class->task_fork(p);
2630
2631    /*
2632     * The child is not yet in the pid-hash so no cgroup attach races,
2633     * and the cgroup is pinned to this child because cgroup_fork()
2634     * is run before sched_fork().
2635     *
2636     * Silence PROVE_RCU.
2637     */
2638    rcu_read_lock();
2639    set_task_cpu(p, cpu);
2640    rcu_read_unlock();
2641
2642#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
2643    if (likely(sched_info_on()))
2644        memset(&p->sched_info, 0, sizeof(p->sched_info));
2645#endif
2646#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
2647    p->oncpu = 0;
2648#endif
2649#ifdef CONFIG_PREEMPT
2650    /* Want to start with kernel preemption disabled. */
2651    task_thread_info(p)->preempt_count = 1;
2652#endif
2653#ifdef CONFIG_SMP
2654    plist_node_init(&p->pushable_tasks, MAX_PRIO);
2655#endif
2656
2657    put_cpu();
2658}
2659
2660/*
2661 * wake_up_new_task - wake up a newly created task for the first time.
2662 *
2663 * This function will do some initial scheduler statistics housekeeping
2664 * that must be done for every newly created context, then puts the task
2665 * on the runqueue and wakes it.
2666 */
2667void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2668{
2669    unsigned long flags;
2670    struct rq *rq;
2671    int cpu __maybe_unused = get_cpu();
2672
2673#ifdef CONFIG_SMP
2674    rq = task_rq_lock(p, &flags);
2675    p->state = TASK_WAKING;
2676
2677    /*
2678     * Fork balancing, do it here and not earlier because:
2679     * - cpus_allowed can change in the fork path
2680     * - any previously selected cpu might disappear through hotplug
2681     *
2682     * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2683     * without people poking at ->cpus_allowed.
2684     */
2685    cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
2686    set_task_cpu(p, cpu);
2687
2688    p->state = TASK_RUNNING;
2689    task_rq_unlock(rq, &flags);
2690#endif
2691
2692    rq = task_rq_lock(p, &flags);
2693    activate_task(rq, p, 0);
2694    trace_sched_wakeup_new(p, 1);
2695    check_preempt_curr(rq, p, WF_FORK);
2696#ifdef CONFIG_SMP
2697    if (p->sched_class->task_woken)
2698        p->sched_class->task_woken(rq, p);
2699#endif
2700    task_rq_unlock(rq, &flags);
2701    put_cpu();
2702}
2703
2704#ifdef CONFIG_PREEMPT_NOTIFIERS
2705
2706/**
2707 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2708 * @notifier: notifier struct to register
2709 */
2710void preempt_notifier_register(struct preempt_notifier *notifier)
2711{
2712    hlist_add_head(&notifier->link, &current->preempt_notifiers);
2713}
2714EXPORT_SYMBOL_GPL(preempt_notifier_register);
2715
2716/**
2717 * preempt_notifier_unregister - no longer interested in preemption notifications
2718 * @notifier: notifier struct to unregister
2719 *
2720 * This is safe to call from within a preemption notifier.
2721 */
2722void preempt_notifier_unregister(struct preempt_notifier *notifier)
2723{
2724    hlist_del(&notifier->link);
2725}
2726EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
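
/*
 * Minimal usage sketch (hypothetical caller; struct preempt_ops and
 * preempt_notifier_init() come from <linux/preempt.h>). Registration
 * must be done by the task that wants the callbacks, i.e. as current:
 *
 *	static void my_in(struct preempt_notifier *pn, int cpu) { }
 *	static void my_out(struct preempt_notifier *pn,
 *			   struct task_struct *next) { }
 *	static struct preempt_ops my_ops = {
 *		.sched_in  = my_in,
 *		.sched_out = my_out,
 *	};
 *	static struct preempt_notifier my_pn;
 *
 *	preempt_notifier_init(&my_pn, &my_ops);
 *	preempt_notifier_register(&my_pn);
 *
 * KVM uses this mechanism to save/restore guest state around
 * involuntary context switches of a vcpu thread.
 */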
2727
2728static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2729{
2730    struct preempt_notifier *notifier;
2731    struct hlist_node *node;
2732
2733    hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2734        notifier->ops->sched_in(notifier, raw_smp_processor_id());
2735}
2736
2737static void
2738fire_sched_out_preempt_notifiers(struct task_struct *curr,
2739                 struct task_struct *next)
2740{
2741    struct preempt_notifier *notifier;
2742    struct hlist_node *node;
2743
2744    hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
2745        notifier->ops->sched_out(notifier, next);
2746}
2747
2748#else /* !CONFIG_PREEMPT_NOTIFIERS */
2749
2750static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
2751{
2752}
2753
2754static void
2755fire_sched_out_preempt_notifiers(struct task_struct *curr,
2756                 struct task_struct *next)
2757{
2758}
2759
2760#endif /* CONFIG_PREEMPT_NOTIFIERS */
2761
2762/**
2763 * prepare_task_switch - prepare to switch tasks
2764 * @rq: the runqueue preparing to switch
2765 * @prev: the current task that is being switched out
2766 * @next: the task we are going to switch to.
2767 *
2768 * This is called with the rq lock held and interrupts off. It must
2769 * be paired with a subsequent finish_task_switch after the context
2770 * switch.
2771 *
2772 * prepare_task_switch sets up locking and calls architecture specific
2773 * hooks.
2774 */
2775static inline void
2776prepare_task_switch(struct rq *rq, struct task_struct *prev,
2777            struct task_struct *next)
2778{
2779    fire_sched_out_preempt_notifiers(prev, next);
2780    prepare_lock_switch(rq, next);
2781    prepare_arch_switch(next);
2782}
2783
2784/**
2785 * finish_task_switch - clean up after a task-switch
2786 * @rq: runqueue associated with task-switch
2787 * @prev: the thread we just switched away from.
2788 *
2789 * finish_task_switch must be called after the context switch, paired
2790 * with a prepare_task_switch call before the context switch.
2791 * finish_task_switch will reconcile locking set up by prepare_task_switch,
2792 * and do any other architecture-specific cleanup actions.
2793 *
2794 * Note that we may have delayed dropping an mm in context_switch(). If
2795 * so, we finish that here outside of the runqueue lock. (Doing it
2796 * with the lock held can cause deadlocks; see schedule() for
2797 * details.)
2798 */
2799static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2800    __releases(rq->lock)
2801{
2802    struct mm_struct *mm = rq->prev_mm;
2803    long prev_state;
2804
2805    rq->prev_mm = NULL;
2806
2807    /*
2808     * A task struct has one reference for the use as "current".
2809     * If a task dies, then it sets TASK_DEAD in tsk->state and calls
2810     * schedule one last time. The schedule call will never return, and
2811     * the scheduled task must drop that reference.
2812     * The test for TASK_DEAD must occur while the runqueue locks are
2813     * still held, otherwise prev could be scheduled on another cpu, die
2814     * there before we look at prev->state, and then the reference would
2815     * be dropped twice.
2816     * Manfred Spraul <manfred@colorfullife.com>
2817     */
2818    prev_state = prev->state;
2819    finish_arch_switch(prev);
2820#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2821    local_irq_disable();
2822#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2823    perf_event_task_sched_in(current);
2824#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
2825    local_irq_enable();
2826#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
2827    finish_lock_switch(rq, prev);
2828
2829    fire_sched_in_preempt_notifiers(current);
2830    if (mm)
2831        mmdrop(mm);
2832    if (unlikely(prev_state == TASK_DEAD)) {
2833        /*
2834         * Remove function-return probe instances associated with this
2835         * task and put them back on the free list.
2836         */
2837        kprobe_flush_task(prev);
2838        put_task_struct(prev);
2839    }
2840}
2841
2842#ifdef CONFIG_SMP
2843
2844/* assumes rq->lock is held */
2845static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
2846{
2847    if (prev->sched_class->pre_schedule)
2848        prev->sched_class->pre_schedule(rq, prev);
2849}
2850
2851/* rq->lock is NOT held, but preemption is disabled */
2852static inline void post_schedule(struct rq *rq)
2853{
2854    if (rq->post_schedule) {
2855        unsigned long flags;
2856
2857        raw_spin_lock_irqsave(&rq->lock, flags);
2858        if (rq->curr->sched_class->post_schedule)
2859            rq->curr->sched_class->post_schedule(rq);
2860        raw_spin_unlock_irqrestore(&rq->lock, flags);
2861
2862        rq->post_schedule = 0;
2863    }
2864}
2865
2866#else
2867
2868static inline void pre_schedule(struct rq *rq, struct task_struct *p)
2869{
2870}
2871
2872static inline void post_schedule(struct rq *rq)
2873{
2874}
2875
2876#endif
2877
2878/**
2879 * schedule_tail - first thing a freshly forked thread must call.
2880 * @prev: the thread we just switched away from.
2881 */
2882asmlinkage void schedule_tail(struct task_struct *prev)
2883    __releases(rq->lock)
2884{
2885    struct rq *rq = this_rq();
2886
2887    finish_task_switch(rq, prev);
2888
2889    /*
2890     * FIXME: do we need to worry about rq being invalidated by the
2891     * task_switch?
2892     */
2893    post_schedule(rq);
2894
2895#ifdef __ARCH_WANT_UNLOCKED_CTXSW
2896    /* In this case, finish_task_switch does not reenable preemption */
2897    preempt_enable();
2898#endif
2899    if (current->set_child_tid)
2900        put_user(task_pid_vnr(current), current->set_child_tid);
2901}
2902
2903/*
2904 * context_switch - switch to the new MM and the new
2905 * thread's register state.
2906 */
2907static inline void
2908context_switch(struct rq *rq, struct task_struct *prev,
2909           struct task_struct *next)
2910{
2911    struct mm_struct *mm, *oldmm;
2912
2913    prepare_task_switch(rq, prev, next);
2914    trace_sched_switch(prev, next);
2915    mm = next->mm;
2916    oldmm = prev->active_mm;
2917    /*
2918     * For paravirt, this is coupled with an exit in switch_to to
2919     * combine the page table reload and the switch backend into
2920     * one hypercall.
2921     */
2922    arch_start_context_switch(prev);
2923
2924    if (!mm) {
2925        next->active_mm = oldmm;
2926        atomic_inc(&oldmm->mm_count);
2927        enter_lazy_tlb(oldmm, next);
2928    } else
2929        switch_mm(oldmm, mm, next);
2930
2931    if (!prev->mm) {
2932        prev->active_mm = NULL;
2933        rq->prev_mm = oldmm;
2934    }
2935    /*
2936     * The runqueue lock will be released by the next
2937     * task (which is an invalid locking op but in the case
2938     * of the scheduler an obvious special case), so we
2939     * do an early lockdep release here:
2940     */
2941#ifndef __ARCH_WANT_UNLOCKED_CTXSW
2942    spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2943#endif
2944
2945    /* Here we just switch the register state and the stack. */
2946    switch_to(prev, next, prev);
2947
2948    barrier();
2949    /*
2950     * this_rq must be evaluated again because prev may have moved
2951     * CPUs since it called schedule(), thus the 'rq' on its stack
2952     * frame will be invalid.
2953     */
2954    finish_task_switch(this_rq(), prev);
2955}
2956
2957/*
2958 * nr_running, nr_uninterruptible and nr_context_switches:
2959 *
2960 * externally visible scheduler statistics: current number of runnable
2961 * threads, current number of uninterruptible-sleeping threads, total
2962 * number of context switches performed since bootup.
2963 */
2964unsigned long nr_running(void)
2965{
2966    unsigned long i, sum = 0;
2967
2968    for_each_online_cpu(i)
2969        sum += cpu_rq(i)->nr_running;
2970
2971    return sum;
2972}
2973
2974unsigned long nr_uninterruptible(void)
2975{
2976    unsigned long i, sum = 0;
2977
2978    for_each_possible_cpu(i)
2979        sum += cpu_rq(i)->nr_uninterruptible;
2980
2981    /*
2982     * Since we read the counters locklessly, the sum might be slightly
2983     * inaccurate. Do not allow it to go below zero though:
2984     */
2985    if (unlikely((long)sum < 0))
2986        sum = 0;
2987
2988    return sum;
2989}
2990
2991unsigned long long nr_context_switches(void)
2992{
2993    int i;
2994    unsigned long long sum = 0;
2995
2996    for_each_possible_cpu(i)
2997        sum += cpu_rq(i)->nr_switches;
2998
2999    return sum;
3000}
3001
3002unsigned long nr_iowait(void)
3003{
3004    unsigned long i, sum = 0;
3005
3006    for_each_possible_cpu(i)
3007        sum += atomic_read(&cpu_rq(i)->nr_iowait);
3008
3009    return sum;
3010}
3011
3012unsigned long nr_iowait_cpu(int cpu)
3013{
3014    struct rq *this = cpu_rq(cpu);
3015    return atomic_read(&this->nr_iowait);
3016}
3017
3018unsigned long this_cpu_load(void)
3019{
3020    struct rq *this = this_rq();
3021    return this->cpu_load[0];
3022}
3023
3024
3025/* Variables and functions for calc_load */
3026static atomic_long_t calc_load_tasks;
3027static unsigned long calc_load_update;
3028unsigned long avenrun[3];
3029EXPORT_SYMBOL(avenrun);
3030
3031static long calc_load_fold_active(struct rq *this_rq)
3032{
3033    long nr_active, delta = 0;
3034
3035    nr_active = this_rq->nr_running;
3036    nr_active += (long) this_rq->nr_uninterruptible;
3037
3038    if (nr_active != this_rq->calc_load_active) {
3039        delta = nr_active - this_rq->calc_load_active;
3040        this_rq->calc_load_active = nr_active;
3041    }
3042
3043    return delta;
3044}
3045
3046static unsigned long
3047calc_load(unsigned long load, unsigned long exp, unsigned long active)
3048{
3049    load *= exp;
3050    load += active * (FIXED_1 - exp);
3051    load += 1UL << (FSHIFT - 1);
3052    return load >> FSHIFT;
3053}
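
/*
 * Worked example, assuming the usual fixed-point constants from
 * <linux/sched.h> (FSHIFT = 11, so FIXED_1 = 2048, EXP_1 = 1884 for
 * the 1-minute average, LOAD_FREQ ~= 5s): starting from avenrun[0] == 0
 * with two runnable tasks (active = 2 * 2048 = 4096), one update gives
 * (0 * 1884 + 4096 * (2048 - 1884) + 1024) >> 11 = 328, i.e. a load
 * average of 328/2048 ~= 0.16 after the first five-second interval.
 */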
3054
3055#ifdef CONFIG_NO_HZ
3056/*
3057 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
3058 *
3059 * When making the ILB scale, we should try to pull this in as well.
3060 */
3061static atomic_long_t calc_load_tasks_idle;
3062
3063static void calc_load_account_idle(struct rq *this_rq)
3064{
3065    long delta;
3066
3067    delta = calc_load_fold_active(this_rq);
3068    if (delta)
3069        atomic_long_add(delta, &calc_load_tasks_idle);
3070}
3071
3072static long calc_load_fold_idle(void)
3073{
3074    long delta = 0;
3075
3076    /*
3077     * It's got a race; we don't care...
3078     */
3079    if (atomic_long_read(&calc_load_tasks_idle))
3080        delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
3081
3082    return delta;
3083}
3084
3085/**
3086 * fixed_power_int - compute: x^n, in O(log n) time
3087 *
3088 * @x: base of the power
3089 * @frac_bits: fractional bits of @x
3090 * @n: power to raise @x to.
3091 *
3092 * By exploiting the relation between the definition of the natural power
3093 * function: x^n := x*x*...*x (x multiplied by itself n times), and
3094 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
3095 * (where: n_i \elem {0, 1}, the binary vector representing n),
3096 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
3097 * of course trivially computable in O(log_2 n), the length of our binary
3098 * vector.
3099 */
3100static unsigned long
3101fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
3102{
3103    unsigned long result = 1UL << frac_bits;
3104
3105    if (n) for (;;) {
3106        if (n & 1) {
3107            result *= x;
3108            result += 1UL << (frac_bits - 1);
3109            result >>= frac_bits;
3110        }
3111        n >>= 1;
3112        if (!n)
3113            break;
3114        x *= x;
3115        x += 1UL << (frac_bits - 1);
3116        x >>= frac_bits;
3117    }
3118
3119    return result;
3120}
3121
3122/*
3123 *  a1 = a0 * e + a * (1 - e)
3124 *
3125 *  a2 = a1 * e + a * (1 - e)
3126 *     = (a0 * e + a * (1 - e)) * e + a * (1 - e)
3127 *     = a0 * e^2 + a * (1 - e) * (1 + e)
3128 *
3129 *  a3 = a2 * e + a * (1 - e)
3130 *     = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
3131 *     = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
3132 *
3133 *  ...
3134 *
3135 *  an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1))    [1]
3136 *     = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
3137 *     = a0 * e^n + a * (1 - e^n)
3138 *
3139 * [1] application of the geometric series:
3140 *
3141 *            n          1 - x^(n+1)
3142 *   S_n := \Sum x^i = -------------
3143 *           i=0           1 - x
3144 */
3145static unsigned long
3146calc_load_n(unsigned long load, unsigned long exp,
3147        unsigned long active, unsigned int n)
3148{
3149
3150    return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
3151}
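
/*
 * Example of what this buys us: a CPU that stays fully idle for one
 * minute misses n = 12 LOAD_FREQ cycles. Instead of looping, the
 * 1-minute average is decayed in one step by EXP_1^12, and since
 * (1884/2048)^12 ~= 0.37 ~= 1/e, an idle minute (with active == 0)
 * shrinks avenrun[0] to roughly a third of its previous value,
 * exactly as if the 12 updates had been applied one at a time.
 */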
3152
3153/*
3154 * NO_HZ can leave us missing all per-cpu ticks calling
3155 * calc_load_account_active(), but since an idle CPU folds its delta into
3156 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
3157 * in the pending idle delta if our idle period crossed a load cycle boundary.
3158 *
3159 * Once we've updated the global active value, we need to apply the exponential
3160 * weights adjusted to the number of cycles missed.
3161 */
3162static void calc_global_nohz(unsigned long ticks)
3163{
3164    long delta, active, n;
3165
3166    if (time_before(jiffies, calc_load_update))
3167        return;
3168
3169    /*
3170     * If we crossed a calc_load_update boundary, make sure to fold
3171     * any pending idle changes, the respective CPUs might have
3172     * missed the tick driven calc_load_account_active() update
3173     * due to NO_HZ.
3174     */
3175    delta = calc_load_fold_idle();
3176    if (delta)
3177        atomic_long_add(delta, &calc_load_tasks);
3178
3179    /*
3180     * If we were idle for multiple load cycles, apply them.
3181     */
3182    if (ticks >= LOAD_FREQ) {
3183        n = ticks / LOAD_FREQ;
3184
3185        active = atomic_long_read(&calc_load_tasks);
3186        active = active > 0 ? active * FIXED_1 : 0;
3187
3188        avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
3189        avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
3190        avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
3191
3192        calc_load_update += n * LOAD_FREQ;
3193    }
3194
3195    /*
3196     * It's possible the remainder of the above division also crosses
3197     * a LOAD_FREQ period, the regular check in calc_global_load()
3198     * which comes after this will take care of that.
3199     *
3200     * Consider us being 11 ticks before a cycle completion, and us
3201     * sleeping for 4*LOAD_FREQ + 22 ticks, then the above code will
3202     * age us 4 cycles, and the test in calc_global_load() will
3203     * pick up the final one.
3204     */
3205}
3206#else
3207static void calc_load_account_idle(struct rq *this_rq)
3208{
3209}
3210
3211static inline long calc_load_fold_idle(void)
3212{
3213    return 0;
3214}
3215
3216static void calc_global_nohz(unsigned long ticks)
3217{
3218}
3219#endif
3220
3221/**
3222 * get_avenrun - get the load average array
3223 * @loads: pointer to dest load array
3224 * @offset: offset to add
3225 * @shift: shift count to shift the result left
3226 *
3227 * These values are estimates at best, so no need for locking.
3228 */
3229void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
3230{
3231    loads[0] = (avenrun[0] + offset) << shift;
3232    loads[1] = (avenrun[1] + offset) << shift;
3233    loads[2] = (avenrun[2] + offset) << shift;
3234}
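
/*
 * The canonical consumer is /proc/loadavg: fs/proc/loadavg.c calls
 * get_avenrun(avnrun, FIXED_1/200, 0) and then splits each value with
 * LOAD_INT(x) = x >> FSHIFT and LOAD_FRAC(x) = ((x & (FIXED_1-1)) *
 * 100) >> FSHIFT; the FIXED_1/200 offset rounds, rather than
 * truncates, the two displayed decimal places.
 */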
3235
3236/*
3237 * calc_global_load - update the avenrun load estimates 10 ticks after
3238 * the CPUs have updated calc_load_tasks.
3239 */
3240void calc_global_load(unsigned long ticks)
3241{
3242    long active;
3243
3244    calc_global_nohz(ticks);
3245
3246    if (time_before(jiffies, calc_load_update + 10))
3247        return;
3248
3249    active = atomic_long_read(&calc_load_tasks);
3250    active = active > 0 ? active * FIXED_1 : 0;
3251
3252    avenrun[0] = calc_load(avenrun[0], EXP_1, active);
3253    avenrun[1] = calc_load(avenrun[1], EXP_5, active);
3254    avenrun[2] = calc_load(avenrun[2], EXP_15, active);
3255
3256    calc_load_update += LOAD_FREQ;
3257}
3258
3259/*
3260 * Called from update_cpu_load() to periodically update this CPU's
3261 * active count.
3262 */
3263static void calc_load_account_active(struct rq *this_rq)
3264{
3265    long delta;
3266
3267    if (time_before(jiffies, this_rq->calc_load_update))
3268        return;
3269
3270    delta = calc_load_fold_active(this_rq);
3271    delta += calc_load_fold_idle();
3272    if (delta)
3273        atomic_long_add(delta, &calc_load_tasks);
3274
3275    this_rq->calc_load_update += LOAD_FREQ;
3276}
3277
3278/*
3279 * The exact cpuload at various idx values, calculated at every tick would be
3280 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
3281 *
3282 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
3283 * on the nth tick, when the cpu may be busy, then we have:
3284 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3285 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
3286 *
3287 * decay_load_missed() below does efficient calculation of
3288 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
3289 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
3290 *
3291 * The calculation is approximated on a 128 point scale.
3292 * degrade_zero_ticks is the number of ticks after which load at any
3293 * particular idx is approximated to be zero.
3294 * degrade_factor is a precomputed table, a row for each load idx.
3295 * Each column corresponds to degradation factor for a power of two ticks,
3296 * based on 128 point scale.
3297 * Example:
3298 * row 2, col 3 (=12) says that the degradation at load idx 2 after
3299 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
3300 *
3301 * With these power-of-2 load factors, we can degrade the load n times
3302 * by looking at the 1 bits in n and doing as many mult/shifts instead
3303 * of the n mult/shifts needed by exact degradation.
3304 */
3305#define DEGRADE_SHIFT 7
3306static const unsigned char
3307        degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
3308static const unsigned char
3309        degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
3310                    {0, 0, 0, 0, 0, 0, 0, 0},
3311                    {64, 32, 8, 0, 0, 0, 0, 0},
3312                    {96, 72, 40, 12, 1, 0, 0},
3313                    {112, 98, 75, 43, 15, 1, 0},
3314                    {120, 112, 98, 76, 45, 16, 2} };
3315
3316/*
3317 * Update cpu_load for any missed ticks due to tickless idle. The backlog
3318 * only builds up while the CPU is idle, so we just decay the old load
3319 * without adding any new load.
3320 */
3321static unsigned long
3322decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
3323{
3324    int j = 0;
3325
3326    if (!missed_updates)
3327        return load;
3328
3329    if (missed_updates >= degrade_zero_ticks[idx])
3330        return 0;
3331
3332    if (idx == 1)
3333        return load >> missed_updates;
3334
3335    while (missed_updates) {
3336        if (missed_updates % 2)
3337            load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
3338
3339        missed_updates >>= 1;
3340        j++;
3341    }
3342    return load;
3343}
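
/*
 * Worked example: decaying cpu_load[2] after 10 missed ticks.
 * 10 = 1010b, so the loop multiplies by degrade_factor[2][1] = 72/128
 * (the 2-tick column) and degrade_factor[2][3] = 12/128 (the 8-tick
 * column): load * 72/128 * 12/128 ~= load * 0.053, a close match for
 * the exact (3/4)^10 ~= 0.056 at the cost of two multiply+shifts.
 */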
3344
3345/*
3346 * Update rq->cpu_load[] statistics. This function is usually called every
3347 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
3348 * every tick. We fix it up based on jiffies.
3349 */
3350static void update_cpu_load(struct rq *this_rq)
3351{
3352    unsigned long this_load = this_rq->load.weight;
3353    unsigned long curr_jiffies = jiffies;
3354    unsigned long pending_updates;
3355    int i, scale;
3356
3357    this_rq->nr_load_updates++;
3358
3359    /* Avoid repeated calls on same jiffy, when moving in and out of idle */
3360    if (curr_jiffies == this_rq->last_load_update_tick)
3361        return;
3362
3363    pending_updates = curr_jiffies - this_rq->last_load_update_tick;
3364    this_rq->last_load_update_tick = curr_jiffies;
3365
3366    /* Update our load: */
3367    this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
3368    for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
3369        unsigned long old_load, new_load;
3370
3371        /* scale is effectively 1 << i now, and >> i divides by scale */
3372
3373        old_load = this_rq->cpu_load[i];
3374        old_load = decay_load_missed(old_load, pending_updates - 1, i);
3375        new_load = this_load;
3376        /*
3377         * Round up the averaging division if load is increasing. This
3378         * prevents us from getting stuck on 9 if the load is 10, for
3379         * example.
3380         */
3381        if (new_load > old_load)
3382            new_load += scale - 1;
3383
3384        this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
3385    }
3386
3387    sched_avg_update(this_rq);
3388}
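
/*
 * The rounding fix above in numbers: at idx 1 (scale = 2) with
 * old_load = 9 and a true load of 10, plain (9 * 1 + 10) >> 1 stays
 * at 9 forever; with new_load bumped by scale - 1 to 11, we get
 * (9 + 11) >> 1 = 10, so the average can actually reach the higher
 * steady-state load.
 */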
3389
3390static void update_cpu_load_active(struct rq *this_rq)
3391{
3392    update_cpu_load(this_rq);
3393
3394    calc_load_account_active(this_rq);
3395}
3396
3397#ifdef CONFIG_SMP
3398
3399/*
3400 * sched_exec - execve() is a valuable balancing opportunity, because at
3401 * this point the task has the smallest effective memory and cache footprint.
3402 */
3403void sched_exec(void)
3404{
3405    struct task_struct *p = current;
3406    unsigned long flags;
3407    struct rq *rq;
3408    int dest_cpu;
3409
3410    rq = task_rq_lock(p, &flags);
3411    dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
3412    if (dest_cpu == smp_processor_id())
3413        goto unlock;
3414
3415    /*
3416     * select_task_rq() can race against ->cpus_allowed
3417     */
3418    if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
3419        likely(cpu_active(dest_cpu)) && migrate_task(p, rq)) {
3420        struct migration_arg arg = { p, dest_cpu };
3421
3422        task_rq_unlock(rq, &flags);
3423        stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
3424        return;
3425    }
3426unlock:
3427    task_rq_unlock(rq, &flags);
3428}
3429
3430#endif
3431
3432DEFINE_PER_CPU(struct kernel_stat, kstat);
3433
3434EXPORT_PER_CPU_SYMBOL(kstat);
3435
3436/*
3437 * Return any ns on the sched_clock that have not yet been accounted to
3438 * @p, in case that task is currently running.
3439 *
3440 * Called with task_rq_lock() held on @rq.
3441 */
3442static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
3443{
3444    u64 ns = 0;
3445
3446    if (task_current(rq, p)) {
3447        update_rq_clock(rq);
3448        ns = rq->clock_task - p->se.exec_start;
3449        if ((s64)ns < 0)
3450            ns = 0;
3451    }
3452
3453    return ns;
3454}
3455
3456unsigned long long task_delta_exec(struct task_struct *p)
3457{
3458    unsigned long flags;
3459    struct rq *rq;
3460    u64 ns = 0;
3461
3462    rq = task_rq_lock(p, &flags);
3463    ns = do_task_delta_exec(p, rq);
3464    task_rq_unlock(rq, &flags);
3465
3466    return ns;
3467}
3468
3469/*
3470 * Return accounted runtime for the task.
3471 * In case the task is currently running, return the runtime plus the
3472 * pending runtime that has not been accounted yet.
3473 */
3474unsigned long long task_sched_runtime(struct task_struct *p)
3475{
3476    unsigned long flags;
3477    struct rq *rq;
3478    u64 ns = 0;
3479
3480    rq = task_rq_lock(p, &flags);
3481    ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
3482    task_rq_unlock(rq, &flags);
3483
3484    return ns;
3485}
3486
3487/*
3488 * Return sum_exec_runtime for the thread group.
3489 * In case the task is currently running, return the sum plus current's
3490 * pending runtime that has not been accounted yet.
3491 *
3492 * Note that the thread group might have other running tasks as well,
3493 * so the return value does not include pending runtime that those
3494 * other running tasks might have.
3495 */
3496unsigned long long thread_group_sched_runtime(struct task_struct *p)
3497{
3498    struct task_cputime totals;
3499    unsigned long flags;
3500    struct rq *rq;
3501    u64 ns;
3502
3503    rq = task_rq_lock(p, &flags);
3504    thread_group_cputime(p, &totals);
3505    ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
3506    task_rq_unlock(rq, &flags);
3507
3508    return ns;
3509}
3510
3511/*
3512 * Account user cpu time to a process.
3513 * @p: the process that the cpu time gets accounted to
3514 * @cputime: the cpu time spent in user space since the last update
3515 * @cputime_scaled: cputime scaled by cpu frequency
3516 */
3517void account_user_time(struct task_struct *p, cputime_t cputime,
3518               cputime_t cputime_scaled)
3519{
3520    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3521    cputime64_t tmp;
3522
3523    /* Add user time to process. */
3524    p->utime = cputime_add(p->utime, cputime);
3525    p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
3526    account_group_user_time(p, cputime);
3527
3528    /* Add user time to cpustat. */
3529    tmp = cputime_to_cputime64(cputime);
3530    if (TASK_NICE(p) > 0)
3531        cpustat->nice = cputime64_add(cpustat->nice, tmp);
3532    else
3533        cpustat->user = cputime64_add(cpustat->user, tmp);
3534
3535    cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
3536    /* Account for user time used */
3537    acct_update_integrals(p);
3538}
3539
3540/*
3541 * Account guest cpu time to a process.
3542 * @p: the process that the cpu time gets accounted to
3543 * @cputime: the cpu time spent in virtual machine since the last update
3544 * @cputime_scaled: cputime scaled by cpu frequency
3545 */
3546static void account_guest_time(struct task_struct *p, cputime_t cputime,
3547                   cputime_t cputime_scaled)
3548{
3549    cputime64_t tmp;
3550    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3551
3552    tmp = cputime_to_cputime64(cputime);
3553
3554    /* Add guest time to process. */
3555    p->utime = cputime_add(p->utime, cputime);
3556    p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
3557    account_group_user_time(p, cputime);
3558    p->gtime = cputime_add(p->gtime, cputime);
3559
3560    /* Add guest time to cpustat. */
3561    if (TASK_NICE(p) > 0) {
3562        cpustat->nice = cputime64_add(cpustat->nice, tmp);
3563        cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
3564    } else {
3565        cpustat->user = cputime64_add(cpustat->user, tmp);
3566        cpustat->guest = cputime64_add(cpustat->guest, tmp);
3567    }
3568}
3569
3570/*
3571 * Account system cpu time to a process.
3572 * @p: the process that the cpu time gets accounted to
3573 * @hardirq_offset: the offset to subtract from hardirq_count()
3574 * @cputime: the cpu time spent in kernel space since the last update
3575 * @cputime_scaled: cputime scaled by cpu frequency
3576 */
3577void account_system_time(struct task_struct *p, int hardirq_offset,
3578             cputime_t cputime, cputime_t cputime_scaled)
3579{
3580    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3581    cputime64_t tmp;
3582
3583    if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
3584        account_guest_time(p, cputime, cputime_scaled);
3585        return;
3586    }
3587
3588    /* Add system time to process. */
3589    p->stime = cputime_add(p->stime, cputime);
3590    p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
3591    account_group_system_time(p, cputime);
3592
3593    /* Add system time to cpustat. */
3594    tmp = cputime_to_cputime64(cputime);
3595    if (hardirq_count() - hardirq_offset)
3596        cpustat->irq = cputime64_add(cpustat->irq, tmp);
3597    else if (in_serving_softirq())
3598        cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
3599    else
3600        cpustat->system = cputime64_add(cpustat->system, tmp);
3601
3602    cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
3603
3604    /* Account for system time used */
3605    acct_update_integrals(p);
3606}
3607
3608/*
3609 * Account for involuntary wait time.
3610 * @cputime: the cpu time spent in involuntary wait
3611 */
3612void account_steal_time(cputime_t cputime)
3613{
3614    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3615    cputime64_t cputime64 = cputime_to_cputime64(cputime);
3616
3617    cpustat->steal = cputime64_add(cpustat->steal, cputime64);
3618}
3619
3620/*
3621 * Account for idle time.
3622 * @cputime: the cpu time spent in idle wait
3623 */
3624void account_idle_time(cputime_t cputime)
3625{
3626    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3627    cputime64_t cputime64 = cputime_to_cputime64(cputime);
3628    struct rq *rq = this_rq();
3629
3630    if (atomic_read(&rq->nr_iowait) > 0)
3631        cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
3632    else
3633        cpustat->idle = cputime64_add(cpustat->idle, cputime64);
3634}
3635
3636#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3637
3638/*
3639 * Account a single tick of cpu time.
3640 * @p: the process that the cpu time gets accounted to
3641 * @user_tick: indicates if the tick is a user or a system tick
3642 */
3643void account_process_tick(struct task_struct *p, int user_tick)
3644{
3645    cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
3646    struct rq *rq = this_rq();
3647
3648    if (user_tick)
3649        account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3650    else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
3651        account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
3652                    one_jiffy_scaled);
3653    else
3654        account_idle_time(cputime_one_jiffy);
3655}
3656
3657/*
3658 * Account multiple ticks of steal time.
3659 * @ticks: number of stolen ticks
3661 */
3662void account_steal_ticks(unsigned long ticks)
3663{
3664    account_steal_time(jiffies_to_cputime(ticks));
3665}
3666
3667/*
3668 * Account multiple ticks of idle time.
3669 * @ticks: number of idle ticks
3670 */
3671void account_idle_ticks(unsigned long ticks)
3672{
3673    account_idle_time(jiffies_to_cputime(ticks));
3674}
3675
3676#endif
3677
3678/*
3679 * Use precise platform statistics if available:
3680 */
3681#ifdef CONFIG_VIRT_CPU_ACCOUNTING
3682void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3683{
3684    *ut = p->utime;
3685    *st = p->stime;
3686}
3687
3688void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3689{
3690    struct task_cputime cputime;
3691
3692    thread_group_cputime(p, &cputime);
3693
3694    *ut = cputime.utime;
3695    *st = cputime.stime;
3696}
3697#else
3698
3699#ifndef nsecs_to_cputime
3700# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
3701#endif
3702
3703void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3704{
3705    cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
3706
3707    /*
3708     * Use CFS's precise accounting:
3709     */
3710    rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
3711
3712    if (total) {
3713        u64 temp = rtime;
3714
3715        temp *= utime;
3716        do_div(temp, total);
3717        utime = (cputime_t)temp;
3718    } else
3719        utime = rtime;
3720
3721    /*
3722     * Compare with previous values, to keep monotonicity:
3723     */
3724    p->prev_utime = max(p->prev_utime, utime);
3725    p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
3726
3727    *ut = p->prev_utime;
3728    *st = p->prev_stime;
3729}
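
/*
 * Example of the scaling above (ignoring the prev_* monotonicity
 * clamps): if the tick samples say utime = 6 and stime = 4 (total =
 * 10) but CFS measured rtime = 20, utime is scaled to 20 * 6 / 10 =
 * 12 and stime becomes rtime - utime = 8, preserving the sampled
 * 60/40 split while matching the precise total runtime.
 */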
3730
3731/*
3732 * Must be called with siglock held.
3733 */
3734void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
3735{
3736    struct signal_struct *sig = p->signal;
3737    struct task_cputime cputime;
3738    cputime_t rtime, utime, total;
3739
3740    thread_group_cputime(p, &cputime);
3741
3742    total = cputime_add(cputime.utime, cputime.stime);
3743    rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
3744
3745    if (total) {
3746        u64 temp = rtime;
3747
3748        temp *= cputime.utime;
3749        do_div(temp, total);
3750        utime = (cputime_t)temp;
3751    } else
3752        utime = rtime;
3753
3754    sig->prev_utime = max(sig->prev_utime, utime);
3755    sig->prev_stime = max(sig->prev_stime,
3756                  cputime_sub(rtime, sig->prev_utime));
3757
3758    *ut = sig->prev_utime;
3759    *st = sig->prev_stime;
3760}
3761#endif
3762
3763/*
3764 * This function gets called by the timer code, with HZ frequency.
3765 * We call it with interrupts disabled.
3766 *
3767 * It also gets called by the fork code, when changing the parent's
3768 * timeslices.
3769 */
3770void scheduler_tick(void)
3771{
3772    int cpu = smp_processor_id();
3773    struct rq *rq = cpu_rq(cpu);
3774    struct task_struct *curr = rq->curr;
3775
3776    sched_clock_tick();
3777
3778    raw_spin_lock(&rq->lock);
3779    update_rq_clock(rq);
3780    update_cpu_load_active(rq);
3781    curr->sched_class->task_tick(rq, curr, 0);
3782    raw_spin_unlock(&rq->lock);
3783
3784    perf_event_task_tick();
3785
3786#ifdef CONFIG_SMP
3787    rq->idle_at_tick = idle_cpu(cpu);
3788    trigger_load_balance(rq, cpu);
3789#endif
3790}
3791
3792notrace unsigned long get_parent_ip(unsigned long addr)
3793{
3794    if (in_lock_functions(addr)) {
3795        addr = CALLER_ADDR2;
3796        if (in_lock_functions(addr))
3797            addr = CALLER_ADDR3;
3798    }
3799    return addr;
3800}
3801
3802#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
3803                defined(CONFIG_PREEMPT_TRACER))
3804
3805void __kprobes add_preempt_count(int val)
3806{
3807#ifdef CONFIG_DEBUG_PREEMPT
3808    /*
3809     * Underflow?
3810     */
3811    if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3812        return;
3813#endif
3814    preempt_count() += val;
3815#ifdef CONFIG_DEBUG_PREEMPT
3816    /*
3817     * Spinlock count overflowing soon?
3818     */
3819    DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
3820                PREEMPT_MASK - 10);
3821#endif
3822    if (preempt_count() == val)
3823        trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3824}
3825EXPORT_SYMBOL(add_preempt_count);
3826
3827void __kprobes sub_preempt_count(int val)
3828{
3829#ifdef CONFIG_DEBUG_PREEMPT
3830    /*
3831     * Underflow?
3832     */
3833    if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3834        return;
3835    /*
3836     * Is the spinlock portion underflowing?
3837     */
3838    if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3839            !(preempt_count() & PREEMPT_MASK)))
3840        return;
3841#endif
3842
3843    if (preempt_count() == val)
3844        trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3845    preempt_count() -= val;
3846}
3847EXPORT_SYMBOL(sub_preempt_count);
3848
3849#endif
3850
3851/*
3852 * Print scheduling while atomic bug:
3853 */
3854static noinline void __schedule_bug(struct task_struct *prev)
3855{
3856    struct pt_regs *regs = get_irq_regs();
3857
3858    printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3859        prev->comm, prev->pid, preempt_count());
3860
3861    debug_show_held_locks(prev);
3862    print_modules();
3863    if (irqs_disabled())
3864        print_irqtrace_events(prev);
3865
3866    if (regs)
3867        show_regs(regs);
3868    else
3869        dump_stack();
3870}
3871
3872/*
3873 * Various schedule()-time debugging checks and statistics:
3874 */
3875static inline void schedule_debug(struct task_struct *prev)
3876{
3877    /*
3878     * Test if we are atomic. Since do_exit() needs to call into
3879     * schedule() atomically, we ignore that path for now.
3880     * Otherwise, whine if we are scheduling when we should not be.
3881     */
3882    if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
3883        __schedule_bug(prev);
3884
3885    profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3886
3887    schedstat_inc(this_rq(), sched_count);
3888#ifdef CONFIG_SCHEDSTATS
3889    if (unlikely(prev->lock_depth >= 0)) {
3890        schedstat_inc(this_rq(), rq_sched_info.bkl_count);
3891        schedstat_inc(prev, sched_info.bkl_count);
3892    }
3893#endif
3894}
3895
3896static void put_prev_task(struct rq *rq, struct task_struct *prev)
3897{
3898    if (prev->se.on_rq)
3899        update_rq_clock(rq);
3900    prev->sched_class->put_prev_task(rq, prev);
3901}
3902
3903/*
3904 * Pick up the highest-prio task:
3905 */
3906static inline struct task_struct *
3907pick_next_task(struct rq *rq)
3908{
3909    const struct sched_class *class;
3910    struct task_struct *p;
3911
3912    /*
3913     * Optimization: we know that if all tasks are in
3914     * the fair class we can call that function directly:
3915     */
3916    if (likely(rq->nr_running == rq->cfs.nr_running)) {
3917        p = fair_sched_class.pick_next_task(rq);
3918        if (likely(p))
3919            return p;
3920    }
3921
3922    for_each_class(class) {
3923        p = class->pick_next_task(rq);
3924        if (p)
3925            return p;
3926    }
3927
3928    BUG(); /* the idle class will always have a runnable task */
3929}
3930
3931/*
3932 * schedule() is the main scheduler function.
3933 */
3934asmlinkage void __sched schedule(void)
3935{
3936    struct task_struct *prev, *next;
3937    unsigned long *switch_count;
3938    struct rq *rq;
3939    int cpu;
3940
3941need_resched:
3942    preempt_disable();
3943    cpu = smp_processor_id();
3944    rq = cpu_rq(cpu);
3945    rcu_note_context_switch(cpu);
3946    prev = rq->curr;
3947
3948    release_kernel_lock(prev);
3949need_resched_nonpreemptible:
3950
3951    schedule_debug(prev);
3952
3953    if (sched_feat(HRTICK))
3954        hrtick_clear(rq);
3955
3956    raw_spin_lock_irq(&rq->lock);
3957
3958    switch_count = &prev->nivcsw;
3959    if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3960        if (unlikely(signal_pending_state(prev->state, prev))) {
3961            prev->state = TASK_RUNNING;
3962        } else {
3963            /*
3964             * If a worker is going to sleep, notify and
3965             * ask workqueue whether it wants to wake up a
3966             * task to maintain concurrency. If so, wake
3967             * up the task.
3968             */
3969            if (prev->flags & PF_WQ_WORKER) {
3970                struct task_struct *to_wakeup;
3971
3972                to_wakeup = wq_worker_sleeping(prev, cpu);
3973                if (to_wakeup)
3974                    try_to_wake_up_local(to_wakeup);
3975            }
3976            deactivate_task(rq, prev, DEQUEUE_SLEEP);
3977        }
3978        switch_count = &prev->nvcsw;
3979    }
3980
3981    pre_schedule(rq, prev);
3982
3983    if (unlikely(!rq->nr_running))
3984        idle_balance(cpu, rq);
3985
3986    put_prev_task(rq, prev);
3987    next = pick_next_task(rq);
3988    clear_tsk_need_resched(prev);
3989    rq->skip_clock_update = 0;
3990
3991    if (likely(prev != next)) {
3992        sched_info_switch(prev, next);
3993        perf_event_task_sched_out(prev, next);
3994
3995        rq->nr_switches++;
3996        rq->curr = next;
3997        ++*switch_count;
3998
3999        context_switch(rq, prev, next); /* unlocks the rq */
4000        /*
4001         * The context switch has flipped the stack from under us
4002         * and restored the local variables which were saved when
4003         * this task called schedule() in the past. prev == current
4004         * is still correct, but it may now be on another cpu/rq.
4005         */
4006        cpu = smp_processor_id();
4007        rq = cpu_rq(cpu);
4008    } else
4009        raw_spin_unlock_irq(&rq->lock);
4010
4011    post_schedule(rq);
4012
4013    if (unlikely(reacquire_kernel_lock(prev)))
4014        goto need_resched_nonpreemptible;
4015
4016    preempt_enable_no_resched();
4017    if (need_resched())
4018        goto need_resched;
4019}
4020EXPORT_SYMBOL(schedule);
4021
4022#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
4023/*
4024 * Look out! "owner" is an entirely speculative pointer and
4025 * accesses through it are not reliable.
4026 */
4027int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
4028{
4029    unsigned int cpu;
4030    struct rq *rq;
4031
4032    if (!sched_feat(OWNER_SPIN))
4033        return 0;
4034
4035#ifdef CONFIG_DEBUG_PAGEALLOC
4036    /*
4037     * Need to access the cpu field knowing that
4038     * DEBUG_PAGEALLOC could have unmapped it if
4039     * the mutex owner just released it and exited.
4040     */
4041    if (probe_kernel_address(&owner->cpu, cpu))
4042        return 0;
4043#else
4044    cpu = owner->cpu;
4045#endif
4046
4047    /*
4048     * Even if the access succeeded (likely case),
4049     * the cpu field may no longer be valid.
4050     */
4051    if (cpu >= nr_cpumask_bits)
4052        return 0;
4053
4054    /*
4055     * We need to validate that we can do a
4056     * get_cpu() and that we have the percpu area.
4057     */
4058    if (!cpu_online(cpu))
4059        return 0;
4060
4061    rq = cpu_rq(cpu);
4062
4063    for (;;) {
4064        /*
4065         * Owner changed, break to re-assess state.
4066         */
4067        if (lock->owner != owner) {
4068            /*
4069             * If the lock has switched to a different owner,
4070             * we likely have heavy contention. Return 0 to quit
4071             * optimistic spinning and not contend further:
4072             */
4073            if (lock->owner)
4074                return 0;
4075            break;
4076        }
4077
4078        /*
4079         * Is that owner really running on that cpu?
4080         */
4081        if (task_thread_info(rq->curr) != owner || need_resched())
4082            return 0;
4083
4084        arch_mutex_cpu_relax();
4085    }
4086
4087    return 1;
4088}
4089#endif
4090
4091#ifdef CONFIG_PREEMPT
4092/*
4093 * This is the entry point to schedule() for in-kernel preemption
4094 * triggered by preempt_enable(). Preemption off a return from
4095 * interrupt is handled by preempt_schedule_irq() below instead.
4096 */
4097asmlinkage void __sched notrace preempt_schedule(void)
4098{
4099    struct thread_info *ti = current_thread_info();
4100
4101    /*
4102     * If there is a non-zero preempt_count or interrupts are disabled,
4103     * we do not want to preempt the current task. Just return..
4104     */
4105    if (likely(ti->preempt_count || irqs_disabled()))
4106        return;
4107
4108    do {
4109        add_preempt_count_notrace(PREEMPT_ACTIVE);
4110        schedule();
4111        sub_preempt_count_notrace(PREEMPT_ACTIVE);
4112
4113        /*
4114         * Check again in case we missed a preemption opportunity
4115         * between schedule and now.
4116         */
4117        barrier();
4118    } while (need_resched());
4119}
4120EXPORT_SYMBOL(preempt_schedule);
4121
4122/*
4123 * This is the entry point to schedule() for kernel preemption
4124 * off of irq context.
4125 * Note that this function is called and returns with irqs disabled,
4126 * which protects us against recursive calls from irq context.
4127 */
4128asmlinkage void __sched preempt_schedule_irq(void)
4129{
4130    struct thread_info *ti = current_thread_info();
4131
4132    /* Catch callers which need to be fixed */
4133    BUG_ON(ti->preempt_count || !irqs_disabled());
4134
4135    do {
4136        add_preempt_count(PREEMPT_ACTIVE);
4137        local_irq_enable();
4138        schedule();
4139        local_irq_disable();
4140        sub_preempt_count(PREEMPT_ACTIVE);
4141
4142        /*
4143         * Check again in case we missed a preemption opportunity
4144         * between schedule and now.
4145         */
4146        barrier();
4147    } while (need_resched());
4148}
4149
4150#endif /* CONFIG_PREEMPT */
4151
4152int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
4153              void *key)
4154{
4155    return try_to_wake_up(curr->private, mode, wake_flags);
4156}
4157EXPORT_SYMBOL(default_wake_function);
4158
4159/*
4160 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
4161 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
4162 * number) then we wake all the non-exclusive tasks and one exclusive task.
4163 *
4164 * There are circumstances in which we can try to wake a task which has already
4165 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
4166 * zero in this (rare) case, and we handle it by continuing to scan the queue.
4167 */
4168static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
4169            int nr_exclusive, int wake_flags, void *key)
4170{
4171    wait_queue_t *curr, *next;
4172
4173    list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
4174        unsigned flags = curr->flags;
4175
4176        if (curr->func(curr, mode, wake_flags, key) &&
4177                (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
4178            break;
4179    }
4180}
4181
4182/**
4183 * __wake_up - wake up threads blocked on a waitqueue.
4184 * @q: the waitqueue
4185 * @mode: which threads
4186 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4187 * @key: is directly passed to the wakeup function
4188 *
4189 * It may be assumed that this function implies a write memory barrier before
4190 * changing the task state if and only if any tasks are woken up.
4191 */
4192void __wake_up(wait_queue_head_t *q, unsigned int mode,
4193            int nr_exclusive, void *key)
4194{
4195    unsigned long flags;
4196
4197    spin_lock_irqsave(&q->lock, flags);
4198    __wake_up_common(q, mode, nr_exclusive, 0, key);
4199    spin_unlock_irqrestore(&q->lock, flags);
4200}
4201EXPORT_SYMBOL(__wake_up);
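/*
 * Usage sketch (illustrative only, not part of this file): the
 * classic pairing of wait_event_interruptible() and wake_up() built
 * on the machinery above. The wait queue head and the data_ready
 * flag are hypothetical driver-private state.
 */
#if 0
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int data_ready;

static int demo_consumer(void)
{
    /* sleeps until data_ready becomes non-zero or a signal arrives */
    if (wait_event_interruptible(demo_wq, data_ready))
        return -ERESTARTSYS;
    return 0;
}

static void demo_producer(void)
{
    data_ready = 1;
    wake_up(&demo_wq);    /* funnels into __wake_up() above */
}
#endif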
4202
4203/*
4204 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4205 */
4206void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4207{
4208    __wake_up_common(q, mode, 1, 0, NULL);
4209}
4210EXPORT_SYMBOL_GPL(__wake_up_locked);
4211
4212void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4213{
4214    __wake_up_common(q, mode, 1, 0, key);
4215}
4216EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4217
4218/**
4219 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
4220 * @q: the waitqueue
4221 * @mode: which threads
4222 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4223 * @key: opaque value to be passed to wakeup targets
4224 *
4225 * The sync wakeup differs in that the waker knows that it will schedule
4226 * away soon, so while the target thread will be woken up, it will not
4227 * be migrated to another CPU - ie. the two threads are 'synchronized'
4228 * with each other. This can prevent needless bouncing between CPUs.
4229 *
4230 * On UP it can prevent extra preemption.
4231 *
4232 * It may be assumed that this function implies a write memory barrier before
4233 * changing the task state if and only if any tasks are woken up.
4234 */
4235void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4236            int nr_exclusive, void *key)
4237{
4238    unsigned long flags;
4239    int wake_flags = WF_SYNC;
4240
4241    if (unlikely(!q))
4242        return;
4243
4244    if (unlikely(!nr_exclusive))
4245        wake_flags = 0;
4246
4247    spin_lock_irqsave(&q->lock, flags);
4248    __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
4249    spin_unlock_irqrestore(&q->lock, flags);
4250}
4251EXPORT_SYMBOL_GPL(__wake_up_sync_key);
4252
4253/*
4254 * __wake_up_sync - see __wake_up_sync_key()
4255 */
4256void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4257{
4258    __wake_up_sync_key(q, mode, nr_exclusive, NULL);
4259}
4260EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4261
4262/**
4263 * complete: - signals a single thread waiting on this completion
4264 * @x: holds the state of this particular completion
4265 *
4266 * This will wake up a single thread waiting on this completion. Threads will be
4267 * awakened in the same order in which they were queued.
4268 *
4269 * See also complete_all(), wait_for_completion() and related routines.
4270 *
4271 * It may be assumed that this function implies a write memory barrier before
4272 * changing the task state if and only if any tasks are woken up.
4273 */
4274void complete(struct completion *x)
4275{
4276    unsigned long flags;
4277
4278    spin_lock_irqsave(&x->wait.lock, flags);
4279    x->done++;
4280    __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
4281    spin_unlock_irqrestore(&x->wait.lock, flags);
4282}
4283EXPORT_SYMBOL(complete);
4284
4285/**
4286 * complete_all: - signals all threads waiting on this completion
4287 * @x: holds the state of this particular completion
4288 *
4289 * This will wake up all threads waiting on this particular completion event.
4290 *
4291 * It may be assumed that this function implies a write memory barrier before
4292 * changing the task state if and only if any tasks are woken up.
4293 */
4294void complete_all(struct completion *x)
4295{
4296    unsigned long flags;
4297
4298    spin_lock_irqsave(&x->wait.lock, flags);
4299    x->done += UINT_MAX/2;
4300    __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
4301    spin_unlock_irqrestore(&x->wait.lock, flags);
4302}
4303EXPORT_SYMBOL(complete_all);
4304
4305static inline long __sched
4306do_wait_for_common(struct completion *x, long timeout, int state)
4307{
4308    if (!x->done) {
4309        DECLARE_WAITQUEUE(wait, current);
4310
4311        __add_wait_queue_tail_exclusive(&x->wait, &wait);
4312        do {
4313            if (signal_pending_state(state, current)) {
4314                timeout = -ERESTARTSYS;
4315                break;
4316            }
4317            __set_current_state(state);
4318            spin_unlock_irq(&x->wait.lock);
4319            timeout = schedule_timeout(timeout);
4320            spin_lock_irq(&x->wait.lock);
4321        } while (!x->done && timeout);
4322        __remove_wait_queue(&x->wait, &wait);
4323        if (!x->done)
4324            return timeout;
4325    }
4326    x->done--;
4327    return timeout ?: 1;
4328}
4329
4330static long __sched
4331wait_for_common(struct completion *x, long timeout, int state)
4332{
4333    might_sleep();
4334
4335    spin_lock_irq(&x->wait.lock);
4336    timeout = do_wait_for_common(x, timeout, state);
4337    spin_unlock_irq(&x->wait.lock);
4338    return timeout;
4339}
4340
4341/**
4342 * wait_for_completion: - waits for completion of a task
4343 * @x: holds the state of this particular completion
4344 *
4345 * This waits to be signaled for completion of a specific task. It is NOT
4346 * interruptible and there is no timeout.
4347 *
4348 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4349 * and interrupt capability. Also see complete().
4350 */
4351void __sched wait_for_completion(struct completion *x)
4352{
4353    wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
4354}
4355EXPORT_SYMBOL(wait_for_completion);
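/*
 * Usage sketch (illustrative only): a completion is the preferred way
 * to wait for a one-shot event such as a helper thread finishing its
 * setup. The thread function and names below are hypothetical.
 */
#if 0
static DECLARE_COMPLETION(setup_done);

static int demo_helper(void *unused)
{
    /* ... perform setup work ... */
    complete(&setup_done);    /* wakes exactly one waiter */
    return 0;
}

static void demo_start_and_wait(void)
{
    kthread_run(demo_helper, NULL, "demo-helper");
    wait_for_completion(&setup_done);    /* uninterruptible wait */
}
#endif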
4356
4357/**
4358 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4359 * @x: holds the state of this particular completion
4360 * @timeout: timeout value in jiffies
4361 *
4362 * This waits for either a completion of a specific task to be signaled or for a
4363 * specified timeout to expire. The timeout is in jiffies. It is not
4364 * interruptible.
4365 */
4366unsigned long __sched
4367wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4368{
4369    return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
4370}
4371EXPORT_SYMBOL(wait_for_completion_timeout);
4372
4373/**
4374 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4375 * @x: holds the state of this particular completion
4376 *
4377 * This waits for completion of a specific task to be signaled. It is
4378 * interruptible.
4379 */
4380int __sched wait_for_completion_interruptible(struct completion *x)
4381{
4382    long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
4383    if (t == -ERESTARTSYS)
4384        return t;
4385    return 0;
4386}
4387EXPORT_SYMBOL(wait_for_completion_interruptible);
4388
4389/**
4390 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4391 * @x: holds the state of this particular completion
4392 * @timeout: timeout value in jiffies
4393 *
4394 * This waits for either a completion of a specific task to be signaled or for a
4395 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4396 */
4397long __sched
4398wait_for_completion_interruptible_timeout(struct completion *x,
4399                      unsigned long timeout)
4400{
4401    return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
4402}
4403EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4404
4405/**
4406 * wait_for_completion_killable: - waits for completion of a task (killable)
4407 * @x: holds the state of this particular completion
4408 *
4409 * This waits to be signaled for completion of a specific task. It can be
4410 * interrupted by a kill signal.
4411 */
4412int __sched wait_for_completion_killable(struct completion *x)
4413{
4414    long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
4415    if (t == -ERESTARTSYS)
4416        return t;
4417    return 0;
4418}
4419EXPORT_SYMBOL(wait_for_completion_killable);
4420
4421/**
4422 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
4423 * @x: holds the state of this particular completion
4424 * @timeout: timeout value in jiffies
4425 *
4426 * This waits for either a completion of a specific task to be
4427 * signaled or for a specified timeout to expire. It can be
4428 * interrupted by a kill signal. The timeout is in jiffies.
4429 */
4430long __sched
4431wait_for_completion_killable_timeout(struct completion *x,
4432                     unsigned long timeout)
4433{
4434    return wait_for_common(x, timeout, TASK_KILLABLE);
4435}
4436EXPORT_SYMBOL(wait_for_completion_killable_timeout);
4437
4438/**
4439 * try_wait_for_completion - try to decrement a completion without blocking
4440 * @x: completion structure
4441 *
4442 * Returns: 0 if a decrement cannot be done without blocking
4443 * 1 if a decrement succeeded.
4444 *
4445 * If a completion is being used as a counting completion,
4446 * attempt to decrement the counter without blocking. This
4447 * enables us to avoid waiting if the resource the completion
4448 * is protecting is not available.
4449 */
4450bool try_wait_for_completion(struct completion *x)
4451{
4452    unsigned long flags;
4453    int ret = 1;
4454
4455    spin_lock_irqsave(&x->wait.lock, flags);
4456    if (!x->done)
4457        ret = 0;
4458    else
4459        x->done--;
4460    spin_unlock_irqrestore(&x->wait.lock, flags);
4461    return ret;
4462}
4463EXPORT_SYMBOL(try_wait_for_completion);
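/*
 * Sketch of the counting-completion pattern described above
 * (illustrative only): each complete() donates one resource, each
 * successful try_wait_for_completion() consumes one without ever
 * blocking. Names are hypothetical.
 */
#if 0
static DECLARE_COMPLETION(demo_slots);

static bool demo_try_get_slot(void)
{
    return try_wait_for_completion(&demo_slots); /* false: would block */
}

static void demo_put_slot(void)
{
    complete(&demo_slots);    /* x->done++, wakes a waiter if any */
}
#endif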
4464
4465/**
4466 * completion_done - Test to see if a completion has any waiters
4467 * @x: completion structure
4468 *
4469 * Returns: 0 if there are waiters (wait_for_completion() in progress)
4470 * 1 if there are no waiters.
4471 *
4472 */
4473bool completion_done(struct completion *x)
4474{
4475    unsigned long flags;
4476    int ret = 1;
4477
4478    spin_lock_irqsave(&x->wait.lock, flags);
4479    if (!x->done)
4480        ret = 0;
4481    spin_unlock_irqrestore(&x->wait.lock, flags);
4482    return ret;
4483}
4484EXPORT_SYMBOL(completion_done);
4485
4486static long __sched
4487sleep_on_common(wait_queue_head_t *q, int state, long timeout)
4488{
4489    unsigned long flags;
4490    wait_queue_t wait;
4491
4492    init_waitqueue_entry(&wait, current);
4493
4494    __set_current_state(state);
4495
4496    spin_lock_irqsave(&q->lock, flags);
4497    __add_wait_queue(q, &wait);
4498    spin_unlock(&q->lock);
4499    timeout = schedule_timeout(timeout);
4500    spin_lock_irq(&q->lock);
4501    __remove_wait_queue(q, &wait);
4502    spin_unlock_irqrestore(&q->lock, flags);
4503
4504    return timeout;
4505}
4506
4507void __sched interruptible_sleep_on(wait_queue_head_t *q)
4508{
4509    sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4510}
4511EXPORT_SYMBOL(interruptible_sleep_on);
4512
4513long __sched
4514interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
4515{
4516    return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
4517}
4518EXPORT_SYMBOL(interruptible_sleep_on_timeout);
4519
4520void __sched sleep_on(wait_queue_head_t *q)
4521{
4522    sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
4523}
4524EXPORT_SYMBOL(sleep_on);
4525
4526long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
4527{
4528    return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
4529}
4530EXPORT_SYMBOL(sleep_on_timeout);
4531
4532#ifdef CONFIG_RT_MUTEXES
4533
4534/*
4535 * rt_mutex_setprio - set the current priority of a task
4536 * @p: task
4537 * @prio: prio value (kernel-internal form)
4538 *
4539 * This function changes the 'effective' priority of a task. It does
4540 * not touch ->normal_prio like __setscheduler().
4541 *
4542 * Used by the rt_mutex code to implement priority inheritance logic.
4543 */
4544void rt_mutex_setprio(struct task_struct *p, int prio)
4545{
4546    unsigned long flags;
4547    int oldprio, on_rq, running;
4548    struct rq *rq;
4549    const struct sched_class *prev_class;
4550
4551    BUG_ON(prio < 0 || prio > MAX_PRIO);
4552
4553    rq = task_rq_lock(p, &flags);
4554
4555    trace_sched_pi_setprio(p, prio);
4556    oldprio = p->prio;
4557    prev_class = p->sched_class;
4558    on_rq = p->se.on_rq;
4559    running = task_current(rq, p);
4560    if (on_rq)
4561        dequeue_task(rq, p, 0);
4562    if (running)
4563        p->sched_class->put_prev_task(rq, p);
4564
4565    if (rt_prio(prio))
4566        p->sched_class = &rt_sched_class;
4567    else
4568        p->sched_class = &fair_sched_class;
4569
4570    p->prio = prio;
4571
4572    if (running)
4573        p->sched_class->set_curr_task(rq);
4574    if (on_rq) {
4575        enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
4576
4577        check_class_changed(rq, p, prev_class, oldprio, running);
4578    }
4579    task_rq_unlock(rq, &flags);
4580}
4581
4582#endif
4583
4584void set_user_nice(struct task_struct *p, long nice)
4585{
4586    int old_prio, delta, on_rq;
4587    unsigned long flags;
4588    struct rq *rq;
4589
4590    if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
4591        return;
4592    /*
4593     * We have to be careful, if called from sys_setpriority(),
4594     * the task might be in the middle of scheduling on another CPU.
4595     */
4596    rq = task_rq_lock(p, &flags);
4597    /*
4598     * The RT priorities are set via sched_setscheduler(), but we still
4599     * allow the 'normal' nice value to be set - but as expected
4600     * it won't have any effect on scheduling as long as the task
4601     * remains SCHED_FIFO/SCHED_RR:
4602     */
4603    if (task_has_rt_policy(p)) {
4604        p->static_prio = NICE_TO_PRIO(nice);
4605        goto out_unlock;
4606    }
4607    on_rq = p->se.on_rq;
4608    if (on_rq)
4609        dequeue_task(rq, p, 0);
4610
4611    p->static_prio = NICE_TO_PRIO(nice);
4612    set_load_weight(p);
4613    old_prio = p->prio;
4614    p->prio = effective_prio(p);
4615    delta = p->prio - old_prio;
4616
4617    if (on_rq) {
4618        enqueue_task(rq, p, 0);
4619        /*
4620         * If the task increased its priority or is running and
4621         * lowered its priority, then reschedule its CPU:
4622         */
4623        if (delta < 0 || (delta > 0 && task_running(rq, p)))
4624            resched_task(rq->curr);
4625    }
4626out_unlock:
4627    task_rq_unlock(rq, &flags);
4628}
4629EXPORT_SYMBOL(set_user_nice);
4630
4631/*
4632 * can_nice - check if a task can reduce its nice value
4633 * @p: task
4634 * @nice: nice value
4635 */
4636int can_nice(const struct task_struct *p, const int nice)
4637{
4638    /* convert nice value [19,-20] to rlimit style value [1,40] */
4639    int nice_rlim = 20 - nice;
4640
4641    return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
4642        capable(CAP_SYS_NICE));
4643}
4644
4645#ifdef __ARCH_WANT_SYS_NICE
4646
4647/*
4648 * sys_nice - change the priority of the current process.
4649 * @increment: priority increment
4650 *
4651 * sys_setpriority is a more generic, but much slower function that
4652 * does similar things.
4653 */
4654SYSCALL_DEFINE1(nice, int, increment)
4655{
4656    long nice, retval;
4657
4658    /*
4659     * Setpriority might change our priority at the same moment.
4660     * We don't have to worry. Conceptually one call occurs first
4661     * and we have a single winner.
4662     */
4663    if (increment < -40)
4664        increment = -40;
4665    if (increment > 40)
4666        increment = 40;
4667
4668    nice = TASK_NICE(current) + increment;
4669    if (nice < -20)
4670        nice = -20;
4671    if (nice > 19)
4672        nice = 19;
4673
4674    if (increment < 0 && !can_nice(current, nice))
4675        return -EPERM;
4676
4677    retval = security_task_setnice(current, nice);
4678    if (retval)
4679        return retval;
4680
4681    set_user_nice(current, nice);
4682    return 0;
4683}
4684
4685#endif
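/*
 * Userspace view of the syscall above (illustrative sketch, built
 * against libc outside the kernel): raising the nice value is always
 * permitted, while lowering it needs RLIMIT_NICE headroom or
 * CAP_SYS_NICE, mirroring the can_nice() check.
 */
#if 0    /* userspace example, not kernel code */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/resource.h>

int main(void)
{
    errno = 0;
    if (nice(5) == -1 && errno)    /* de-prioritize ourselves */
        perror("nice");
    printf("nice is now %d\n", getpriority(PRIO_PROCESS, 0));
    return 0;
}
#endif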
4686
4687/**
4688 * task_prio - return the priority value of a given task.
4689 * @p: the task in question.
4690 *
4691 * This is the priority value as seen by users in /proc.
4692 * RT tasks map to the negative range [-100 ... -1]. Normal
4693 * tasks map to [0 ... 39], with nice 0 landing at 20.
4694 */
4695int task_prio(const struct task_struct *p)
4696{
4697    return p->prio - MAX_RT_PRIO;
4698}
4699
4700/**
4701 * task_nice - return the nice value of a given task.
4702 * @p: the task in question.
4703 */
4704int task_nice(const struct task_struct *p)
4705{
4706    return TASK_NICE(p);
4707}
4708EXPORT_SYMBOL(task_nice);
4709
4710/**
4711 * idle_cpu - is a given cpu idle currently?
4712 * @cpu: the processor in question.
4713 */
4714int idle_cpu(int cpu)
4715{
4716    return cpu_curr(cpu) == cpu_rq(cpu)->idle;
4717}
4718
4719/**
4720 * idle_task - return the idle task for a given cpu.
4721 * @cpu: the processor in question.
4722 */
4723struct task_struct *idle_task(int cpu)
4724{
4725    return cpu_rq(cpu)->idle;
4726}
4727
4728/**
4729 * find_process_by_pid - find a process with a matching PID value.
4730 * @pid: the pid in question.
4731 */
4732static struct task_struct *find_process_by_pid(pid_t pid)
4733{
4734    return pid ? find_task_by_vpid(pid) : current;
4735}
4736
4737/* Actually do priority change: must hold rq lock. */
4738static void
4739__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
4740{
4741    BUG_ON(p->se.on_rq);
4742
4743    p->policy = policy;
4744    p->rt_priority = prio;
4745    p->normal_prio = normal_prio(p);
4746    /* we are holding p->pi_lock already */
4747    p->prio = rt_mutex_getprio(p);
4748    if (rt_prio(p->prio))
4749        p->sched_class = &rt_sched_class;
4750    else
4751        p->sched_class = &fair_sched_class;
4752    set_load_weight(p);
4753}
4754
4755/*
4756 * check that the target process has a UID that matches the current process's
4757 */
4758static bool check_same_owner(struct task_struct *p)
4759{
4760    const struct cred *cred = current_cred(), *pcred;
4761    bool match;
4762
4763    rcu_read_lock();
4764    pcred = __task_cred(p);
4765    match = (cred->euid == pcred->euid ||
4766         cred->euid == pcred->uid);
4767    rcu_read_unlock();
4768    return match;
4769}
4770
4771static int __sched_setscheduler(struct task_struct *p, int policy,
4772                const struct sched_param *param, bool user)
4773{
4774    int retval, oldprio, oldpolicy = -1, on_rq, running;
4775    unsigned long flags;
4776    const struct sched_class *prev_class;
4777    struct rq *rq;
4778    int reset_on_fork;
4779
4780    /* may grab non-irq protected spin_locks */
4781    BUG_ON(in_interrupt());
4782recheck:
4783    /* double check policy once rq lock held */
4784    if (policy < 0) {
4785        reset_on_fork = p->sched_reset_on_fork;
4786        policy = oldpolicy = p->policy;
4787    } else {
4788        reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
4789        policy &= ~SCHED_RESET_ON_FORK;
4790
4791        if (policy != SCHED_FIFO && policy != SCHED_RR &&
4792                policy != SCHED_NORMAL && policy != SCHED_BATCH &&
4793                policy != SCHED_IDLE)
4794            return -EINVAL;
4795    }
4796
4797    /*
4798     * Valid priorities for SCHED_FIFO and SCHED_RR are
4799     * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
4800     * SCHED_BATCH and SCHED_IDLE is 0.
4801     */
4802    if (param->sched_priority < 0 ||
4803        (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
4804        (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
4805        return -EINVAL;
4806    if (rt_policy(policy) != (param->sched_priority != 0))
4807        return -EINVAL;
4808
4809    /*
4810     * Allow unprivileged RT tasks to decrease priority:
4811     */
4812    if (user && !capable(CAP_SYS_NICE)) {
4813        if (rt_policy(policy)) {
4814            unsigned long rlim_rtprio =
4815                    task_rlimit(p, RLIMIT_RTPRIO);
4816
4817            /* can't set/change the rt policy */
4818            if (policy != p->policy && !rlim_rtprio)
4819                return -EPERM;
4820
4821            /* can't increase priority */
4822            if (param->sched_priority > p->rt_priority &&
4823                param->sched_priority > rlim_rtprio)
4824                return -EPERM;
4825        }
4826        /*
4827         * Like positive nice levels, don't allow tasks to
4828         * move out of SCHED_IDLE either:
4829         */
4830        if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
4831            return -EPERM;
4832
4833        /* can't change other user's priorities */
4834        if (!check_same_owner(p))
4835            return -EPERM;
4836
4837        /* Normal users shall not reset the sched_reset_on_fork flag */
4838        if (p->sched_reset_on_fork && !reset_on_fork)
4839            return -EPERM;
4840    }
4841
4842    if (user) {
4843        retval = security_task_setscheduler(p);
4844        if (retval)
4845            return retval;
4846    }
4847
4848    /*
4849     * make sure no PI-waiters arrive (or leave) while we are
4850     * changing the priority of the task:
4851     */
4852    raw_spin_lock_irqsave(&p->pi_lock, flags);
4853    /*
4854     * To be able to change p->policy safely, the appropriate
4855     * runqueue lock must be held.
4856     */
4857    rq = __task_rq_lock(p);
4858
4859    /*
4860     * Changing the policy of the stop thread is a very bad idea
4861     */
4862    if (p == rq->stop) {
4863        __task_rq_unlock(rq);
4864        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4865        return -EINVAL;
4866    }
4867
4868#ifdef CONFIG_RT_GROUP_SCHED
4869    if (user) {
4870        /*
4871         * Do not allow realtime tasks into groups that have no runtime
4872         * assigned.
4873         */
4874        if (rt_bandwidth_enabled() && rt_policy(policy) &&
4875                task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4876                !task_group_is_autogroup(task_group(p))) {
4877            __task_rq_unlock(rq);
4878            raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4879            return -EPERM;
4880        }
4881    }
4882#endif
4883
4884    /* recheck policy now with rq lock held */
4885    if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4886        policy = oldpolicy = -1;
4887        __task_rq_unlock(rq);
4888        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4889        goto recheck;
4890    }
4891    on_rq = p->se.on_rq;
4892    running = task_current(rq, p);
4893    if (on_rq)
4894        deactivate_task(rq, p, 0);
4895    if (running)
4896        p->sched_class->put_prev_task(rq, p);
4897
4898    p->sched_reset_on_fork = reset_on_fork;
4899
4900    oldprio = p->prio;
4901    prev_class = p->sched_class;
4902    __setscheduler(rq, p, policy, param->sched_priority);
4903
4904    if (running)
4905        p->sched_class->set_curr_task(rq);
4906    if (on_rq) {
4907        activate_task(rq, p, 0);
4908
4909        check_class_changed(rq, p, prev_class, oldprio, running);
4910    }
4911    __task_rq_unlock(rq);
4912    raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4913
4914    rt_mutex_adjust_pi(p);
4915
4916    return 0;
4917}
4918
4919/**
4920 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4921 * @p: the task in question.
4922 * @policy: new policy.
4923 * @param: structure containing the new RT priority.
4924 *
4925 * NOTE that the task may already be dead.
4926 */
4927int sched_setscheduler(struct task_struct *p, int policy,
4928               const struct sched_param *param)
4929{
4930    return __sched_setscheduler(p, policy, param, true);
4931}
4932EXPORT_SYMBOL_GPL(sched_setscheduler);
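/*
 * Userspace counterpart (illustrative sketch): switching the calling
 * thread to SCHED_FIFO through the syscall path below. This needs
 * CAP_SYS_NICE or RLIMIT_RTPRIO headroom, exactly the rules enforced
 * in __sched_setscheduler() above.
 */
#if 0    /* userspace example, not kernel code */
#include <sched.h>
#include <stdio.h>

int main(void)
{
    struct sched_param sp = { .sched_priority = 10 };

    if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
        perror("sched_setscheduler");
        return 1;
    }
    printf("policy is now %d\n", sched_getscheduler(0));
    return 0;
}
#endif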
4933
4934/**
4935 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4936 * @p: the task in question.
4937 * @policy: new policy.
4938 * @param: structure containing the new RT priority.
4939 *
4940 * Just like sched_setscheduler, only don't bother checking if the
4941 * current context has permission. For example, this is needed in
4942 * stop_machine(): we create temporary high priority worker threads,
4943 * but our caller might not have that capability.
4944 */
4945int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4946                   const struct sched_param *param)
4947{
4948    return __sched_setscheduler(p, policy, param, false);
4949}
4950
4951static int
4952do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4953{
4954    struct sched_param lparam;
4955    struct task_struct *p;
4956    int retval;
4957
4958    if (!param || pid < 0)
4959        return -EINVAL;
4960    if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4961        return -EFAULT;
4962
4963    rcu_read_lock();
4964    retval = -ESRCH;
4965    p = find_process_by_pid(pid);
4966    if (p != NULL)
4967        retval = sched_setscheduler(p, policy, &lparam);
4968    rcu_read_unlock();
4969
4970    return retval;
4971}
4972
4973/**
4974 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4975 * @pid: the pid in question.
4976 * @policy: new policy.
4977 * @param: structure containing the new RT priority.
4978 */
4979SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4980        struct sched_param __user *, param)
4981{
4982    /* negative values for policy are not valid */
4983    if (policy < 0)
4984        return -EINVAL;
4985
4986    return do_sched_setscheduler(pid, policy, param);
4987}
4988
4989/**
4990 * sys_sched_setparam - set/change the RT priority of a thread
4991 * @pid: the pid in question.
4992 * @param: structure containing the new RT priority.
4993 */
4994SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4995{
4996    return do_sched_setscheduler(pid, -1, param);
4997}
4998
4999/**
5000 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
5001 * @pid: the pid in question.
5002 */
5003SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
5004{
5005    struct task_struct *p;
5006    int retval;
5007
5008    if (pid < 0)
5009        return -EINVAL;
5010
5011    retval = -ESRCH;
5012    rcu_read_lock();
5013    p = find_process_by_pid(pid);
5014    if (p) {
5015        retval = security_task_getscheduler(p);
5016        if (!retval)
5017            retval = p->policy
5018                | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
5019    }
5020    rcu_read_unlock();
5021    return retval;
5022}
5023
5024/**
5025 * sys_sched_getparam - get the RT priority of a thread
5026 * @pid: the pid in question.
5027 * @param: structure containing the RT priority.
5028 */
5029SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
5030{
5031    struct sched_param lp;
5032    struct task_struct *p;
5033    int retval;
5034
5035    if (!param || pid < 0)
5036        return -EINVAL;
5037
5038    rcu_read_lock();
5039    p = find_process_by_pid(pid);
5040    retval = -ESRCH;
5041    if (!p)
5042        goto out_unlock;
5043
5044    retval = security_task_getscheduler(p);
5045    if (retval)
5046        goto out_unlock;
5047
5048    lp.sched_priority = p->rt_priority;
5049    rcu_read_unlock();
5050
5051    /*
5052     * This one might sleep; we cannot do it with a spinlock held ...
5053     */
5054    retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
5055
5056    return retval;
5057
5058out_unlock:
5059    rcu_read_unlock();
5060    return retval;
5061}
5062
5063long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
5064{
5065    cpumask_var_t cpus_allowed, new_mask;
5066    struct task_struct *p;
5067    int retval;
5068
5069    get_online_cpus();
5070    rcu_read_lock();
5071
5072    p = find_process_by_pid(pid);
5073    if (!p) {
5074        rcu_read_unlock();
5075        put_online_cpus();
5076        return -ESRCH;
5077    }
5078
5079    /* Prevent p going away */
5080    get_task_struct(p);
5081    rcu_read_unlock();
5082
5083    if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
5084        retval = -ENOMEM;
5085        goto out_put_task;
5086    }
5087    if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
5088        retval = -ENOMEM;
5089        goto out_free_cpus_allowed;
5090    }
5091    retval = -EPERM;
5092    if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
5093        goto out_unlock;
5094
5095    retval = security_task_setscheduler(p);
5096    if (retval)
5097        goto out_unlock;
5098
5099    cpuset_cpus_allowed(p, cpus_allowed);
5100    cpumask_and(new_mask, in_mask, cpus_allowed);
5101again:
5102    retval = set_cpus_allowed_ptr(p, new_mask);
5103
5104    if (!retval) {
5105        cpuset_cpus_allowed(p, cpus_allowed);
5106        if (!cpumask_subset(new_mask, cpus_allowed)) {
5107            /*
5108             * We must have raced with a concurrent cpuset
5109             * update. Just reset the cpus_allowed to the
5110             * cpuset's cpus_allowed
5111             */
5112            cpumask_copy(new_mask, cpus_allowed);
5113            goto again;
5114        }
5115    }
5116out_unlock:
5117    free_cpumask_var(new_mask);
5118out_free_cpus_allowed:
5119    free_cpumask_var(cpus_allowed);
5120out_put_task:
5121    put_task_struct(p);
5122    put_online_cpus();
5123    return retval;
5124}
5125
5126static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
5127                 struct cpumask *new_mask)
5128{
5129    if (len < cpumask_size())
5130        cpumask_clear(new_mask);
5131    else if (len > cpumask_size())
5132        len = cpumask_size();
5133
5134    return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
5135}
5136
5137/**
5138 * sys_sched_setaffinity - set the cpu affinity of a process
5139 * @pid: pid of the process
5140 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5141 * @user_mask_ptr: user-space pointer to the new cpu mask
5142 */
5143SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
5144        unsigned long __user *, user_mask_ptr)
5145{
5146    cpumask_var_t new_mask;
5147    int retval;
5148
5149    if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
5150        return -ENOMEM;
5151
5152    retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
5153    if (retval == 0)
5154        retval = sched_setaffinity(pid, new_mask);
5155    free_cpumask_var(new_mask);
5156    return retval;
5157}
5158
5159long sched_getaffinity(pid_t pid, struct cpumask *mask)
5160{
5161    struct task_struct *p;
5162    unsigned long flags;
5163    struct rq *rq;
5164    int retval;
5165
5166    get_online_cpus();
5167    rcu_read_lock();
5168
5169    retval = -ESRCH;
5170    p = find_process_by_pid(pid);
5171    if (!p)
5172        goto out_unlock;
5173
5174    retval = security_task_getscheduler(p);
5175    if (retval)
5176        goto out_unlock;
5177
5178    rq = task_rq_lock(p, &flags);
5179    cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
5180    task_rq_unlock(rq, &flags);
5181
5182out_unlock:
5183    rcu_read_unlock();
5184    put_online_cpus();
5185
5186    return retval;
5187}
5188
5189/**
5190 * sys_sched_getaffinity - get the cpu affinity of a process
5191 * @pid: pid of the process
5192 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
5193 * @user_mask_ptr: user-space pointer to hold the current cpu mask
5194 */
5195SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
5196        unsigned long __user *, user_mask_ptr)
5197{
5198    int ret;
5199    cpumask_var_t mask;
5200
5201    if ((len * BITS_PER_BYTE) < nr_cpu_ids)
5202        return -EINVAL;
5203    if (len & (sizeof(unsigned long)-1))
5204        return -EINVAL;
5205
5206    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
5207        return -ENOMEM;
5208
5209    ret = sched_getaffinity(pid, mask);
5210    if (ret == 0) {
5211        size_t retlen = min_t(size_t, len, cpumask_size());
5212
5213        if (copy_to_user(user_mask_ptr, mask, retlen))
5214            ret = -EFAULT;
5215        else
5216            ret = retlen;
5217    }
5218    free_cpumask_var(mask);
5219
5220    return ret;
5221}
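/*
 * Userspace counterpart (illustrative sketch): pin the calling
 * process to CPU 0 and read the mask back through the two syscalls
 * above.
 */
#if 0    /* userspace example, not kernel code */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
    cpu_set_t set;

    CPU_ZERO(&set);
    CPU_SET(0, &set);
    if (sched_setaffinity(0, sizeof(set), &set) == -1) {
        perror("sched_setaffinity");
        return 1;
    }
    if (sched_getaffinity(0, sizeof(set), &set) == -1) {
        perror("sched_getaffinity");
        return 1;
    }
    printf("bound to CPU0: %d\n", CPU_ISSET(0, &set));
    return 0;
}
#endif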
5222
5223/**
5224 * sys_sched_yield - yield the current processor to other threads.
5225 *
5226 * This function yields the current CPU to other tasks. If there are no
5227 * other runnable threads on this CPU, the calling task simply keeps running.
5228 */
5229SYSCALL_DEFINE0(sched_yield)
5230{
5231    struct rq *rq = this_rq_lock();
5232
5233    schedstat_inc(rq, yld_count);
5234    current->sched_class->yield_task(rq);
5235
5236    /*
5237     * Since we are going to call schedule() anyway, there's
5238     * no need to preempt or enable interrupts:
5239     */
5240    __release(rq->lock);
5241    spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
5242    do_raw_spin_unlock(&rq->lock);
5243    preempt_enable_no_resched();
5244
5245    schedule();
5246
5247    return 0;
5248}
5249
5250static inline int should_resched(void)
5251{
5252    return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
5253}
5254
5255static void __cond_resched(void)
5256{
5257    add_preempt_count(PREEMPT_ACTIVE);
5258    schedule();
5259    sub_preempt_count(PREEMPT_ACTIVE);
5260}
5261
5262int __sched _cond_resched(void)
5263{
5264    if (should_resched()) {
5265        __cond_resched();
5266        return 1;
5267    }
5268    return 0;
5269}
5270EXPORT_SYMBOL(_cond_resched);
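/*
 * Usage sketch (illustrative only): long kernel-side loops sprinkle
 * cond_resched() -- which funnels into _cond_resched() above -- so
 * that even a non-preemptible kernel yields at safe points. The work
 * items and helpers are hypothetical.
 */
#if 0
static void demo_process_many(struct demo_item *items, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        demo_process_one(&items[i]);
        cond_resched();    /* reschedule here if need_resched() */
    }
}
#endif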
5271
5272/*
5273 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
5274 * call schedule, and on return reacquire the lock.
5275 *
5276 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
5277 * operations here to prevent schedule() from being called twice (once via
5278 * spin_unlock(), once by hand).
5279 */
5280int __cond_resched_lock(spinlock_t *lock)
5281{
5282    int resched = should_resched();
5283    int ret = 0;
5284
5285    lockdep_assert_held(lock);
5286
5287    if (spin_needbreak(lock) || resched) {
5288        spin_unlock(lock);
5289        if (resched)
5290            __cond_resched();
5291        else
5292            cpu_relax();
5293        ret = 1;
5294        spin_lock(lock);
5295    }
5296    return ret;
5297}
5298EXPORT_SYMBOL(__cond_resched_lock);
5299
5300int __sched __cond_resched_softirq(void)
5301{
5302    BUG_ON(!in_softirq());
5303
5304    if (should_resched()) {
5305        local_bh_enable();
5306        __cond_resched();
5307        local_bh_disable();
5308        return 1;
5309    }
5310    return 0;
5311}
5312EXPORT_SYMBOL(__cond_resched_softirq);
5313
5314/**
5315 * yield - yield the current processor to other threads.
5316 *
5317 * This is a shortcut for kernel-space yielding - it marks the
5318 * thread runnable and calls sys_sched_yield().
5319 */
5320void __sched yield(void)
5321{
5322    set_current_state(TASK_RUNNING);
5323    sys_sched_yield();
5324}
5325EXPORT_SYMBOL(yield);
5326
5327/*
5328 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
5329 * that process accounting knows that this is a task in IO wait state.
5330 */
5331void __sched io_schedule(void)
5332{
5333    struct rq *rq = raw_rq();
5334
5335    delayacct_blkio_start();
5336    atomic_inc(&rq->nr_iowait);
5337    current->in_iowait = 1;
5338    schedule();
5339    current->in_iowait = 0;
5340    atomic_dec(&rq->nr_iowait);
5341    delayacct_blkio_end();
5342}
5343EXPORT_SYMBOL(io_schedule);
5344
5345long __sched io_schedule_timeout(long timeout)
5346{
5347    struct rq *rq = raw_rq();
5348    long ret;
5349
5350    delayacct_blkio_start();
5351    atomic_inc(&rq->nr_iowait);
5352    current->in_iowait = 1;
5353    ret = schedule_timeout(timeout);
5354    current->in_iowait = 0;
5355    atomic_dec(&rq->nr_iowait);
5356    delayacct_blkio_end();
5357    return ret;
5358}
5359
5360/**
5361 * sys_sched_get_priority_max - return maximum RT priority.
5362 * @policy: scheduling class.
5363 *
5364 * this syscall returns the maximum rt_priority that can be used
5365 * by a given scheduling class.
5366 */
5367SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
5368{
5369    int ret = -EINVAL;
5370
5371    switch (policy) {
5372    case SCHED_FIFO:
5373    case SCHED_RR:
5374        ret = MAX_USER_RT_PRIO-1;
5375        break;
5376    case SCHED_NORMAL:
5377    case SCHED_BATCH:
5378    case SCHED_IDLE:
5379        ret = 0;
5380        break;
5381    }
5382    return ret;
5383}
5384
5385/**
5386 * sys_sched_get_priority_min - return minimum RT priority.
5387 * @policy: scheduling class.
5388 *
5389 * this syscall returns the minimum rt_priority that can be used
5390 * by a given scheduling class.
5391 */
5392SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
5393{
5394    int ret = -EINVAL;
5395
5396    switch (policy) {
5397    case SCHED_FIFO:
5398    case SCHED_RR:
5399        ret = 1;
5400        break;
5401    case SCHED_NORMAL:
5402    case SCHED_BATCH:
5403    case SCHED_IDLE:
5404        ret = 0;
5405    }
5406    return ret;
5407}
5408
5409/**
5410 * sys_sched_rr_get_interval - return the default timeslice of a process.
5411 * @pid: pid of the process.
5412 * @interval: userspace pointer to the timeslice value.
5413 *
5414 * this syscall writes the default timeslice value of a given process
5415 * into the user-space timespec buffer. A value of '0' means infinity.
5416 */
5417SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
5418        struct timespec __user *, interval)
5419{
5420    struct task_struct *p;
5421    unsigned int time_slice;
5422    unsigned long flags;
5423    struct rq *rq;
5424    int retval;
5425    struct timespec t;
5426
5427    if (pid < 0)
5428        return -EINVAL;
5429
5430    retval = -ESRCH;
5431    rcu_read_lock();
5432    p = find_process_by_pid(pid);
5433    if (!p)
5434        goto out_unlock;
5435
5436    retval = security_task_getscheduler(p);
5437    if (retval)
5438        goto out_unlock;
5439
5440    rq = task_rq_lock(p, &flags);
5441    time_slice = p->sched_class->get_rr_interval(rq, p);
5442    task_rq_unlock(rq, &flags);
5443
5444    rcu_read_unlock();
5445    jiffies_to_timespec(time_slice, &t);
5446    retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
5447    return retval;
5448
5449out_unlock:
5450    rcu_read_unlock();
5451    return retval;
5452}
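/*
 * Userspace counterpart (illustrative sketch): query the calling
 * process's timeslice. For SCHED_RR tasks this is the round-robin
 * quantum; in this kernel, SCHED_OTHER tasks report their computed
 * fair-share slice instead.
 */
#if 0    /* userspace example, not kernel code */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec ts;

    if (sched_rr_get_interval(0, &ts) == -1) {
        perror("sched_rr_get_interval");
        return 1;
    }
    printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
    return 0;
}
#endif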
5453
5454static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
5455
5456void sched_show_task(struct task_struct *p)
5457{
5458    unsigned long free = 0;
5459    unsigned state;
5460
5461    state = p->state ? __ffs(p->state) + 1 : 0;
5462    printk(KERN_INFO "%-15.15s %c", p->comm,
5463        state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
5464#if BITS_PER_LONG == 32
5465    if (state == TASK_RUNNING)
5466        printk(KERN_CONT " running ");
5467    else
5468        printk(KERN_CONT " %08lx ", thread_saved_pc(p));
5469#else
5470    if (state == TASK_RUNNING)
5471        printk(KERN_CONT " running task ");
5472    else
5473        printk(KERN_CONT " %016lx ", thread_saved_pc(p));
5474#endif
5475#ifdef CONFIG_DEBUG_STACK_USAGE
5476    free = stack_not_used(p);
5477#endif
5478    printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
5479        task_pid_nr(p), task_pid_nr(p->real_parent),
5480        (unsigned long)task_thread_info(p)->flags);
5481
5482    show_stack(p, NULL);
5483}
5484
5485void show_state_filter(unsigned long state_filter)
5486{
5487    struct task_struct *g, *p;
5488
5489#if BITS_PER_LONG == 32
5490    printk(KERN_INFO
5491        "  task                PC stack   pid father\n");
5492#else
5493    printk(KERN_INFO
5494        "  task                        PC stack   pid father\n");
5495#endif
5496    read_lock(&tasklist_lock);
5497    do_each_thread(g, p) {
5498        /*
5499         * reset the NMI-timeout, listing all tasks on a slow
5500         * console might take a lot of time:
5501         */
5502        touch_nmi_watchdog();
5503        if (!state_filter || (p->state & state_filter))
5504            sched_show_task(p);
5505    } while_each_thread(g, p);
5506
5507    touch_all_softlockup_watchdogs();
5508
5509#ifdef CONFIG_SCHED_DEBUG
5510    sysrq_sched_debug_show();
5511#endif
5512    read_unlock(&tasklist_lock);
5513    /*
5514     * Only show locks if all tasks are dumped:
5515     */
5516    if (!state_filter)
5517        debug_show_all_locks();
5518}
5519
5520void __cpuinit init_idle_bootup_task(struct task_struct *idle)
5521{
5522    idle->sched_class = &idle_sched_class;
5523}
5524
5525/**
5526 * init_idle - set up an idle thread for a given CPU
5527 * @idle: task in question
5528 * @cpu: cpu the idle task belongs to
5529 *
5530 * NOTE: this function does not set the idle thread's NEED_RESCHED
5531 * flag, to make booting more robust.
5532 */
5533void __cpuinit init_idle(struct task_struct *idle, int cpu)
5534{
5535    struct rq *rq = cpu_rq(cpu);
5536    unsigned long flags;
5537
5538    raw_spin_lock_irqsave(&rq->lock, flags);
5539
5540    __sched_fork(idle);
5541    idle->state = TASK_RUNNING;
5542    idle->se.exec_start = sched_clock();
5543
5544    cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
5545    /*
5546     * We're having a chicken and egg problem, even though we are
5547     * holding rq->lock, the cpu isn't yet set to this cpu so the
5548     * lockdep check in task_group() will fail.
5549     *
5550     * Similar case to sched_fork(). Alternatively we could
5551     * use task_rq_lock() here and obtain the other rq->lock.
5552     *
5553     * Silence PROVE_RCU
5554     */
5555    rcu_read_lock();
5556    __set_task_cpu(idle, cpu);
5557    rcu_read_unlock();
5558
5559    rq->curr = rq->idle = idle;
5560#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
5561    idle->oncpu = 1;
5562#endif
5563    raw_spin_unlock_irqrestore(&rq->lock, flags);
5564
5565    /* Set the preempt count _outside_ the spinlocks! */
5566#if defined(CONFIG_PREEMPT)
5567    task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
5568#else
5569    task_thread_info(idle)->preempt_count = 0;
5570#endif
5571    /*
5572     * The idle tasks have their own, simple scheduling class:
5573     */
5574    idle->sched_class = &idle_sched_class;
5575    ftrace_graph_init_task(idle);
5576}
5577
5578/*
5579 * In a system that switches off the HZ timer nohz_cpu_mask
5580 * indicates which cpus entered this state. This is used
5581 * in the rcu update to wait only for active cpus. For systems
5582 * that do not switch off the HZ timer, nohz_cpu_mask should
5583 * always be CPU_BITS_NONE.
5584 */
5585cpumask_var_t nohz_cpu_mask;
5586
5587/*
5588 * Increase the granularity value when there are more CPUs,
5589 * because with more CPUs the 'effective latency' as visible
5590 * to users decreases. But the relationship is not linear,
5591 * so pick a second-best guess by going with the log2 of the
5592 * number of CPUs.
5593 *
5594 * This idea comes from the SD scheduler of Con Kolivas:
5595 */
5596static int get_update_sysctl_factor(void)
5597{
5598    unsigned int cpus = min_t(int, num_online_cpus(), 8);
5599    unsigned int factor;
5600
5601    switch (sysctl_sched_tunable_scaling) {
5602    case SCHED_TUNABLESCALING_NONE:
5603        factor = 1;
5604        break;
5605    case SCHED_TUNABLESCALING_LINEAR:
5606        factor = cpus;
5607        break;
5608    case SCHED_TUNABLESCALING_LOG:
5609    default:
5610        factor = 1 + ilog2(cpus);
5611        break;
5612    }
5613
5614    return factor;
5615}
5616
5617static void update_sysctl(void)
5618{
5619    unsigned int factor = get_update_sysctl_factor();
5620
5621#define SET_SYSCTL(name) \
5622    (sysctl_##name = (factor) * normalized_sysctl_##name)
5623    SET_SYSCTL(sched_min_granularity);
5624    SET_SYSCTL(sched_latency);
5625    SET_SYSCTL(sched_wakeup_granularity);
5626#undef SET_SYSCTL
5627}
5628
5629static inline void sched_init_granularity(void)
5630{
5631    update_sysctl();
5632}
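/*
 * Worked example (illustrative numbers, assuming the documented 6ms
 * default for normalized_sysctl_sched_latency): with the default
 * SCHED_TUNABLESCALING_LOG policy on a 4-CPU machine the factor is
 * 1 + ilog2(4) = 3, so sched_latency is scaled to 18ms.  Because the
 * cpu count is clamped to 8 in get_update_sysctl_factor(), the factor
 * never exceeds 1 + ilog2(8) = 4 (24ms).
 */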
5633
5634#ifdef CONFIG_SMP
5635/*
5636 * This is how migration works:
5637 *
5638 * 1) we invoke migration_cpu_stop() on the target CPU using
5639 * stop_one_cpu().
5640 * 2) stopper starts to run (implicitly forcing the migrated thread
5641 * off the CPU)
5642 * 3) it checks whether the migrated task is still in the wrong runqueue.
5643 * 4) if it's in the wrong runqueue then the migration thread removes
5644 * it and puts it into the right queue.
5645 * 5) stopper completes and stop_one_cpu() returns and the migration
5646 * is done.
5647 */
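/*
 * Mapping the steps above onto this file: step 1 is the
 * stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg) call in
 * set_cpus_allowed_ptr() below, and steps 3-4 are implemented by
 * __migrate_task(), which migration_cpu_stop() invokes.
 */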
5648
5649/*
5650 * Change a given task's CPU affinity. Migrate the thread to a
5651 * proper CPU and schedule it away if the CPU it's executing on
5652 * is removed from the allowed bitmask.
5653 *
5654 * NOTE: the caller must have a valid reference to the task, the
5655 * task must not exit() & deallocate itself prematurely. The
5656 * call is not atomic; no spinlocks may be held.
5657 */
5658int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5659{
5660    unsigned long flags;
5661    struct rq *rq;
5662    unsigned int dest_cpu;
5663    int ret = 0;
5664
5665    /*
5666     * Serialize against TASK_WAKING so that ttwu() and wake_up_new_task() can
5667     * drop the rq->lock and still rely on ->cpus_allowed.
5668     */
5669again:
5670    while (task_is_waking(p))
5671        cpu_relax();
5672    rq = task_rq_lock(p, &flags);
5673    if (task_is_waking(p)) {
5674        task_rq_unlock(rq, &flags);
5675        goto again;
5676    }
5677
5678    if (!cpumask_intersects(new_mask, cpu_active_mask)) {
5679        ret = -EINVAL;
5680        goto out;
5681    }
5682
5683    if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
5684             !cpumask_equal(&p->cpus_allowed, new_mask))) {
5685        ret = -EINVAL;
5686        goto out;
5687    }
5688
5689    if (p->sched_class->set_cpus_allowed)
5690        p->sched_class->set_cpus_allowed(p, new_mask);
5691    else {
5692        cpumask_copy(&p->cpus_allowed, new_mask);
5693        p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
5694    }
5695
5696    /* Can the task run on the task's current CPU? If so, we're done */
5697    if (cpumask_test_cpu(task_cpu(p), new_mask))
5698        goto out;
5699
5700    dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
5701    if (migrate_task(p, rq)) {
5702        struct migration_arg arg = { p, dest_cpu };
5703        /* Need help from migration thread: drop lock and wait. */
5704        task_rq_unlock(rq, &flags);
5705        stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
5706        tlb_migrate_finish(p->mm);
5707        return 0;
5708    }
5709out:
5710    task_rq_unlock(rq, &flags);
5711
5712    return ret;
5713}
5714EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
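/*
 * A minimal usage sketch (not part of this file; the helper name is
 * hypothetical, the called APIs are the real ones defined or included
 * here): pinning a task to a single CPU with set_cpus_allowed_ptr().
 */
#if 0
static int example_pin_task(struct task_struct *p, int cpu)
{
	/* Restrict @p to @cpu; migrates it away if currently elsewhere. */
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}
#endif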
5715
5716/*
5717 * Move a (non-current) task off this cpu, onto the dest cpu. We're doing
5718 * this because either it can't run here any more (set_cpus_allowed()
5719 * away from this CPU, or CPU going down), or because we're
5720 * attempting to rebalance this task on exec (sched_exec).
5721 *
5722 * So we race with normal scheduler movements, but that's OK, as long
5723 * as the task is no longer on this CPU.
5724 *
5725 * Returns non-zero if task was successfully migrated.
5726 */
5727static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5728{
5729    struct rq *rq_dest, *rq_src;
5730    int ret = 0;
5731
5732    if (unlikely(!cpu_active(dest_cpu)))
5733        return ret;
5734
5735    rq_src = cpu_rq(src_cpu);
5736    rq_dest = cpu_rq(dest_cpu);
5737
5738    double_rq_lock(rq_src, rq_dest);
5739    /* Already moved. */
5740    if (task_cpu(p) != src_cpu)
5741        goto done;
5742    /* Affinity changed (again). */
5743    if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
5744        goto fail;
5745
5746    /*
5747     * If we're not on a rq, the next wake-up will ensure we're
5748     * placed properly.
5749     */
5750    if (p->se.on_rq) {
5751        deactivate_task(rq_src, p, 0);
5752        set_task_cpu(p, dest_cpu);
5753        activate_task(rq_dest, p, 0);
5754        check_preempt_curr(rq_dest, p, 0);
5755    }
5756done:
5757    ret = 1;
5758fail:
5759    double_rq_unlock(rq_src, rq_dest);
5760    return ret;
5761}
5762
5763/*
5764 * migration_cpu_stop - this will be executed by a highprio stopper thread
5765 * and performs thread migration by bumping thread off CPU then
5766 * 'pushing' onto another runqueue.
5767 */
5768static int migration_cpu_stop(void *data)
5769{
5770    struct migration_arg *arg = data;
5771
5772    /*
5773     * The original target cpu might have gone down and we might
5774     * be on another cpu but it doesn't matter.
5775     */
5776    local_irq_disable();
5777    __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5778    local_irq_enable();
5779    return 0;
5780}
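/*
 * The stop_one_cpu() pattern used above, as a standalone sketch
 * (hypothetical callback and helper names, real stop_one_cpu() API):
 * the callback runs on the target CPU from the highprio stopper
 * thread, with the caller blocked until it returns.
 */
#if 0
static int example_cpu_stop(void *data)
{
	/* Runs after the target CPU's stopper has preempted everything. */
	pr_info("stopper callback on CPU%d\n", raw_smp_processor_id());
	return 0;
}

static void example_run_on(unsigned int cpu)
{
	stop_one_cpu(cpu, example_cpu_stop, NULL);
}
#endif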
5781
5782#ifdef CONFIG_HOTPLUG_CPU
5783
5784/*
5785 * Ensures that the idle task is using init_mm right before its cpu goes
5786 * offline.
5787 */
5788void idle_task_exit(void)
5789{
5790    struct mm_struct *mm = current->active_mm;
5791
5792    BUG_ON(cpu_online(smp_processor_id()));
5793
5794    if (mm != &init_mm)
5795        switch_mm(mm, &init_mm, current);
5796    mmdrop(mm);
5797}
5798
5799/*
5800 * While a dead CPU has no uninterruptible tasks queued at this point,
5801 * it might still have a nonzero ->nr_uninterruptible counter, because
5802 * for performance reasons the counter is not strictly tracking tasks to
5803 * their home CPUs. So we just add the counter to another CPU's counter,
5804 * to keep the global sum constant after CPU-down:
5805 */
5806static void migrate_nr_uninterruptible(struct rq *rq_src)
5807{
5808    struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
5809
5810    rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
5811    rq_src->nr_uninterruptible = 0;
5812}
5813
5814/*
5815 * remove the tasks which were accounted by rq from calc_load_tasks.
5816 */
5817static void calc_global_load_remove(struct rq *rq)
5818{
5819    atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
5820    rq->calc_load_active = 0;
5821}
5822
5823/*
5824 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5825 * try_to_wake_up()->select_task_rq().
5826 *
5827 * Called with rq->lock held; even though we're in stop_machine() and
5828 * there's no concurrency possible, we hold the required locks anyway
5829 * because of lock validation efforts.
5830 */
5831static void migrate_tasks(unsigned int dead_cpu)
5832{
5833    struct rq *rq = cpu_rq(dead_cpu);
5834    struct task_struct *next, *stop = rq->stop;
5835    int dest_cpu;
5836
5837    /*
5838     * Fudge the rq selection such that the below task selection loop
5839     * doesn't get stuck on the currently eligible stop task.
5840     *
5841     * We're currently inside stop_machine() and the rq is either stuck
5842     * in the stop_machine_cpu_stop() loop, or we're executing this code;
5843     * either way we should never end up calling schedule() until we're
5844     * done here.
5845     */
5846    rq->stop = NULL;
5847
5848    for ( ; ; ) {
5849        /*
5850         * This thread itself is running; bail when it's the only
5851         * remaining thread.
5852         */
5853        if (rq->nr_running == 1)
5854            break;
5855
5856        next = pick_next_task(rq);
5857        BUG_ON(!next);
5858        next->sched_class->put_prev_task(rq, next);
5859
5860        /* Find suitable destination for @next, with force if needed. */
5861        dest_cpu = select_fallback_rq(dead_cpu, next);
5862        raw_spin_unlock(&rq->lock);
5863
5864        __migrate_task(next, dead_cpu, dest_cpu);
5865
5866        raw_spin_lock(&rq->lock);
5867    }
5868
5869    rq->stop = stop;
5870}
5871
5872#endif /* CONFIG_HOTPLUG_CPU */
5873
5874#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5875
5876static struct ctl_table sd_ctl_dir[] = {
5877    {
5878        .procname = "sched_domain",
5879        .mode = 0555,
5880    },
5881    {}
5882};
5883
5884static struct ctl_table sd_ctl_root[] = {
5885    {
5886        .procname = "kernel",
5887        .mode = 0555,
5888        .child = sd_ctl_dir,
5889    },
5890    {}
5891};
5892
5893static struct ctl_table *sd_alloc_ctl_entry(int n)
5894{
5895    struct ctl_table *entry =
5896        kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
5897
5898    return entry;
5899}
5900
5901static void sd_free_ctl_entry(struct ctl_table **tablep)
5902{
5903    struct ctl_table *entry;
5904
5905    /*
5906     * In the intermediate directories, both the child directory and
5907     * procname are dynamically allocated and could fail but the mode
5908     * will always be set. In the lowest directory the names are
5909     * static strings and all have proc handlers.
5910     */
5911    for (entry = *tablep; entry->mode; entry++) {
5912        if (entry->child)
5913            sd_free_ctl_entry(&entry->child);
5914        if (entry->proc_handler == NULL)
5915            kfree(entry->procname);
5916    }
5917
5918    kfree(*tablep);
5919    *tablep = NULL;
5920}
5921
5922static void
5923set_table_entry(struct ctl_table *entry,
5924        const char *procname, void *data, int maxlen,
5925        mode_t mode, proc_handler *proc_handler)
5926{
5927    entry->procname = procname;
5928    entry->data = data;
5929    entry->maxlen = maxlen;
5930    entry->mode = mode;
5931    entry->proc_handler = proc_handler;
5932}
5933
5934static struct ctl_table *
5935sd_alloc_ctl_domain_table(struct sched_domain *sd)
5936{
5937    struct ctl_table *table = sd_alloc_ctl_entry(13);
5938
5939    if (table == NULL)
5940        return NULL;
5941
5942    set_table_entry(&table[0], "min_interval", &sd->min_interval,
5943        sizeof(long), 0644, proc_doulongvec_minmax);
5944    set_table_entry(&table[1], "max_interval", &sd->max_interval,
5945        sizeof(long), 0644, proc_doulongvec_minmax);
5946    set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
5947        sizeof(int), 0644, proc_dointvec_minmax);
5948    set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
5949        sizeof(int), 0644, proc_dointvec_minmax);
5950    set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
5951        sizeof(int), 0644, proc_dointvec_minmax);
5952    set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
5953        sizeof(int), 0644, proc_dointvec_minmax);
5954    set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
5955        sizeof(int), 0644, proc_dointvec_minmax);
5956    set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
5957        sizeof(int), 0644, proc_dointvec_minmax);
5958    set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
5959        sizeof(int), 0644, proc_dointvec_minmax);
5960    set_table_entry(&table[9], "cache_nice_tries",
5961        &sd->cache_nice_tries,
5962        sizeof(int), 0644, proc_dointvec_minmax);
5963    set_table_entry(&table[10], "flags", &sd->flags,
5964        sizeof(int), 0644, proc_dointvec_minmax);
5965    set_table_entry(&table[11], "name", sd->name,
5966        CORENAME_MAX_SIZE, 0444, proc_dostring);
5967    /* &table[12] is terminator */
5968
5969    return table;
5970}
5971
5972static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
5973{
5974    struct ctl_table *entry, *table;
5975    struct sched_domain *sd;
5976    int domain_num = 0, i;
5977    char buf[32];
5978
5979    for_each_domain(cpu, sd)
5980        domain_num++;
5981    entry = table = sd_alloc_ctl_entry(domain_num + 1);
5982    if (table == NULL)
5983        return NULL;
5984
5985    i = 0;
5986    for_each_domain(cpu, sd) {
5987        snprintf(buf, 32, "domain%d", i);
5988        entry->procname = kstrdup(buf, GFP_KERNEL);
5989        entry->mode = 0555;
5990        entry->child = sd_alloc_ctl_domain_table(sd);
5991        entry++;
5992        i++;
5993    }
5994    return table;
5995}
5996
5997static struct ctl_table_header *sd_sysctl_header;
5998static void register_sched_domain_sysctl(void)
5999{
6000    int i, cpu_num = num_possible_cpus();
6001    struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
6002    char buf[32];
6003
6004    WARN_ON(sd_ctl_dir[0].child);
6005    sd_ctl_dir[0].child = entry;
6006
6007    if (entry == NULL)
6008        return;
6009
6010    for_each_possible_cpu(i) {
6011        snprintf(buf, 32, "cpu%d", i);
6012        entry->procname = kstrdup(buf, GFP_KERNEL);
6013        entry->mode = 0555;
6014        entry->child = sd_alloc_ctl_cpu_table(i);
6015        entry++;
6016    }
6017
6018    WARN_ON(sd_sysctl_header);
6019    sd_sysctl_header = register_sysctl_table(sd_ctl_root);
6020}
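/*
 * The resulting layout (descriptive, paths derived from the tables
 * built above): one directory per possible CPU under
 * /proc/sys/kernel/sched_domain/, e.g.
 *
 *	/proc/sys/kernel/sched_domain/cpu0/domain0/min_interval
 *
 * with one "domainN" subdirectory per sched_domain level of that CPU.
 */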
6021
6022/* may be called multiple times per register */
6023static void unregister_sched_domain_sysctl(void)
6024{
6025    if (sd_sysctl_header)
6026        unregister_sysctl_table(sd_sysctl_header);
6027    sd_sysctl_header = NULL;
6028    if (sd_ctl_dir[0].child)
6029        sd_free_ctl_entry(&sd_ctl_dir[0].child);
6030}
6031#else
6032static void register_sched_domain_sysctl(void)
6033{
6034}
6035static void unregister_sched_domain_sysctl(void)
6036{
6037}
6038#endif
6039
6040static void set_rq_online(struct rq *rq)
6041{
6042    if (!rq->online) {
6043        const struct sched_class *class;
6044
6045        cpumask_set_cpu(rq->cpu, rq->rd->online);
6046        rq->online = 1;
6047
6048        for_each_class(class) {
6049            if (class->rq_online)
6050                class->rq_online(rq);
6051        }
6052    }
6053}
6054
6055static void set_rq_offline(struct rq *rq)
6056{
6057    if (rq->online) {
6058        const struct sched_class *class;
6059
6060        for_each_class(class) {
6061            if (class->rq_offline)
6062                class->rq_offline(rq);
6063        }
6064
6065        cpumask_clear_cpu(rq->cpu, rq->rd->online);
6066        rq->online = 0;
6067    }
6068}
6069
6070/*
6071 * migration_call - callback that gets triggered when a CPU is added.
6072 * Here we can start up the necessary migration thread for the new CPU.
6073 */
6074static int __cpuinit
6075migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
6076{
6077    int cpu = (long)hcpu;
6078    unsigned long flags;
6079    struct rq *rq = cpu_rq(cpu);
6080
6081    switch (action & ~CPU_TASKS_FROZEN) {
6082
6083    case CPU_UP_PREPARE:
6084        rq->calc_load_update = calc_load_update;
6085        break;
6086
6087    case CPU_ONLINE:
6088        /* Update our root-domain */
6089        raw_spin_lock_irqsave(&rq->lock, flags);
6090        if (rq->rd) {
6091            BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6092
6093            set_rq_online(rq);
6094        }
6095        raw_spin_unlock_irqrestore(&rq->lock, flags);
6096        break;
6097
6098#ifdef CONFIG_HOTPLUG_CPU
6099    case CPU_DYING:
6100        /* Update our root-domain */
6101        raw_spin_lock_irqsave(&rq->lock, flags);
6102        if (rq->rd) {
6103            BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
6104            set_rq_offline(rq);
6105        }
6106        migrate_tasks(cpu);
6107        BUG_ON(rq->nr_running != 1); /* the migration thread */
6108        raw_spin_unlock_irqrestore(&rq->lock, flags);
6109
6110        migrate_nr_uninterruptible(rq);
6111        calc_global_load_remove(rq);
6112        break;
6113#endif
6114    }
6115    return NOTIFY_OK;
6116}
6117
6118/*
6119 * Register at high priority so that task migration (migrate_all_tasks)
6120 * happens before everything else. This has to be lower priority than
6121 * the notifier in the perf_event subsystem, though.
6122 */
6123static struct notifier_block __cpuinitdata migration_notifier = {
6124    .notifier_call = migration_call,
6125    .priority = CPU_PRI_MIGRATION,
6126};
6127
6128static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
6129                      unsigned long action, void *hcpu)
6130{
6131    switch (action & ~CPU_TASKS_FROZEN) {
6132    case CPU_ONLINE:
6133    case CPU_DOWN_FAILED:
6134        set_cpu_active((long)hcpu, true);
6135        return NOTIFY_OK;
6136    default:
6137        return NOTIFY_DONE;
6138    }
6139}
6140
6141static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
6142                    unsigned long action, void *hcpu)
6143{
6144    switch (action & ~CPU_TASKS_FROZEN) {
6145    case CPU_DOWN_PREPARE:
6146        set_cpu_active((long)hcpu, false);
6147        return NOTIFY_OK;
6148    default:
6149        return NOTIFY_DONE;
6150    }
6151}
6152
6153static int __init migration_init(void)
6154{
6155    void *cpu = (void *)(long)smp_processor_id();
6156    int err;
6157
6158    /* Initialize migration for the boot CPU */
6159    err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
6160    BUG_ON(err == NOTIFY_BAD);
6161    migration_call(&migration_notifier, CPU_ONLINE, cpu);
6162    register_cpu_notifier(&migration_notifier);
6163
6164    /* Register cpu active notifiers */
6165    cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
6166    cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
6167
6168    return 0;
6169}
6170early_initcall(migration_init);
6171#endif
6172
6173#ifdef CONFIG_SMP
6174
6175#ifdef CONFIG_SCHED_DEBUG
6176
6177static __read_mostly int sched_domain_debug_enabled;
6178
6179static int __init sched_domain_debug_setup(char *str)
6180{
6181    sched_domain_debug_enabled = 1;
6182
6183    return 0;
6184}
6185early_param("sched_debug", sched_domain_debug_setup);
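/*
 * The flag above is set by booting with "sched_debug" on the kernel
 * command line; without it, sched_domain_debug() below returns early
 * and no domain hierarchy is printed.
 */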
6186
6187static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
6188                  struct cpumask *groupmask)
6189{
6190    struct sched_group *group = sd->groups;
6191    char str[256];
6192
6193    cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
6194    cpumask_clear(groupmask);
6195
6196    printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
6197
6198    if (!(sd->flags & SD_LOAD_BALANCE)) {
6199        printk(KERN_CONT "does not load-balance\n");
6200        if (sd->parent)
6201            printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
6202                    " has parent\n");
6203        return -1;
6204    }
6205
6206    printk(KERN_CONT "span %s level %s\n", str, sd->name);
6207
6208    if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
6209        printk(KERN_ERR "ERROR: domain->span does not contain "
6210                "CPU%d\n", cpu);
6211    }
6212    if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
6213        printk(KERN_ERR "ERROR: domain->groups does not contain"
6214                " CPU%d\n", cpu);
6215    }
6216
6217    printk(KERN_DEBUG "%*s groups:", level + 1, "");
6218    do {
6219        if (!group) {
6220            printk(KERN_CONT "\n");
6221            printk(KERN_ERR "ERROR: group is NULL\n");
6222            break;
6223        }
6224
6225        if (!group->cpu_power) {
6226            printk(KERN_CONT "\n");
6227            printk(KERN_ERR "ERROR: domain->cpu_power not "
6228                    "set\n");
6229            break;
6230        }
6231
6232        if (!cpumask_weight(sched_group_cpus(group))) {
6233            printk(KERN_CONT "\n");
6234            printk(KERN_ERR "ERROR: empty group\n");
6235            break;
6236        }
6237
6238        if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
6239            printk(KERN_CONT "\n");
6240            printk(KERN_ERR "ERROR: repeated CPUs\n");
6241            break;
6242        }
6243
6244        cpumask_or(groupmask, groupmask, sched_group_cpus(group));
6245
6246        cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
6247
6248        printk(KERN_CONT " %s", str);
6249        if (group->cpu_power != SCHED_LOAD_SCALE) {
6250            printk(KERN_CONT " (cpu_power = %d)",
6251                group->cpu_power);
6252        }
6253
6254        group = group->next;
6255    } while (group != sd->groups);
6256    printk(KERN_CONT "\n");
6257
6258    if (!cpumask_equal(sched_domain_span(sd), groupmask))
6259        printk(KERN_ERR "ERROR: groups don't span domain->span\n");
6260
6261    if (sd->parent &&
6262        !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
6263        printk(KERN_ERR "ERROR: parent span is not a superset "
6264            "of domain->span\n");
6265    return 0;
6266}
6267
6268static void sched_domain_debug(struct sched_domain *sd, int cpu)
6269{
6270    cpumask_var_t groupmask;
6271    int level = 0;
6272
6273    if (!sched_domain_debug_enabled)
6274        return;
6275
6276    if (!sd) {
6277        printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
6278        return;
6279    }
6280
6281    printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
6282
6283    if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
6284        printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
6285        return;
6286    }
6287
6288    for (;;) {
6289        if (sched_domain_debug_one(sd, cpu, level, groupmask))
6290            break;
6291        level++;
6292        sd = sd->parent;
6293        if (!sd)
6294            break;
6295    }
6296    free_cpumask_var(groupmask);
6297}
6298#else /* !CONFIG_SCHED_DEBUG */
6299# define sched_domain_debug(sd, cpu) do { } while (0)
6300#endif /* CONFIG_SCHED_DEBUG */
6301
6302static int sd_degenerate(struct sched_domain *sd)
6303{
6304    if (cpumask_weight(sched_domain_span(sd)) == 1)
6305        return 1;
6306
6307    /* Following flags need at least 2 groups */
6308    if (sd->flags & (SD_LOAD_BALANCE |
6309             SD_BALANCE_NEWIDLE |
6310             SD_BALANCE_FORK |
6311             SD_BALANCE_EXEC |
6312             SD_SHARE_CPUPOWER |
6313