Root/target/linux/generic-2.6/patches-2.6.30/270-sched_bfs.patch

This patch adds support for bfs v230, modified for diff size reduction

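BFS overview (summarised from the hunks below): the scheduler keeps a single
global runqueue (struct global_rq, protected by one grq lock) and selects
among SCHED_NORMAL tasks by earliest deadline. The patch adds a "Scheduler"
choice to init/Kconfig (SCHED_CFS vs SCHED_BFS), implements the previously
reserved SCHED_ISO policy, and adds two tunables documented in
Documentation/sysctl/kernel.txt: rr_interval (base timeslice in milliseconds,
valid range 1-5000, default 6) and iso_cpu (percentage of cpu that SCHED_ISO
tasks may use at effectively realtime priority, default 70).

The snippet below is illustrative only and not part of the patch: a minimal
userspace sketch of how the two sysctls could be set on a kernel built with
CONFIG_SCHED_BFS=y. The helper name and the chosen values are arbitrary
examples within the documented ranges.

	/* set_bfs_tunables.c - hypothetical helper, not shipped by this patch */
	#include <stdio.h>

	static int write_sysctl(const char *path, const char *val)
	{
		FILE *f = fopen(path, "w");

		if (!f)
			return -1;
		fputs(val, f);		/* sysctl files take a decimal string */
		return fclose(f);
	}

	int main(void)
	{
		/* longer timeslice: more cpu-bound throughput, higher latency */
		write_sysctl("/proc/sys/kernel/rr_interval", "30\n");
		/* let SCHED_ISO tasks use up to 80% cpu over the rolling period */
		write_sysctl("/proc/sys/kernel/iso_cpu", "80\n");
		return 0;
	}

Increasing rr_interval trades latency for throughput; see the text this patch
adds to Documentation/sysctl/kernel.txt below for the exact semantics.
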
3--- a/Documentation/sysctl/kernel.txt
4+++ b/Documentation/sysctl/kernel.txt
5@@ -27,6 +27,7 @@ show up in /proc/sys/kernel:
6 - domainname
7 - hostname
8 - hotplug
9+- iso_cpu
10 - java-appletviewer [ binfmt_java, obsolete ]
11 - java-interpreter [ binfmt_java, obsolete ]
12 - kstack_depth_to_print [ X86 only ]
13@@ -48,6 +49,7 @@ show up in /proc/sys/kernel:
14 - randomize_va_space
15 - real-root-dev ==> Documentation/initrd.txt
16 - reboot-cmd [ SPARC only ]
17+- rr_interval
18 - rtsig-max
19 - rtsig-nr
20 - sem
21@@ -170,6 +172,16 @@ Default value is "/sbin/hotplug".
22 
23 ==============================================================
24 
25+iso_cpu: (BFS only)
26+
27+This sets the percentage of cpu that unprivileged SCHED_ISO tasks can
28+run at, effectively at realtime priority, averaged over a rolling five
29+seconds across the -whole- system, meaning all cpus.
30+
31+Set to 70 (percent) by default.
32+
33+==============================================================
34+
35 l2cr: (PPC only)
36 
37 This flag controls the L2 cache of G3 processor boards. If
38@@ -322,6 +334,19 @@ rebooting. ???
39 
40 ==============================================================
41 
42+rr_interval: (BFS only)
43+
44+This is the smallest duration that any cpu process scheduling unit
45+will run for. Increasing this value can increase throughput of cpu
46+bound tasks substantially but at the expense of increased latencies
47+overall. This value is in milliseconds and the default value chosen
48+depends on the number of cpus available at scheduler initialisation
49+with a minimum of 6.
50+
51+Valid values are from 1-5000.
52+
53+==============================================================
54+
55 rtsig-max & rtsig-nr:
56 
57 The file rtsig-max can be used to tune the maximum number
58--- a/include/linux/init_task.h
59+++ b/include/linux/init_task.h
60@@ -119,9 +119,10 @@ extern struct cred init_cred;
61     .usage = ATOMIC_INIT(2), \
62     .flags = PF_KTHREAD, \
63     .lock_depth = -1, \
64- .prio = MAX_PRIO-20, \
65+ .prio = NORMAL_PRIO, \
66     .static_prio = MAX_PRIO-20, \
67- .normal_prio = MAX_PRIO-20, \
68+ .normal_prio = NORMAL_PRIO, \
69+ .deadline = 0, \
70     .policy = SCHED_NORMAL, \
71     .cpus_allowed = CPU_MASK_ALL, \
72     .mm = NULL, \
73--- a/include/linux/sched.h
74+++ b/include/linux/sched.h
75@@ -36,9 +36,12 @@
76 #define SCHED_FIFO 1
77 #define SCHED_RR 2
78 #define SCHED_BATCH 3
79-/* SCHED_ISO: reserved but not implemented yet */
80+#define SCHED_ISO 4
81 #define SCHED_IDLE 5
82 
83+#define SCHED_MAX (SCHED_IDLE)
84+#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX)
85+
86 #ifdef __KERNEL__
87 
88 struct sched_param {
89@@ -1042,10 +1045,13 @@ struct sched_entity {
90     struct load_weight load; /* for load-balancing */
91     struct rb_node run_node;
92     struct list_head group_node;
93+#ifdef CONFIG_SCHED_CFS
94     unsigned int on_rq;
95 
96     u64 exec_start;
97+#endif
98     u64 sum_exec_runtime;
99+#ifdef CONFIG_SCHED_CFS
100     u64 vruntime;
101     u64 prev_sum_exec_runtime;
102 
103@@ -1096,6 +1102,7 @@ struct sched_entity {
104     /* rq "owned" by this entity/group: */
105     struct cfs_rq *my_q;
106 #endif
107+#endif
108 };
109 
110 struct sched_rt_entity {
111@@ -1123,17 +1130,19 @@ struct task_struct {
112 
113     int lock_depth; /* BKL lock depth */
114 
115-#ifdef CONFIG_SMP
116-#ifdef __ARCH_WANT_UNLOCKED_CTXSW
117     int oncpu;
118-#endif
119-#endif
120-
121     int prio, static_prio, normal_prio;
122     unsigned int rt_priority;
123     const struct sched_class *sched_class;
124     struct sched_entity se;
125     struct sched_rt_entity rt;
126+ unsigned long deadline;
127+#ifdef CONFIG_SCHED_BFS
128+ int load_weight; /* for niceness load balancing purposes */
129+ int first_time_slice;
130+ unsigned long long timestamp, last_ran;
131+ unsigned long utime_pc, stime_pc;
132+#endif
133 
134 #ifdef CONFIG_PREEMPT_NOTIFIERS
135     /* list of struct preempt_notifier: */
136@@ -1156,6 +1165,9 @@ struct task_struct {
137 
138     unsigned int policy;
139     cpumask_t cpus_allowed;
140+#ifdef CONFIG_HOTPLUG_CPU
141+ cpumask_t unplugged_mask;
142+#endif
143 
144 #ifdef CONFIG_PREEMPT_RCU
145     int rcu_read_lock_nesting;
146@@ -1446,11 +1458,19 @@ struct task_struct {
147  * priority to a value higher than any user task. Note:
148  * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
149  */
150-
151+#define PRIO_RANGE (40)
152 #define MAX_USER_RT_PRIO 100
153 #define MAX_RT_PRIO MAX_USER_RT_PRIO
154-
155+#ifdef CONFIG_SCHED_BFS
156+#define MAX_PRIO (MAX_RT_PRIO + PRIO_RANGE)
157+#define ISO_PRIO (MAX_RT_PRIO)
158+#define NORMAL_PRIO (MAX_RT_PRIO + 1)
159+#define IDLE_PRIO (MAX_RT_PRIO + 2)
160+#define PRIO_LIMIT ((IDLE_PRIO) + 1)
161+#else
162 #define MAX_PRIO (MAX_RT_PRIO + 40)
163+#define NORMAL_PRIO (MAX_PRIO - 20)
164+#endif
165 #define DEFAULT_PRIO (MAX_RT_PRIO + 20)
166 
167 static inline int rt_prio(int prio)
168@@ -1734,7 +1754,7 @@ task_sched_runtime(struct task_struct *t
169 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
170 
171 /* sched_exec is called by processes performing an exec */
172-#ifdef CONFIG_SMP
173+#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_CFS)
174 extern void sched_exec(void);
175 #else
176 #define sched_exec() {}
177--- a/init/Kconfig
178+++ b/init/Kconfig
179@@ -435,9 +435,22 @@ config LOG_BUF_SHIFT
180 config HAVE_UNSTABLE_SCHED_CLOCK
181     bool
182 
183+choice
184+ prompt "Scheduler"
185+ default SCHED_CFS
186+
187+ config SCHED_CFS
188+ bool "CFS"
189+
190+ config SCHED_BFS
191+ bool "BFS"
192+
193+endchoice
194+
195 config GROUP_SCHED
196     bool "Group CPU scheduler"
197     depends on EXPERIMENTAL
198+ depends on SCHED_CFS
199     default n
200     help
201       This feature lets CPU scheduler recognize task groups and control CPU
202@@ -488,6 +501,7 @@ endchoice
203 
204 menuconfig CGROUPS
205     boolean "Control Group support"
206+ depends on SCHED_CFS
207     help
208       This option adds support for grouping sets of processes together, for
209       use with process control subsystems such as Cpusets, CFS, memory
210--- a/kernel/Makefile
211+++ b/kernel/Makefile
212@@ -2,7 +2,7 @@
213 # Makefile for the linux kernel.
214 #
215 
216-obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
217+obj-y = $(if $(CONFIG_SCHED_CFS),sched.o,sched_bfs.o) fork.o exec_domain.o panic.o printk.o \
218         cpu.o exit.o itimer.o time.o softirq.o resource.o \
219         sysctl.o capability.o ptrace.o timer.o user.o \
220         signal.o sys.o kmod.o workqueue.o pid.o \
221@@ -103,6 +103,7 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER
222 # I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k
223 # to get a correct value for the wait-channel (WCHAN in ps). --davidm
224 CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer
225+CFLAGS_sched_bfs.o := $(PROFILING) -fno-omit-frame-pointer
226 endif
227 
228 $(obj)/configs.o: $(obj)/config_data.h
229--- a/kernel/kthread.c
230+++ b/kernel/kthread.c
231@@ -15,7 +15,11 @@
232 #include <linux/mutex.h>
233 #include <trace/sched.h>
234 
235+#ifdef CONFIG_SCHED_BFS
236+#define KTHREAD_NICE_LEVEL (0)
237+#else
238 #define KTHREAD_NICE_LEVEL (-5)
239+#endif
240 
241 static DEFINE_SPINLOCK(kthread_create_lock);
242 static LIST_HEAD(kthread_create_list);
243--- /dev/null
244+++ b/kernel/sched_bfs.c
245@@ -0,0 +1,6059 @@
246+/*
247+ * kernel/sched_bfs.c, was sched.c
248+ *
249+ * Kernel scheduler and related syscalls
250+ *
251+ * Copyright (C) 1991-2002 Linus Torvalds
252+ *
253+ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
254+ * make semaphores SMP safe
255+ * 1998-11-19 Implemented schedule_timeout() and related stuff
256+ * by Andrea Arcangeli
257+ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
258+ * hybrid priority-list and round-robin design with
259+ * an array-switch method of distributing timeslices
260+ * and per-CPU runqueues. Cleanups and useful suggestions
261+ * by Davide Libenzi, preemptible kernel bits by Robert Love.
262+ * 2003-09-03 Interactivity tuning by Con Kolivas.
263+ * 2004-04-02 Scheduler domains code by Nick Piggin
264+ * 2007-04-15 Work begun on replacing all interactivity tuning with a
265+ * fair scheduling design by Con Kolivas.
266+ * 2007-05-05 Load balancing (smp-nice) and other improvements
267+ * by Peter Williams
268+ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
269+ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
270+ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
271+ * Thomas Gleixner, Mike Kravetz
272+ * now Brainfuck deadline scheduling policy by Con Kolivas deletes
273+ * a whole lot of those previous things.
274+ */
275+
276+#include <linux/mm.h>
277+#include <linux/module.h>
278+#include <linux/nmi.h>
279+#include <linux/init.h>
280+#include <asm/uaccess.h>
281+#include <linux/highmem.h>
282+#include <linux/smp_lock.h>
283+#include <asm/mmu_context.h>
284+#include <linux/interrupt.h>
285+#include <linux/capability.h>
286+#include <linux/completion.h>
287+#include <linux/kernel_stat.h>
288+#include <linux/debug_locks.h>
289+#include <linux/perf_counter.h>
290+#include <linux/security.h>
291+#include <linux/notifier.h>
292+#include <linux/profile.h>
293+#include <linux/freezer.h>
294+#include <linux/vmalloc.h>
295+#include <linux/blkdev.h>
296+#include <linux/delay.h>
297+#include <linux/smp.h>
298+#include <linux/threads.h>
299+#include <linux/timer.h>
300+#include <linux/rcupdate.h>
301+#include <linux/cpu.h>
302+#include <linux/cpuset.h>
303+#include <linux/cpumask.h>
304+#include <linux/percpu.h>
305+#include <linux/kthread.h>
306+#include <linux/proc_fs.h>
307+#include <linux/seq_file.h>
308+#include <linux/syscalls.h>
309+#include <linux/times.h>
310+#include <linux/tsacct_kern.h>
311+#include <linux/kprobes.h>
312+#include <linux/delayacct.h>
313+#include <linux/reciprocal_div.h>
314+#include <linux/log2.h>
315+#include <linux/bootmem.h>
316+#include <linux/ftrace.h>
317+
318+#include <asm/tlb.h>
319+#include <asm/unistd.h>
320+
321+#define CREATE_TRACE_POINTS
322+#include <trace/events/sched.h>
323+
324+#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
325+#define rt_task(p) rt_prio((p)->prio)
326+#define rt_queue(rq) rt_prio((rq)->rq_prio)
327+#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
328+#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \
329+ (policy) == SCHED_RR)
330+#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy))
331+#define idleprio_task(p) unlikely((p)->policy == SCHED_IDLE)
332+#define iso_task(p) unlikely((p)->policy == SCHED_ISO)
333+#define iso_queue(rq) unlikely((rq)->rq_policy == SCHED_ISO)
334+#define ISO_PERIOD ((5 * HZ * num_online_cpus()) + 1)
335+
336+/*
337+ * Convert user-nice values [ -20 ... 0 ... 19 ]
338+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
339+ * and back.
340+ */
341+#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
342+#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
343+#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
344+
345+/*
346+ * 'User priority' is the nice value converted to something we
347+ * can work with better when scaling various scheduler parameters,
348+ * it's a [ 0 ... 39 ] range.
349+ */
350+#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
351+#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
352+#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
353+#define SCHED_PRIO(p) ((p)+MAX_RT_PRIO)
354+
355+/* Some helpers for converting to/from various scales.*/
356+#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
357+#define MS_TO_NS(TIME) ((TIME) * 1000000)
358+#define MS_TO_US(TIME) ((TIME) * 1000)
359+
360+#ifdef CONFIG_SMP
361+/*
362+ * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
363+ * Since cpu_power is a 'constant', we can use a reciprocal divide.
364+ */
365+static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
366+{
367+ return reciprocal_divide(load, sg->reciprocal_cpu_power);
368+}
369+
370+/*
371+ * Each time a sched group cpu_power is changed,
372+ * we must compute its reciprocal value
373+ */
374+static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
375+{
376+ sg->__cpu_power += val;
377+ sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
378+}
379+#endif
380+
381+/*
382+ * This is the time all tasks within the same priority round robin.
383+ * Value is in ms and set to a minimum of 6ms. Scales with number of cpus.
384+ * Tunable via /proc interface.
385+ */
386+int rr_interval __read_mostly = 6;
387+
388+/*
389+ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
390+ * are allowed to run five seconds as real time tasks. This is the total over
391+ * all online cpus.
392+ */
393+int sched_iso_cpu __read_mostly = 70;
394+
395+int prio_ratios[PRIO_RANGE] __read_mostly;
396+
397+static inline unsigned long timeslice(void)
398+{
399+ return MS_TO_US(rr_interval);
400+}
401+
402+struct global_rq {
403+ spinlock_t lock;
404+ unsigned long nr_running;
405+ unsigned long nr_uninterruptible;
406+ unsigned long long nr_switches;
407+ struct list_head queue[PRIO_LIMIT];
408+ DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1);
409+ unsigned long iso_ticks;
410+ unsigned short iso_refractory;
411+#ifdef CONFIG_SMP
412+ unsigned long qnr; /* queued not running */
413+ cpumask_t cpu_idle_map;
414+#endif
415+};
416+
417+static struct global_rq grq;
418+
419+/*
420+ * This is the main, per-CPU runqueue data structure.
421+ * All this is protected by the global_rq lock.
422+ */
423+struct rq {
424+#ifdef CONFIG_SMP
425+#ifdef CONFIG_NO_HZ
426+ unsigned char in_nohz_recently;
427+#endif
428+#endif
429+
430+ struct task_struct *curr, *idle;
431+ struct mm_struct *prev_mm;
432+ struct list_head queue; /* Place to store currently running task */
433+
434+ /* Stored data about rq->curr to work outside grq lock */
435+ unsigned long rq_deadline;
436+ unsigned int rq_policy;
437+ int rq_time_slice;
438+ int rq_prio;
439+
440+ /* Accurate timekeeping data */
441+ u64 timekeep_clock;
442+ unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
443+ iowait_pc, idle_pc;
444+ atomic_t nr_iowait;
445+
446+ int cpu; /* cpu of this runqueue */
447+ int online;
448+
449+#ifdef CONFIG_SMP
450+ struct root_domain *rd;
451+ struct sched_domain *sd;
452+
453+ struct list_head migration_queue;
454+#endif
455+
456+ u64 clock;
457+#ifdef CONFIG_SCHEDSTATS
458+
459+ /* latency stats */
460+ struct sched_info rq_sched_info;
461+ unsigned long long rq_cpu_time;
462+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
463+
464+ /* sys_sched_yield() stats */
465+ unsigned int yld_count;
466+
467+ /* schedule() stats */
468+ unsigned int sched_switch;
469+ unsigned int sched_count;
470+ unsigned int sched_goidle;
471+
472+ /* try_to_wake_up() stats */
473+ unsigned int ttwu_count;
474+ unsigned int ttwu_local;
475+
476+ /* BKL stats */
477+ unsigned int bkl_count;
478+#endif
479+};
480+
481+static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
482+static DEFINE_MUTEX(sched_hotcpu_mutex);
483+
484+#ifdef CONFIG_SMP
485+
486+/*
487+ * We add the notion of a root-domain which will be used to define per-domain
488+ * variables. Each exclusive cpuset essentially defines an island domain by
489+ * fully partitioning the member cpus from any other cpuset. Whenever a new
490+ * exclusive cpuset is created, we also create and attach a new root-domain
491+ * object.
492+ *
493+ */
494+struct root_domain {
495+ atomic_t refcount;
496+ cpumask_var_t span;
497+ cpumask_var_t online;
498+
499+ /*
500+ * The "RT overload" flag: it gets set if a CPU has more than
501+ * one runnable RT task.
502+ */
503+ cpumask_var_t rto_mask;
504+ atomic_t rto_count;
505+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
506+ /*
507+ * Preferred wake up cpu nominated by sched_mc balance that will be
508+ * used when most cpus are idle in the system indicating overall very
509+ * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
510+ */
511+ unsigned int sched_mc_preferred_wakeup_cpu;
512+#endif
513+};
514+
515+/*
516+ * By default the system creates a single root-domain with all cpus as
517+ * members (mimicking the global state we have today).
518+ */
519+static struct root_domain def_root_domain;
520+
521+#endif
522+
523+static inline int cpu_of(struct rq *rq)
524+{
525+#ifdef CONFIG_SMP
526+ return rq->cpu;
527+#else
528+ return 0;
529+#endif
530+}
531+
532+/*
533+ * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
534+ * See detach_destroy_domains: synchronize_sched for details.
535+ *
536+ * The domain tree of any CPU may only be accessed from within
537+ * preempt-disabled sections.
538+ */
539+#define for_each_domain(cpu, __sd) \
540+ for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
541+
542+#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
543+#define this_rq() (&__get_cpu_var(runqueues))
544+#define task_rq(p) cpu_rq(task_cpu(p))
545+#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
546+
547+#include "sched_stats.h"
548+
549+#ifndef prepare_arch_switch
550+# define prepare_arch_switch(next) do { } while (0)
551+#endif
552+#ifndef finish_arch_switch
553+# define finish_arch_switch(prev) do { } while (0)
554+#endif
555+
556+inline void update_rq_clock(struct rq *rq)
557+{
558+ rq->clock = sched_clock_cpu(cpu_of(rq));
559+}
560+
561+static inline int task_running(struct task_struct *p)
562+{
563+ return (!!p->oncpu);
564+}
565+
566+static inline void grq_lock(void)
567+ __acquires(grq.lock)
568+{
569+ smp_mb();
570+ spin_lock(&grq.lock);
571+}
572+
573+static inline void grq_unlock(void)
574+ __releases(grq.lock)
575+{
576+ spin_unlock(&grq.lock);
577+}
578+
579+static inline void grq_lock_irq(void)
580+ __acquires(grq.lock)
581+{
582+ smp_mb();
583+ spin_lock_irq(&grq.lock);
584+}
585+
586+static inline void time_lock_grq(struct rq *rq)
587+ __acquires(grq.lock)
588+{
589+ grq_lock();
590+ update_rq_clock(rq);
591+}
592+
593+static inline void grq_unlock_irq(void)
594+ __releases(grq.lock)
595+{
596+ spin_unlock_irq(&grq.lock);
597+}
598+
599+static inline void grq_lock_irqsave(unsigned long *flags)
600+ __acquires(grq.lock)
601+{
602+ smp_mb();
603+ spin_lock_irqsave(&grq.lock, *flags);
604+}
605+
606+static inline void grq_unlock_irqrestore(unsigned long *flags)
607+ __releases(grq.lock)
608+{
609+ spin_unlock_irqrestore(&grq.lock, *flags);
610+}
611+
612+static inline struct rq
613+*task_grq_lock(struct task_struct *p, unsigned long *flags)
614+ __acquires(grq.lock)
615+{
616+ grq_lock_irqsave(flags);
617+ return task_rq(p);
618+}
619+
620+static inline struct rq
621+*time_task_grq_lock(struct task_struct *p, unsigned long *flags)
622+ __acquires(grq.lock)
623+{
624+ struct rq *rq = task_grq_lock(p, flags);
625+ update_rq_clock(rq);
626+ return rq;
627+}
628+
629+static inline void task_grq_unlock(unsigned long *flags)
630+ __releases(grq.lock)
631+{
632+ grq_unlock_irqrestore(flags);
633+}
634+
635+/**
636+ * runqueue_is_locked
637+ *
638+ * Returns true if the global runqueue is locked.
639+ * This interface allows printk to be called with the runqueue lock
640+ * held and know whether or not it is OK to wake up the klogd.
641+ */
642+int runqueue_is_locked(void)
643+{
644+ return spin_is_locked(&grq.lock);
645+}
646+
647+void task_rq_unlock_wait(struct task_struct *p)
648+ __releases(grq.lock)
649+{
650+ smp_mb(); /* spin-unlock-wait is not a full memory barrier */
651+ spin_unlock_wait(&grq.lock);
652+}
653+
654+static inline void time_grq_lock(struct rq *rq, unsigned long *flags)
655+ __acquires(grq.lock)
656+{
657+ spin_lock_irqsave(&grq.lock, *flags);
658+ update_rq_clock(rq);
659+}
660+
661+static inline struct rq *__task_grq_lock(struct task_struct *p)
662+ __acquires(grq.lock)
663+{
664+ grq_lock();
665+ return task_rq(p);
666+}
667+
668+static inline void __task_grq_unlock(void)
669+ __releases(grq.lock)
670+{
671+ grq_unlock();
672+}
673+
674+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
675+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
676+{
677+}
678+
679+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
680+{
681+#ifdef CONFIG_DEBUG_SPINLOCK
682+ /* this is a valid case when another task releases the spinlock */
683+ grq.lock.owner = current;
684+#endif
685+ /*
686+ * If we are tracking spinlock dependencies then we have to
687+ * fix up the runqueue lock - which gets 'carried over' from
688+ * prev into current:
689+ */
690+ spin_acquire(&grq.lock.dep_map, 0, 0, _THIS_IP_);
691+
692+ grq_unlock_irq();
693+}
694+
695+#else /* __ARCH_WANT_UNLOCKED_CTXSW */
696+
697+static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
698+{
699+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
700+ grq_unlock_irq();
701+#else
702+ grq_unlock();
703+#endif
704+}
705+
706+static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
707+{
708+ smp_wmb();
709+#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
710+ local_irq_enable();
711+#endif
712+}
713+#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
714+
715+/*
716+ * A task that is queued will be on the grq run list.
717+ * A task that is not running or queued will not be on the grq run list.
718+ * A task that is currently running will have ->oncpu set and be queued
719+ * temporarily in its own rq queue.
720+ * A task that is running and no longer queued will be seen only on
721+ * context switch exit.
722+ */
723+
724+static inline int task_queued(struct task_struct *p)
725+{
726+ return (!list_empty(&p->rt.run_list));
727+}
728+
729+static inline int task_queued_only(struct task_struct *p)
730+{
731+ return (!list_empty(&p->rt.run_list) && !task_running(p));
732+}
733+
734+/*
735+ * Removing from the global runqueue. Enter with grq locked.
736+ */
737+static void dequeue_task(struct task_struct *p)
738+{
739+ list_del_init(&p->rt.run_list);
740+ if (list_empty(grq.queue + p->prio))
741+ __clear_bit(p->prio, grq.prio_bitmap);
742+}
743+
744+static inline void reset_first_time_slice(struct task_struct *p)
745+{
746+ if (unlikely(p->first_time_slice))
747+ p->first_time_slice = 0;
748+}
749+
750+static int idleprio_suitable(struct task_struct *p)
751+{
752+ return (!freezing(p) && !signal_pending(p) &&
753+ !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)));
754+}
755+
756+static int isoprio_suitable(void)
757+{
758+ return !grq.iso_refractory;
759+}
760+
761+/*
762+ * Adding to the global runqueue. Enter with grq locked.
763+ */
764+static void enqueue_task(struct task_struct *p)
765+{
766+ if (!rt_task(p)) {
767+ /* Check it hasn't gotten rt from PI */
768+ if ((idleprio_task(p) && idleprio_suitable(p)) ||
769+ (iso_task(p) && isoprio_suitable()))
770+ p->prio = p->normal_prio;
771+ else
772+ p->prio = NORMAL_PRIO;
773+ }
774+ __set_bit(p->prio, grq.prio_bitmap);
775+ list_add_tail(&p->rt.run_list, grq.queue + p->prio);
776+ sched_info_queued(p);
777+}
778+
779+/* Only idle task does this as a real time task*/
780+static inline void enqueue_task_head(struct task_struct *p)
781+{
782+ __set_bit(p->prio, grq.prio_bitmap);
783+ list_add(&p->rt.run_list, grq.queue + p->prio);
784+ sched_info_queued(p);
785+}
786+
787+static inline void requeue_task(struct task_struct *p)
788+{
789+ sched_info_queued(p);
790+}
791+
792+static inline int pratio(struct task_struct *p)
793+{
794+ return prio_ratios[TASK_USER_PRIO(p)];
795+}
796+
797+/*
798+ * task_timeslice - all tasks of all priorities get the exact same timeslice
799+ * length. CPU distribution is handled by giving different deadlines to
800+ * tasks of different priorities.
801+ */
802+static inline int task_timeslice(struct task_struct *p)
803+{
804+ return (rr_interval * pratio(p) / 100);
805+}
806+
807+#ifdef CONFIG_SMP
808+static inline void inc_qnr(void)
809+{
810+ grq.qnr++;
811+}
812+
813+static inline void dec_qnr(void)
814+{
815+ grq.qnr--;
816+}
817+
818+static inline int queued_notrunning(void)
819+{
820+ return grq.qnr;
821+}
822+#else
823+static inline void inc_qnr(void)
824+{
825+}
826+
827+static inline void dec_qnr(void)
828+{
829+}
830+
831+static inline int queued_notrunning(void)
832+{
833+ return grq.nr_running;
834+}
835+#endif
836+
837+/*
838+ * activate_idle_task - move idle task to the _front_ of runqueue.
839+ */
840+static inline void activate_idle_task(struct task_struct *p)
841+{
842+ enqueue_task_head(p);
843+ grq.nr_running++;
844+ inc_qnr();
845+}
846+
847+static inline int normal_prio(struct task_struct *p)
848+{
849+ if (has_rt_policy(p))
850+ return MAX_RT_PRIO - 1 - p->rt_priority;
851+ if (idleprio_task(p))
852+ return IDLE_PRIO;
853+ if (iso_task(p))
854+ return ISO_PRIO;
855+ return NORMAL_PRIO;
856+}
857+
858+/*
859+ * Calculate the current priority, i.e. the priority
860+ * taken into account by the scheduler. This value might
861+ * be boosted by RT tasks as it will be RT if the task got
862+ * RT-boosted. If not then it returns p->normal_prio.
863+ */
864+static int effective_prio(struct task_struct *p)
865+{
866+ p->normal_prio = normal_prio(p);
867+ /*
868+ * If we are RT tasks or we were boosted to RT priority,
869+ * keep the priority unchanged. Otherwise, update priority
870+ * to the normal priority:
871+ */
872+ if (!rt_prio(p->prio))
873+ return p->normal_prio;
874+ return p->prio;
875+}
876+
877+/*
878+ * activate_task - move a task to the runqueue. Enter with grq locked. The rq
879+ * doesn't really matter but gives us the local clock.
880+ */
881+static void activate_task(struct task_struct *p, struct rq *rq)
882+{
883+ u64 now = rq->clock;
884+
885+ /*
886+ * Sleep time is in units of nanosecs, so shift by 20 to get a
887+ * milliseconds-range estimation of the amount of time that the task
888+ * spent sleeping:
889+ */
890+ if (unlikely(prof_on == SLEEP_PROFILING)) {
891+ if (p->state == TASK_UNINTERRUPTIBLE)
892+ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
893+ (now - p->timestamp) >> 20);
894+ }
895+
896+ p->prio = effective_prio(p);
897+ p->timestamp = now;
898+ if (task_contributes_to_load(p))
899+ grq.nr_uninterruptible--;
900+ enqueue_task(p);
901+ grq.nr_running++;
902+ inc_qnr();
903+}
904+
905+/*
906+ * deactivate_task - If it's running, it's not on the grq and we can just
907+ * decrement the nr_running.
908+ */
909+static inline void deactivate_task(struct task_struct *p)
910+{
911+ if (task_contributes_to_load(p))
912+ grq.nr_uninterruptible++;
913+ grq.nr_running--;
914+}
915+
916+#ifdef CONFIG_SMP
917+void set_task_cpu(struct task_struct *p, unsigned int cpu)
918+{
919+ trace_sched_migrate_task(p, cpu);
920+ /*
921+ * After ->cpu is set up to a new value, task_grq_lock(p, ...) can be
922+ * successfuly executed on another CPU. We must ensure that updates of
923+ * per-task data have been completed by this moment.
924+ */
925+ smp_wmb();
926+ task_thread_info(p)->cpu = cpu;
927+}
928+#endif
929+
930+/*
931+ * Move a task off the global queue and take it to a cpu for it will
932+ * become the running task.
933+ */
934+static inline void take_task(struct rq *rq, struct task_struct *p)
935+{
936+ set_task_cpu(p, rq->cpu);
937+ dequeue_task(p);
938+ list_add(&p->rt.run_list, &rq->queue);
939+ dec_qnr();
940+}
941+
942+/*
943+ * Returns a descheduling task to the grq runqueue unless it is being
944+ * deactivated.
945+ */
946+static inline void return_task(struct task_struct *p, int deactivate)
947+{
948+ list_del_init(&p->rt.run_list);
949+ if (deactivate)
950+ deactivate_task(p);
951+ else {
952+ inc_qnr();
953+ enqueue_task(p);
954+ }
955+}
956+
957+/*
958+ * resched_task - mark a task 'to be rescheduled now'.
959+ *
960+ * On UP this means the setting of the need_resched flag, on SMP it
961+ * might also involve a cross-CPU call to trigger the scheduler on
962+ * the target CPU.
963+ */
964+#ifdef CONFIG_SMP
965+
966+#ifndef tsk_is_polling
967+#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
968+#endif
969+
970+static void resched_task(struct task_struct *p)
971+{
972+ int cpu;
973+
974+ assert_spin_locked(&grq.lock);
975+
976+ if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
977+ return;
978+
979+ set_tsk_thread_flag(p, TIF_NEED_RESCHED);
980+
981+ cpu = task_cpu(p);
982+ if (cpu == smp_processor_id())
983+ return;
984+
985+ /* NEED_RESCHED must be visible before we test polling */
986+ smp_mb();
987+ if (!tsk_is_polling(p))
988+ smp_send_reschedule(cpu);
989+}
990+
991+#else
992+static inline void resched_task(struct task_struct *p)
993+{
994+ assert_spin_locked(&grq.lock);
995+ set_tsk_need_resched(p);
996+}
997+#endif
998+
999+/**
1000+ * task_curr - is this task currently executing on a CPU?
1001+ * @p: the task in question.
1002+ */
1003+inline int task_curr(const struct task_struct *p)
1004+{
1005+ return cpu_curr(task_cpu(p)) == p;
1006+}
1007+
1008+#ifdef CONFIG_SMP
1009+struct migration_req {
1010+ struct list_head list;
1011+
1012+ struct task_struct *task;
1013+ int dest_cpu;
1014+
1015+ struct completion done;
1016+};
1017+
1018+/*
1019+ * wait_task_context_switch - wait for a thread to complete at least one
1020+ * context switch.
1021+ *
1022+ * @p must not be current.
1023+ */
1024+void wait_task_context_switch(struct task_struct *p)
1025+{
1026+ unsigned long nvcsw, nivcsw, flags;
1027+ int running;
1028+ struct rq *rq;
1029+
1030+ nvcsw = p->nvcsw;
1031+ nivcsw = p->nivcsw;
1032+ for (;;) {
1033+ /*
1034+ * The runqueue is assigned before the actual context
1035+ * switch. We need to take the runqueue lock.
1036+ *
1037+ * We could check initially without the lock but it is
1038+ * very likely that we need to take the lock in every
1039+ * iteration.
1040+ */
1041+ rq = task_grq_lock(p, &flags);
1042+ running = task_running(p);
1043+ task_grq_unlock(&flags);
1044+
1045+ if (likely(!running))
1046+ break;
1047+ /*
1048+ * The switch count is incremented before the actual
1049+ * context switch. We thus wait for two switches to be
1050+ * sure at least one completed.
1051+ */
1052+ if ((p->nvcsw - nvcsw) > 1)
1053+ break;
1054+ if ((p->nivcsw - nivcsw) > 1)
1055+ break;
1056+
1057+ cpu_relax();
1058+ }
1059+}
1060+
1061+/*
1062+ * wait_task_inactive - wait for a thread to unschedule.
1063+ *
1064+ * If @match_state is nonzero, it's the @p->state value just checked and
1065+ * not expected to change. If it changes, i.e. @p might have woken up,
1066+ * then return zero. When we succeed in waiting for @p to be off its CPU,
1067+ * we return a positive number (its total switch count). If a second call
1068+ * a short while later returns the same number, the caller can be sure that
1069+ * @p has remained unscheduled the whole time.
1070+ *
1071+ * The caller must ensure that the task *will* unschedule sometime soon,
1072+ * else this function might spin for a *long* time. This function can't
1073+ * be called with interrupts off, or it may introduce deadlock with
1074+ * smp_call_function() if an IPI is sent by the same process we are
1075+ * waiting to become inactive.
1076+ */
1077+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1078+{
1079+ unsigned long flags;
1080+ int running, on_rq;
1081+ unsigned long ncsw;
1082+ struct rq *rq;
1083+
1084+ for (;;) {
1085+ /*
1086+ * We do the initial early heuristics without holding
1087+ * any task-queue locks at all. We'll only try to get
1088+ * the runqueue lock when things look like they will
1089+ * work out!
1090+ */
1091+ rq = task_rq(p);
1092+
1093+ /*
1094+ * If the task is actively running on another CPU
1095+ * still, just relax and busy-wait without holding
1096+ * any locks.
1097+ *
1098+ * NOTE! Since we don't hold any locks, it's not
1099+ * even sure that "rq" stays as the right runqueue!
1100+ * But we don't care, since this will
1101+ * return false if the runqueue has changed and p
1102+ * is actually now running somewhere else!
1103+ */
1104+ while (task_running(p) && p == rq->curr) {
1105+ if (match_state && unlikely(p->state != match_state))
1106+ return 0;
1107+ cpu_relax();
1108+ }
1109+
1110+ /*
1111+ * Ok, time to look more closely! We need the grq
1112+ * lock now, to be *sure*. If we're wrong, we'll
1113+ * just go back and repeat.
1114+ */
1115+ rq = task_grq_lock(p, &flags);
1116+ trace_sched_wait_task(rq, p);
1117+ running = task_running(p);
1118+ on_rq = task_queued(p);
1119+ ncsw = 0;
1120+ if (!match_state || p->state == match_state)
1121+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1122+ task_grq_unlock(&flags);
1123+
1124+ /*
1125+ * If it changed from the expected state, bail out now.
1126+ */
1127+ if (unlikely(!ncsw))
1128+ break;
1129+
1130+ /*
1131+ * Was it really running after all now that we
1132+ * checked with the proper locks actually held?
1133+ *
1134+ * Oops. Go back and try again..
1135+ */
1136+ if (unlikely(running)) {
1137+ cpu_relax();
1138+ continue;
1139+ }
1140+
1141+ /*
1142+ * It's not enough that it's not actively running,
1143+ * it must be off the runqueue _entirely_, and not
1144+ * preempted!
1145+ *
1146+ * So if it was still runnable (but just not actively
1147+ * running right now), it's preempted, and we should
1148+ * yield - it could be a while.
1149+ */
1150+ if (unlikely(on_rq)) {
1151+ schedule_timeout_uninterruptible(1);
1152+ continue;
1153+ }
1154+
1155+ /*
1156+ * Ahh, all good. It wasn't running, and it wasn't
1157+ * runnable, which means that it will never become
1158+ * running in the future either. We're all done!
1159+ */
1160+ break;
1161+ }
1162+
1163+ return ncsw;
1164+}
1165+
1166+/***
1167+ * kick_process - kick a running thread to enter/exit the kernel
1168+ * @p: the to-be-kicked thread
1169+ *
1170+ * Cause a process which is running on another CPU to enter
1171+ * kernel-mode, without any delay. (to get signals handled.)
1172+ *
1173+ * NOTE: this function doesnt have to take the runqueue lock,
1174+ * because all it wants to ensure is that the remote task enters
1175+ * the kernel. If the IPI races and the task has been migrated
1176+ * to another CPU then no harm is done and the purpose has been
1177+ * achieved as well.
1178+ */
1179+void kick_process(struct task_struct *p)
1180+{
1181+ int cpu;
1182+
1183+ preempt_disable();
1184+ cpu = task_cpu(p);
1185+ if ((cpu != smp_processor_id()) && task_curr(p))
1186+ smp_send_reschedule(cpu);
1187+ preempt_enable();
1188+}
1189+EXPORT_SYMBOL_GPL(kick_process);
1190+#endif
1191+
1192+#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT)
1193+
1194+/*
1195+ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
1196+ * basis of earlier deadlines. SCHED_BATCH and SCHED_IDLE don't preempt,
1197+ * they cooperatively multitask.
1198+ */
1199+static inline int task_preempts_curr(struct task_struct *p, struct rq *rq)
1200+{
1201+ int preempts = 0;
1202+
1203+ if (p->prio < rq->rq_prio)
1204+ preempts = 1;
1205+ else if (p->policy == SCHED_NORMAL && (p->prio == rq->rq_prio &&
1206+ time_before(p->deadline, rq->rq_deadline)))
1207+ preempts = 1;
1208+ return preempts;
1209+}
1210+
1211+/*
1212+ * Wake up *any* suitable cpu to schedule this task.
1213+ */
1214+static void try_preempt(struct task_struct *p)
1215+{
1216+ struct rq *highest_prio_rq, *this_rq;
1217+ unsigned long latest_deadline, cpu;
1218+ int highest_prio;
1219+ cpumask_t tmp;
1220+
1221+ /* Try the task's previous rq first and as a fallback */
1222+ this_rq = task_rq(p);
1223+
1224+ if (cpu_isset(this_rq->cpu, p->cpus_allowed)) {
1225+ highest_prio_rq = this_rq;
1226+ /* If this_rq is idle, use that. */
1227+ if (rq_idle(this_rq))
1228+ goto found_rq;
1229+ } else
1230+ highest_prio_rq = cpu_rq(any_online_cpu(p->cpus_allowed));
1231+ latest_deadline = this_rq->rq_deadline;
1232+ highest_prio = this_rq->rq_prio;
1233+
1234+ cpus_and(tmp, cpu_online_map, p->cpus_allowed);
1235+
1236+ for_each_cpu_mask(cpu, tmp) {
1237+ struct rq *rq;
1238+ int rq_prio;
1239+
1240+ rq = cpu_rq(cpu);
1241+
1242+ if (rq_idle(rq)) {
1243+ /* found an idle rq, use that one */
1244+ highest_prio_rq = rq;
1245+ goto found_rq;
1246+ }
1247+
1248+ rq_prio = rq->rq_prio;
1249+ if (rq_prio > highest_prio ||
1250+ (rq_prio == highest_prio &&
1251+ time_after(rq->rq_deadline, latest_deadline))) {
1252+ highest_prio = rq_prio;
1253+ latest_deadline = rq->rq_deadline;
1254+ highest_prio_rq = rq;
1255+ }
1256+ }
1257+
1258+ if (!task_preempts_curr(p, highest_prio_rq))
1259+ return;
1260+found_rq:
1261+ resched_task(highest_prio_rq->curr);
1262+ return;
1263+}
1264+
1265+/**
1266+ * task_oncpu_function_call - call a function on the cpu on which a task runs
1267+ * @p: the task to evaluate
1268+ * @func: the function to be called
1269+ * @info: the function call argument
1270+ *
1271+ * Calls the function @func when the task is currently running. This might
1272+ * be on the current CPU, which just calls the function directly
1273+ */
1274+void task_oncpu_function_call(struct task_struct *p,
1275+ void (*func) (void *info), void *info)
1276+{
1277+ int cpu;
1278+
1279+ preempt_disable();
1280+ cpu = task_cpu(p);
1281+ if (task_curr(p))
1282+ smp_call_function_single(cpu, func, info, 1);
1283+ preempt_enable();
1284+}
1285+
1286+#ifdef CONFIG_SMP
1287+static int suitable_idle_cpus(struct task_struct *p)
1288+{
1289+ return (cpus_intersects(p->cpus_allowed, grq.cpu_idle_map));
1290+}
1291+#else
1292+static int suitable_idle_cpus(struct task_struct *p)
1293+{
1294+ return 0;
1295+}
1296+#endif
1297+
1298+/***
1299+ * try_to_wake_up - wake up a thread
1300+ * @p: the to-be-woken-up thread
1301+ * @state: the mask of task states that can be woken
1302+ * @sync: do a synchronous wakeup?
1303+ *
1304+ * Put it on the run-queue if it's not already there. The "current"
1305+ * thread is always on the run-queue (except when the actual
1306+ * re-schedule is in progress), and as such you're allowed to do
1307+ * the simpler "current->state = TASK_RUNNING" to mark yourself
1308+ * runnable without the overhead of this.
1309+ *
1310+ * returns failure only if the task is already active.
1311+ */
1312+static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
1313+{
1314+ unsigned long flags;
1315+ int success = 0;
1316+ long old_state;
1317+ struct rq *rq;
1318+
1319+ rq = time_task_grq_lock(p, &flags);
1320+ old_state = p->state;
1321+ if (!(old_state & state))
1322+ goto out_unlock;
1323+
1324+ /*
1325+ * Note this catches tasks that are running and queued, but returns
1326+ * false during the context switch when they're running and no
1327+ * longer queued.
1328+ */
1329+ if (task_queued(p))
1330+ goto out_running;
1331+
1332+ activate_task(p, rq);
1333+ /*
1334+ * Sync wakeups (i.e. those types of wakeups where the waker
1335+ * has indicated that it will leave the CPU in short order)
1336+ * don't trigger a preemption if there are no idle cpus,
1337+ * instead waiting for current to deschedule.
1338+ */
1339+ if (!sync || (sync && suitable_idle_cpus(p)))
1340+ try_preempt(p);
1341+ success = 1;
1342+
1343+out_running:
1344+ trace_sched_wakeup(rq, p, success);
1345+ p->state = TASK_RUNNING;
1346+out_unlock:
1347+ task_grq_unlock(&flags);
1348+ return success;
1349+}
1350+
1351+/**
1352+ * wake_up_process - Wake up a specific process
1353+ * @p: The process to be woken up.
1354+ *
1355+ * Attempt to wake up the nominated process and move it to the set of runnable
1356+ * processes. Returns 1 if the process was woken up, 0 if it was already
1357+ * running.
1358+ *
1359+ * It may be assumed that this function implies a write memory barrier before
1360+ * changing the task state if and only if any tasks are woken up.
1361+ */
1362+int wake_up_process(struct task_struct *p)
1363+{
1364+ return try_to_wake_up(p, TASK_ALL, 0);
1365+}
1366+EXPORT_SYMBOL(wake_up_process);
1367+
1368+int wake_up_state(struct task_struct *p, unsigned int state)
1369+{
1370+ return try_to_wake_up(p, state, 0);
1371+}
1372+
1373+/*
1374+ * Perform scheduler related setup for a newly forked process p.
1375+ * p is forked by current.
1376+ */
1377+void sched_fork(struct task_struct *p, int clone_flags)
1378+{
1379+ int cpu = get_cpu();
1380+ struct rq *rq;
1381+
1382+#ifdef CONFIG_PREEMPT_NOTIFIERS
1383+ INIT_HLIST_HEAD(&p->preempt_notifiers);
1384+#endif
1385+ /*
1386+ * We mark the process as running here, but have not actually
1387+ * inserted it onto the runqueue yet. This guarantees that
1388+ * nobody will actually run it, and a signal or other external
1389+ * event cannot wake it up and insert it on the runqueue either.
1390+ */
1391+ p->state = TASK_RUNNING;
1392+ set_task_cpu(p, cpu);
1393+
1394+ /* Should be reset in fork.c but done here for ease of bfs patching */
1395+ p->se.sum_exec_runtime = p->stime_pc = p->utime_pc = 0;
1396+
1397+ /*
1398+ * Make sure we do not leak PI boosting priority to the child:
1399+ */
1400+ p->prio = current->normal_prio;
1401+
1402+ INIT_LIST_HEAD(&p->rt.run_list);
1403+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1404+ if (unlikely(sched_info_on()))
1405+ memset(&p->sched_info, 0, sizeof(p->sched_info));
1406+#endif
1407+
1408+ p->oncpu = 0;
1409+
1410+#ifdef CONFIG_PREEMPT
1411+ /* Want to start with kernel preemption disabled. */
1412+ task_thread_info(p)->preempt_count = 1;
1413+#endif
1414+ if (unlikely(p->policy == SCHED_FIFO))
1415+ goto out;
1416+ /*
1417+ * Share the timeslice between parent and child, thus the
1418+ * total amount of pending timeslices in the system doesn't change,
1419+ * resulting in more scheduling fairness. If it's negative, it won't
1420+ * matter since that's the same as being 0. current's time_slice is
1421+ * actually in rq_time_slice when it's running.
1422+ */
1423+ local_irq_disable();
1424+ rq = task_rq(current);
1425+ if (likely(rq->rq_time_slice > 0)) {
1426+ rq->rq_time_slice /= 2;
1427+ /*
1428+ * The remainder of the first timeslice might be recovered by
1429+ * the parent if the child exits early enough.
1430+ */
1431+ p->first_time_slice = 1;
1432+ }
1433+ p->rt.time_slice = rq->rq_time_slice;
1434+ local_irq_enable();
1435+out:
1436+ put_cpu();
1437+}
1438+
1439+/*
1440+ * wake_up_new_task - wake up a newly created task for the first time.
1441+ *
1442+ * This function will do some initial scheduler statistics housekeeping
1443+ * that must be done for every newly created context, then puts the task
1444+ * on the runqueue and wakes it.
1445+ */
1446+void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1447+{
1448+ struct task_struct *parent;
1449+ unsigned long flags;
1450+ struct rq *rq;
1451+
1452+ rq = time_task_grq_lock(p, &flags);
1453+ parent = p->parent;
1454+ BUG_ON(p->state != TASK_RUNNING);
1455+ set_task_cpu(p, task_cpu(parent));
1456+
1457+ activate_task(p, rq);
1458+ trace_sched_wakeup_new(rq, p, 1);
1459+ if (!(clone_flags & CLONE_VM) && rq->curr == parent &&
1460+ !suitable_idle_cpus(p)) {
1461+ /*
1462+ * The VM isn't cloned, so we're in a good position to
1463+ * do child-runs-first in anticipation of an exec. This
1464+ * usually avoids a lot of COW overhead.
1465+ */
1466+ resched_task(parent);
1467+ } else
1468+ try_preempt(p);
1469+ task_grq_unlock(&flags);
1470+}
1471+
1472+/*
1473+ * Potentially available exiting-child timeslices are
1474+ * retrieved here - this way the parent does not get
1475+ * penalized for creating too many threads.
1476+ *
1477+ * (this cannot be used to 'generate' timeslices
1478+ * artificially, because any timeslice recovered here
1479+ * was given away by the parent in the first place.)
1480+ */
1481+void sched_exit(struct task_struct *p)
1482+{
1483+ struct task_struct *parent;
1484+ unsigned long flags;
1485+ struct rq *rq;
1486+
1487+ if (p->first_time_slice) {
1488+ parent = p->parent;
1489+ rq = task_grq_lock(parent, &flags);
1490+ parent->rt.time_slice += p->rt.time_slice;
1491+ if (unlikely(parent->rt.time_slice > timeslice()))
1492+ parent->rt.time_slice = timeslice();
1493+ task_grq_unlock(&flags);
1494+ }
1495+}
1496+
1497+#ifdef CONFIG_PREEMPT_NOTIFIERS
1498+
1499+/**
1500+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
1501+ * @notifier: notifier struct to register
1502+ */
1503+void preempt_notifier_register(struct preempt_notifier *notifier)
1504+{
1505+ hlist_add_head(&notifier->link, &current->preempt_notifiers);
1506+}
1507+EXPORT_SYMBOL_GPL(preempt_notifier_register);
1508+
1509+/**
1510+ * preempt_notifier_unregister - no longer interested in preemption notifications
1511+ * @notifier: notifier struct to unregister
1512+ *
1513+ * This is safe to call from within a preemption notifier.
1514+ */
1515+void preempt_notifier_unregister(struct preempt_notifier *notifier)
1516+{
1517+ hlist_del(&notifier->link);
1518+}
1519+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1520+
1521+static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1522+{
1523+ struct preempt_notifier *notifier;
1524+ struct hlist_node *node;
1525+
1526+ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1527+ notifier->ops->sched_in(notifier, raw_smp_processor_id());
1528+}
1529+
1530+static void
1531+fire_sched_out_preempt_notifiers(struct task_struct *curr,
1532+ struct task_struct *next)
1533+{
1534+ struct preempt_notifier *notifier;
1535+ struct hlist_node *node;
1536+
1537+ hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1538+ notifier->ops->sched_out(notifier, next);
1539+}
1540+
1541+#else /* !CONFIG_PREEMPT_NOTIFIERS */
1542+
1543+static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1544+{
1545+}
1546+
1547+static void
1548+fire_sched_out_preempt_notifiers(struct task_struct *curr,
1549+ struct task_struct *next)
1550+{
1551+}
1552+
1553+#endif /* CONFIG_PREEMPT_NOTIFIERS */
1554+
1555+/**
1556+ * prepare_task_switch - prepare to switch tasks
1557+ * @rq: the runqueue preparing to switch
1558+ * @next: the task we are going to switch to.
1559+ *
1560+ * This is called with the rq lock held and interrupts off. It must
1561+ * be paired with a subsequent finish_task_switch after the context
1562+ * switch.
1563+ *
1564+ * prepare_task_switch sets up locking and calls architecture specific
1565+ * hooks.
1566+ */
1567+static inline void
1568+prepare_task_switch(struct rq *rq, struct task_struct *prev,
1569+ struct task_struct *next)
1570+{
1571+ fire_sched_out_preempt_notifiers(prev, next);
1572+ prepare_lock_switch(rq, next);
1573+ prepare_arch_switch(next);
1574+}
1575+
1576+/**
1577+ * finish_task_switch - clean up after a task-switch
1578+ * @rq: runqueue associated with task-switch
1579+ * @prev: the thread we just switched away from.
1580+ *
1581+ * finish_task_switch must be called after the context switch, paired
1582+ * with a prepare_task_switch call before the context switch.
1583+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
1584+ * and do any other architecture-specific cleanup actions.
1585+ *
1586+ * Note that we may have delayed dropping an mm in context_switch(). If
1587+ * so, we finish that here outside of the runqueue lock. (Doing it
1588+ * with the lock held can cause deadlocks; see schedule() for
1589+ * details.)
1590+ */
1591+static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
1592+ __releases(grq.lock)
1593+{
1594+ struct mm_struct *mm = rq->prev_mm;
1595+ long prev_state;
1596+
1597+ rq->prev_mm = NULL;
1598+
1599+ /*
1600+ * A task struct has one reference for the use as "current".
1601+ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
1602+ * schedule one last time. The schedule call will never return, and
1603+ * the scheduled task must drop that reference.
1604+ * The test for TASK_DEAD must occur while the runqueue locks are
1605+ * still held, otherwise prev could be scheduled on another cpu, die
1606+ * there before we look at prev->state, and then the reference would
1607+ * be dropped twice.
1608+ * Manfred Spraul <manfred@colorfullife.com>
1609+ */
1610+ prev_state = prev->state;
1611+ finish_arch_switch(prev);
1612+ perf_counter_task_sched_in(current, cpu_of(rq));
1613+ finish_lock_switch(rq, prev);
1614+
1615+ fire_sched_in_preempt_notifiers(current);
1616+ if (mm)
1617+ mmdrop(mm);
1618+ if (unlikely(prev_state == TASK_DEAD)) {
1619+ /*
1620+ * Remove function-return probe instances associated with this
1621+ * task and put them back on the free list.
1622+ */
1623+ kprobe_flush_task(prev);
1624+ put_task_struct(prev);
1625+ }
1626+}
1627+
1628+/**
1629+ * schedule_tail - first thing a freshly forked thread must call.
1630+ * @prev: the thread we just switched away from.
1631+ */
1632+asmlinkage void schedule_tail(struct task_struct *prev)
1633+ __releases(grq.lock)
1634+{
1635+ struct rq *rq = this_rq();
1636+
1637+ finish_task_switch(rq, prev);
1638+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1639+ /* In this case, finish_task_switch does not reenable preemption */
1640+ preempt_enable();
1641+#endif
1642+ if (current->set_child_tid)
1643+ put_user(current->pid, current->set_child_tid);
1644+}
1645+
1646+/*
1647+ * context_switch - switch to the new MM and the new
1648+ * thread's register state.
1649+ */
1650+static inline void
1651+context_switch(struct rq *rq, struct task_struct *prev,
1652+ struct task_struct *next)
1653+{
1654+ struct mm_struct *mm, *oldmm;
1655+
1656+ prepare_task_switch(rq, prev, next);
1657+ trace_sched_switch(rq, prev, next);
1658+ mm = next->mm;
1659+ oldmm = prev->active_mm;
1660+ /*
1661+ * For paravirt, this is coupled with an exit in switch_to to
1662+ * combine the page table reload and the switch backend into
1663+ * one hypercall.
1664+ */
1665+ arch_enter_lazy_cpu_mode();
1666+
1667+ if (unlikely(!mm)) {
1668+ next->active_mm = oldmm;
1669+ atomic_inc(&oldmm->mm_count);
1670+ enter_lazy_tlb(oldmm, next);
1671+ } else
1672+ switch_mm(oldmm, mm, next);
1673+
1674+ if (unlikely(!prev->mm)) {
1675+ prev->active_mm = NULL;
1676+ rq->prev_mm = oldmm;
1677+ }
1678+ /*
1679+ * Since the runqueue lock will be released by the next
1680+ * task (which is an invalid locking op but in the case
1681+ * of the scheduler it's an obvious special-case), so we
1682+ * do an early lockdep release here:
1683+ */
1684+#ifndef __ARCH_WANT_UNLOCKED_CTXSW
1685+ spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
1686+#endif
1687+
1688+ /* Here we just switch the register state and the stack. */
1689+ switch_to(prev, next, prev);
1690+
1691+ barrier();
1692+ /*
1693+ * this_rq must be evaluated again because prev may have moved
1694+ * CPUs since it called schedule(), thus the 'rq' on its stack
1695+ * frame will be invalid.
1696+ */
1697+ finish_task_switch(this_rq(), prev);
1698+}
1699+
1700+/*
1701+ * nr_running, nr_uninterruptible and nr_context_switches:
1702+ *
1703+ * externally visible scheduler statistics: current number of runnable
1704+ * threads, current number of uninterruptible-sleeping threads, total
1705+ * number of context switches performed since bootup. All are measured
1706+ * without grabbing the grq lock but the occasional inaccurate result
1707+ * doesn't matter so long as it's positive.
1708+ */
1709+unsigned long nr_running(void)
1710+{
1711+ long nr = grq.nr_running;
1712+
1713+ if (unlikely(nr < 0))
1714+ nr = 0;
1715+ return (unsigned long)nr;
1716+}
1717+
1718+unsigned long nr_uninterruptible(void)
1719+{
1720+ unsigned long nu = grq.nr_uninterruptible;
1721+
1722+ if (unlikely(nu < 0))
1723+ nu = 0;
1724+ return nu;
1725+}
1726+
1727+unsigned long long nr_context_switches(void)
1728+{
1729+ long long ns = grq.nr_switches;
1730+
1731+ /* This is of course impossible */
1732+ if (unlikely(ns < 0))
1733+ ns = 1;
1734+ return (long long)ns;
1735+}
1736+
1737+unsigned long nr_iowait(void)
1738+{
1739+ unsigned long i, sum = 0;
1740+
1741+ for_each_possible_cpu(i)
1742+ sum += atomic_read(&cpu_rq(i)->nr_iowait);
1743+
1744+ return sum;
1745+}
1746+
1747+unsigned long nr_active(void)
1748+{
1749+ return nr_running() + nr_uninterruptible();
1750+}
1751+
1752+DEFINE_PER_CPU(struct kernel_stat, kstat);
1753+
1754+EXPORT_PER_CPU_SYMBOL(kstat);
1755+
1756+/*
1757+ * On each tick, see what percentage of that tick was attributed to each
1758+ * component and add the percentage to the _pc values. Once a _pc value has
1759+ * accumulated one tick's worth, account for that. This means the total
1760+ * percentage of load components will always be 100 per tick.
1761+ */
1762+static void pc_idle_time(struct rq *rq, unsigned long pc)
1763+{
1764+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1765+ cputime64_t tmp = cputime_to_cputime64(jiffies_to_cputime(1));
1766+
1767+ if (atomic_read(&rq->nr_iowait) > 0) {
1768+ rq->iowait_pc += pc;
1769+ if (rq->iowait_pc >= 100) {
1770+ rq->iowait_pc %= 100;
1771+ cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
1772+ }
1773+ } else {
1774+ rq->idle_pc += pc;
1775+ if (rq->idle_pc >= 100) {
1776+ rq->idle_pc %= 100;
1777+ cpustat->idle = cputime64_add(cpustat->idle, tmp);
1778+ }
1779+ }
1780+}
1781+
1782+static void
1783+pc_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset,
1784+ unsigned long pc, unsigned long ns)
1785+{
1786+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1787+ cputime_t one_jiffy = jiffies_to_cputime(1);
1788+ cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
1789+ cputime64_t tmp = cputime_to_cputime64(one_jiffy);
1790+
1791+ p->stime_pc += pc;
1792+ if (p->stime_pc >= 100) {
1793+ p->stime_pc -= 100;
1794+ p->stime = cputime_add(p->stime, one_jiffy);
1795+ p->stimescaled = cputime_add(p->stimescaled, one_jiffy_scaled);
1796+ account_group_system_time(p, one_jiffy);
1797+ acct_update_integrals(p);
1798+ }
1799+ p->se.sum_exec_runtime += ns;
1800+
1801+ if (hardirq_count() - hardirq_offset)
1802+ rq->irq_pc += pc;
1803+ else if (softirq_count()) {
1804+ rq->softirq_pc += pc;
1805+ if (rq->softirq_pc >= 100) {
1806+ rq->softirq_pc %= 100;
1807+ cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
1808+ }
1809+ } else {
1810+ rq->system_pc += pc;
1811+ if (rq->system_pc >= 100) {
1812+ rq->system_pc %= 100;
1813+ cpustat->system = cputime64_add(cpustat->system, tmp);
1814+ }
1815+ }
1816+}
1817+
1818+static void pc_user_time(struct rq *rq, struct task_struct *p,
1819+ unsigned long pc, unsigned long ns)
1820+{
1821+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1822+ cputime_t one_jiffy = jiffies_to_cputime(1);
1823+ cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
1824+ cputime64_t tmp = cputime_to_cputime64(one_jiffy);
1825+
1826+ p->utime_pc += pc;
1827+ if (p->utime_pc >= 100) {
1828+ p->utime_pc -= 100;
1829+ p->utime = cputime_add(p->utime, one_jiffy);
1830+ p->utimescaled = cputime_add(p->utimescaled, one_jiffy_scaled);
1831+ account_group_user_time(p, one_jiffy);
1832+ acct_update_integrals(p);
1833+ }
1834+ p->se.sum_exec_runtime += ns;
1835+
1836+ if (TASK_NICE(p) > 0 || idleprio_task(p)) {
1837+ rq->nice_pc += pc;
1838+ if (rq->nice_pc >= 100) {
1839+ rq->nice_pc %= 100;
1840+ cpustat->nice = cputime64_add(cpustat->nice, tmp);
1841+ }
1842+ } else {
1843+ rq->user_pc += pc;
1844+ if (rq->user_pc >= 100) {
1845+ rq->user_pc %= 100;
1846+ cpustat->user = cputime64_add(cpustat->user, tmp);
1847+ }
1848+ }
1849+}
1850+
1851+/* Convert nanoseconds to percentage of one tick. */
1852+#define NS_TO_PC(NS) (NS * 100 / JIFFIES_TO_NS(1))
1853+
1854+/*
1855+ * This is called on clock ticks and on context switches.
1856+ * Bank in p->se.sum_exec_runtime the ns elapsed since the last tick or switch.
1857+ * CPU scheduler quota accounting is also performed here in microseconds.
1858+ * The value returned from sched_clock() occasionally gives bogus values so
1859+ * some sanity checking is required. Time is supposed to be banked all the
1860+ * time so default to half a tick to make up for when sched_clock reverts
1861+ * to just returning jiffies, and for hardware that can't do tsc.
1862+ */
1863+static void
1864+update_cpu_clock(struct rq *rq, struct task_struct *p, int tick)
1865+{
1866+ long time_diff = rq->clock - p->last_ran;
1867+ long account_ns = rq->clock - rq->timekeep_clock;
1868+ struct task_struct *idle = rq->idle;
1869+ unsigned long account_pc;
1870+
1871+ /*
1872+ * There should be less than or equal to one jiffy worth, and not
1873+ * negative/overflow. time_diff is only used for internal scheduler
1874+ * time_slice accounting.
1875+ */
1876+ if (time_diff <= 0)
1877+ time_diff = JIFFIES_TO_NS(1) / 2;
1878+ else if (time_diff > JIFFIES_TO_NS(1))
1879+ time_diff = JIFFIES_TO_NS(1);
1880+
1881+ if (unlikely(account_ns < 0))
1882+ account_ns = 0;
1883+
1884+ account_pc = NS_TO_PC(account_ns);
1885+
1886+ if (tick) {
1887+ int user_tick = user_mode(get_irq_regs());
1888+
1889+ /* Accurate tick timekeeping */
1890+ if (user_tick)
1891+ pc_user_time(rq, p, account_pc, account_ns);
1892+ else if (p != idle || (irq_count() != HARDIRQ_OFFSET))
1893+ pc_system_time(rq, p, HARDIRQ_OFFSET,
1894+ account_pc, account_ns);
1895+ else
1896+ pc_idle_time(rq, account_pc);
1897+ } else {
1898+ /* Accurate subtick timekeeping */
1899+ if (p == idle)
1900+ pc_idle_time(rq, account_pc);
1901+ else
1902+ pc_user_time(rq, p, account_pc, account_ns);
1903+ }
1904+
1905+ /* time_slice accounting is done in usecs to avoid overflow on 32bit */
1906+ if (rq->rq_policy != SCHED_FIFO && p != idle)
1907+ rq->rq_time_slice -= time_diff / 1000;
1908+ p->last_ran = rq->timekeep_clock = rq->clock;
1909+}
1910+
1911+/*
1912+ * Return any ns on the sched_clock that have not yet been accounted in
1913+ * @p in case that task is currently running.
1914+ *
1915+ * Called with task_grq_lock() held on @rq.
1916+ */
1917+static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
1918+{
1919+ u64 ns = 0;
1920+
1921+ if (p == rq->curr) {
1922+ update_rq_clock(rq);
1923+ ns = rq->clock - p->last_ran;
1924+ if ((s64)ns < 0)
1925+ ns = 0;
1926+ }
1927+
1928+ return ns;
1929+}
1930+
1931+unsigned long long task_delta_exec(struct task_struct *p)
1932+{
1933+ unsigned long flags;
1934+ struct rq *rq;
1935+ u64 ns = 0;
1936+
1937+ rq = task_grq_lock(p, &flags);
1938+ ns = do_task_delta_exec(p, rq);
1939+ task_grq_unlock(&flags);
1940+
1941+ return ns;
1942+}
1943+
1944+/*
1945+ * Return accounted runtime for the task.
1946+ * In case the task is currently running, return the runtime plus current's
1947+ * pending runtime that have not been accounted yet.
1948+ */
1949+unsigned long long task_sched_runtime(struct task_struct *p)
1950+{
1951+ unsigned long flags;
1952+ struct rq *rq;
1953+ u64 ns = 0;
1954+
1955+ rq = task_grq_lock(p, &flags);
1956+ ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
1957+ task_grq_unlock(&flags);
1958+
1959+ return ns;
1960+}
1961+
1962+/*
1963+ * Return sum_exec_runtime for the thread group.
1964+ * In case the task is currently running, return the sum plus current's
1965+ * pending runtime that have not been accounted yet.
1966+ *
1967+ * Note that the thread group might have other running tasks as well,
1968+ * so the return value does not include any pending runtime that those
1969+ * other running tasks might have.
1970+ */
1971+unsigned long long thread_group_sched_runtime(struct task_struct *p)
1972+{
1973+ struct task_cputime totals;
1974+ unsigned long flags;
1975+ struct rq *rq;
1976+ u64 ns;
1977+
1978+ rq = task_grq_lock(p, &flags);
1979+ thread_group_cputime(p, &totals);
1980+ ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
1981+ task_grq_unlock(&flags);
1982+
1983+ return ns;
1984+}
1985+
1986+/* Compatibility crap for removal */
1987+void account_user_time(struct task_struct *p, cputime_t cputime,
1988+ cputime_t cputime_scaled)
1989+{
1990+}
1991+
1992+void account_idle_time(cputime_t cputime)
1993+{
1994+}
1995+
1996+/*
1997+ * Account guest cpu time to a process.
1998+ * @p: the process that the cpu time gets accounted to
1999+ * @cputime: the cpu time spent in virtual machine since the last update
2000+ * @cputime_scaled: cputime scaled by cpu frequency
2001+ */
2002+static void account_guest_time(struct task_struct *p, cputime_t cputime,
2003+ cputime_t cputime_scaled)
2004+{
2005+ cputime64_t tmp;
2006+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2007+
2008+ tmp = cputime_to_cputime64(cputime);
2009+
2010+ /* Add guest time to process. */
2011+ p->utime = cputime_add(p->utime, cputime);
2012+ p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
2013+ account_group_user_time(p, cputime);
2014+ p->gtime = cputime_add(p->gtime, cputime);
2015+
2016+ /* Add guest time to cpustat. */
2017+ cpustat->user = cputime64_add(cpustat->user, tmp);
2018+ cpustat->guest = cputime64_add(cpustat->guest, tmp);
2019+}
2020+
2021+/*
2022+ * Account system cpu time to a process.
2023+ * @p: the process that the cpu time gets accounted to
2024+ * @hardirq_offset: the offset to subtract from hardirq_count()
2025+ * @cputime: the cpu time spent in kernel space since the last update
2026+ * @cputime_scaled: cputime scaled by cpu frequency
2027+ * This is for guest only now.
2028+ */
2029+void account_system_time(struct task_struct *p, int hardirq_offset,
2030+ cputime_t cputime, cputime_t cputime_scaled)
2031+{
2032+
2033+ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
2034+ account_guest_time(p, cputime, cputime_scaled);
2035+}
2036+
2037+/*
2038+ * Account for involuntary wait time.
2039+ * @cputime: the cpu time spent in involuntary wait
2040+ */
2041+void account_steal_time(cputime_t cputime)
2042+{
2043+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2044+ cputime64_t cputime64 = cputime_to_cputime64(cputime);
2045+
2046+ cpustat->steal = cputime64_add(cpustat->steal, cputime64);
2047+}
2048+
2049+/*
2050+ * Account for idle time.
2051+ * @cputime: the cpu time spent in idle wait
2052+ */
2053+static void account_idle_times(cputime_t cputime)
2054+{
2055+ struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2056+ cputime64_t cputime64 = cputime_to_cputime64(cputime);
2057+ struct rq *rq = this_rq();
2058+
2059+ if (atomic_read(&rq->nr_iowait) > 0)
2060+ cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
2061+ else
2062+ cpustat->idle = cputime64_add(cpustat->idle, cputime64);
2063+}
2064+
2065+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
2066+
2067+void account_process_tick(struct task_struct *p, int user_tick)
2068+{
2069+}
2070+
2071+/*
2072+ * Account multiple ticks of steal time.
2073+ * @p: the process from which the cpu time has been stolen
2074+ * @ticks: number of stolen ticks
2075+ */
2076+void account_steal_ticks(unsigned long ticks)
2077+{
2078+ account_steal_time(jiffies_to_cputime(ticks));
2079+}
2080+
2081+/*
2082+ * Account multiple ticks of idle time.
2083+ * @ticks: number of idle ticks
2084+ */
2085+void account_idle_ticks(unsigned long ticks)
2086+{
2087+ account_idle_times(jiffies_to_cputime(ticks));
2088+}
2089+#endif
2090+
2091+/*
2092+ * Functions to test for when SCHED_ISO tasks have used their allocated
2093+ * quota as real time scheduling and convert them back to SCHED_NORMAL.
2094+ * Where possible, the data is tested lockless, to avoid grabbing grq_lock
2095+ * because the occasional inaccurate result won't matter. However the
2096+ * data is only ever modified under lock.
2097+ */
2098+static void set_iso_refractory(void)
2099+{
2100+ grq_lock();
2101+ grq.iso_refractory = 1;
2102+ grq_unlock();
2103+}
2104+
2105+static void clear_iso_refractory(void)
2106+{
2107+ grq_lock();
2108+ grq.iso_refractory = 0;
2109+ grq_unlock();
2110+}
2111+
2112+/*
2113+ * Test if SCHED_ISO tasks have run longer than their allotted period as RT
2114+ * tasks and set the refractory flag if necessary. There is 10% hysteresis
2115+ * for unsetting the flag.
2116+ */
2117+static unsigned int test_ret_isorefractory(struct rq *rq)
2118+{
2119+ if (likely(!grq.iso_refractory)) {
2120+ if (grq.iso_ticks / ISO_PERIOD > sched_iso_cpu)
2121+ set_iso_refractory();
2122+ } else {
2123+ if (grq.iso_ticks / ISO_PERIOD < (sched_iso_cpu * 90 / 100))
2124+ clear_iso_refractory();
2125+ }
2126+ return grq.iso_refractory;
2127+}
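As a rough worked example of the 10% hysteresis above (figures assumed, since ISO_PERIOD and the sysctl default are defined elsewhere in the patch): with sched_iso_cpu at 70, the refractory flag is set once grq.iso_ticks / ISO_PERIOD exceeds 70, and it is only cleared again once that ratio drops below 70 * 90 / 100 = 63, so the flag does not flap when the ISO load hovers around the limit.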
2128+
2129+static void iso_tick(void)
2130+{
2131+ grq_lock();
2132+ grq.iso_ticks += 100;
2133+ grq_unlock();
2134+}
2135+
2136+/* No SCHED_ISO task was running so decrease grq.iso_ticks */
2137+static inline void no_iso_tick(void)
2138+{
2139+ if (grq.iso_ticks) {
2140+ grq_lock();
2141+ grq.iso_ticks = grq.iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
2142+ grq_unlock();
2143+ }
2144+}
2145+
2146+static int rq_running_iso(struct rq *rq)
2147+{
2148+ return rq->rq_prio == ISO_PRIO;
2149+}
2150+
2151+/* This manages tasks that have run out of timeslice during a scheduler_tick */
2152+static void task_running_tick(struct rq *rq)
2153+{
2154+ struct task_struct *p;
2155+
2156+ /*
2157+ * If a SCHED_ISO task is running we increment the iso_ticks. In
2158+ * order to prevent SCHED_ISO tasks from causing starvation in the
2159+ * presence of true RT tasks we account those as iso_ticks as well.
2160+ */
2161+ if ((rt_queue(rq) || (iso_queue(rq) && !grq.iso_refractory))) {
2162+ if (grq.iso_ticks <= (ISO_PERIOD * 100) - 100)
2163+ iso_tick();
2164+ } else
2165+ no_iso_tick();
2166+
2167+ if (iso_queue(rq)) {
2168+ if (unlikely(test_ret_isorefractory(rq))) {
2169+ if (rq_running_iso(rq)) {
2170+ /*
2171+ * SCHED_ISO task is running as RT and limit
2172+ * has been hit. Force it to reschedule as
2173+ * SCHED_NORMAL by zeroing its time_slice
2174+ */
2175+ rq->rq_time_slice = 0;
2176+ }
2177+ }
2178+ }
2179+
2180+ /* SCHED_FIFO tasks never run out of timeslice. */
2181+ if (rq_idle(rq) || rq->rq_time_slice > 0 || rq->rq_policy == SCHED_FIFO)
2182+ return;
2183+
2184+ /* p->rt.time_slice <= 0. We only modify task_struct under grq lock */
2185+ grq_lock();
2186+ p = rq->curr;
2187+ if (likely(task_running(p))) {
2188+ requeue_task(p);
2189+ set_tsk_need_resched(p);
2190+ }
2191+ grq_unlock();
2192+}
2193+
2194+void wake_up_idle_cpu(int cpu);
2195+
2196+/*
2197+ * This function gets called by the timer code, with HZ frequency.
2198+ * We call it with interrupts disabled. The data modified is all
2199+ * local to struct rq so we don't need to grab grq lock.
2200+ */
2201+void scheduler_tick(void)
2202+{
2203+ int cpu = smp_processor_id();
2204+ struct rq *rq = cpu_rq(cpu);
2205+
2206+ sched_clock_tick();
2207+ update_rq_clock(rq);
2208+ update_cpu_clock(rq, rq->curr, 1);
2209+ if (!rq_idle(rq))
2210+ task_running_tick(rq);
2211+ else {
2212+ no_iso_tick();
2213+ if (unlikely(queued_notrunning()))
2214+ set_tsk_need_resched(rq->idle);
2215+ }
2216+}
2217+
2218+notrace unsigned long get_parent_ip(unsigned long addr)
2219+{
2220+ if (in_lock_functions(addr)) {
2221+ addr = CALLER_ADDR2;
2222+ if (in_lock_functions(addr))
2223+ addr = CALLER_ADDR3;
2224+ }
2225+ return addr;
2226+}
2227+
2228+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2229+ defined(CONFIG_PREEMPT_TRACER))
2230+void __kprobes add_preempt_count(int val)
2231+{
2232+#ifdef CONFIG_DEBUG_PREEMPT
2233+ /*
2234+ * Underflow?
2235+ */
2236+ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2237+ return;
2238+#endif
2239+ preempt_count() += val;
2240+#ifdef CONFIG_DEBUG_PREEMPT
2241+ /*
2242+ * Spinlock count overflowing soon?
2243+ */
2244+ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2245+ PREEMPT_MASK - 10);
2246+#endif
2247+ if (preempt_count() == val)
2248+ trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2249+}
2250+EXPORT_SYMBOL(add_preempt_count);
2251+
2252+void __kprobes sub_preempt_count(int val)
2253+{
2254+#ifdef CONFIG_DEBUG_PREEMPT
2255+ /*
2256+ * Underflow?
2257+ */
2258+ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
2259+ return;
2260+ /*
2261+ * Is the spinlock portion underflowing?
2262+ */
2263+ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2264+ !(preempt_count() & PREEMPT_MASK)))
2265+ return;
2266+#endif
2267+
2268+ if (preempt_count() == val)
2269+ trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2270+ preempt_count() -= val;
2271+}
2272+EXPORT_SYMBOL(sub_preempt_count);
2273+#endif
2274+
2275+/*
2276+ * Deadline is "now" in jiffies + (offset by priority). Setting the deadline
2277+ * is the key to everything. It distributes cpu fairly amongst tasks of the
2278+ * same nice value, it proportions cpu according to nice level, it means the
2279+ * task that last woke up the longest ago has the earliest deadline, thus
2280+ * ensuring that interactive tasks get low latency on wake up.
2281+ */
2282+static inline int prio_deadline_diff(struct task_struct *p)
2283+{
2284+ return (pratio(p) * rr_interval * HZ / 1000 / 100) ? : 1;
2285+}
2286+
2287+static inline int longest_deadline(void)
2288+{
2289+ return (prio_ratios[39] * rr_interval * HZ / 1000 / 100);
2290+}
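To make the deadline offset concrete, a worked example with assumed numbers (rr_interval and the prio_ratios[] table are defined earlier in the patch): with rr_interval = 6 ms and HZ = 1000, prio_deadline_diff() reduces to pratio(p) * 6 * 1000 / 1000 / 100 = pratio(p) * 6 / 100 jiffies, so a ratio of 100 yields a 6 jiffy offset and a ratio ten times larger yields 60 jiffies; longest_deadline() is the same computation using the nice +19 entry of the table.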
2291+
2292+/*
2293+ * SCHED_IDLE tasks still have a deadline set, but offset beyond nice +19.
2294+ * This allows nice levels to work between IDLEPRIO tasks and gives a
2295+ * deadline longer than nice +19 for when they're scheduled as SCHED_NORMAL
2296+ * tasks.
2297+ */
2298+static inline void time_slice_expired(struct task_struct *p)
2299+{
2300+ reset_first_time_slice(p);
2301+ p->rt.time_slice = timeslice();
2302+ p->deadline = jiffies + prio_deadline_diff(p);
2303+ if (idleprio_task(p))
2304+ p->deadline += longest_deadline();
2305+}
2306+
2307+static inline void check_deadline(struct task_struct *p)
2308+{
2309+ if (p->rt.time_slice <= 0)
2310+ time_slice_expired(p);
2311+}
2312+
2313+/*
2314+ * O(n) lookup of all tasks in the global runqueue. The real brainfuck
2315+ * of lock contention and O(n). It's not really O(n) as only the queued,
2316+ * but not running tasks are scanned, and it is O(n) in the number of queued
2317+ * tasks only in the worst case, since the right task can often be found
2318+ * before scanning all of them.
2319+ * Tasks are selected in this order:
2320+ * Real time tasks are selected purely by their static priority and in the
2321+ * order they were queued, so the lowest value idx, and the first queued task
2322+ * of that priority value is chosen.
2323+ * If no real time tasks are found, the SCHED_ISO priority is checked, and
2324+ * all SCHED_ISO tasks have the same priority value, so they're selected by
2325+ * the earliest deadline value.
2326+ * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the
2327+ * earliest deadline.
2328+ * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are
2329+ * selected by the earliest deadline.
2330+ */
2331+static inline struct
2332+task_struct *earliest_deadline_task(struct rq *rq, struct task_struct *idle)
2333+{
2334+ unsigned long dl, earliest_deadline = 0; /* Initialise to silence compiler */
2335+ struct task_struct *p, *edt;
2336+ unsigned int cpu = rq->cpu;
2337+ struct list_head *queue;
2338+ int idx = 0;
2339+
2340+ edt = idle;
2341+retry:
2342+ idx = find_next_bit(grq.prio_bitmap, PRIO_LIMIT, idx);
2343+ if (idx >= PRIO_LIMIT)
2344+ goto out;
2345+ queue = &grq.queue[idx];
2346+ list_for_each_entry(p, queue, rt.run_list) {
2347+ /* Make sure cpu affinity is ok */
2348+ if (!cpu_isset(cpu, p->cpus_allowed))
2349+ continue;
2350+ if (idx < MAX_RT_PRIO) {
2351+ /* We found an rt task */
2352+ edt = p;
2353+ goto out_take;
2354+ }
2355+
2356+ /*
2357+ * No rt task, select the earliest deadline task now.
2358+ * On the 1st run the 2nd condition is never used, so
2359+ * there is no need to initialise earliest_deadline
2360+ * before. Normalise all old deadlines to now.
2361+ */
2362+ if (time_before(p->deadline, jiffies))
2363+ dl = jiffies;
2364+ else
2365+ dl = p->deadline;
2366+
2367+ if (edt == idle ||
2368+ time_before(dl, earliest_deadline)) {
2369+ earliest_deadline = dl;
2370+ edt = p;
2371+ }
2372+ }
2373+ if (edt == idle) {
2374+ if (++idx < PRIO_LIMIT)
2375+ goto retry;
2376+ goto out;
2377+ }
2378+out_take:
2379+ take_task(rq, edt);
2380+out:
2381+ return edt;
2382+}
2383+
2384+#ifdef CONFIG_SMP
2385+static inline void set_cpuidle_map(unsigned long cpu)
2386+{
2387+ cpu_set(cpu, grq.cpu_idle_map);
2388+}
2389+
2390+static inline void clear_cpuidle_map(unsigned long cpu)
2391+{
2392+ cpu_clear(cpu, grq.cpu_idle_map);
2393+}
2394+
2395+#else /* CONFIG_SMP */
2396+static inline void set_cpuidle_map(unsigned long cpu)
2397+{
2398+}
2399+
2400+static inline void clear_cpuidle_map(unsigned long cpu)
2401+{
2402+}
2403+#endif /* !CONFIG_SMP */
2404+
2405+/*
2406+ * Print scheduling while atomic bug:
2407+ */
2408+static noinline void __schedule_bug(struct task_struct *prev)
2409+{
2410+ struct pt_regs *regs = get_irq_regs();
2411+
2412+ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
2413+ prev->comm, prev->pid, preempt_count());
2414+
2415+ debug_show_held_locks(prev);
2416+ print_modules();
2417+ if (irqs_disabled())
2418+ print_irqtrace_events(prev);
2419+
2420+ if (regs)
2421+ show_regs(regs);
2422+ else
2423+ dump_stack();
2424+}
2425+
2426+/*
2427+ * Various schedule()-time debugging checks and statistics:
2428+ */
2429+static inline void schedule_debug(struct task_struct *prev)
2430+{
2431+ /*
2432+ * Test if we are atomic. Since do_exit() needs to call into
2433+ * schedule() atomically, we ignore that path for now.
2434+ * Otherwise, whine if we are scheduling when we should not be.
2435+ */
2436+ if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
2437+ __schedule_bug(prev);
2438+
2439+ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2440+
2441+ schedstat_inc(this_rq(), sched_count);
2442+#ifdef CONFIG_SCHEDSTATS
2443+ if (unlikely(prev->lock_depth >= 0)) {
2444+ schedstat_inc(this_rq(), bkl_count);
2445+ schedstat_inc(prev, sched_info.bkl_count);
2446+ }
2447+#endif
2448+}
2449+
2450+/*
2451+ * schedule() is the main scheduler function.
2452+ */
2453+asmlinkage void __sched __schedule(void)
2454+{
2455+ struct task_struct *prev, *next, *idle;
2456+ int deactivate = 0, cpu;
2457+ long *switch_count;
2458+ struct rq *rq;
2459+ u64 now;
2460+
2461+ cpu = smp_processor_id();
2462+ rq = this_rq();
2463+ rcu_qsctr_inc(cpu);
2464+ prev = rq->curr;
2465+ switch_count = &prev->nivcsw;
2466+
2467+ release_kernel_lock(prev);
2468+need_resched_nonpreemptible:
2469+
2470+ schedule_debug(prev);
2471+ idle = rq->idle;
2472+ /*
2473+ * The idle thread is not allowed to schedule!
2474+ * Remove this check after it has been exercised a bit.
2475+ */
2476+ if (unlikely(prev == idle) && prev->state != TASK_RUNNING) {
2477+ printk(KERN_ERR "bad: scheduling from the idle thread!\n");
2478+ dump_stack();
2479+ }
2480+
2481+ grq_lock_irq();
2482+ update_rq_clock(rq);
2483+ now = rq->clock;
2484+ update_cpu_clock(rq, prev, 0);
2485+
2486+ clear_tsk_need_resched(prev);
2487+
2488+ if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
2489+ if (unlikely(signal_pending_state(prev->state, prev)))
2490+ prev->state = TASK_RUNNING;
2491+ else
2492+ deactivate = 1;
2493+ switch_count = &prev->nvcsw;
2494+ }
2495+
2496+ if (prev != idle) {
2497+ /* Update all the information stored on struct rq */
2498+ prev->rt.time_slice = rq->rq_time_slice;
2499+ prev->deadline = rq->rq_deadline;
2500+ check_deadline(prev);
2501+ return_task(prev, deactivate);
2502+ }
2503+
2504+ if (likely(queued_notrunning())) {
2505+ next = earliest_deadline_task(rq, idle);
2506+ } else {
2507+ next = idle;
2508+ schedstat_inc(rq, sched_goidle);
2509+ }
2510+
2511+ if (next == rq->idle)
2512+ set_cpuidle_map(cpu);
2513+ else
2514+ clear_cpuidle_map(cpu);
2515+
2516+ prefetch(next);
2517+ prefetch_stack(next);
2518+
2519+ prev->timestamp = prev->last_ran = now;
2520+
2521+ if (likely(prev != next)) {
2522+ rq->rq_time_slice = next->rt.time_slice;
2523+ rq->rq_deadline = next->deadline;
2524+ rq->rq_prio = next->prio;
2525+
2526+ sched_info_switch(prev, next);
2527+ grq.nr_switches++;
2528+ next->oncpu = 1;
2529+ prev->oncpu = 0;
2530+ rq->curr = next;
2531+ ++*switch_count;
2532+
2533+ context_switch(rq, prev, next); /* unlocks the rq */
2534+ /*
2535+ * the context switch might have flipped the stack from under
2536+ * us, hence refresh the local variables.
2537+ */
2538+ cpu = smp_processor_id();
2539+ rq = cpu_rq(cpu);
2540+ } else
2541+ grq_unlock_irq();
2542+
2543+ if (unlikely(reacquire_kernel_lock(current) < 0))
2544+ goto need_resched_nonpreemptible;
2545+}
2546+
2547+asmlinkage void __sched schedule(void)
2548+{
2549+need_resched:
2550+ preempt_disable();
2551+ __schedule();
2552+ preempt_enable_no_resched();
2553+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
2554+ goto need_resched;
2555+}
2556+EXPORT_SYMBOL(schedule);
2557+
2558+#ifdef CONFIG_SMP
2559+int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
2560+{
2561+ return 0;
2562+}
2563+#endif
2564+
2565+#ifdef CONFIG_PREEMPT
2566+/*
2567+ * this is the entry point to schedule() from in-kernel preemption
2568+ * off of preempt_enable. Kernel preemptions off return from interrupt
2569+ * occur there and call schedule directly.
2570+ */
2571+asmlinkage void __sched preempt_schedule(void)
2572+{
2573+ struct thread_info *ti = current_thread_info();
2574+
2575+ /*
2576+ * If there is a non-zero preempt_count or interrupts are disabled,
2577+ * we do not want to preempt the current task. Just return..
2578+ */
2579+ if (likely(ti->preempt_count || irqs_disabled()))
2580+ return;
2581+
2582+ do {
2583+ add_preempt_count(PREEMPT_ACTIVE);
2584+ schedule();
2585+ sub_preempt_count(PREEMPT_ACTIVE);
2586+
2587+ /*
2588+ * Check again in case we missed a preemption opportunity
2589+ * between schedule and now.
2590+ */
2591+ barrier();
2592+ } while (need_resched());
2593+}
2594+EXPORT_SYMBOL(preempt_schedule);
2595+
2596+/*
2597+ * this is the entry point to schedule() from kernel preemption
2598+ * off of irq context.
2599+ * Note that this is called and returns with irqs disabled. This will
2600+ * protect us against recursive calling from irq.
2601+ */
2602+asmlinkage void __sched preempt_schedule_irq(void)
2603+{
2604+ struct thread_info *ti = current_thread_info();
2605+
2606+ /* Catch callers which need to be fixed */
2607+ BUG_ON(ti->preempt_count || !irqs_disabled());
2608+
2609+ do {
2610+ add_preempt_count(PREEMPT_ACTIVE);
2611+ local_irq_enable();
2612+ schedule();
2613+ local_irq_disable();
2614+ sub_preempt_count(PREEMPT_ACTIVE);
2615+
2616+ /*
2617+ * Check again in case we missed a preemption opportunity
2618+ * between schedule and now.
2619+ */
2620+ barrier();
2621+ } while (need_resched());
2622+}
2623+
2624+#endif /* CONFIG_PREEMPT */
2625+
2626+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
2627+ void *key)
2628+{
2629+ return try_to_wake_up(curr->private, mode, sync);
2630+}
2631+EXPORT_SYMBOL(default_wake_function);
2632+
2633+/*
2634+ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
2635+ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
2636+ * number) then we wake all the non-exclusive tasks and one exclusive task.
2637+ *
2638+ * There are circumstances in which we can try to wake a task which has already
2639+ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
2640+ * zero in this (rare) case, and we handle it by continuing to scan the queue.
2641+ */
2642+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
2643+ int nr_exclusive, int sync, void *key)
2644+{
2645+ struct list_head *tmp, *next;
2646+
2647+ list_for_each_safe(tmp, next, &q->task_list) {
2648+ wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
2649+ unsigned flags = curr->flags;
2650+
2651+ if (curr->func(curr, mode, sync, key) &&
2652+ (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
2653+ break;
2654+ }
2655+}
2656+
2657+/**
2658+ * __wake_up - wake up threads blocked on a waitqueue.
2659+ * @q: the waitqueue
2660+ * @mode: which threads
2661+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
2662+ * @key: is directly passed to the wakeup function
2663+ *
2664+ * It may be assumed that this function implies a write memory barrier before
2665+ * changing the task state if and only if any tasks are woken up.
2666+ */
2667+void __wake_up(wait_queue_head_t *q, unsigned int mode,
2668+ int nr_exclusive, void *key)
2669+{
2670+ unsigned long flags;
2671+
2672+ spin_lock_irqsave(&q->lock, flags);
2673+ __wake_up_common(q, mode, nr_exclusive, 0, key);
2674+ spin_unlock_irqrestore(&q->lock, flags);
2675+}
2676+EXPORT_SYMBOL(__wake_up);
2677+
2678+/*
2679+ * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
2680+ */
2681+void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
2682+{
2683+ __wake_up_common(q, mode, 1, 0, NULL);
2684+}
2685+
2686+void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
2687+{
2688+ __wake_up_common(q, mode, 1, 0, key);
2689+}
2690+
2691+/**
2692+ * __wake_up_sync_key - wake up threads blocked on a waitqueue.
2693+ * @q: the waitqueue
2694+ * @mode: which threads
2695+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
2696+ * @key: opaque value to be passed to wakeup targets
2697+ *
2698+ * The sync wakeup differs in that the waker knows that it will schedule
2699+ * away soon, so while the target thread will be woken up, it will not
2700+ * be migrated to another CPU - ie. the two threads are 'synchronized'
2701+ * with each other. This can prevent needless bouncing between CPUs.
2702+ *
2703+ * On UP it can prevent extra preemption.
2704+ *
2705+ * It may be assumed that this function implies a write memory barrier before
2706+ * changing the task state if and only if any tasks are woken up.
2707+ */
2708+void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
2709+ int nr_exclusive, void *key)
2710+{
2711+ unsigned long flags;
2712+ int sync = 1;
2713+
2714+ if (unlikely(!q))
2715+ return;
2716+
2717+ if (unlikely(!nr_exclusive))
2718+ sync = 0;
2719+
2720+ spin_lock_irqsave(&q->lock, flags);
2721+ __wake_up_common(q, mode, nr_exclusive, sync, key);
2722+ spin_unlock_irqrestore(&q->lock, flags);
2723+}
2724+EXPORT_SYMBOL_GPL(__wake_up_sync_key);
2725+
2726+/**
2727+ * __wake_up_sync - wake up threads blocked on a waitqueue.
2728+ * @q: the waitqueue
2729+ * @mode: which threads
2730+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
2731+ *
2732+ * The sync wakeup differs in that the waker knows that it will schedule
2733+ * away soon, so while the target thread will be woken up, it will not
2734+ * be migrated to another CPU - ie. the two threads are 'synchronized'
2735+ * with each other. This can prevent needless bouncing between CPUs.
2736+ *
2737+ * On UP it can prevent extra preemption.
2738+ */
2739+void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
2740+{
2741+ unsigned long flags;
2742+ int sync = 1;
2743+
2744+ if (unlikely(!q))
2745+ return;
2746+
2747+ if (unlikely(!nr_exclusive))
2748+ sync = 0;
2749+
2750+ spin_lock_irqsave(&q->lock, flags);
2751+ __wake_up_common(q, mode, nr_exclusive, sync, NULL);
2752+ spin_unlock_irqrestore(&q->lock, flags);
2753+}
2754+EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
2755+
2756+/**
2757+ * complete: - signals a single thread waiting on this completion
2758+ * @x: holds the state of this particular completion
2759+ *
2760+ * This will wake up a single thread waiting on this completion. Threads will be
2761+ * awakened in the same order in which they were queued.
2762+ *
2763+ * See also complete_all(), wait_for_completion() and related routines.
2764+ *
2765+ * It may be assumed that this function implies a write memory barrier before
2766+ * changing the task state if and only if any tasks are woken up.
2767+ */
2768+void complete(struct completion *x)
2769+{
2770+ unsigned long flags;
2771+
2772+ spin_lock_irqsave(&x->wait.lock, flags);
2773+ x->done++;
2774+ __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
2775+ spin_unlock_irqrestore(&x->wait.lock, flags);
2776+}
2777+EXPORT_SYMBOL(complete);
2778+
2779+/**
2780+ * complete_all: - signals all threads waiting on this completion
2781+ * @x: holds the state of this particular completion
2782+ *
2783+ * This will wake up all threads waiting on this particular completion event.
2784+ *
2785+ * It may be assumed that this function implies a write memory barrier before
2786+ * changing the task state if and only if any tasks are woken up.
2787+ */
2788+void complete_all(struct completion *x)
2789+{
2790+ unsigned long flags;
2791+
2792+ spin_lock_irqsave(&x->wait.lock, flags);
2793+ x->done += UINT_MAX/2;
2794+ __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
2795+ spin_unlock_irqrestore(&x->wait.lock, flags);
2796+}
2797+EXPORT_SYMBOL(complete_all);
2798+
2799+static inline long __sched
2800+do_wait_for_common(struct completion *x, long timeout, int state)
2801+{
2802+ if (!x->done) {
2803+ DECLARE_WAITQUEUE(wait, current);
2804+
2805+ wait.flags |= WQ_FLAG_EXCLUSIVE;
2806+ __add_wait_queue_tail(&x->wait, &wait);
2807+ do {
2808+ if (signal_pending_state(state, current)) {
2809+ timeout = -ERESTARTSYS;
2810+ break;
2811+ }
2812+ __set_current_state(state);
2813+ spin_unlock_irq(&x->wait.lock);
2814+ timeout = schedule_timeout(timeout);
2815+ spin_lock_irq(&x->wait.lock);
2816+ } while (!x->done && timeout);
2817+ __remove_wait_queue(&x->wait, &wait);
2818+ if (!x->done)
2819+ return timeout;
2820+ }
2821+ x->done--;
2822+ return timeout ?: 1;
2823+}
2824+
2825+static long __sched
2826+wait_for_common(struct completion *x, long timeout, int state)
2827+{
2828+ might_sleep();
2829+
2830+ spin_lock_irq(&x->wait.lock);
2831+ timeout = do_wait_for_common(x, timeout, state);
2832+ spin_unlock_irq(&x->wait.lock);
2833+ return timeout;
2834+}
2835+
2836+/**
2837+ * wait_for_completion: - waits for completion of a task
2838+ * @x: holds the state of this particular completion
2839+ *
2840+ * This waits to be signaled for completion of a specific task. It is NOT
2841+ * interruptible and there is no timeout.
2842+ *
2843+ * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
2844+ * and interrupt capability. Also see complete().
2845+ */
2846+void __sched wait_for_completion(struct completion *x)
2847+{
2848+ wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
2849+}
2850+EXPORT_SYMBOL(wait_for_completion);
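The completion helpers above are the standard kernel completion API; a minimal hypothetical usage sketch, not taken from this patch:

/* Hypothetical caller of the completion API shown above. */
#include <linux/completion.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(work_done);

static int worker_fn(void *data)
{
	/* ... perform the work ... */
	complete(&work_done);		/* wakes exactly one waiter */
	return 0;
}

static void start_and_wait(void)
{
	kthread_run(worker_fn, NULL, "worker");
	wait_for_completion(&work_done);	/* sleeps TASK_UNINTERRUPTIBLE until complete() */
}

complete() takes x->wait.lock, bumps x->done and wakes one exclusive waiter, exactly as the implementation above shows.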
2851+
2852+/**
2853+ * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
2854+ * @x: holds the state of this particular completion
2855+ * @timeout: timeout value in jiffies
2856+ *
2857+ * This waits for either a completion of a specific task to be signaled or for a
2858+ * specified timeout to expire. The timeout is in jiffies. It is not
2859+ * interruptible.
2860+ */
2861+unsigned long __sched
2862+wait_for_completion_timeout(struct completion *x, unsigned long timeout)
2863+{
2864+ return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
2865+}
2866+EXPORT_SYMBOL(wait_for_completion_timeout);
2867+
2868+/**
2869+ * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
2870+ * @x: holds the state of this particular completion
2871+ *
2872+ * This waits for completion of a specific task to be signaled. It is
2873+ * interruptible.
2874+ */
2875+int __sched wait_for_completion_interruptible(struct completion *x)
2876+{
2877+ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
2878+ if (t == -ERESTARTSYS)
2879+ return t;
2880+ return 0;
2881+}
2882+EXPORT_SYMBOL(wait_for_completion_interruptible);
2883+
2884+/**
2885+ * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
2886+ * @x: holds the state of this particular completion
2887+ * @timeout: timeout value in jiffies
2888+ *
2889+ * This waits for either a completion of a specific task to be signaled or for a
2890+ * specified timeout to expire. It is interruptible. The timeout is in jiffies.
2891+ */
2892+unsigned long __sched
2893+wait_for_completion_interruptible_timeout(struct completion *x,
2894+ unsigned long timeout)
2895+{
2896+ return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
2897+}
2898+EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
2899+
2900+/**
2901+ * wait_for_completion_killable: - waits for completion of a task (killable)
2902+ * @x: holds the state of this particular completion
2903+ *
2904+ * This waits to be signaled for completion of a specific task. It can be
2905+ * interrupted by a kill signal.
2906+ */
2907+int __sched wait_for_completion_killable(struct completion *x)
2908+{
2909+ long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
2910+ if (t == -ERESTARTSYS)
2911+ return t;
2912+ return 0;
2913+}
2914+EXPORT_SYMBOL(wait_for_completion_killable);
2915+
2916+/**
2917+ * try_wait_for_completion - try to decrement a completion without blocking
2918+ * @x: completion structure
2919+ *
2920+ * Returns: 0 if a decrement cannot be done without blocking
2921+ * 1 if a decrement succeeded.
2922+ *
2923+ * If a completion is being used as a counting completion,
2924+ * attempt to decrement the counter without blocking. This
2925+ * enables us to avoid waiting if the resource the completion
2926+ * is protecting is not available.
2927+ */
2928+bool try_wait_for_completion(struct completion *x)
2929+{
2930+ int ret = 1;
2931+
2932+ spin_lock_irq(&x->wait.lock);
2933+ if (!x->done)
2934+ ret = 0;
2935+ else
2936+ x->done--;
2937+ spin_unlock_irq(&x->wait.lock);
2938+ return ret;
2939+}
2940+EXPORT_SYMBOL(try_wait_for_completion);
2941+
2942+/**
2943+ * completion_done - Test to see if a completion has any waiters
2944+ * @x: completion structure
2945+ *
2946+ * Returns: 0 if there are waiters (wait_for_completion() in progress)
2947+ * 1 if there are no waiters.
2948+ *
2949+ */
2950+bool completion_done(struct completion *x)
2951+{
2952+ int ret = 1;
2953+
2954+ spin_lock_irq(&x->wait.lock);
2955+ if (!x->done)
2956+ ret = 0;
2957+ spin_unlock_irq(&x->wait.lock);
2958+ return ret;
2959+}
2960+EXPORT_SYMBOL(completion_done);
2961+
2962+static long __sched
2963+sleep_on_common(wait_queue_head_t *q, int state, long timeout)
2964+{
2965+ unsigned long flags;
2966+ wait_queue_t wait;
2967+
2968+ init_waitqueue_entry(&wait, current);
2969+
2970+ __set_current_state(state);
2971+
2972+ spin_lock_irqsave(&q->lock, flags);
2973+ __add_wait_queue(q, &wait);
2974+ spin_unlock(&q->lock);
2975+ timeout = schedule_timeout(timeout);
2976+ spin_lock_irq(&q->lock);
2977+ __remove_wait_queue(q, &wait);
2978+ spin_unlock_irqrestore(&q->lock, flags);
2979+
2980+ return timeout;
2981+}
2982+
2983+void __sched interruptible_sleep_on(wait_queue_head_t *q)
2984+{
2985+ sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2986+}
2987+EXPORT_SYMBOL(interruptible_sleep_on);
2988+
2989+long __sched
2990+interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
2991+{
2992+ return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
2993+}
2994+EXPORT_SYMBOL(interruptible_sleep_on_timeout);
2995+
2996+void __sched sleep_on(wait_queue_head_t *q)
2997+{
2998+ sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2999+}
3000+EXPORT_SYMBOL(sleep_on);
3001+
3002+long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3003+{
3004+ return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
3005+}
3006+EXPORT_SYMBOL(sleep_on_timeout);
3007+
3008+#ifdef CONFIG_RT_MUTEXES
3009+
3010+/*
3011+ * rt_mutex_setprio - set the current priority of a task
3012+ * @p: task
3013+ * @prio: prio value (kernel-internal form)
3014+ *
3015+ * This function changes the 'effective' priority of a task. It does
3016+ * not touch ->normal_prio like __setscheduler().
3017+ *
3018+ * Used by the rt_mutex code to implement priority inheritance logic.
3019+ */
3020+void rt_mutex_setprio(struct task_struct *p, int prio)
3021+{
3022+ unsigned long flags;
3023+ int queued, oldprio;
3024+ struct rq *rq;
3025+
3026+ BUG_ON(prio < 0 || prio > MAX_PRIO);
3027+
3028+ rq = time_task_grq_lock(p, &flags);
3029+
3030+ oldprio = p->prio;
3031+ queued = task_queued_only(p);
3032+ if (queued)
3033+ dequeue_task(p);
3034+ p->prio = prio;
3035+ if (task_running(p) && prio > oldprio)
3036+ resched_task(p);
3037+ if (queued) {
3038+ enqueue_task(p);
3039+ try_preempt(p);
3040+ }
3041+
3042+ task_grq_unlock(&flags);
3043+}
3044+
3045+#endif
3046+
3047+/*
3048+ * Adjust the deadline for when the priority is to change, before it's
3049+ * changed.
3050+ */
3051+static void adjust_deadline(struct task_struct *p, int new_prio)
3052+{
3053+ p->deadline += (prio_ratios[USER_PRIO(new_prio)] - pratio(p)) *
3054+ rr_interval * HZ / 1000 / 100;
3055+}
3056+
3057+void set_user_nice(struct task_struct *p, long nice)
3058+{
3059+ int queued, new_static;
3060+ unsigned long flags;
3061+ struct rq *rq;
3062+
3063+ if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3064+ return;
3065+ new_static = NICE_TO_PRIO(nice);
3066+ /*
3067+ * We have to be careful, if called from sys_setpriority(),
3068+ * the task might be in the middle of scheduling on another CPU.
3069+ */
3070+ rq = time_task_grq_lock(p, &flags);
3071+ /*
3072+ * The RT priorities are set via sched_setscheduler(), but we still
3073+ * allow the 'normal' nice value to be set - but as expected
3074+ * it won't have any effect on scheduling until the task is
3075+ * no longer SCHED_NORMAL/SCHED_BATCH:
3076+ */
3077+ if (has_rt_policy(p)) {
3078+ p->static_prio = new_static;
3079+ goto out_unlock;
3080+ }
3081+ queued = task_queued_only(p);
3082+ /*
3083+ * If p is actually running, we don't need to do anything when
3084+ * changing the priority because the grq is unaffected.
3085+ */
3086+ if (queued)
3087+ dequeue_task(p);
3088+
3089+ adjust_deadline(p, new_static);
3090+ p->static_prio = new_static;
3091+ p->prio = effective_prio(p);
3092+
3093+ if (queued) {
3094+ enqueue_task(p);
3095+ try_preempt(p);
3096+ }
3097+
3098+ /* Just resched the task, schedule() will know what to do. */
3099+ if (task_running(p))
3100+ resched_task(p);
3101+out_unlock:
3102+ task_grq_unlock(&flags);
3103+}
3104+EXPORT_SYMBOL(set_user_nice);
3105+
3106+/*
3107+ * can_nice - check if a task can reduce its nice value
3108+ * @p: task
3109+ * @nice: nice value
3110+ */
3111+int can_nice(const struct task_struct *p, const int nice)
3112+{
3113+ /* convert nice value [19,-20] to rlimit style value [1,40] */
3114+ int nice_rlim = 20 - nice;
3115+
3116+ return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
3117+ capable(CAP_SYS_NICE));
3118+}
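For example, using the standard RLIMIT_NICE convention the function relies on: a request to drop to nice -10 converts to a rlimit-style value of 20 - (-10) = 30, so the call is permitted only if RLIMIT_NICE is at least 30 or the caller has CAP_SYS_NICE.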
3119+
3120+#ifdef __ARCH_WANT_SYS_NICE
3121+
3122+/*
3123+ * sys_nice - change the priority of the current process.
3124+ * @increment: priority increment
3125+ *
3126+ * sys_setpriority is a more generic, but much slower function that
3127+ * does similar things.
3128+ */
3129+SYSCALL_DEFINE1(nice, int, increment)
3130+{
3131+ long nice, retval;
3132+
3133+ /*
3134+ * Setpriority might change our priority at the same moment.
3135+ * We don't have to worry. Conceptually one call occurs first
3136+ * and we have a single winner.
3137+ */
3138+ if (increment < -40)
3139+ increment = -40;
3140+ if (increment > 40)
3141+ increment = 40;
3142+
3143+ nice = TASK_NICE(current) + increment;
3144+ if (nice < -20)
3145+ nice = -20;
3146+ if (nice > 19)
3147+ nice = 19;
3148+
3149+ if (increment < 0 && !can_nice(current, nice))
3150+ return -EPERM;
3151+
3152+ retval = security_task_setnice(current, nice);
3153+ if (retval)
3154+ return retval;
3155+
3156+ set_user_nice(current, nice);
3157+ return 0;
3158+}
3159+
3160+#endif
3161+
3162+/**
3163+ * task_prio - return the priority value of a given task.
3164+ * @p: the task in question.
3165+ *
3166+ * This is the priority value as seen by users in /proc.
3167+ * RT tasks are offset by -100. Normal tasks are centered
3168+ * around 1, value goes from 0 (SCHED_ISO) up to 82 (nice +19
3169+ * SCHED_IDLE).
3170+ */
3171+int task_prio(const struct task_struct *p)
3172+{
3173+ int delta, prio = p->prio - MAX_RT_PRIO;
3174+
3175+ /* rt tasks and iso tasks */
3176+ if (prio <= 0)
3177+ goto out;
3178+
3179+ delta = (p->deadline - jiffies) * 40 / longest_deadline();
3180+ if (delta > 0 && delta <= 80)
3181+ prio += delta;
3182+out:
3183+ return prio;
3184+}
3185+
3186+/**
3187+ * task_nice - return the nice value of a given task.
3188+ * @p: the task in question.
3189+ */
3190+int task_nice(const struct task_struct *p)
3191+{
3192+ return TASK_NICE(p);
3193+}
3194+EXPORT_SYMBOL_GPL(task_nice);
3195+
3196+/**
3197+ * idle_cpu - is a given cpu idle currently?
3198+ * @cpu: the processor in question.
3199+ */
3200+int idle_cpu(int cpu)
3201+{
3202+ return cpu_curr(cpu) == cpu_rq(cpu)->idle;
3203+}
3204+
3205+/**
3206+ * idle_task - return the idle task for a given cpu.
3207+ * @cpu: the processor in question.
3208+ */
3209+struct task_struct *idle_task(int cpu)
3210+{
3211+ return cpu_rq(cpu)->idle;
3212+}
3213+
3214+/**
3215+ * find_process_by_pid - find a process with a matching PID value.
3216+ * @pid: the pid in question.
3217+ */
3218+static inline struct task_struct *find_process_by_pid(pid_t pid)
3219+{
3220+ return pid ? find_task_by_vpid(pid) : current;
3221+}
3222+
3223+/* Actually do priority change: must hold grq lock. */
3224+static void __setscheduler(struct task_struct *p, int policy, int prio)
3225+{
3226+ BUG_ON(task_queued_only(p));
3227+
3228+ p->policy = policy;
3229+ p->rt_priority = prio;
3230+ p->normal_prio = normal_prio(p);
3231+ /* we are holding p->pi_lock already */
3232+ p->prio = rt_mutex_getprio(p);
3233+ /*
3234+ * Reschedule if running. schedule() will know if it can continue
3235+ * running or not.
3236+ */
3237+ if (task_running(p))
3238+ resched_task(p);
3239+}
3240+
3241+/*
3242+ * check the target process has a UID that matches the current process's
3243+ */
3244+static bool check_same_owner(struct task_struct *p)
3245+{
3246+ const struct cred *cred = current_cred(), *pcred;
3247+ bool match;
3248+
3249+ rcu_read_lock();
3250+ pcred = __task_cred(p);
3251+ match = (cred->euid == pcred->euid ||
3252+ cred->euid == pcred->uid);
3253+ rcu_read_unlock();
3254+ return match;
3255+}
3256+
3257+static int __sched_setscheduler(struct task_struct *p, int policy,
3258+ struct sched_param *param, bool user)
3259+{
3260+ struct sched_param zero_param = { .sched_priority = 0 };
3261+ int queued, retval, oldprio, oldpolicy = -1;
3262+ unsigned long flags, rlim_rtprio = 0;
3263+ struct rq *rq;
3264+
3265+ /* may grab non-irq protected spin_locks */
3266+ BUG_ON(in_interrupt());
3267+
3268+ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
3269+ unsigned long lflags;
3270+
3271+ if (!lock_task_sighand(p, &lflags))
3272+ return -ESRCH;
3273+ rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
3274+ unlock_task_sighand(p, &lflags);
3275+ if (rlim_rtprio)
3276+ goto recheck;
3277+ /*
3278+ * If the caller requested an RT policy without having the
3279+ * necessary rights, we downgrade the policy to SCHED_ISO.
3280+ * We also set the parameter to zero to pass the checks.
3281+ */
3282+ policy = SCHED_ISO;
3283+ param = &zero_param;
3284+ }
3285+recheck:
3286+ /* double check policy once rq lock held */
3287+ if (policy < 0)
3288+ policy = oldpolicy = p->policy;
3289+ else if (!SCHED_RANGE(policy))
3290+ return -EINVAL;
3291+ /*
3292+ * Valid priorities for SCHED_FIFO and SCHED_RR are
3293+ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
3294+ * SCHED_BATCH is 0.
3295+ */
3296+ if (param->sched_priority < 0 ||
3297+ (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
3298+ (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
3299+ return -EINVAL;
3300+ if (is_rt_policy(policy) != (param->sched_priority != 0))
3301+ return -EINVAL;
3302+
3303+ /*
3304+ * Allow unprivileged RT tasks to decrease priority:
3305+ */
3306+ if (user && !capable(CAP_SYS_NICE)) {
3307+ if (is_rt_policy(policy)) {
3308+ /* can't set/change the rt policy */
3309+ if (policy != p->policy && !rlim_rtprio)
3310+ return -EPERM;
3311+
3312+ /* can't increase priority */
3313+ if (param->sched_priority > p->rt_priority &&
3314+ param->sched_priority > rlim_rtprio)
3315+ return -EPERM;
3316+ } else {
3317+ switch (p->policy) {
3318+ /*
3319+ * Can only downgrade policies but not back to
3320+ * SCHED_NORMAL
3321+ */
3322+ case SCHED_ISO:
3323+ if (policy == SCHED_ISO)
3324+ goto out;
3325+ if (policy == SCHED_NORMAL)
3326+ return -EPERM;
3327+ break;
3328+ case SCHED_BATCH:
3329+ if (policy == SCHED_BATCH)
3330+ goto out;
3331+ if (policy != SCHED_IDLE)
3332+ return -EPERM;
3333+ break;
3334+ case SCHED_IDLE:
3335+ if (policy == SCHED_IDLE)
3336+ goto out;
3337+ return -EPERM;
3338+ default:
3339+ break;
3340+ }
3341+ }
3342+
3343+ /* can't change other user's priorities */
3344+ if (!check_same_owner(p))
3345+ return -EPERM;
3346+ }
3347+
3348+ retval = security_task_setscheduler(p, policy, param);
3349+ if (retval)
3350+ return retval;
3351+ /*
3352+ * make sure no PI-waiters arrive (or leave) while we are
3353+ * changing the priority of the task:
3354+ */
3355+ spin_lock_irqsave(&p->pi_lock, flags);
3356+ /*
3357+ * To be able to change p->policy safely, the appropriate
3358+ * runqueue lock must be held.
3359+ */
3360+ rq = __task_grq_lock(p);
3361+ /* recheck policy now with rq lock held */
3362+ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3363+ __task_grq_unlock();
3364+ spin_unlock_irqrestore(&p->pi_lock, flags);
3365+ policy = oldpolicy = -1;
3366+ goto recheck;
3367+ }
3368+ update_rq_clock(rq);
3369+ queued = task_queued_only(p);
3370+ if (queued)
3371+ dequeue_task(p);
3372+ oldprio = p->prio;
3373+ __setscheduler(p, policy, param->sched_priority);
3374+ if (queued) {
3375+ enqueue_task(p);
3376+ try_preempt(p);
3377+ }
3378+ __task_grq_unlock();
3379+ spin_unlock_irqrestore(&p->pi_lock, flags);
3380+
3381+ rt_mutex_adjust_pi(p);
3382+out:
3383+ return 0;
3384+}
3385+
3386+/**
3387+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
3388+ * @p: the task in question.
3389+ * @policy: new policy.
3390+ * @param: structure containing the new RT priority.
3391+ *
3392+ * NOTE that the task may be already dead.
3393+ */
3394+int sched_setscheduler(struct task_struct *p, int policy,
3395+ struct sched_param *param)
3396+{
3397+ return __sched_setscheduler(p, policy, param, true);
3398+}
3399+
3400+EXPORT_SYMBOL_GPL(sched_setscheduler);
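A hypothetical in-kernel caller of sched_setscheduler(), sketched only to show the calling convention (the helper name and priority value here are assumptions, not from the patch):

/* Hypothetical helper; not part of the patch. */
#include <linux/kernel.h>
#include <linux/sched.h>

static void make_thread_fifo(struct task_struct *p)
{
	struct sched_param param = { .sched_priority = 50 };

	/* Returns 0 on success or a negative errno. */
	if (sched_setscheduler(p, SCHED_FIFO, &param))
		printk(KERN_WARNING "could not switch %s to SCHED_FIFO\n", p->comm);
}

Note that, per __sched_setscheduler() above, an unprivileged task asking for an RT policy through the syscall path is quietly downgraded to SCHED_ISO rather than rejected.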
3401+
3402+/**
3403+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
3404+ * @p: the task in question.
3405+ * @policy: new policy.
3406+ * @param: structure containing the new RT priority.
3407+ *
3408+ * Just like sched_setscheduler, only don't bother checking if the
3409+ * current context has permission. For example, this is needed in
3410+ * stop_machine(): we create temporary high priority worker threads,
3411+ * but our caller might not have that capability.
3412+ */
3413+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
3414+ struct sched_param *param)
3415+{
3416+ return __sched_setscheduler(p, policy, param, false);
3417+}
3418+
3419+static int
3420+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3421+{
3422+ struct sched_param lparam;
3423+ struct task_struct *p;
3424+ int retval;
3425+
3426+ if (!param || pid < 0)
3427+ return -EINVAL;
3428+ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
3429+ return -EFAULT;
3430+
3431+ rcu_read_lock();
3432+ retval = -ESRCH;
3433+ p = find_process_by_pid(pid);
3434+ if (p != NULL)
3435+ retval = sched_setscheduler(p, policy, &lparam);
3436+ rcu_read_unlock();
3437+
3438+ return retval;
3439+}
3440+
3441+/**
3442+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3443+ * @pid: the pid in question.
3444+ * @policy: new policy.
3445+ * @param: structure containing the new RT priority.
3446+ */
3447+asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
3448+ struct sched_param __user *param)
3449+{
3450+ /* negative values for policy are not valid */
3451+ if (policy < 0)
3452+ return -EINVAL;
3453+
3454+ return do_sched_setscheduler(pid, policy, param);
3455+}
3456+
3457+/**
3458+ * sys_sched_setparam - set/change the RT priority of a thread
3459+ * @pid: the pid in question.
3460+ * @param: structure containing the new RT priority.
3461+ */
3462+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3463+{
3464+ return do_sched_setscheduler(pid, -1, param);
3465+}
3466+
3467+/**
3468+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3469+ * @pid: the pid in question.
3470+ */
3471+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3472+{
3473+ struct task_struct *p;
3474+ int retval = -EINVAL;
3475+
3476+ if (pid < 0)
3477+ goto out_nounlock;
3478+
3479+ retval = -ESRCH;
3480+ read_lock(&tasklist_lock);
3481+ p = find_process_by_pid(pid);
3482+ if (p) {
3483+ retval = security_task_getscheduler(p);
3484+ if (!retval)
3485+ retval = p->policy;
3486+ }
3487+ read_unlock(&tasklist_lock);
3488+
3489+out_nounlock:
3490+ return retval;
3491+}
3492+
3493+/**
3494+ * sys_sched_getparam - get the RT priority of a thread
3495+ * @pid: the pid in question.
3496+ * @param: structure containing the RT priority.
3497+ */
3498+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3499+{
3500+ struct sched_param lp;
3501+ struct task_struct *p;
3502+ int retval = -EINVAL;
3503+
3504+ if (!param || pid < 0)
3505+ goto out_nounlock;
3506+
3507+ read_lock(&tasklist_lock);
3508+ p = find_process_by_pid(pid);
3509+ retval = -ESRCH;
3510+ if (!p)
3511+ goto out_unlock;
3512+
3513+ retval = security_task_getscheduler(p);
3514+ if (retval)
3515+ goto out_unlock;
3516+
3517+ lp.sched_priority = p->rt_priority;
3518+ read_unlock(&tasklist_lock);
3519+
3520+ /*
3521+ * This one might sleep, we cannot do it with a spinlock held ...
3522+ */
3523+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3524+
3525+out_nounlock:
3526+ return retval;
3527+
3528+out_unlock:
3529+ read_unlock(&tasklist_lock);
3530+ return retval;
3531+}
3532+
3533+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
3534+{
3535+ cpumask_var_t cpus_allowed, new_mask;
3536+ struct task_struct *p;
3537+ int retval;
3538+
3539+ get_online_cpus();
3540+ read_lock(&tasklist_lock);
3541+
3542+ p = find_process_by_pid(pid);
3543+ if (!p) {
3544+ read_unlock(&tasklist_lock);
3545+ put_online_cpus();
3546+ return -ESRCH;
3547+ }
3548+
3549+ /*
3550+ * It is not safe to call set_cpus_allowed with the
3551+ * tasklist_lock held. We will bump the task_struct's
3552+ * usage count and then drop tasklist_lock.
3553+ */
3554+ get_task_struct(p);
3555+ read_unlock(&tasklist_lock);
3556+
3557+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
3558+ retval = -ENOMEM;
3559+ goto out_put_task;
3560+ }
3561+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
3562+ retval = -ENOMEM;
3563+ goto out_free_cpus_allowed;
3564+ }
3565+ retval = -EPERM;
3566+ if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
3567+ goto out_unlock;
3568+
3569+ retval = security_task_setscheduler(p, 0, NULL);
3570+ if (retval)
3571+ goto out_unlock;
3572+
3573+ cpuset_cpus_allowed(p, cpus_allowed);
3574+ cpumask_and(new_mask, in_mask, cpus_allowed);
3575+again:
3576+ retval = set_cpus_allowed_ptr(p, new_mask);
3577+
3578+ if (!retval) {
3579+ cpuset_cpus_allowed(p, cpus_allowed);
3580+ if (!cpumask_subset(new_mask, cpus_allowed)) {
3581+ /*
3582+ * We must have raced with a concurrent cpuset
3583+ * update. Just reset the cpus_allowed to the
3584+ * cpuset's cpus_allowed
3585+ */
3586+ cpumask_copy(new_mask, cpus_allowed);
3587+ goto again;
3588+ }
3589+ }
3590+out_unlock:
3591+ free_cpumask_var(new_mask);
3592+out_free_cpus_allowed:
3593+ free_cpumask_var(cpus_allowed);
3594+out_put_task:
3595+ put_task_struct(p);
3596+ put_online_cpus();
3597+ return retval;
3598+}
3599+
3600+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3601+ cpumask_t *new_mask)
3602+{
3603+ if (len < sizeof(cpumask_t)) {
3604+ memset(new_mask, 0, sizeof(cpumask_t));
3605+ } else if (len > sizeof(cpumask_t)) {
3606+ len = sizeof(cpumask_t);
3607+ }
3608+ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
3609+}
3610+
3611+
3612+/**
3613+ * sys_sched_setaffinity - set the cpu affinity of a process
3614+ * @pid: pid of the process
3615+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3616+ * @user_mask_ptr: user-space pointer to the new cpu mask
3617+ */
3618+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
3619+ unsigned long __user *, user_mask_ptr)
3620+{
3621+ cpumask_var_t new_mask;
3622+ int retval;
3623+
3624+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
3625+ return -ENOMEM;
3626+
3627+ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
3628+ if (retval == 0)
3629+ retval = sched_setaffinity(pid, new_mask);
3630+ free_cpumask_var(new_mask);
3631+ return retval;
3632+}
3633+
3634+long sched_getaffinity(pid_t pid, cpumask_t *mask)
3635+{
3636+ struct task_struct *p;
3637+ int retval;
3638+
3639+ mutex_lock(&sched_hotcpu_mutex);
3640+ read_lock(&tasklist_lock);
3641+
3642+ retval = -ESRCH;
3643+ p = find_process_by_pid(pid);
3644+ if (!p)
3645+ goto out_unlock;
3646+
3647+ retval = security_task_getscheduler(p);
3648+ if (retval)
3649+ goto out_unlock;
3650+
3651+ cpus_and(*mask, p->cpus_allowed, cpu_online_map);
3652+
3653+out_unlock:
3654+ read_unlock(&tasklist_lock);
3655+ mutex_unlock(&sched_hotcpu_mutex);
3656+ if (retval)
3657+ return retval;
3658+
3659+ return 0;
3660+}
3661+
3662+/**
3663+ * sys_sched_getaffinity - get the cpu affinity of a process
3664+ * @pid: pid of the process
3665+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3666+ * @user_mask_ptr: user-space pointer to hold the current cpu mask
3667+ */
3668+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3669+ unsigned long __user *, user_mask_ptr)
3670+{
3671+ int ret;
3672+ cpumask_var_t mask;
3673+
3674+ if (len < cpumask_size())
3675+ return -EINVAL;
3676+
3677+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
3678+ return -ENOMEM;
3679+
3680+ ret = sched_getaffinity(pid, mask);
3681+ if (ret == 0) {
3682+ if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
3683+ ret = -EFAULT;
3684+ else
3685+ ret = cpumask_size();
3686+ }
3687+ free_cpumask_var(mask);
3688+
3689+ return ret;
3690+}
3691+
3692+/**
3693+ * sys_sched_yield - yield the current processor to other threads.
3694+ *
3695+ * This function yields the current CPU to other tasks. It does this by
3696+ * refilling the timeslice, resetting the deadline and scheduling away.
3697+ */
3698+SYSCALL_DEFINE0(sched_yield)
3699+{
3700+ struct task_struct *p;
3701+
3702+ grq_lock_irq();
3703+ p = current;
3704+ schedstat_inc(this_rq(), yld_count);
3705+ update_rq_clock(task_rq(p));
3706+ time_slice_expired(p);
3707+ requeue_task(p);
3708+
3709+ /*
3710+ * Since we are going to call schedule() anyway, there's
3711+ * no need to preempt or enable interrupts:
3712+ */
3713+ __release(grq.lock);
3714+ spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
3715+ _raw_spin_unlock(&grq.lock);
3716+ preempt_enable_no_resched();
3717+
3718+ schedule();
3719+
3720+ return 0;
3721+}
3722+
3723+static inline int should_resched(void)
3724+{
3725+ return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
3726+}
3727+
3728+static void __cond_resched(void)
3729+{
3730+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
3731+ __might_sleep(__FILE__, __LINE__);
3732+#endif
3733+ /*
3734+ * The BKS might be reacquired before we have dropped
3735+ * PREEMPT_ACTIVE, which could trigger a second
3736+ * cond_resched() call.
3737+ */
3738+ do {
3739+ add_preempt_count(PREEMPT_ACTIVE);
3740+ schedule();
3741+ sub_preempt_count(PREEMPT_ACTIVE);
3742+ } while (need_resched());
3743+}
3744+
3745+int __sched _cond_resched(void)
3746+{
3747+ if (should_resched()) {
3748+ __cond_resched();
3749+ return 1;
3750+ }
3751+ return 0;
3752+}
3753+EXPORT_SYMBOL(_cond_resched);
3754+
3755+/*
3756+ * cond_resched_lock() - if a reschedule is pending, drop the given lock,
3757+ * call schedule, and on return reacquire the lock.
3758+ *
3759+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
3760+ * operations here to prevent schedule() from being called twice (once via
3761+ * spin_unlock(), once by hand).
3762+ */
3763+int cond_resched_lock(spinlock_t *lock)
3764+{
3765+ int resched = should_resched();
3766+ int ret = 0;
3767+
3768+ if (spin_needbreak(lock) || resched) {
3769+ spin_unlock(lock);
3770+ if (resched)
3771+ __cond_resched();
3772+ else
3773+ cpu_relax();
3774+ ret = 1;
3775+ spin_lock(lock);
3776+ }
3777+ return ret;
3778+}
3779+EXPORT_SYMBOL(cond_resched_lock);
3780+
3781+int __sched cond_resched_softirq(void)
3782+{
3783+ BUG_ON(!in_softirq());
3784+
3785+ if (should_resched()) {
3786+ local_bh_enable();
3787+ __cond_resched();
3788+ local_bh_disable();
3789+ return 1;
3790+ }
3791+ return 0;
3792+}
3793+EXPORT_SYMBOL(cond_resched_softirq);
3794+
3795+/**
3796+ * yield - yield the current processor to other threads.
3797+ *
3798+ * This is a shortcut for kernel-space yielding - it marks the
3799+ * thread runnable and calls sys_sched_yield().
3800+ */
3801+void __sched yield(void)
3802+{
3803+ set_current_state(TASK_RUNNING);
3804+ sys_sched_yield();
3805+}
3806+EXPORT_SYMBOL(yield);
3807+
3808+/*
3809+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
3810+ * that process accounting knows that this is a task in IO wait state.
3811+ *
3812+ * But don't do that if it is a deliberate, throttling IO wait (this task
3813+ * has set its backing_dev_info: the queue against which it should throttle)
3814+ */
3815+void __sched io_schedule(void)
3816+{
3817+ struct rq *rq = &__raw_get_cpu_var(runqueues);
3818+
3819+ delayacct_blkio_start();
3820+ atomic_inc(&rq->nr_iowait);
3821+ schedule();
3822+ atomic_dec(&rq->nr_iowait);
3823+ delayacct_blkio_end();
3824+}
3825+EXPORT_SYMBOL(io_schedule);
3826+
3827+long __sched io_schedule_timeout(long timeout)
3828+{
3829+ struct rq *rq = &__raw_get_cpu_var(runqueues);
3830+ long ret;
3831+
3832+ delayacct_blkio_start();
3833+ atomic_inc(&rq->nr_iowait);
3834+ ret = schedule_timeout(timeout);
3835+ atomic_dec(&rq->nr_iowait);
3836+ delayacct_blkio_end();
3837+ return ret;
3838+}
3839+
3840+/**
3841+ * sys_sched_get_priority_max - return maximum RT priority.
3842+ * @policy: scheduling class.
3843+ *
3844+ * this syscall returns the maximum rt_priority that can be used
3845+ * by a given scheduling class.
3846+ */
3847+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
3848+{
3849+ int ret = -EINVAL;
3850+
3851+ switch (policy) {
3852+ case SCHED_FIFO:
3853+ case SCHED_RR:
3854+ ret = MAX_USER_RT_PRIO-1;
3855+ break;
3856+ case SCHED_NORMAL:
3857+ case SCHED_BATCH:
3858+ case SCHED_ISO:
3859+ case SCHED_IDLE:
3860+ ret = 0;
3861+ break;
3862+ }
3863+ return ret;
3864+}
3865+
3866+/**
3867+ * sys_sched_get_priority_min - return minimum RT priority.
3868+ * @policy: scheduling class.
3869+ *
3870+ * this syscall returns the minimum rt_priority that can be used
3871+ * by a given scheduling class.
3872+ */
3873+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
3874+{
3875+ int ret = -EINVAL;
3876+
3877+ switch (policy) {
3878+ case SCHED_FIFO:
3879+ case SCHED_RR:
3880+ ret = 1;
3881+ break;
3882+ case SCHED_NORMAL:
3883+ case SCHED_BATCH:
3884+ case SCHED_ISO:
3885+ case SCHED_IDLE:
3886+ ret = 0;
3887+ break;
3888+ }
3889+ return ret;
3890+}
3891+
3892+/**
3893+ * sys_sched_rr_get_interval - return the default timeslice of a process.
3894+ * @pid: pid of the process.
3895+ * @interval: userspace pointer to the timeslice value.
3896+ *
3897+ * this syscall writes the default timeslice value of a given process
3898+ * into the user-space timespec buffer. A value of '0' means infinity.
3899+ */
3900+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
3901+ struct timespec __user *, interval)
3902+{
3903+ struct task_struct *p;
3904+ int retval = -EINVAL;
3905+ struct timespec t;
3906+
3907+ if (pid < 0)
3908+ goto out_nounlock;
3909+
3910+ retval = -ESRCH;
3911+ read_lock(&tasklist_lock);
3912+ p = find_process_by_pid(pid);
3913+ if (!p)
3914+ goto out_unlock;
3915+
3916+ retval = security_task_getscheduler(p);
3917+ if (retval)
3918+ goto out_unlock;
3919+
3920+ t = ns_to_timespec(p->policy == SCHED_FIFO ? 0 :
3921+ MS_TO_NS(task_timeslice(p)));
3922+ read_unlock(&tasklist_lock);
3923+ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
3924+out_nounlock:
3925+ return retval;
3926+out_unlock:
3927+ read_unlock(&tasklist_lock);
3928+ return retval;
3929+}
3930+
3931+static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
3932+
3933+void sched_show_task(struct task_struct *p)
3934+{
3935+ unsigned long free = 0;
3936+ unsigned state;
3937+
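+ /* Map the lowest set bit of p->state to an index into stat_nam; 0 means runnable. */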
3938+ state = p->state ? __ffs(p->state) + 1 : 0;
3939+ printk(KERN_INFO "%-13.13s %c", p->comm,
3940+ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
3941+#if BITS_PER_LONG == 32
3942+ if (state == TASK_RUNNING)
3943+ printk(KERN_CONT " running ");
3944+ else
3945+ printk(KERN_CONT " %08lx ", thread_saved_pc(p));
3946+#else
3947+ if (state == TASK_RUNNING)
3948+ printk(KERN_CONT " running task ");
3949+ else
3950+ printk(KERN_CONT " %016lx ", thread_saved_pc(p));
3951+#endif
3952+#ifdef CONFIG_DEBUG_STACK_USAGE
3953+ free = stack_not_used(p);
3954+#endif
3955+ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
3956+ task_pid_nr(p), task_pid_nr(p->real_parent),
3957+ (unsigned long)task_thread_info(p)->flags);
3958+
3959+ show_stack(p, NULL);
3960+}
3961+
3962+void show_state_filter(unsigned long state_filter)
3963+{
3964+ struct task_struct *g, *p;
3965+
3966+#if BITS_PER_LONG == 32
3967+ printk(KERN_INFO
3968+ " task PC stack pid father\n");
3969+#else
3970+ printk(KERN_INFO
3971+ " task PC stack pid father\n");
3972+#endif
3973+ read_lock(&tasklist_lock);
3974+ do_each_thread(g, p) {
3975+ /*
3976+ * reset the NMI-timeout, listing all files on a slow
3977+ * console might take a lot of time:
3978+ */
3979+ touch_nmi_watchdog();
3980+ if (!state_filter || (p->state & state_filter))
3981+ sched_show_task(p);
3982+ } while_each_thread(g, p);
3983+
3984+ touch_all_softlockup_watchdogs();
3985+
3986+ read_unlock(&tasklist_lock);
3987+ /*
3988+ * Only show locks if all tasks are dumped:
3989+ */
3990+ if (state_filter == -1)
3991+ debug_show_all_locks();
3992+}
3993+
3994+/**
3995+ * init_idle - set up an idle thread for a given CPU
3996+ * @idle: task in question
3997+ * @cpu: cpu the idle task belongs to
3998+ *
3999+ * NOTE: this function does not set the idle thread's NEED_RESCHED
4000+ * flag, to make booting more robust.
4001+ */
4002+void __cpuinit init_idle(struct task_struct *idle, int cpu)
4003+{
4004+ struct rq *rq = cpu_rq(cpu);
4005+ unsigned long flags;
4006+
4007+ time_grq_lock(rq, &flags);
4008+ idle->timestamp = idle->last_ran = rq->clock;
4009+ idle->state = TASK_RUNNING;
4010+ /* Setting prio to illegal value shouldn't matter when never queued */
4011+ idle->prio = rq->rq_prio = PRIO_LIMIT;
4012+ rq->rq_deadline = idle->deadline;
4013+ rq->rq_policy = idle->policy;
4014+ rq->rq_time_slice = idle->rt.time_slice;
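+ /* The idle task is pinned to its own cpu. */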
4015+ idle->cpus_allowed = cpumask_of_cpu(cpu);
4016+ set_task_cpu(idle, cpu);
4017+ rq->curr = rq->idle = idle;
4018+ idle->oncpu = 1;
4019+ set_cpuidle_map(cpu);
4020+#ifdef CONFIG_HOTPLUG_CPU
4021+ idle->unplugged_mask = CPU_MASK_NONE;
4022+#endif
4023+ grq_unlock_irqrestore(&flags);
4024+
4025+ /* Set the preempt count _outside_ the spinlocks! */
4026+#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
4027+ task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
4028+#else
4029+ task_thread_info(idle)->preempt_count = 0;
4030+#endif
4031+ ftrace_graph_init_task(idle);
4032+}
4033+
4034+/*
4035+ * In a system that switches off the HZ timer nohz_cpu_mask
4036+ * indicates which cpus entered this state. This is used
4037+ * in the rcu update to wait only for active cpus. For systems
4038+ * which do not switch off the HZ timer, nohz_cpu_mask should
4039+ * always be CPU_BITS_NONE.
4040+ */
4041+cpumask_var_t nohz_cpu_mask;
4042+
4043+#ifdef CONFIG_SMP
4044+#ifdef CONFIG_NO_HZ
4045+static struct {
4046+ atomic_t load_balancer;
4047+ cpumask_var_t cpu_mask;
4048+ cpumask_var_t ilb_grp_nohz_mask;
4049+} nohz ____cacheline_aligned = {
4050+ .load_balancer = ATOMIC_INIT(-1),
4051+};
4052+
4053+int get_nohz_load_balancer(void)
4054+{
4055+ return atomic_read(&nohz.load_balancer);
4056+}
4057+
4058+/*
4059+ * This routine will try to nominate the ilb (idle load balancing)
4060+ * owner among the cpus whose ticks are stopped. ilb owner will do the idle
4061+ * load balancing on behalf of all those cpus. If all the cpus in the system
4062+ * go into this tickless mode, then there will be no ilb owner (as there is
4063+ * no need for one) and all the cpus will sleep till the next wakeup event
4064+ * arrives...
4065+ *
4066+ * For the ilb owner, tick is not stopped. And this tick will be used
4067+ * for idle load balancing. ilb owner will still be part of
4068+ * nohz.cpu_mask.
4069+ *
4070+ * While stopping the tick, this cpu will become the ilb owner if there
4071+ * is no other owner. It will remain the owner until that cpu becomes busy
4072+ * or until all cpus in the system stop their ticks, at which point
4073+ * there is no need for an ilb owner.
4074+ *
4075+ * When the ilb owner becomes busy, it nominates another owner, during the
4076+ * next busy scheduler_tick()
4077+ */
4078+int select_nohz_load_balancer(int stop_tick)
4079+{
4080+ int cpu = smp_processor_id();
4081+
4082+ if (stop_tick) {
4083+ cpu_rq(cpu)->in_nohz_recently = 1;
4084+
4085+ if (!cpu_active(cpu)) {
4086+ if (atomic_read(&nohz.load_balancer) != cpu)
4087+ return 0;
4088+
4089+ /*
4090+ * If we are going offline and still the leader,
4091+ * give up!
4092+ */
4093+ if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4094+ BUG();
4095+
4096+ return 0;
4097+ }
4098+
4099+ cpumask_set_cpu(cpu, nohz.cpu_mask);
4100+
4101+ /* time for ilb owner also to sleep */
4102+ if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
4103+ if (atomic_read(&nohz.load_balancer) == cpu)
4104+ atomic_set(&nohz.load_balancer, -1);
4105+ return 0;
4106+ }
4107+
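+ /* No ilb owner yet, so try to claim ownership atomically. */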
4108+ if (atomic_read(&nohz.load_balancer) == -1) {
4109+ /* make me the ilb owner */
4110+ if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
4111+ return 1;
4112+ } else if (atomic_read(&nohz.load_balancer) == cpu)
4113+ return 1;
4114+ } else {
4115+ if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
4116+ return 0;
4117+
4118+ cpumask_clear_cpu(cpu, nohz.cpu_mask);
4119+
4120+ if (atomic_read(&nohz.load_balancer) == cpu)
4121+ if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4122+ BUG();
4123+ }
4124+ return 0;
4125+}
4126+
4127+/*
4128+ * When add_timer_on() enqueues a timer into the timer wheel of an
4129+ * idle CPU then this timer might expire before the next timer event
4130+ * which is scheduled to wake up that CPU. In case of a completely
4131+ * idle system the next event might even be infinite time into the
4132+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
4133+ * leaves the inner idle loop so the newly added timer is taken into
4134+ * account when the CPU goes back to idle and evaluates the timer
4135+ * wheel for the next timer event.
4136+ */
4137+void wake_up_idle_cpu(int cpu)
4138+{
4139+ struct task_struct *idle;
4140+ struct rq *rq;
4141+
4142+ if (cpu == smp_processor_id())
4143+ return;
4144+
4145+ rq = cpu_rq(cpu);
4146+ idle = rq->idle;
4147+
4148+ /*
4149+ * This is safe, as this function is called with the timer
4150+ * wheel base lock of (cpu) held. When the CPU is on the way
4151+ * to idle and has not yet set rq->curr to idle then it will
4152+ * be serialized on the timer wheel base lock and take the new
4153+ * timer into account automatically.
4154+ */
4155+ if (unlikely(rq->curr != idle))
4156+ return;
4157+
4158+ /*
4159+ * We can set TIF_RESCHED on the idle task of the other CPU
4160+ * lockless. The worst case is that the other CPU runs the
4161+ * idle task through an additional NOOP schedule()
4162+ */
4163+ set_tsk_need_resched(idle);
4164+
4165+ /* NEED_RESCHED must be visible before we test polling */
4166+ smp_mb();
4167+ if (!tsk_is_polling(idle))
4168+ smp_send_reschedule(cpu);
4169+}
4170+
4171+#endif /* CONFIG_NO_HZ */
4172+
4173+/*
4174+ * Change a given task's CPU affinity. Migrate the thread to a
4175+ * proper CPU and schedule it away if the CPU it's executing on
4176+ * is removed from the allowed bitmask.
4177+ *
4178+ * NOTE: the caller must have a valid reference to the task, the
4179+ * task must not exit() & deallocate itself prematurely. The
4180+ * call is not atomic; no spinlocks may be held.
4181+ */
4182+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4183+{
4184+ unsigned long flags;
4185+ int running = 0;
4186+ int queued = 0;
4187+ struct rq *rq;
4188+ int ret = 0;
4189+
4190+ rq = task_grq_lock(p, &flags);
4191+ if (!cpumask_intersects(new_mask, cpu_online_mask)) {
4192+ ret = -EINVAL;
4193+ goto out;
4194+ }
4195+
4196+ if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
4197+ !cpumask_equal(&p->cpus_allowed, new_mask))) {
4198+ ret = -EINVAL;
4199+ goto out;
4200+ }
4201+
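+ /* Note whether the task is queued so we can retry preemption with the new mask below. */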
4202+ queued = task_queued_only(p);
4203+
4204+ cpumask_copy(&p->cpus_allowed, new_mask);
4205+ p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
4206+
4207+ /* Can the task run on the task's current CPU? If so, we're done */
4208+ if (cpumask_test_cpu(task_cpu(p), new_mask))
4209+ goto out;
4210+
4211+ /* Reschedule the task, schedule() will know if it can keep running */
4212+ if (task_running(p))
4213+ running = 1;
4214+ else
4215+ set_task_cpu(p, cpumask_any_and(cpu_online_mask, new_mask));
4216+
4217+out:
4218+ if (queued)
4219+ try_preempt(p);
4220+ task_grq_unlock(&flags);
4221+
4222+ /* This might be a flaky way of changing cpus! */
4223+ if (running)
4224+ schedule();
4225+ return ret;
4226+}
4227+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
4228+
4229+#ifdef CONFIG_HOTPLUG_CPU
4230+/* Schedules the idle task to be the next runnable task on the current CPU.
4231+ * It does so by boosting its priority to the highest possible value.
4232+ * Used by CPU offline code.
4233+ */
4234+void sched_idle_next(void)
4235+{
4236+ int this_cpu = smp_processor_id();
4237+ struct rq *rq = cpu_rq(this_cpu);
4238+ struct task_struct *idle = rq->idle;
4239+ unsigned long flags;
4240+
4241+ /* cpu has to be offline */
4242+ BUG_ON(cpu_online(this_cpu));
4243+
4244+ /*
4245+ * Strictly not necessary since the rest of the CPUs are stopped by now
4246+ * and interrupts are disabled on the current cpu.
4247+ */
4248+ time_grq_lock(rq, &flags);
4249+
4250+ __setscheduler(idle, SCHED_FIFO, MAX_RT_PRIO - 1);
4251+
4252+ activate_idle_task(idle);
4253+ set_tsk_need_resched(rq->curr);
4254+
4255+ grq_unlock_irqrestore(&flags);
4256+}
4257+
4258+/*
4259+ * Ensures that the idle task is using init_mm right before its cpu goes
4260+ * offline.
4261+ */
4262+void idle_task_exit(void)
4263+{
4264+ struct mm_struct *mm = current->active_mm;
4265+
4266+ BUG_ON(cpu_online(smp_processor_id()));
4267+
4268+ if (mm != &init_mm)
4269+ switch_mm(mm, &init_mm, current);
4270+ mmdrop(mm);
4271+}
4272+
4273+#endif /* CONFIG_HOTPLUG_CPU */
4274+
4275+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
4276+
4277+static struct ctl_table sd_ctl_dir[] = {
4278+ {
4279+ .procname = "sched_domain",
4280+ .mode = 0555,
4281+ },
4282+ {0, },
4283+};
4284+
4285+static struct ctl_table sd_ctl_root[] = {
4286+ {
4287+ .ctl_name = CTL_KERN,
4288+ .procname = "kernel",
4289+ .mode = 0555,
4290+ .child = sd_ctl_dir,
4291+ },
4292+ {0, },
4293+};
4294+
4295+static struct ctl_table *sd_alloc_ctl_entry(int n)
4296+{
4297+ struct ctl_table *entry =
4298+ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
4299+
4300+ return entry;
4301+}
4302+
4303+static void sd_free_ctl_entry(struct ctl_table **tablep)
4304+{
4305+ struct ctl_table *entry;
4306+
4307+ /*
4308+ * In the intermediate directories, both the child directory and
4309+ * procname are dynamically allocated and could fail but the mode
4310+ * will always be set. In the lowest directory the names are
4311+ * static strings and all have proc handlers.
4312+ */
4313+ for (entry = *tablep; entry->mode; entry++) {
4314+ if (entry->child)
4315+ sd_free_ctl_entry(&entry->child);
4316+ if (entry->proc_handler == NULL)
4317+ kfree(entry->procname);
4318+ }
4319+
4320+ kfree(*tablep);
4321+ *tablep = NULL;
4322+}
4323+
4324+static void
4325+set_table_entry(struct ctl_table *entry,
4326+ const char *procname, void *data, int maxlen,
4327+ mode_t mode, proc_handler *proc_handler)
4328+{
4329+ entry->procname = procname;
4330+ entry->data = data;
4331+ entry->maxlen = maxlen;
4332+ entry->mode = mode;
4333+ entry->proc_handler = proc_handler;
4334+}
4335+
4336+static struct ctl_table *
4337+sd_alloc_ctl_domain_table(struct sched_domain *sd)
4338+{
4339+ struct ctl_table *table = sd_alloc_ctl_entry(13);
4340+
4341+ if (table == NULL)
4342+ return NULL;
4343+
4344+ set_table_entry(&table[0], "min_interval", &sd->min_interval,
4345+ sizeof(long), 0644, proc_doulongvec_minmax);
4346+ set_table_entry(&table[1], "max_interval", &sd->max_interval,
4347+ sizeof(long), 0644, proc_doulongvec_minmax);
4348+ set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
4349+ sizeof(int), 0644, proc_dointvec_minmax);
4350+ set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
4351+ sizeof(int), 0644, proc_dointvec_minmax);
4352+ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
4353+ sizeof(int), 0644, proc_dointvec_minmax);
4354+ set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
4355+ sizeof(int), 0644, proc_dointvec_minmax);
4356+ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
4357+ sizeof(int), 0644, proc_dointvec_minmax);
4358+ set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
4359+ sizeof(int), 0644, proc_dointvec_minmax);
4360+ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
4361+ sizeof(int), 0644, proc_dointvec_minmax);
4362+ set_table_entry(&table[9], "cache_nice_tries",
4363+ &sd->cache_nice_tries,
4364+ sizeof(int), 0644, proc_dointvec_minmax);
4365+ set_table_entry(&table[10], "flags", &sd->flags,
4366+ sizeof(int), 0644, proc_dointvec_minmax);
4367+ set_table_entry(&table[11], "name", sd->name,
4368+ CORENAME_MAX_SIZE, 0444, proc_dostring);
4369+ /* &table[12] is terminator */
4370+
4371+ return table;
4372+}
4373+
4374+static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
4375+{
4376+ struct ctl_table *entry, *table;
4377+ struct sched_domain *sd;
4378+ int domain_num = 0, i;
4379+ char buf[32];
4380+
4381+ for_each_domain(cpu, sd)
4382+ domain_num++;
4383+ entry = table = sd_alloc_ctl_entry(domain_num + 1);
4384+ if (table == NULL)
4385+ return NULL;
4386+
4387+ i = 0;
4388+ for_each_domain(cpu, sd) {
4389+ snprintf(buf, 32, "domain%d", i);
4390+ entry->procname = kstrdup(buf, GFP_KERNEL);
4391+ entry->mode = 0555;
4392+ entry->child = sd_alloc_ctl_domain_table(sd);
4393+ entry++;
4394+ i++;
4395+ }
4396+ return table;
4397+}
4398+
4399+static struct ctl_table_header *sd_sysctl_header;
4400+static void register_sched_domain_sysctl(void)
4401+{
4402+ int i, cpu_num = num_online_cpus();
4403+ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
4404+ char buf[32];
4405+
4406+ WARN_ON(sd_ctl_dir[0].child);
4407+ sd_ctl_dir[0].child = entry;
4408+
4409+ if (entry == NULL)
4410+ return;
4411+
4412+ for_each_online_cpu(i) {
4413+ snprintf(buf, 32, "cpu%d", i);
4414+ entry->procname = kstrdup(buf, GFP_KERNEL);
4415+ entry->mode = 0555;
4416+ entry->child = sd_alloc_ctl_cpu_table(i);
4417+ entry++;
4418+ }
4419+
4420+ WARN_ON(sd_sysctl_header);
4421+ sd_sysctl_header = register_sysctl_table(sd_ctl_root);
4422+}
4423+
4424+/* may be called multiple times per register */
4425+static void unregister_sched_domain_sysctl(void)
4426+{
4427+ if (sd_sysctl_header)
4428+ unregister_sysctl_table(sd_sysctl_header);
4429+ sd_sysctl_header = NULL;
4430+ if (sd_ctl_dir[0].child)
4431+ sd_free_ctl_entry(&sd_ctl_dir[0].child);
4432+}
4433+#else
4434+static void register_sched_domain_sysctl(void)
4435+{
4436+}
4437+static void unregister_sched_domain_sysctl(void)
4438+{
4439+}
4440+#endif
4441+
4442+static void set_rq_online(struct rq *rq)
4443+{
4444+ if (!rq->online) {
4445+ cpumask_set_cpu(rq->cpu, rq->rd->online);
4446+ rq->online = 1;
4447+ }
4448+}
4449+
4450+static void set_rq_offline(struct rq *rq)
4451+{
4452+ if (rq->online) {
4453+ cpumask_clear_cpu(rq->cpu, rq->rd->online);
4454+ rq->online = 0;
4455+ }
4456+}
4457+
4458+#ifdef CONFIG_HOTPLUG_CPU
4459+/*
4460+ * This cpu is going down, so walk over the tasklist and find tasks that can
4461+ * only run on this cpu and remove their affinity. Store their original affinity in
4462+ * unplugged_mask so it can be restored once the correct cpu is back online. No
4463+ * need to do anything special since they'll just move at the next reschedule if
4464+ * they're running.
4465+ */
4466+static void remove_cpu(unsigned long cpu)
4467+{
4468+ struct task_struct *p, *t;
4469+
4470+ read_lock(&tasklist_lock);
4471+
4472+ do_each_thread(t, p) {
4473+ cpumask_t cpus_remaining;
4474+
4475+ cpus_and(cpus_remaining, p->cpus_allowed, cpu_online_map);
4476+ cpu_clear(cpu, cpus_remaining);
4477+ if (cpus_empty(cpus_remaining)) {
4478+ p->unplugged_mask = p->cpus_allowed;
4479+ p->cpus_allowed = cpu_possible_map;
4480+ }
4481+ } while_each_thread(t, p);
4482+
4483+ read_unlock(&tasklist_lock);
4484+}
4485+
4486+/*
4487+ * This cpu is coming up, so add it back to the cpus_allowed of tasks that lost it.
4488+ */
4489+static void add_cpu(unsigned long cpu)
4490+{
4491+ struct task_struct *p, *t;
4492+
4493+ read_lock(&tasklist_lock);
4494+
4495+ do_each_thread(t, p) {
4496+ /* Have we taken all the cpus from the unplugged_mask back? */
4497+ if (cpus_empty(p->unplugged_mask))
4498+ continue;
4499+
4500+ /* Was this cpu in the unplugged_mask? */
4501+ if (cpu_isset(cpu, p->unplugged_mask)) {
4502+ cpu_set(cpu, p->cpus_allowed);
4503+ if (cpus_subset(p->unplugged_mask, p->cpus_allowed)) {
4504+ /*
4505+ * Have we set more than the unplugged_mask?
4506+ * If so, that means we have remnants set from
4507+ * the unplug/plug cycle and need to remove
4508+ * them. Then clear the unplugged_mask as we've
4509+ * set all the cpus back.
4510+ */
4511+ p->cpus_allowed = p->unplugged_mask;
4512+ cpus_clear(p->unplugged_mask);
4513+ }
4514+ }
4515+ } while_each_thread(t, p);
4516+
4517+ read_unlock(&tasklist_lock);
4518+}
4519+#else
4520+static void add_cpu(unsigned long cpu)
4521+{
4522+}
4523+#endif
4524+
4525+/*
4526+ * migration_call - callback that gets triggered on CPU hotplug events.
4527+ */
4528+static int __cpuinit
4529+migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
4530+{
4531+ int cpu = (long)hcpu;
4532+ unsigned long flags;
4533+ struct rq *rq;
4534+
4535+ switch (action) {
4536+
4537+ case CPU_UP_PREPARE:
4538+ case CPU_UP_PREPARE_FROZEN:
4539+ break;
4540+
4541+ case CPU_ONLINE:
4542+ case CPU_ONLINE_FROZEN:
4543+ /* Update our root-domain */
4544+ rq = cpu_rq(cpu);
4545+ grq_lock_irqsave(&flags);
4546+ if (rq->rd) {
4547+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
4548+
4549+ set_rq_online(rq);
4550+ }
4551+ add_cpu(cpu);
4552+ grq_unlock_irqrestore(&flags);
4553+ break;
4554+
4555+#ifdef CONFIG_HOTPLUG_CPU
4556+ case CPU_UP_CANCELED:
4557+ case CPU_UP_CANCELED_FROZEN:
4558+ break;
4559+
4560+ case CPU_DEAD:
4561+ case CPU_DEAD_FROZEN:
4562+ cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
4563+ rq = cpu_rq(cpu);
4564+ /* Idle task back to normal (off runqueue, low prio) */
4565+ grq_lock_irq();
4566+ remove_cpu(cpu);
4567+ deactivate_task(rq->idle);
4568+ rq->idle->static_prio = MAX_PRIO;
4569+ __setscheduler(rq->idle, SCHED_NORMAL, 0);
4570+ rq->idle->prio = PRIO_LIMIT;
4571+ update_rq_clock(rq);
4572+ grq_unlock_irq();
4573+ cpuset_unlock();
4574+ break;
4575+
4576+ case CPU_DYING:
4577+ case CPU_DYING_FROZEN:
4578+ rq = cpu_rq(cpu);
4579+ grq_lock_irqsave(&flags);
4580+ if (rq->rd) {
4581+ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
4582+ set_rq_offline(rq);
4583+ }
4584+ grq_unlock_irqrestore(&flags);
4585+ break;
4586+#endif
4587+ }
4588+ return NOTIFY_OK;
4589+}
4590+
4591+/*
4592+ * Register at high priority so that task migration (migrate_all_tasks)
4593+ * happens before everything else. This has to be lower priority than
4594+ * the notifier in the perf_counter subsystem, though.
4595+ */
4596+static struct notifier_block __cpuinitdata migration_notifier = {
4597+ .notifier_call = migration_call,
4598+ .priority = 10
4599+};
4600+
4601+int __init migration_init(void)
4602+{
4603+ void *cpu = (void *)(long)smp_processor_id();
4604+ int err;
4605+
4606+ /* Start one for the boot CPU: */
4607+ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
4608+ BUG_ON(err == NOTIFY_BAD);
4609+ migration_call(&migration_notifier, CPU_ONLINE, cpu);
4610+ register_cpu_notifier(&migration_notifier);
4611+
4612+ return 0;
4613+}
4614+early_initcall(migration_init);
4615+#endif
4616+
4617+/*
4618+ * sched_domains_mutex serializes calls to arch_init_sched_domains,
4619+ * detach_destroy_domains and partition_sched_domains.
4620+ */
4621+static DEFINE_MUTEX(sched_domains_mutex);
4622+
4623+#ifdef CONFIG_SMP
4624+
4625+#ifdef CONFIG_SCHED_DEBUG
4626+
4627+static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
4628+ struct cpumask *groupmask)
4629+{
4630+ struct sched_group *group = sd->groups;
4631+ char str[256];
4632+
4633+ cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
4634+ cpumask_clear(groupmask);
4635+
4636+ printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
4637+
4638+ if (!(sd->flags & SD_LOAD_BALANCE)) {
4639+ printk("does not load-balance\n");
4640+ if (sd->parent)
4641+ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
4642+ " has parent");
4643+ return -1;
4644+ }
4645+
4646+ printk(KERN_CONT "span %s level %s\n", str, sd->name);
4647+
4648+ if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
4649+ printk(KERN_ERR "ERROR: domain->span does not contain "
4650+ "CPU%d\n", cpu);
4651+ }
4652+ if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
4653+ printk(KERN_ERR "ERROR: domain->groups does not contain"
4654+ " CPU%d\n", cpu);
4655+ }
4656+
4657+ printk(KERN_DEBUG "%*s groups:", level + 1, "");
4658+ do {
4659+ if (!group) {
4660+ printk("\n");
4661+ printk(KERN_ERR "ERROR: group is NULL\n");
4662+ break;
4663+ }
4664+
4665+ if (!group->__cpu_power) {
4666+ printk(KERN_CONT "\n");
4667+ printk(KERN_ERR "ERROR: domain->cpu_power not "
4668+ "set\n");
4669+ break;
4670+ }
4671+
4672+ if (!cpumask_weight(sched_group_cpus(group))) {
4673+ printk(KERN_CONT "\n");
4674+ printk(KERN_ERR "ERROR: empty group\n");
4675+ break;
4676+ }
4677+
4678+ if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
4679+ printk(KERN_CONT "\n");
4680+ printk(KERN_ERR "ERROR: repeated CPUs\n");
4681+ break;
4682+ }
4683+
4684+ cpumask_or(groupmask, groupmask, sched_group_cpus(group));
4685+
4686+ cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
4687+
4688+ printk(KERN_CONT " %s", str);
4689+ if (group->__cpu_power != SCHED_LOAD_SCALE) {
4690+ printk(KERN_CONT " (__cpu_power = %d)",
4691+ group->__cpu_power);
4692+ }
4693+
4694+ group = group->next;
4695+ } while (group != sd->groups);
4696+ printk(KERN_CONT "\n");
4697+
4698+ if (!cpumask_equal(sched_domain_span(sd), groupmask))
4699+ printk(KERN_ERR "ERROR: groups don't span domain->span\n");
4700+
4701+ if (sd->parent &&
4702+ !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
4703+ printk(KERN_ERR "ERROR: parent span is not a superset "
4704+ "of domain->span\n");
4705+ return 0;
4706+}
4707+
4708+static void sched_domain_debug(struct sched_domain *sd, int cpu)
4709+{
4710+ cpumask_var_t groupmask;
4711+ int level = 0;
4712+
4713+ if (!sd) {
4714+ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
4715+ return;
4716+ }
4717+
4718+ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
4719+
4720+ if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
4721+ printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
4722+ return;
4723+ }
4724+
4725+ for (;;) {
4726+ if (sched_domain_debug_one(sd, cpu, level, groupmask))
4727+ break;
4728+ level++;
4729+ sd = sd->parent;
4730+ if (!sd)
4731+ break;
4732+ }
4733+ free_cpumask_var(groupmask);
4734+}
4735+#else /* !CONFIG_SCHED_DEBUG */
4736+# define sched_domain_debug(sd, cpu) do { } while (0)
4737+#endif /* CONFIG_SCHED_DEBUG */
4738+
4739+static int sd_degenerate(struct sched_domain *sd)
4740+{
4741+ if (cpumask_weight(sched_domain_span(sd)) == 1)
4742+ return 1;
4743+
4744+ /* Following flags need at least 2 groups */
4745+ if (sd->flags & (SD_LOAD_BALANCE |
4746+ SD_BALANCE_NEWIDLE |
4747+ SD_BALANCE_FORK |
4748+ SD_BALANCE_EXEC |
4749+ SD_SHARE_CPUPOWER |
4750+ SD_SHARE_PKG_RESOURCES)) {
4751+ if (sd->groups != sd->groups->next)
4752+ return 0;
4753+ }
4754+
4755+ /* Following flags don't use groups */
4756+ if (sd->flags & (SD_WAKE_IDLE |
4757+ SD_WAKE_AFFINE |
4758+ SD_WAKE_BALANCE))
4759+ return 0;
4760+
4761+ return 1;
4762+}
4763+
4764+static int
4765+sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
4766+{
4767+ unsigned long cflags = sd->flags, pflags = parent->flags;
4768+
4769+ if (sd_degenerate(parent))
4770+ return 1;
4771+
4772+ if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
4773+ return 0;
4774+
4775+ /* Does parent contain flags not in child? */
4776+ /* WAKE_BALANCE is a subset of WAKE_AFFINE */
4777+ if (cflags & SD_WAKE_AFFINE)
4778+ pflags &= ~SD_WAKE_BALANCE;
4779+ /* Flags needing groups don't count if only 1 group in parent */
4780+ if (parent->groups == parent->groups->next) {
4781+ pflags &= ~(SD_LOAD_BALANCE |
4782+ SD_BALANCE_NEWIDLE |
4783+ SD_BALANCE_FORK |
4784+ SD_BALANCE_EXEC |
4785+ SD_SHARE_CPUPOWER |
4786+ SD_SHARE_PKG_RESOURCES);
4787+ if (nr_node_ids == 1)
4788+ pflags &= ~SD_SERIALIZE;
4789+ }
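+ /* The parent still has flags the child lacks, so it is not redundant. */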
4790+ if (~cflags & pflags)
4791+ return 0;
4792+
4793+ return 1;
4794+}
4795+
4796+static void free_rootdomain(struct root_domain *rd)
4797+{
4798+ free_cpumask_var(rd->rto_mask);
4799+ free_cpumask_var(rd->online);
4800+ free_cpumask_var(rd->span);
4801+ kfree(rd);
4802+}
4803+
4804+static void rq_attach_root(struct rq *rq, struct root_domain *rd)
4805+{
4806+ struct root_domain *old_rd = NULL;
4807+ unsigned long flags;
4808+
4809+ grq_lock_irqsave(&flags);
4810+
4811+ if (rq->rd) {
4812+ old_rd = rq->rd;
4813+
4814+ if (cpumask_test_cpu(rq->cpu, old_rd->online))
4815+ set_rq_offline(rq);
4816+
4817+ cpumask_clear_cpu(rq->cpu, old_rd->span);
4818+
4819+ /*
4820+ * If we dont want to free the old_rt yet then
4821+ * set old_rd to NULL to skip the freeing later
4822+ * in this function:
4823+ */
4824+ if (!atomic_dec_and_test(&old_rd->refcount))
4825+ old_rd = NULL;
4826+ }
4827+
4828+ atomic_inc(&rd->refcount);
4829+ rq->rd = rd;
4830+
4831+ cpumask_set_cpu(rq->cpu, rd->span);
4832+ if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
4833+ set_rq_online(rq);
4834+
4835+ grq_unlock_irqrestore(&flags);
4836+
4837+ if (old_rd)
4838+ free_rootdomain(old_rd);
4839+}
4840+
4841+static int init_rootdomain(struct root_domain *rd, bool bootmem)
4842+{
4843+ gfp_t gfp = GFP_KERNEL;
4844+
4845+ memset(rd, 0, sizeof(*rd));
4846+
4847+ if (bootmem)
4848+ gfp = GFP_NOWAIT;
4849+
4850+ if (!alloc_cpumask_var(&rd->span, gfp))
4851+ goto out;
4852+ if (!alloc_cpumask_var(&rd->online, gfp))
4853+ goto free_span;
4854+ if (!alloc_cpumask_var(&rd->rto_mask, gfp))
4855+ goto free_online;
4856+
4857+ return 0;
4858+
4859+free_online:
4860+ free_cpumask_var(rd->online);
4861+free_span:
4862+ free_cpumask_var(rd->span);
4863+out:
4864+ return -ENOMEM;
4865+}
4866+
4867+static void init_defrootdomain(void)
4868+{
4869+ init_rootdomain(&def_root_domain, true);
4870+
4871+ atomic_set(&def_root_domain.refcount, 1);
4872+}
4873+
4874+static struct root_domain *alloc_rootdomain(void)
4875+{
4876+ struct root_domain *rd;
4877+
4878+ rd = kmalloc(sizeof(*rd), GFP_KERNEL);
4879+ if (!rd)
4880+ return NULL;
4881+
4882+ if (init_rootdomain(rd, false) != 0) {
4883+ kfree(rd);
4884+ return NULL;
4885+ }
4886+
4887+ return rd;
4888+}
4889+
4890+/*
4891+ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
4892+ * hold the hotplug lock.
4893+ */
4894+static void
4895+cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
4896+{
4897+ struct rq *rq = cpu_rq(cpu);
4898+ struct sched_domain *tmp;
4899+
4900+ /* Remove the sched domains which do not contribute to scheduling. */
4901+ for (tmp = sd; tmp; ) {
4902+ struct sched_domain *parent = tmp->parent;
4903+ if (!parent)
4904+ break;
4905+
4906+ if (sd_parent_degenerate(tmp, parent)) {
4907+ tmp->parent = parent->parent;
4908+ if (parent->parent)
4909+ parent->parent->child = tmp;
4910+ } else
4911+ tmp = tmp->parent;
4912+ }
4913+
4914+ if (sd && sd_degenerate(sd)) {
4915+ sd = sd->parent;
4916+ if (sd)
4917+ sd->child = NULL;
4918+ }
4919+
4920+ sched_domain_debug(sd, cpu);
4921+
4922+ rq_attach_root(rq, rd);
4923+ rcu_assign_pointer(rq->sd, sd);
4924+}
4925+
4926+/* cpus with isolated domains */
4927+static cpumask_var_t cpu_isolated_map;
4928+
4929+/* Setup the mask of cpus configured for isolated domains */
4930+static int __init isolated_cpu_setup(char *str)
4931+{
4932+ cpulist_parse(str, cpu_isolated_map);
4933+ return 1;
4934+}
4935+
4936+__setup("isolcpus=", isolated_cpu_setup);
4937+
4938+/*
4939+ * init_sched_build_groups takes the cpumask we wish to span, and a pointer
4940+ * to a function which identifies what group (along with the sched group) a CPU
4941+ * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
4942+ * (because we keep track of groups covered with a struct cpumask).
4943+ *
4944+ * init_sched_build_groups will build a circular linked list of the groups
4945+ * covered by the given span, and will set each group's ->cpumask correctly,
4946+ * and ->cpu_power to 0.
4947+ */
4948+static void
4949+init_sched_build_groups(const struct cpumask *span,
4950+ const struct cpumask *cpu_map,
4951+ int (*group_fn)(int cpu, const struct cpumask *cpu_map,
4952+ struct sched_group **sg,
4953+ struct cpumask *tmpmask),
4954+ struct cpumask *covered, struct cpumask *tmpmask)
4955+{
4956+ struct sched_group *first = NULL, *last = NULL;
4957+ int i;
4958+
4959+ cpumask_clear(covered);
4960+
4961+ for_each_cpu(i, span) {
4962+ struct sched_group *sg;
4963+ int group = group_fn(i, cpu_map, &sg, tmpmask);
4964+ int j;
4965+
4966+ if (cpumask_test_cpu(i, covered))
4967+ continue;
4968+
4969+ cpumask_clear(sched_group_cpus(sg));
4970+ sg->__cpu_power = 0;
4971+
4972+ for_each_cpu(j, span) {
4973+ if (group_fn(j, cpu_map, NULL, tmpmask) != group)
4974+ continue;
4975+
4976+ cpumask_set_cpu(j, covered);
4977+ cpumask_set_cpu(j, sched_group_cpus(sg));
4978+ }
4979+ if (!first)
4980+ first = sg;
4981+ if (last)
4982+ last->next = sg;
4983+ last = sg;
4984+ }
4985+ last->next = first;
4986+}
4987+
4988+#define SD_NODES_PER_DOMAIN 16
4989+
4990+#ifdef CONFIG_NUMA
4991+
4992+/**
4993+ * find_next_best_node - find the next node to include in a sched_domain
4994+ * @node: node whose sched_domain we're building
4995+ * @used_nodes: nodes already in the sched_domain
4996+ *
4997+ * Find the next node to include in a given scheduling domain. Simply
4998+ * finds the closest node not already in the @used_nodes map.
4999+ *
5000+ * Should use nodemask_t.
5001+ */
5002+static int find_next_best_node(int node, nodemask_t *used_nodes)
5003+{
5004+ int i, n, val, min_val, best_node = 0;
5005+
5006+ min_val = INT_MAX;
5007+
5008+ for (i = 0; i < nr_node_ids; i++) {
5009+ /* Start at @node */
5010+ n = (node + i) % nr_node_ids;
5011+
5012+ if (!nr_cpus_node(n))
5013+ continue;
5014+
5015+ /* Skip already used nodes */
5016+ if (node_isset(n, *used_nodes))
5017+ continue;
5018+
5019+ /* Simple min distance search */
5020+ val = node_distance(node, n);
5021+
5022+ if (val < min_val) {
5023+ min_val = val;
5024+ best_node = n;
5025+ }
5026+ }
5027+
5028+ node_set(best_node, *used_nodes);
5029+ return best_node;
5030+}
5031+
5032+/**
5033+ * sched_domain_node_span - get a cpumask for a node's sched_domain
5034+ * @node: node whose cpumask we're constructing
5035+ * @span: resulting cpumask
5036+ *
5037+ * Given a node, construct a good cpumask for its sched_domain to span. It
5038+ * should be one that prevents unnecessary balancing, but also spreads tasks
5039+ * out optimally.
5040+ */
5041+static void sched_domain_node_span(int node, struct cpumask *span)
5042+{
5043+ nodemask_t used_nodes;
5044+ int i;
5045+
5046+ cpumask_clear(span);
5047+ nodes_clear(used_nodes);
5048+
5049+ cpumask_or(span, span, cpumask_of_node(node));
5050+ node_set(node, used_nodes);
5051+
5052+ for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
5053+ int next_node = find_next_best_node(node, &used_nodes);
5054+
5055+ cpumask_or(span, span, cpumask_of_node(next_node));
5056+ }
5057+}
5058+#endif /* CONFIG_NUMA */
5059+
5060+int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
5061+
5062+/*
5063+ * The cpus mask in sched_group and sched_domain hangs off the end.
5064+ *
5065+ * ( See the comments in include/linux/sched.h:struct sched_group
5066+ * and struct sched_domain. )
5067+ */
5068+struct static_sched_group {
5069+ struct sched_group sg;
5070+ DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
5071+};
5072+
5073+struct static_sched_domain {
5074+ struct sched_domain sd;
5075+ DECLARE_BITMAP(span, CONFIG_NR_CPUS);
5076+};
5077+
5078+/*
5079+ * SMT sched-domains:
5080+ */
5081+#ifdef CONFIG_SCHED_SMT
5082+static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
5083+static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
5084+
5085+static int
5086+cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
5087+ struct sched_group **sg, struct cpumask *unused)
5088+{
5089+ if (sg)
5090+ *sg = &per_cpu(sched_group_cpus, cpu).sg;
5091+ return cpu;
5092+}
5093+#endif /* CONFIG_SCHED_SMT */
5094+
5095+/*
5096+ * multi-core sched-domains:
5097+ */
5098+#ifdef CONFIG_SCHED_MC
5099+static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
5100+static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
5101+#endif /* CONFIG_SCHED_MC */
5102+
5103+#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
5104+static int
5105+cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
5106+ struct sched_group **sg, struct cpumask *mask)
5107+{
5108+ int group;
5109+
5110+ cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
5111+ group = cpumask_first(mask);
5112+ if (sg)
5113+ *sg = &per_cpu(sched_group_core, group).sg;
5114+ return group;
5115+}
5116+#elif defined(CONFIG_SCHED_MC)
5117+static int
5118+cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
5119+ struct sched_group **sg, struct cpumask *unused)
5120+{
5121+ if (sg)
5122+ *sg = &per_cpu(sched_group_core, cpu).sg;
5123+ return cpu;
5124+}
5125+#endif
5126+
5127+static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
5128+static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
5129+
5130+static int
5131+cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
5132+ struct sched_group **sg, struct cpumask *mask)
5133+{
5134+ int group;
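+ /* Index the group by the first sibling cpu present in cpu_map (or by this cpu). */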
5135+#ifdef CONFIG_SCHED_MC
5136+ cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
5137+ group = cpumask_first(mask);
5138+#elif defined(CONFIG_SCHED_SMT)
5139+ cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
5140+ group = cpumask_first(mask);
5141+#else
5142+ group = cpu;
5143+#endif
5144+ if (sg)
5145+ *sg = &per_cpu(sched_group_phys, group).sg;
5146+ return group;
5147+}
5148+
5149+/**
5150+ * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
5151+ * @group: The group whose first cpu is to be returned.
5152+ */
5153+static inline unsigned int group_first_cpu(struct sched_group *group)
5154+{
5155+ return cpumask_first(sched_group_cpus(group));
5156+}
5157+
5158+#ifdef CONFIG_NUMA
5159+/*
5160+ * The init_sched_build_groups can't handle what we want to do with node
5161+ * groups, so roll our own. Now each node has its own list of groups which
5162+ * gets dynamically allocated.
5163+ */
5164+static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
5165+static struct sched_group ***sched_group_nodes_bycpu;
5166+
5167+static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
5168+static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
5169+
5170+static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
5171+ struct sched_group **sg,
5172+ struct cpumask *nodemask)
5173+{
5174+ int group;
5175+
5176+ cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
5177+ group = cpumask_first(nodemask);
5178+
5179+ if (sg)
5180+ *sg = &per_cpu(sched_group_allnodes, group).sg;
5181+ return group;
5182+}
5183+
5184+static void init_numa_sched_groups_power(struct sched_group *group_head)
5185+{
5186+ struct sched_group *sg = group_head;
5187+ int j;
5188+
5189+ if (!sg)
5190+ return;
5191+ do {
5192+ for_each_cpu(j, sched_group_cpus(sg)) {
5193+ struct sched_domain *sd;
5194+
5195+ sd = &per_cpu(phys_domains, j).sd;
5196+ if (j != group_first_cpu(sd->groups)) {
5197+ /*
5198+ * Only add "power" once for each
5199+ * physical package.
5200+ */
5201+ continue;
5202+ }
5203+
5204+ sg_inc_cpu_power(sg, sd->groups->__cpu_power);
5205+ }
5206+ sg = sg->next;
5207+ } while (sg != group_head);
5208+}
5209+#endif /* CONFIG_NUMA */
5210+
5211+#ifdef CONFIG_NUMA
5212+/* Free memory allocated for various sched_group structures */
5213+static void free_sched_groups(const struct cpumask *cpu_map,
5214+ struct cpumask *nodemask)
5215+{
5216+ int cpu, i;
5217+
5218+ for_each_cpu(cpu, cpu_map) {
5219+ struct sched_group **sched_group_nodes
5220+ = sched_group_nodes_bycpu[cpu];
5221+
5222+ if (!sched_group_nodes)
5223+ continue;
5224+
5225+ for (i = 0; i < nr_node_ids; i++) {
5226+ struct sched_group *oldsg, *sg = sched_group_nodes[i];
5227+
5228+ cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
5229+ if (cpumask_empty(nodemask))
5230+ continue;
5231+
5232+ if (sg == NULL)
5233+ continue;
5234+ sg = sg->next;
5235+next_sg:
5236+ oldsg = sg;
5237+ sg = sg->next;
5238+ kfree(oldsg);
5239+ if (oldsg != sched_group_nodes[i])
5240+ goto next_sg;
5241+ }
5242+ kfree(sched_group_nodes);
5243+ sched_group_nodes_bycpu[cpu] = NULL;
5244+ }
5245+}
5246+#else /* !CONFIG_NUMA */
5247+static void free_sched_groups(const struct cpumask *cpu_map,
5248+ struct cpumask *nodemask)
5249+{
5250+}
5251+#endif /* CONFIG_NUMA */
5252+
5253+/*
5254+ * Initialize sched groups cpu_power.
5255+ *
5256+ * cpu_power indicates the capacity of a sched group, which is used while
5257+ * distributing the load between different sched groups in a sched domain.
5258+ * Typically cpu_power for all the groups in a sched domain will be the same
5259+ * unless there are asymmetries in the topology. If there are asymmetries, a
5260+ * group with more cpu_power will pick up more load compared to a group with
5261+ * less cpu_power.
5262+ *
5263+ * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
5264+ * the maximum number of tasks a group can handle in the presence of other idle
5265+ * or lightly loaded groups in the same sched domain.
5266+ */
5267+static void init_sched_groups_power(int cpu, struct sched_domain *sd)
5268+{
5269+ struct sched_domain *child;
5270+ struct sched_group *group;
5271+
5272+ WARN_ON(!sd || !sd->groups);
5273+
5274+ if (cpu != group_first_cpu(sd->groups))
5275+ return;
5276+
5277+ child = sd->child;
5278+
5279+ sd->groups->__cpu_power = 0;
5280+
5281+ /*
5282+ * For the performance policy, if the groups in the child domain share resources
5283+ * (for example cores sharing some portions of the cache hierarchy
5284+ * or SMT), then set this domain's groups' cpu_power such that each group
5285+ * can handle only one task, when there are other idle groups in the
5286+ * same sched domain.
5287+ */
5288+ if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
5289+ (child->flags &
5290+ (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
5291+ sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
5292+ return;
5293+ }
5294+
5295+ /*
5296+ * add the cpu_power of each child group to this group's cpu_power
5297+ */
5298+ group = child->groups;
5299+ do {
5300+ sg_inc_cpu_power(sd->groups, group->__cpu_power);
5301+ group = group->next;
5302+ } while (group != child->groups);
5303+}
5304+
5305+/*
5306+ * Initializers for schedule domains
5307+ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
5308+ */
5309+
5310+#ifdef CONFIG_SCHED_DEBUG
5311+# define SD_INIT_NAME(sd, type) sd->name = #type
5312+#else
5313+# define SD_INIT_NAME(sd, type) do { } while (0)
5314+#endif
5315+
5316+#define SD_INIT(sd, type) sd_init_##type(sd)
5317+
5318+#define SD_INIT_FUNC(type) \
5319+static noinline void sd_init_##type(struct sched_domain *sd) \
5320+{ \
5321+ memset(sd, 0, sizeof(*sd)); \
5322+ *sd = SD_##type##_INIT; \
5323+ sd->level = SD_LV_##type; \
5324+ SD_INIT_NAME(sd, type); \
5325+}
5326+
5327+SD_INIT_FUNC(CPU)
5328+#ifdef CONFIG_NUMA
5329+ SD_INIT_FUNC(ALLNODES)
5330+ SD_INIT_FUNC(NODE)
5331+#endif
5332+#ifdef CONFIG_SCHED_SMT
5333+ SD_INIT_FUNC(SIBLING)
5334+#endif
5335+#ifdef CONFIG_SCHED_MC
5336+ SD_INIT_FUNC(MC)
5337+#endif
5338+
5339+static int default_relax_domain_level = -1;
5340+
5341+static int __init setup_relax_domain_level(char *str)
5342+{
5343+ unsigned long val;
5344+
5345+ val = simple_strtoul(str, NULL, 0);
5346+ if (val < SD_LV_MAX)
5347+ default_relax_domain_level = val;
5348+
5349+ return 1;
5350+}
5351+__setup("relax_domain_level=", setup_relax_domain_level);
5352+
5353+static void set_domain_attribute(struct sched_domain *sd,
5354+ struct sched_domain_attr *attr)
5355+{
5356+ int request;
5357+
5358+ if (!attr || attr->relax_domain_level < 0) {
5359+ if (default_relax_domain_level < 0)
5360+ return;
5361+ else
5362+ request = default_relax_domain_level;
5363+ } else
5364+ request = attr->relax_domain_level;
5365+ if (request < sd->level) {
5366+ /* turn off idle balance on this domain */
5367+ sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
5368+ } else {
5369+ /* turn on idle balance on this domain */
5370+ sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
5371+ }
5372+}
5373+
5374+/*
5375+ * Build sched domains for a given set of cpus and attach the sched domains
5376+ * to the individual cpus
5377+ */
5378+static int __build_sched_domains(const struct cpumask *cpu_map,
5379+ struct sched_domain_attr *attr)
5380+{
5381+ int i, err = -ENOMEM;
5382+ struct root_domain *rd;
5383+ cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
5384+ tmpmask;
5385+#ifdef CONFIG_NUMA
5386+ cpumask_var_t domainspan, covered, notcovered;
5387+ struct sched_group **sched_group_nodes = NULL;
5388+ int sd_allnodes = 0;
5389+
5390+ if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
5391+ goto out;
5392+ if (!alloc_cpumask_var(&covered, GFP_KERNEL))
5393+ goto free_domainspan;
5394+ if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
5395+ goto free_covered;
5396+#endif
5397+
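+ /* Allocate the scratch cpumasks, unwinding through the free_* labels below on failure. */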
5398+ if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
5399+ goto free_notcovered;
5400+ if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
5401+ goto free_nodemask;
5402+ if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
5403+ goto free_this_sibling_map;
5404+ if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
5405+ goto free_this_core_map;
5406+ if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
5407+ goto free_send_covered;
5408+
5409+#ifdef CONFIG_NUMA
5410+ /*
5411+ * Allocate the per-node list of sched groups
5412+ */
5413+ sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
5414+ GFP_KERNEL);
5415+ if (!sched_group_nodes) {
5416+ printk(KERN_WARNING "Can not alloc sched group node list\n");
5417+ goto free_tmpmask;
5418+ }
5419+#endif
5420+
5421+ rd = alloc_rootdomain();
5422+ if (!rd) {
5423+ printk(KERN_WARNING "Cannot alloc root domain\n");
5424+ goto free_sched_groups;
5425+ }
5426+
5427+#ifdef CONFIG_NUMA
5428+ sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
5429+#endif
5430+
5431+ /*
5432+ * Set up domains for cpus specified by the cpu_map.
5433+ */
5434+ for_each_cpu(i, cpu_map) {
5435+ struct sched_domain *sd = NULL, *p;
5436+
5437+ cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
5438+
5439+#ifdef CONFIG_NUMA
5440+ if (cpumask_weight(cpu_map) >
5441+ SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
5442+ sd = &per_cpu(allnodes_domains, i).sd;
5443+ SD_INIT(sd, ALLNODES);
5444+ set_domain_attribute(sd, attr);
5445+ cpumask_copy(sched_domain_span(sd), cpu_map);
5446+ cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
5447+ p = sd;
5448+ sd_allnodes = 1;
5449+ } else
5450+ p = NULL;
5451+
5452+ sd = &per_cpu(node_domains, i).sd;
5453+ SD_INIT(sd, NODE);
5454+ set_domain_attribute(sd, attr);
5455+ sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
5456+ sd->parent = p;
5457+ if (p)
5458+ p->child = sd;
5459+ cpumask_and(sched_domain_span(sd),
5460+ sched_domain_span(sd), cpu_map);
5461+#endif
5462+
5463+ p = sd;
5464+ sd = &per_cpu(phys_domains, i).sd;
5465+ SD_INIT(sd, CPU);
5466+ set_domain_attribute(sd, attr);
5467+ cpumask_copy(sched_domain_span(sd), nodemask);
5468+ sd->parent = p;
5469+ if (p)
5470+ p->child = sd;
5471+ cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
5472+
5473+#ifdef CONFIG_SCHED_MC
5474+ p = sd;
5475+ sd = &per_cpu(core_domains, i).sd;
5476+ SD_INIT(sd, MC);
5477+ set_domain_attribute(sd, attr);
5478+ cpumask_and(sched_domain_span(sd), cpu_map,
5479+ cpu_coregroup_mask(i));
5480+ sd->parent = p;
5481+ p->child = sd;
5482+ cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
5483+#endif
5484+
5485+#ifdef CONFIG_SCHED_SMT
5486+ p = sd;
5487+ sd = &per_cpu(cpu_domains, i).sd;
5488+ SD_INIT(sd, SIBLING);
5489+ set_domain_attribute(sd, attr);
5490+ cpumask_and(sched_domain_span(sd),
5491+ topology_thread_cpumask(i), cpu_map);
5492+ sd->parent = p;
5493+ p->child = sd;
5494+ cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
5495+#endif
5496+ }
5497+
5498+#ifdef CONFIG_SCHED_SMT
5499+ /* Set up CPU (sibling) groups */
5500+ for_each_cpu(i, cpu_map) {
5501+ cpumask_and(this_sibling_map,
5502+ topology_thread_cpumask(i), cpu_map);
5503+ if (i != cpumask_first(this_sibling_map))
5504+ continue;
5505+
5506+ init_sched_build_groups(this_sibling_map, cpu_map,
5507+ &cpu_to_cpu_group,
5508+ send_covered, tmpmask);
5509+ }
5510+#endif
5511+
5512+#ifdef CONFIG_SCHED_MC
5513+ /* Set up multi-core groups */
5514+ for_each_cpu(i, cpu_map) {
5515+ cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
5516+ if (i != cpumask_first(this_core_map))
5517+ continue;
5518+
5519+ init_sched_build_groups(this_core_map, cpu_map,
5520+ &cpu_to_core_group,
5521+ send_covered, tmpmask);
5522+ }
5523+#endif
5524+
5525+ /* Set up physical groups */
5526+ for (i = 0; i < nr_node_ids; i++) {
5527+ cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
5528+ if (cpumask_empty(nodemask))
5529+ continue;
5530+
5531+ init_sched_build_groups(nodemask, cpu_map,
5532+ &cpu_to_phys_group,
5533+ send_covered, tmpmask);
5534+ }
5535+
5536+#ifdef CONFIG_NUMA
5537+ /* Set up node groups */
5538+ if (sd_allnodes) {
5539+ init_sched_build_groups(cpu_map, cpu_map,
5540+ &cpu_to_allnodes_group,
5541+ send_covered, tmpmask);
5542+ }
5543+
5544+ for (i = 0; i < nr_node_ids; i++) {
5545+ /* Set up node groups */
5546+ struct sched_group *sg, *prev;
5547+ int j;
5548+
5549+ cpumask_clear(covered);
5550+ cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
5551+ if (cpumask_empty(nodemask)) {
5552+ sched_group_nodes[i] = NULL;
5553+ continue;
5554+ }
5555+
5556+ sched_domain_node_span(i, domainspan);
5557+ cpumask_and(domainspan, domainspan, cpu_map);
5558+
5559+ sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
5560+ GFP_KERNEL, i);
5561+ if (!sg) {
5562+ printk(KERN_WARNING "Can not alloc domain group for "
5563+ "node %d\n", i);
5564+ goto error;
5565+ }
5566+ sched_group_nodes[i] = sg;
5567+ for_each_cpu(j, nodemask) {
5568+ struct sched_domain *sd;
5569+
5570+ sd = &per_cpu(node_domains, j).sd;
5571+ sd->groups = sg;
5572+ }
5573+ sg->__cpu_power = 0;
5574+ cpumask_copy(sched_group_cpus(sg), nodemask);
5575+ sg->next = sg;
5576+ cpumask_or(covered, covered, nodemask);
5577+ prev = sg;
5578+
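+ /* Chain in groups for the remaining nodes spanned by this domain, starting from node i. */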
5579+ for (j = 0; j < nr_node_ids; j++) {
5580+ int n = (i + j) % nr_node_ids;
5581+
5582+ cpumask_complement(notcovered, covered);
5583+ cpumask_and(tmpmask, notcovered, cpu_map);
5584+ cpumask_and(tmpmask, tmpmask, domainspan);
5585+ if (cpumask_empty(tmpmask))
5586+ break;
5587+
5588+ cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
5589+ if (cpumask_empty(tmpmask))
5590+ continue;
5591+
5592+ sg = kmalloc_node(sizeof(struct sched_group) +
5593+ cpumask_size(),
5594+ GFP_KERNEL, i);
5595+ if (!sg) {
5596+ printk(KERN_WARNING
5597+ "Can not alloc domain group for node %d\n", j);
5598+ goto error;
5599+ }
5600+ sg->__cpu_power = 0;
5601+ cpumask_copy(sched_group_cpus(sg), tmpmask);
5602+ sg->next = prev->next;
5603+ cpumask_or(covered, covered, tmpmask);
5604+ prev->next = sg;
5605+ prev = sg;
5606+ }
5607+ }
5608+#endif
5609+
5610+ /* Calculate CPU power for physical packages and nodes */
5611+#ifdef CONFIG_SCHED_SMT
5612+ for_each_cpu(i, cpu_map) {
5613+ struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
5614+
5615+ init_sched_groups_power(i, sd);
5616+ }
5617+#endif
5618+#ifdef CONFIG_SCHED_MC
5619+ for_each_cpu(i, cpu_map) {
5620+ struct sched_domain *sd = &per_cpu(core_domains, i).sd;
5621+
5622+ init_sched_groups_power(i, sd);
5623+ }
5624+#endif
5625+
5626+ for_each_cpu(i, cpu_map) {
5627+ struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
5628+
5629+ init_sched_groups_power(i, sd);
5630+ }
5631+
5632+#ifdef CONFIG_NUMA
5633+ for (i = 0; i < nr_node_ids; i++)
5634+ init_numa_sched_groups_power(sched_group_nodes[i]);
5635+
5636+ if (sd_allnodes) {
5637+ struct sched_group *sg;
5638+
5639+ cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
5640+ tmpmask);
5641+ init_numa_sched_groups_power(sg);
5642+ }
5643+#endif
5644+
5645+ /* Attach the domains */
5646+ for_each_cpu(i, cpu_map) {
5647+ struct sched_domain *sd;
5648+#ifdef CONFIG_SCHED_SMT
5649+ sd = &per_cpu(cpu_domains, i).sd;
5650+#elif defined(CONFIG_SCHED_MC)
5651+ sd = &per_cpu(core_domains, i).sd;
5652+#else
5653+ sd = &per_cpu(phys_domains, i).sd;
5654+#endif
5655+ cpu_attach_domain(sd, rd, i);
5656+ }
5657+
5658+ err = 0;
5659+
5660+free_tmpmask:
5661+ free_cpumask_var(tmpmask);
5662+free_send_covered:
5663+ free_cpumask_var(send_covered);
5664+free_this_core_map:
5665+ free_cpumask_var(this_core_map);
5666+free_this_sibling_map:
5667+ free_cpumask_var(this_sibling_map);
5668+free_nodemask:
5669+ free_cpumask_var(nodemask);
5670+free_notcovered:
5671+#ifdef CONFIG_NUMA
5672+ free_cpumask_var(notcovered);
5673+free_covered:
5674+ free_cpumask_var(covered);
5675+free_domainspan:
5676+ free_cpumask_var(domainspan);
5677+out:
5678+#endif
5679+ return err;
5680+
5681+free_sched_groups:
5682+#ifdef CONFIG_NUMA
5683+ kfree(sched_group_nodes);
5684+#endif
5685+ goto free_tmpmask;
5686+
5687+#ifdef CONFIG_NUMA
5688+error:
5689+ free_sched_groups(cpu_map, tmpmask);
5690+ free_rootdomain(rd);
5691+ goto free_tmpmask;
5692+#endif
5693+}
5694+
5695+static int build_sched_domains(const struct cpumask *cpu_map)
5696+{
5697+ return __build_sched_domains(cpu_map, NULL);
5698+}
5699+
5700+static struct cpumask *doms_cur; /* current sched domains */
5701+static int ndoms_cur; /* number of sched domains in 'doms_cur' */
5702+static struct sched_domain_attr *dattr_cur;
5703+ /* attributes of custom domains in 'doms_cur' */
5704+
5705+/*
5706+ * Special case: If a kmalloc of a doms_cur partition (array of
5707+ * cpumask) fails, then fallback to a single sched domain,
5708+ * as determined by the single cpumask fallback_doms.
5709+ */
5710+static cpumask_var_t fallback_doms;
5711+
5712+/*
5713+ * arch_update_cpu_topology lets virtualized architectures update the
5714+ * cpu core maps. It is supposed to return 1 if the topology changed
5715+ * or 0 if it stayed the same.
5716+ */
5717+int __attribute__((weak)) arch_update_cpu_topology(void)
5718+{
5719+ return 0;
5720+}
5721+
5722+/*
5723+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
5724+ * For now this just excludes isolated cpus, but could be used to
5725+ * exclude other special cases in the future.
5726+ */
5727+static int arch_init_sched_domains(const struct cpumask *cpu_map)
5728+{
5729+ int err;
5730+
5731+ arch_update_cpu_topology();
5732+ ndoms_cur = 1;
5733+ doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
5734+ if (!doms_cur)
5735+ doms_cur = fallback_doms;
5736+ cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
5737+ dattr_cur = NULL;
5738+ err = build_sched_domains(doms_cur);
5739+ register_sched_domain_sysctl();
5740+
5741+ return err;
5742+}
5743+
5744+static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
5745+ struct cpumask *tmpmask)
5746+{
5747+ free_sched_groups(cpu_map, tmpmask);
5748+}
5749+
5750+/*
5751+ * Detach sched domains from a group of cpus specified in cpu_map.
5752+ * These cpus will now be attached to the NULL domain.
5753+ */
5754+static void detach_destroy_domains(const struct cpumask *cpu_map)
5755+{
5756+ /* Save because hotplug lock held. */
5757+ static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
5758+ int i;
5759+
5760+ for_each_cpu(i, cpu_map)
5761+ cpu_attach_domain(NULL, &def_root_domain, i);
5762+ synchronize_sched();
5763+ arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
5764+}
5765+
5766+/* handle null as "default" */
5767+static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
5768+ struct sched_domain_attr *new, int idx_new)
5769+{
5770+ struct sched_domain_attr tmp;
5771+
5772+ /* fast path */
5773+ if (!new && !cur)
5774+ return 1;
5775+
5776+ tmp = SD_ATTR_INIT;
5777+ return !memcmp(cur ? (cur + idx_cur) : &tmp,
5778+ new ? (new + idx_new) : &tmp,
5779+ sizeof(struct sched_domain_attr));
5780+}
5781+
5782+/*
5783+ * Partition sched domains as specified by the 'ndoms_new'
5784+ * cpumasks in the array doms_new[] of cpumasks. This compares
5785+ * doms_new[] to the current sched domain partitioning, doms_cur[].
5786+ * It destroys each deleted domain and builds each new domain.
5787+ *
5788+ * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
5789+ * The masks don't intersect (don't overlap). We should set up one
5790+ * sched domain for each mask. CPUs not in any of the cpumasks will
5791+ * not be load balanced. If the same cpumask appears both in the
5792+ * current 'doms_cur' domains and in the new 'doms_new', we can leave
5793+ * it as it is.
5794+ *
5795+ * The passed in 'doms_new' should be kmalloc'd. This routine takes
5796+ * ownership of it and will kfree it when done with it. If the caller
5797+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
5798+ * ndoms_new == 1, and partition_sched_domains() will fall back to
5799+ * the single partition 'fallback_doms'; this also forces the domains
5800+ * to be rebuilt.
5801+ *
5802+ * If doms_new == NULL it will be replaced with cpu_online_mask.
5803+ * ndoms_new == 0 is a special case for destroying existing domains,
5804+ * and it will not create the default domain.
5805+ *
5806+ * Call with hotplug lock held
5807+ */
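+/*
+ * For example (as the callers below do): partition_sched_domains(1, NULL,
+ * NULL) rebuilds the single fallback domain spanning all online,
+ * non-isolated cpus, while partition_sched_domains(0, NULL, NULL) only
+ * tears the existing domains down without building a replacement.
+ */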
5808+/* FIXME: Change to struct cpumask *doms_new[] */
5809+void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
5810+ struct sched_domain_attr *dattr_new)
5811+{
5812+ int i, j, n;
5813+ int new_topology;
5814+
5815+ mutex_lock(&sched_domains_mutex);
5816+
5817+ /* always unregister in case we don't destroy any domains */
5818+ unregister_sched_domain_sysctl();
5819+
5820+ /* Let architecture update cpu core mappings. */
5821+ new_topology = arch_update_cpu_topology();
5822+
5823+ n = doms_new ? ndoms_new : 0;
5824+
5825+ /* Destroy deleted domains */
5826+ for (i = 0; i < ndoms_cur; i++) {
5827+ for (j = 0; j < n && !new_topology; j++) {
5828+ if (cpumask_equal(&doms_cur[i], &doms_new[j])
5829+ && dattrs_equal(dattr_cur, i, dattr_new, j))
5830+ goto match1;
5831+ }
5832+ /* no match - a current sched domain not in new doms_new[] */
5833+ detach_destroy_domains(doms_cur + i);
5834+match1:
5835+ ;
5836+ }
5837+
5838+ if (doms_new == NULL) {
5839+ ndoms_cur = 0;
5840+ doms_new = fallback_doms;
5841+ cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
5842+ WARN_ON_ONCE(dattr_new);
5843+ }
5844+
5845+ /* Build new domains */
5846+ for (i = 0; i < ndoms_new; i++) {
5847+ for (j = 0; j < ndoms_cur && !new_topology; j++) {
5848+ if (cpumask_equal(&doms_new[i], &doms_cur[j])
5849+ && dattrs_equal(dattr_new, i, dattr_cur, j))
5850+ goto match2;
5851+ }
5852+ /* no match - add a new doms_new */
5853+ __build_sched_domains(doms_new + i,
5854+ dattr_new ? dattr_new + i : NULL);
5855+match2:
5856+ ;
5857+ }
5858+
5859+ /* Remember the new sched domains */
5860+ if (doms_cur != fallback_doms)
5861+ kfree(doms_cur);
5862+ kfree(dattr_cur); /* kfree(NULL) is safe */
5863+ doms_cur = doms_new;
5864+ dattr_cur = dattr_new;
5865+ ndoms_cur = ndoms_new;
5866+
5867+ register_sched_domain_sysctl();
5868+
5869+ mutex_unlock(&sched_domains_mutex);
5870+}
5871+
5872+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
5873+static void arch_reinit_sched_domains(void)
5874+{
5875+ get_online_cpus();
5876+
5877+ /* Destroy domains first to force the rebuild */
5878+ partition_sched_domains(0, NULL, NULL);
5879+
5880+ rebuild_sched_domains();
5881+ put_online_cpus();
5882+}
5883+
5884+static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
5885+{
5886+ unsigned int level = 0;
5887+
5888+ if (sscanf(buf, "%u", &level) != 1)
5889+ return -EINVAL;
5890+
5891+ /*
5892+ * level is always positive, so don't check for
5893+ * level < POWERSAVINGS_BALANCE_NONE, which is 0.
5894+ * What happens on a 0 or 1 byte write? Do we
5895+ * need to check count as well?
5896+ */
5897+
5898+ if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
5899+ return -EINVAL;
5900+
5901+ if (smt)
5902+ sched_smt_power_savings = level;
5903+ else
5904+ sched_mc_power_savings = level;
5905+
5906+ arch_reinit_sched_domains();
5907+
5908+ return count;
5909+}
5910+
5911+#ifdef CONFIG_SCHED_MC
5912+static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
5913+ char *page)
5914+{
5915+ return sprintf(page, "%u\n", sched_mc_power_savings);
5916+}
5917+static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
5918+ const char *buf, size_t count)
5919+{
5920+ return sched_power_savings_store(buf, count, 0);
5921+}
5922+static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
5923+ sched_mc_power_savings_show,
5924+ sched_mc_power_savings_store);
5925+#endif
5926+
5927+#ifdef CONFIG_SCHED_SMT
5928+static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
5929+ char *page)
5930+{
5931+ return sprintf(page, "%u\n", sched_smt_power_savings);
5932+}
5933+static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
5934+ const char *buf, size_t count)
5935+{
5936+ return sched_power_savings_store(buf, count, 1);
5937+}
5938+static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
5939+ sched_smt_power_savings_show,
5940+ sched_smt_power_savings_store);
5941+#endif
5942+
5943+int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
5944+{
5945+ int err = 0;
5946+
5947+#ifdef CONFIG_SCHED_SMT
5948+ if (smt_capable())
5949+ err = sysfs_create_file(&cls->kset.kobj,
5950+ &attr_sched_smt_power_savings.attr);
5951+#endif
5952+#ifdef CONFIG_SCHED_MC
5953+ if (!err && mc_capable())
5954+ err = sysfs_create_file(&cls->kset.kobj,
5955+ &attr_sched_mc_power_savings.attr);
5956+#endif
5957+ return err;
5958+}
5959+#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
5960+
5961+#ifndef CONFIG_CPUSETS
5962+/*
5963+ * Add online and remove offline CPUs from the scheduler domains.
5964+ * When cpusets are enabled they take over this function.
5965+ */
5966+static int update_sched_domains(struct notifier_block *nfb,
5967+ unsigned long action, void *hcpu)
5968+{
5969+ switch (action) {
5970+ case CPU_ONLINE:
5971+ case CPU_ONLINE_FROZEN:
5972+ case CPU_DEAD:
5973+ case CPU_DEAD_FROZEN:
5974+ partition_sched_domains(1, NULL, NULL);
5975+ return NOTIFY_OK;
5976+
5977+ default:
5978+ return NOTIFY_DONE;
5979+ }
5980+}
5981+#endif
5982+
5983+static int update_runtime(struct notifier_block *nfb,
5984+ unsigned long action, void *hcpu)
5985+{
5986+ switch (action) {
5987+ case CPU_DOWN_PREPARE:
5988+ case CPU_DOWN_PREPARE_FROZEN:
5989+ return NOTIFY_OK;
5990+
5991+ case CPU_DOWN_FAILED:
5992+ case CPU_DOWN_FAILED_FROZEN:
5993+ case CPU_ONLINE:
5994+ case CPU_ONLINE_FROZEN:
5995+ return NOTIFY_OK;
5996+
5997+ default:
5998+ return NOTIFY_DONE;
5999+ }
6000+}
6001+
6002+void __init sched_init_smp(void)
6003+{
6004+ cpumask_var_t non_isolated_cpus;
6005+
6006+ alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
6007+
6008+#if defined(CONFIG_NUMA)
6009+ sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
6010+ GFP_KERNEL);
6011+ BUG_ON(sched_group_nodes_bycpu == NULL);
6012+#endif
6013+ get_online_cpus();
6014+ mutex_lock(&sched_domains_mutex);
6015+ arch_init_sched_domains(cpu_online_mask);
6016+ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6017+ if (cpumask_empty(non_isolated_cpus))
6018+ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
6019+ mutex_unlock(&sched_domains_mutex);
6020+ put_online_cpus();
6021+
6022+#ifndef CONFIG_CPUSETS
6023+ /* XXX: Theoretical race here - CPU may be hotplugged now */
6024+ hotcpu_notifier(update_sched_domains, 0);
6025+#endif
6026+
6027+ /* RT runtime code needs to handle some hotplug events */
6028+ hotcpu_notifier(update_runtime, 0);
6029+
6030+ /* Move init over to a non-isolated CPU */
6031+ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
6032+ BUG();
6033+ free_cpumask_var(non_isolated_cpus);
6034+
6035+ alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
6036+
6037+ /*
6038+ * Assume that every added cpu gives us slightly less overall latency,
6039+ * allowing us to increase the base rr_interval, but in a non-linear
6040+ * fashion.
6041+ */
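+ /*
+ * Illustratively, with the default base rr_interval of 6: 1 cpu keeps
+ * 6ms, 2 cpus give 6 * (1 + ilog2(2)) = 12ms, 4 cpus give 18ms and
+ * 16 cpus give 30ms.
+ */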
6042+ rr_interval *= 1 + ilog2(num_online_cpus());
6043+}
6044+#else
6045+void __init sched_init_smp(void)
6046+{
6047+}
6048+#endif /* CONFIG_SMP */
6049+
6050+unsigned int sysctl_timer_migration = 1;
6051+
6052+int in_sched_functions(unsigned long addr)
6053+{
6054+ return in_lock_functions(addr) ||
6055+ (addr >= (unsigned long)__sched_text_start
6056+ && addr < (unsigned long)__sched_text_end);
6057+}
6058+
6059+void __init sched_init(void)
6060+{
6061+ int i;
6062+ int highest_cpu = 0;
6063+
6064+ prio_ratios[0] = 100;
6065+ for (i = 1 ; i < PRIO_RANGE ; i++)
6066+ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
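+ /*
+ * Each ratio is ~10% larger than the one before it, giving the
+ * sequence 100, 110, 121, 133, 146, ...
+ */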
6067+
6068+#ifdef CONFIG_SMP
6069+ init_defrootdomain();
6070+ cpus_clear(grq.cpu_idle_map);
6071+#endif
6072+ spin_lock_init(&grq.lock);
6073+ for_each_possible_cpu(i) {
6074+ struct rq *rq;
6075+
6076+ rq = cpu_rq(i);
6077+ INIT_LIST_HEAD(&rq->queue);
6078+ rq->rq_deadline = 0;
6079+ rq->rq_prio = 0;
6080+ rq->cpu = i;
6081+ rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc =
6082+ rq->iowait_pc = rq->idle_pc = 0;
6083+#ifdef CONFIG_SMP
6084+ rq->sd = NULL;
6085+ rq->rd = NULL;
6086+ rq->online = 0;
6087+ INIT_LIST_HEAD(&rq->migration_queue);
6088+ rq_attach_root(rq, &def_root_domain);
6089+#endif
6090+ atomic_set(&rq->nr_iowait, 0);
6091+ highest_cpu = i;
6092+ }
6093+ grq.iso_ticks = grq.nr_running = grq.nr_uninterruptible = 0;
6094+ for (i = 0; i < PRIO_LIMIT; i++)
6095+ INIT_LIST_HEAD(grq.queue + i);
6096+ bitmap_zero(grq.prio_bitmap, PRIO_LIMIT);
6097+ /* delimiter for bitsearch */
6098+ __set_bit(PRIO_LIMIT, grq.prio_bitmap);
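+ /*
+ * The always-set delimiter bit means a search of prio_bitmap
+ * terminates at PRIO_LIMIT when none of the real queues are
+ * populated.
+ */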
6099+
6100+#ifdef CONFIG_SMP
6101+ nr_cpu_ids = highest_cpu + 1;
6102+#endif
6103+
6104+#ifdef CONFIG_PREEMPT_NOTIFIERS
6105+ INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6106+#endif
6107+
6108+#ifdef CONFIG_RT_MUTEXES
6109+ plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
6110+#endif
6111+
6112+ /*
6113+ * The boot idle thread does lazy MMU switching as well:
6114+ */
6115+ atomic_inc(&init_mm.mm_count);
6116+ enter_lazy_tlb(&init_mm, current);
6117+
6118+ /*
6119+ * Make us the idle thread. Technically, schedule() should not be
6120+ * called from this thread, however somewhere below it might be,
6121+ * but because we are the idle thread, we just pick up running again
6122+ * when this runqueue becomes "idle".
6123+ */
6124+ init_idle(current, smp_processor_id());
6125+
6126+ /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
6127+ alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
6128+#ifdef CONFIG_SMP
6129+#ifdef CONFIG_NO_HZ
6130+ alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
6131+ alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
6132+#endif
6133+ alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
6134+#endif /* SMP */
6135+ perf_counter_init();
6136+}
6137+
6138+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
6139+void __might_sleep(char *file, int line)
6140+{
6141+#ifdef in_atomic
6142+ static unsigned long prev_jiffy; /* ratelimiting */
6143+
6144+ if ((in_atomic() || irqs_disabled()) &&
6145+ system_state == SYSTEM_RUNNING && !oops_in_progress) {
6146+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6147+ return;
6148+ prev_jiffy = jiffies;
6149+ printk(KERN_ERR "BUG: sleeping function called from invalid"
6150+ " context at %s:%d\n", file, line);
6151+ printk("in_atomic():%d, irqs_disabled():%d\n",
6152+ in_atomic(), irqs_disabled());
6153+ debug_show_held_locks(current);
6154+ if (irqs_disabled())
6155+ print_irqtrace_events(current);
6156+ dump_stack();
6157+ }
6158+#endif
6159+}
6160+EXPORT_SYMBOL(__might_sleep);
6161+#endif
6162+
6163+#ifdef CONFIG_MAGIC_SYSRQ
6164+void normalize_rt_tasks(void)
6165+{
6166+ struct task_struct *g, *p;
6167+ unsigned long flags;
6168+ struct rq *rq;
6169+ int queued;
6170+
6171+ read_lock_irq(&tasklist_lock);
6172+
6173+ do_each_thread(g, p) {
6174+ if (!rt_task(p) && !iso_task(p))
6175+ continue;
6176+
6177+ spin_lock_irqsave(&p->pi_lock, flags);
6178+ rq = __task_grq_lock(p);
6179+ update_rq_clock(rq);
6180+
6181+ queued = task_queued_only(p);
6182+ if (queued)
6183+ dequeue_task(p);
6184+ __setscheduler(p, SCHED_NORMAL, 0);
6185+ if (task_running(p))
6186+ resched_task(p);
6187+ if (queued) {
6188+ enqueue_task(p);
6189+ try_preempt(p);
6190+ }
6191+
6192+ __task_grq_unlock();
6193+ spin_unlock_irqrestore(&p->pi_lock, flags);
6194+ } while_each_thread(g, p);
6195+
6196+ read_unlock_irq(&tasklist_lock);
6197+}
6198+#endif /* CONFIG_MAGIC_SYSRQ */
6199+
6200+#ifdef CONFIG_IA64
6201+/*
6202+ * These functions are only useful for the IA64 MCA handling.
6203+ *
6204+ * They can only be called when the whole system has been
6205+ * stopped - every CPU needs to be quiescent, and no scheduling
6206+ * activity can take place. Using them for anything else would
6207+ * be a serious bug, and as a result, they aren't even visible
6208+ * under any other configuration.
6209+ */
6210+
6211+/**
6212+ * curr_task - return the current task for a given cpu.
6213+ * @cpu: the processor in question.
6214+ *
6215+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6216+ */
6217+struct task_struct *curr_task(int cpu)
6218+{
6219+ return cpu_curr(cpu);
6220+}
6221+
6222+/**
6223+ * set_curr_task - set the current task for a given cpu.
6224+ * @cpu: the processor in question.
6225+ * @p: the task pointer to set.
6226+ *
6227+ * Description: This function must only be used when non-maskable interrupts
6228+ * are serviced on a separate stack. It allows the architecture to switch the
6229+ * notion of the current task on a cpu in a non-blocking manner. This function
6230+ * must be called with all CPUs synchronized and interrupts disabled; the
6231+ * caller must save the original value of the current task (see
6232+ * curr_task() above) and restore that value before reenabling interrupts and
6233+ * re-starting the system.
6234+ *
6235+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6236+ */
6237+void set_curr_task(int cpu, struct task_struct *p)
6238+{
6239+ cpu_curr(cpu) = p;
6240+}
6241+
6242+#endif
6243+
6244+/*
6245+ * Use precise platform statistics if available:
6246+ */
6247+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
6248+cputime_t task_utime(struct task_struct *p)
6249+{
6250+ return p->utime;
6251+}
6252+
6253+cputime_t task_stime(struct task_struct *p)
6254+{
6255+ return p->stime;
6256+}
6257+#else
6258+cputime_t task_utime(struct task_struct *p)
6259+{
6260+ clock_t utime = cputime_to_clock_t(p->utime),
6261+ total = utime + cputime_to_clock_t(p->stime);
6262+ u64 temp;
6263+
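+ /*
+ * Scale the precise sum_exec_runtime by the utime share of the
+ * tick-sampled utime + stime totals.
+ */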
6264+ temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
6265+
6266+ if (total) {
6267+ temp *= utime;
6268+ do_div(temp, total);
6269+ }
6270+ utime = (clock_t)temp;
6271+
6272+ p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
6273+ return p->prev_utime;
6274+}
6275+
6276+cputime_t task_stime(struct task_struct *p)
6277+{
6278+ clock_t stime;
6279+
6280+ stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
6281+ cputime_to_clock_t(task_utime(p));
6282+
6283+ if (stime >= 0)
6284+ p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
6285+
6286+ return p->prev_stime;
6287+}
6288+#endif
6289+
6290+inline cputime_t task_gtime(struct task_struct *p)
6291+{
6292+ return p->gtime;
6293+}
6294+
6295+void __cpuinit init_idle_bootup_task(struct task_struct *idle)
6296+{}
6297+
6298+#ifdef CONFIG_SCHED_DEBUG
6299+void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
6300+{}
6301+
6302+void proc_sched_set_task(struct task_struct *p)
6303+{}
6304+#endif
6305--- a/kernel/sysctl.c
6306+++ b/kernel/sysctl.c
6307@@ -83,6 +83,8 @@ extern int percpu_pagelist_fraction;
6308 extern int compat_log;
6309 extern int latencytop_enabled;
6310 extern int sysctl_nr_open_min, sysctl_nr_open_max;
6311+extern int rr_interval;
6312+extern int sched_iso_cpu;
6313 #ifndef CONFIG_MMU
6314 extern int sysctl_nr_trim_pages;
6315 #endif
6316@@ -100,7 +102,8 @@ static int zero;
6317 static int __maybe_unused one = 1;
6318 static int __maybe_unused two = 2;
6319 static unsigned long one_ul = 1;
6320-static int one_hundred = 100;
6321+static int __read_mostly one_hundred = 100;
6322+static int __maybe_unused __read_mostly five_thousand = 5000;
6323 
6324 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
6325 static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
6326@@ -234,7 +237,7 @@ static struct ctl_table root_table[] = {
6327     { .ctl_name = 0 }
6328 };
6329 
6330-#ifdef CONFIG_SCHED_DEBUG
6331+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SCHED_CFS)
6332 static int min_sched_granularity_ns = 100000; /* 100 usecs */
6333 static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
6334 static int min_wakeup_granularity_ns; /* 0 usecs */
6335@@ -242,7 +245,7 @@ static int max_wakeup_granularity_ns = N
6336 #endif
6337 
6338 static struct ctl_table kern_table[] = {
6339-#ifdef CONFIG_SCHED_DEBUG
6340+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SCHED_CFS)
6341     {
6342         .ctl_name = CTL_UNNUMBERED,
6343         .procname = "sched_min_granularity_ns",
6344@@ -327,6 +330,7 @@ static struct ctl_table kern_table[] = {
6345         .proc_handler = &proc_dointvec,
6346     },
6347 #endif
6348+#ifdef CONFIG_SCHED_CFS
6349     {
6350         .ctl_name = CTL_UNNUMBERED,
6351         .procname = "sched_rt_period_us",
6352@@ -351,6 +355,7 @@ static struct ctl_table kern_table[] = {
6353         .mode = 0644,
6354         .proc_handler = &proc_dointvec,
6355     },
6356+#endif
6357 #ifdef CONFIG_PROVE_LOCKING
6358     {
6359         .ctl_name = CTL_UNNUMBERED,
6360@@ -756,6 +761,30 @@ static struct ctl_table kern_table[] = {
6361         .proc_handler = &proc_dointvec,
6362     },
6363 #endif
6364+#ifdef CONFIG_SCHED_BFS
6365+ {
6366+ .ctl_name = CTL_UNNUMBERED,
6367+ .procname = "rr_interval",
6368+ .data = &rr_interval,
6369+ .maxlen = sizeof (int),
6370+ .mode = 0644,
6371+ .proc_handler = &proc_dointvec_minmax,
6372+ .strategy = &sysctl_intvec,
6373+ .extra1 = &one,
6374+ .extra2 = &five_thousand,
6375+ },
6376+ {
6377+ .ctl_name = CTL_UNNUMBERED,
6378+ .procname = "iso_cpu",
6379+ .data = &sched_iso_cpu,
6380+ .maxlen = sizeof (int),
6381+ .mode = 0644,
6382+ .proc_handler = &proc_dointvec_minmax,
6383+ .strategy = &sysctl_intvec,
6384+ .extra1 = &zero,
6385+ .extra2 = &one_hundred,
6386+ },
6387+#endif
6388 #if defined(CONFIG_S390) && defined(CONFIG_SMP)
6389     {
6390         .ctl_name = KERN_SPIN_RETRY,
6391--- a/kernel/workqueue.c
6392+++ b/kernel/workqueue.c
6393@@ -320,7 +320,9 @@ static int worker_thread(void *__cwq)
6394     if (cwq->wq->freezeable)
6395         set_freezable();
6396 
6397+#ifdef CONFIG_SCHED_CFS
6398     set_user_nice(current, -5);
6399+#endif
6400 
6401     for (;;) {
6402         prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
6403--- /dev/null
6404+++ b/include/linux/perf_counter.h
6405@@ -0,0 +1,2 @@
6406+#define perf_counter_init() do {} while(0)
6407+#define perf_counter_task_sched_in(...) do {} while(0)
6408--- /dev/null
6409+++ b/include/trace/events/sched.h
6410@@ -0,0 +1 @@
6411+#include <trace/sched.h>
6412
