/*
 * idle-task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE tasks which are
 * handled in sched_fair.c)
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
{
        return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
        resched_task(rq->idle);
}

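/*
 * The idle task is picked only when no other class has a runnable
 * task; account the switch for schedstats and load-average tracking:
 */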
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
        schedstat_inc(rq, sched_goidle);
        calc_load_account_idle(rq);
        return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
        raw_spin_unlock_irq(&rq->lock);
        printk(KERN_ERR "bad: scheduling from the idle thread!\n");
        dump_stack();
        raw_spin_lock_irq(&rq->lock);
}

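/*
 * The idle task is never enqueued on a runqueue, so there is
 * nothing to put back when it is switched away from:
 */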
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

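/*
 * The idle task has no timeslice or load to account on a scheduler
 * tick, so this is deliberately empty:
 */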
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

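/*
 * No per-task state needs updating when the idle task becomes the
 * current task on a runqueue:
 */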
static void set_curr_task_idle(struct rq *rq)
{
}

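/*
 * A task should not normally be switched to the idle class; if it
 * happens anyway, trigger a reschedule so the change takes effect:
 */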
static void switched_to_idle(struct rq *rq, struct task_struct *p,
                             int running)
{
        /* Can this actually happen?? */
        if (running)
                resched_task(rq->curr);
        else
                check_preempt_curr(rq, p, 0);
}

static void prio_changed_idle(struct rq *rq, struct task_struct *p,
                              int oldprio, int running)
{
        /* This can happen for hotplug CPUs */

        /*
         * Reschedule if we are currently running on this runqueue and
         * our priority decreased, or if we are not currently running
         * on this runqueue and our priority is higher than the current's.
         */
        if (running) {
                if (p->prio > oldprio)
                        resched_task(rq->curr);
        } else
                check_preempt_curr(rq, p, 0);
}

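/*
 * The idle task has no round-robin timeslice, so report an
 * interval of zero:
 */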
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
        return 0;
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
static const struct sched_class idle_sched_class = {
        /* .next is NULL */
        /* no enqueue/yield_task for idle tasks */

        /* dequeue is not valid, we print a debug message there: */
        .dequeue_task           = dequeue_task_idle,

        .check_preempt_curr     = check_preempt_curr_idle,

        .pick_next_task         = pick_next_task_idle,
        .put_prev_task          = put_prev_task_idle,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_idle,
#endif

        .set_curr_task          = set_curr_task_idle,
        .task_tick              = task_tick_idle,

        .get_rr_interval        = get_rr_interval_idle,

        .prio_changed           = prio_changed_idle,
        .switched_to            = switched_to_idle,

        /* no .task_new for idle tasks */
};