/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_RT_GROUP_SCHED

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}
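
/*
 * Illustration of the overload rule above: a runqueue holding three RT
 * tasks (rt_nr_total == 3) of which two may run elsewhere
 * (rt_nr_migratory == 2) is marked overloaded, so peer CPUs running
 * lower-priority work know they can try to pull from it.  Once it drops
 * back to a single RT task, or none of the queued tasks can migrate,
 * the overload flag and the rto_mask bit are cleared again.
 */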

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (!rt_entity_is_task(rt_se))
		return;

	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (rt_se->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}
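
/*
 * Note on the pushable list: it is a plist keyed on p->prio (see the
 * plist_node_init() call above), so pick_next_pushable_task() further
 * down always sees the highest-priority pushable task first.  A task
 * only appears here while it is queued but not running and is allowed
 * on more than one CPU.
 */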

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

#endif /* CONFIG_SMP */

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	int this_cpu = smp_processor_id();
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct sched_rt_entity *rt_se;

	rt_se = rt_rq->tg->rt_se[this_cpu];

	if (rt_rq->rt_nr_running) {
		if (rt_se && !on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, false);
		if (rt_rq->highest_prio.curr < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	int this_cpu = smp_processor_id();
	struct sched_rt_entity *rt_se;

	rt_se = rt_rq->tg->rt_se[this_cpu];

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_rq(smp_processor_id())->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq)->curr);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static int do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
	int i, weight, more = 0;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			more = 1;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return more;
}
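
/*
 * Worked example of the borrowing above, assuming a 4-CPU root domain
 * (weight == 4) and a 1000ms period: if a neighbour has 400ms of unused
 * runtime left (rt_runtime - rt_time == 400ms), we take 400ms / 4 ==
 * 100ms of it, clamped so that our own rt_runtime never exceeds the
 * period.
 */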

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
		    rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void disable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__disable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static void __enable_runtime(struct rq *rq)
{
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_leaf_rt_rq(rt_rq, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void enable_runtime(struct rq *rq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);
	__enable_runtime(rq);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static int balance_runtime(struct rt_rq *rt_rq)
{
	int more = 0;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		more = do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}

	return more;
}
#else /* !CONFIG_SMP */
static inline int balance_runtime(struct rt_rq *rt_rq)
{
	return 0;
}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1;
	const struct cpumask *span;

	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return 1;

	span = sched_rt_period_mask();
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);

		raw_spin_lock(&rq->lock);
		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running)
			idle = 0;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_unlock(&rq->lock);
	}

	return idle;
}
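
/*
 * Example of the replenishment above: with a 950ms runtime budget and an
 * overrun of one period, rt_time is reduced by min(rt_time, 1 * 950ms).
 * A throttled rt_rq whose rt_time falls back below its runtime is
 * unthrottled here and its entities are put back on the run list via
 * sched_rt_rq_enqueue().
 */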

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		rt_rq->rt_throttled = 1;
		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
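
/*
 * With the default bandwidth settings (typically 950000us of runtime per
 * 1000000us period, see sysctl_sched_rt_runtime), an rt_rq that has
 * accumulated more than 0.95s of execution within one period is
 * throttled here and dequeued until do_sched_rt_period_timer()
 * replenishes it.
 */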

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	u64 delta_exec;

	if (!task_has_rt_policy(curr))
		return;

	delta_exec = rq->clock - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq->clock;
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

#if defined CONFIG_SMP

static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);

static inline int next_prio(struct rq *rq)
{
	struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);

	if (next && rt_prio(next->prio))
		return next->prio;
	else
		return MAX_RT_PRIO;
}

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (prio < prev_prio) {

		/*
		 * If the new task is higher in priority than anything on the
		 * run-queue, we know that the previous high becomes our
		 * next-highest.
		 */
		rt_rq->highest_prio.next = prev_prio;

		if (rq->online)
			cpupri_set(&rq->rd->cpupri, rq->cpu, prio);

	} else if (prio == rt_rq->highest_prio.curr)
		/*
		 * If the next task is equal in priority to the highest on
		 * the run-queue, then we implicitly know that the next highest
		 * task cannot be any lower than current
		 */
		rt_rq->highest_prio.next = prio;
	else if (prio < rt_rq->highest_prio.next)
		/*
		 * Otherwise, we need to recompute next-highest
		 */
		rt_rq->highest_prio.next = next_prio(rq);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
		rt_rq->highest_prio.next = next_prio(rq);

	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else
		rt_rq->highest_prio.curr = MAX_RT_PRIO;

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running++;

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running--;

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
		return;

	if (head)
		list_add(&rt_se->run_list, queue);
	else
		list_add_tail(&rt_se->run_list, queue);
	__set_bit(rt_se_prio(rt_se), array->bitmap);

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	list_del_init(&rt_se->run_list);
	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se);
	}
}
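
/*
 * Example with group scheduling: for a task in group A/B, the entity
 * chain walked via ->parent is task -> B -> A.  The first loop records
 * the chain through the ->back pointers and the second loop then
 * dequeues A before B before the task, so each entity is removed while
 * the priorities of the entities below it are still intact.
 */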

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
{
	dequeue_rt_stack(rt_se);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, head);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
{
	dequeue_rt_stack(rt_se);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, false);
	}
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (wakeup)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, head);

	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se);

	dequeue_pushable_task(rq, p);
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
{
	struct rq *rq = task_rq(p);

	if (sd_flag != SD_BALANCE_WAKE)
		return smp_processor_id();

	/*
	 * If the current task is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues, even if the woken RT
	 * task is of higher priority than the current RT task.
	 * RT tasks behave differently than other tasks. If
	 * one gets preempted, we try to push it off to another queue.
	 * So trying to keep a preempting RT task on the same
	 * cache hot CPU will force the running RT task to
	 * a cold CPU. So we would waste the cache of the running,
	 * lower-priority RT task in the hope of saving cache for an
	 * RT task that is just being woken and probably has a
	 * cold cache anyway.
	 */
	if (unlikely(rt_task(rq->curr)) &&
	    (p->rt.nr_cpus_allowed > 1)) {
		int cpu = find_lowest_rq(p);

		return (cpu == -1) ? task_cpu(p) : cpu;
	}

	/*
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away
	 */
	return task_cpu(p);
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	if (rq->curr->rt.nr_cpus_allowed == 1)
		return;

	if (p->rt.nr_cpus_allowed != 1
	    && cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * There appear to be other cpus that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_task(rq->curr);
}

#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !need_resched())
		check_preempt_equal_prio(rq, p);
#endif
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct task_struct *p;
	struct rt_rq *rt_rq;

	rt_rq = &rq->rt;

	if (unlikely(!rt_rq->rt_nr_running))
		return NULL;

	if (rt_rq_throttled(rt_rq))
		return NULL;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	p = rt_task_of(rt_se);
	p->se.exec_start = rq->clock;

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = _pick_next_task_rt(rq);

	/* The running task is never eligible for pushing */
	if (p)
		dequeue_pushable_task(rq, p);

#ifdef CONFIG_SMP
	/*
	 * We detect this state here so that we can avoid taking the RQ
	 * lock again later if there is no need to push
	 */
	rq->post_schedule = has_pushable_tasks(rq);
#endif

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);
	p->se.exec_start = 0;

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->rt.nr_cpus_allowed > 1))
		return 1;
	return 0;
}

/* Return the second highest RT task, NULL otherwise */
static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
{
	struct task_struct *next = NULL;
	struct sched_rt_entity *rt_se;
	struct rt_prio_array *array;
	struct rt_rq *rt_rq;
	int idx;

	for_each_leaf_rt_rq(rt_rq, rq) {
		array = &rt_rq->active;
		idx = sched_find_first_bit(array->bitmap);
next_idx:
		if (idx >= MAX_RT_PRIO)
			continue;
		if (next && next->prio < idx)
			continue;
		list_for_each_entry(rt_se, array->queue + idx, run_list) {
			struct task_struct *p;

			if (!rt_entity_is_task(rt_se))
				continue;

			p = rt_task_of(rt_se);
			if (pick_rt_task(rq, p, cpu)) {
				next = p;
				break;
			}
		}
		if (!next) {
			idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
			goto next_idx;
		}
	}

	return next;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);

	if (task->rt.nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of cpus representing the
	 * lowest priority tasks in the system. Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last cpu that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which cpu is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd)))
				return this_cpu;

			best_cpu = cpumask_first_and(lowest_mask,
						     sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids)
				return best_cpu;
		}
	}

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;
	return -1;
}
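
/*
 * Selection order used above, in short: (1) the CPU the task last ran
 * on, if it is in the cpupri lowest_mask; (2) walking the task's
 * sched_domains, either this_cpu (when it is both in the mask and in
 * the domain span) or the first mask CPU inside the span; (3) this_cpu
 * on its own; (4) any CPU left in the mask; otherwise -1.
 */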

/* Will lock the rq it finds */
static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *lowest_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < RT_MAX_TRIES; tries++) {
		cpu = find_lowest_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		lowest_rq = cpu_rq(cpu);

		/* if the prio of this runqueue changed, try again */
		if (double_lock_balance(rq, lowest_rq)) {
			/*
			 * We had to unlock the run queue. In
			 * the mean time, task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(lowest_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task->se.on_rq)) {

				raw_spin_unlock(&lowest_rq->lock);
				lowest_rq = NULL;
				break;
			}
		}

		/* If this rq is still suitable use it. */
		if (lowest_rq->rt.highest_prio.curr > task->prio)
			break;

		/* try again */
		double_unlock_balance(rq, lowest_rq);
		lowest_rq = NULL;
	}

	return lowest_rq;
}

static struct task_struct *pick_next_pushable_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	p = plist_first_entry(&rq->rt.pushable_tasks,
			      struct task_struct, pushable_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->rt.nr_cpus_allowed <= 1);

	BUG_ON(!p->se.on_rq);
	BUG_ON(!rt_task(p));

	return p;
}

/*
 * If the current CPU has more than one RT task, see if the non
 * running task can migrate over to a CPU that is running a task
 * of lesser priority.
 */
static int push_rt_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *lowest_rq;

	if (!rq->rt.overloaded)
		return 0;

	next_task = pick_next_pushable_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * It's possible that the next_task slipped in of
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
	if (unlikely(next_task->prio < rq->curr->prio)) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* find_lock_lowest_rq locks the rq if found */
	lowest_rq = find_lock_lowest_rq(next_task, rq);
	if (!lowest_rq) {
		struct task_struct *task;
		/*
		 * find_lock_lowest_rq releases rq->lock
		 * so it is possible that next_task has migrated.
		 *
		 * We need to make sure that the task is still on the same
		 * run-queue and is also still the next task eligible for
		 * pushing.
		 */
		task = pick_next_pushable_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * If we get here, the task hasn't moved at all, but
			 * it has failed to push. We will not try again,
			 * since the other cpus will pull from us when they
			 * are ready.
			 */
			dequeue_pushable_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks, just exit */
			goto out;

		/*
		 * Something has shifted, try again.
		 */
		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, lowest_rq->cpu);
	activate_task(lowest_rq, next_task, 0);

	resched_task(lowest_rq->curr);

	double_unlock_balance(rq, lowest_rq);

out:
	put_task_struct(next_task);

	return 1;
}

static void push_rt_tasks(struct rq *rq)
{
	/* push_rt_task will return true if it moved an RT */
	while (push_rt_task(rq))
		;
}

static int pull_rt_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;

	if (likely(!rt_overloaded(this_rq)))
		return 0;

	for_each_cpu(cpu, this_rq->rd->rto_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
		if (src_rq->rt.highest_prio.next >=
		    this_rq->rt.highest_prio.curr)
			continue;

		/*
		 * We can potentially drop this_rq's lock in
		 * double_lock_balance, and another CPU could
		 * alter this_rq
		 */
		double_lock_balance(this_rq, src_rq);

		/*
		 * Are there still pullable RT tasks?
		 */
		if (src_rq->rt.rt_nr_running <= 1)
			goto skip;

		p = pick_next_highest_task_rt(src_rq, this_cpu);

		/*
		 * Do we have an RT task that preempts
		 * the to-be-scheduled task?
		 */
		if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->se.on_rq);

			/*
			 * There's a chance that p is higher in priority
			 * than what's currently running on its cpu.
			 * This is just that p is waking up and hasn't
			 * had a chance to schedule. We only pull
			 * p if it is lower in priority than the
			 * current task on the run queue
			 */
			if (p->prio < src_rq->curr->prio)
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			/*
			 * We continue with the search, just in
			 * case there's an even higher prio task
			 * in another runqueue. (low likelihood
			 * but possible)
			 */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}
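
/*
 * In short, a pull only happens when some other runqueue in the root
 * domain is overloaded, its next-highest task outranks whatever this
 * runqueue is about to run, and that task is not itself of higher
 * priority than the task currently running on its own CPU (in which
 * case it is about to run there and is left alone).  Several tasks may
 * be pulled in one pass if multiple source runqueues qualify.
 */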

static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
		pull_rt_task(rq);
}

static void post_schedule_rt(struct rq *rq)
{
	push_rt_tasks(rq);
}

/*
 * If we are not running and we are not going to reschedule soon, we should
 * try to push tasks away now
 */
static void task_woken_rt(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_tasks(rq) &&
	    p->rt.nr_cpus_allowed > 1)
		push_rt_tasks(rq);
}

static void set_cpus_allowed_rt(struct task_struct *p,
				const struct cpumask *new_mask)
{
	int weight = cpumask_weight(new_mask);

	BUG_ON(!rt_task(p));

	/*
	 * Update the migration status of the RQ if we have an RT task
	 * which is running AND changing its weight value.
	 */
	if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
		struct rq *rq = task_rq(p);

		if (!task_current(rq, p)) {
			/*
			 * Make sure we dequeue this task from the pushable list
			 * before going further. It will either remain off of
			 * the list because we are no longer pushable, or it
			 * will be requeued.
			 */
			if (p->rt.nr_cpus_allowed > 1)
				dequeue_pushable_task(rq, p);

			/*
			 * Requeue if our weight is changing and still > 1
			 */
			if (weight > 1)
				enqueue_pushable_task(rq, p);

		}

		if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
			rq->rt.rt_nr_migratory++;
		} else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
			BUG_ON(!rq->rt.rt_nr_migratory);
			rq->rt.rt_nr_migratory--;
		}

		update_rt_migration(&rq->rt);
	}

	cpumask_copy(&p->cpus_allowed, new_mask);
	p->rt.nr_cpus_allowed = weight;
}

/* Assumes rq->lock is held */
static void rq_online_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_set_overload(rq);

	__enable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
}

/* Assumes rq->lock is held */
static void rq_offline_rt(struct rq *rq)
{
	if (rq->rt.overloaded)
		rt_clear_overload(rq);

	__disable_runtime(rq);

	cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
}

/*
 * When switching from the rt queue, we bring ourselves to a position
 * that we might want to pull RT tasks from other runqueues.
 */
static void switched_from_rt(struct rq *rq, struct task_struct *p,
			     int running)
{
	/*
	 * If there are other RT tasks then we will reschedule
	 * and the scheduling of the other RT tasks will handle
	 * the balancing. But if we are the last RT task
	 * we may need to handle the pulling of RT tasks
	 * now.
	 */
	if (!rq->rt.rt_nr_running)
		pull_rt_task(rq);
}

static inline void init_sched_rt_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
					GFP_KERNEL, cpu_to_node(i));
}
#endif /* CONFIG_SMP */

/*
 * When switching a task to RT, we may overload the runqueue
 * with RT tasks. In this case we try to push them off to
 * other runqueues.
 */
static void switched_to_rt(struct rq *rq, struct task_struct *p,
			   int running)
{
	int check_resched = 1;

	/*
	 * If we are already running, then there's nothing
	 * that needs to be done. But if we are not running
	 * we may need to preempt the current running task.
	 * If that current running task is also an RT task
	 * then see if we can move to another run queue.
	 */
	if (!running) {
#ifdef CONFIG_SMP
		if (rq->rt.overloaded && push_rt_task(rq) &&
		    /* Don't resched if we changed runqueues */
		    rq != task_rq(p))
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

/*
 * Priority of the task has changed. This may cause
 * us to initiate a push or pull.
 */
static void prio_changed_rt(struct rq *rq, struct task_struct *p,
			    int oldprio, int running)
{
	if (running) {
#ifdef CONFIG_SMP
		/*
		 * If our priority decreases while running, we
		 * may need to pull tasks to this runqueue.
		 */
		if (oldprio < p->prio)
			pull_rt_task(rq);
		/*
		 * If there's a higher priority task waiting to run
		 * then reschedule. Note, the above pull_rt_task
		 * can release the rq lock and p could migrate.
		 * Only reschedule if p is still on the same runqueue.
		 */
		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
			resched_task(p);
#else
		/* For UP simply resched on drop of prio */
		if (oldprio < p->prio)
			resched_task(p);
#endif /* CONFIG_SMP */
	} else {
		/*
		 * This task is not running, but if it is
		 * greater than the current running task
		 * then reschedule.
		 */
		if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
}

static void watchdog(struct rq *rq, struct task_struct *p)
{
	unsigned long soft, hard;

	if (!p->signal)
		return;

	/* max may change after cur was read, this will be fixed next tick */
	soft = task_rlimit(p, RLIMIT_RTTIME);
	hard = task_rlimit_max(p, RLIMIT_RTTIME);

	if (soft != RLIM_INFINITY) {
		unsigned long next;

		p->rt.timeout++;
		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
		if (p->rt.timeout > next)
			p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
	}
}
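
/*
 * Example of the RLIMIT_RTTIME arithmetic above, assuming HZ == 1000:
 * a soft limit of 500000us gives next = DIV_ROUND_UP(500000, 1000000/1000)
 * = 500 ticks.  Once the task has accumulated more than 500 scheduler
 * ticks of RT runtime without sleeping (p->rt.timeout is reset on
 * wakeup in enqueue_task_rt()), its sched_exp expiry is armed so the
 * CPU-time limit handling can act on it.
 */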

static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_rt(rq);

	watchdog(rq, p);

	/*
	 * RR tasks need a special form of timeslice management.
	 * FIFO tasks have no timeslices.
	 */
	if (p->policy != SCHED_RR)
		return;

	if (--p->rt.time_slice)
		return;

	p->rt.time_slice = DEF_TIMESLICE;

	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
	if (p->rt.run_list.prev != p->rt.run_list.next) {
		requeue_task_rt(rq, p, 0);
		set_tsk_need_resched(p);
	}
}
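
/*
 * Round-robin illustration: DEF_TIMESLICE is 100ms worth of ticks
 * (100 * HZ / 1000 in this kernel tree), so a SCHED_RR task runs for at
 * most that many ticks before being moved to the tail of its priority
 * queue, while SCHED_FIFO tasks return early above and are never
 * requeued on tick.
 */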

static void set_curr_task_rt(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq->clock;

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);
}

static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
	/*
	 * Time slice is 0 for SCHED_FIFO tasks
	 */
	if (task->policy == SCHED_RR)
		return DEF_TIMESLICE;
	else
		return 0;
}

static const struct sched_class rt_sched_class = {
	.next = &fair_sched_class,
	.enqueue_task = enqueue_task_rt,
	.dequeue_task = dequeue_task_rt,
	.yield_task = yield_task_rt,

	.check_preempt_curr = check_preempt_curr_rt,

	.pick_next_task = pick_next_task_rt,
	.put_prev_task = put_prev_task_rt,

#ifdef CONFIG_SMP
	.select_task_rq = select_task_rq_rt,

	.set_cpus_allowed = set_cpus_allowed_rt,
	.rq_online = rq_online_rt,
	.rq_offline = rq_offline_rt,
	.pre_schedule = pre_schedule_rt,
	.post_schedule = post_schedule_rt,
	.task_woken = task_woken_rt,
	.switched_from = switched_from_rt,
#endif

	.set_curr_task = set_curr_task_rt,
	.task_tick = task_tick_rt,

	.get_rr_interval = get_rr_interval_rt,

	.prio_changed = prio_changed_rt,
	.switched_to = switched_to_rt,
};

#ifdef CONFIG_SCHED_DEBUG
extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);

static void print_rt_stats(struct seq_file *m, int cpu)
{
	struct rt_rq *rt_rq;

	rcu_read_lock();
	for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
		print_rt_rq(m, cpu, rt_rq);
	rcu_read_unlock();
}
#endif /* CONFIG_SCHED_DEBUG */