/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>

/*
 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
 */
void update_rlimit_cpu(unsigned long rlim_new)
{
	cputime_t cputime;

	cputime = secs_to_cputime(rlim_new);
	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
	    cputime_gt(current->signal->it_prof_expires, cputime)) {
		spin_lock_irq(&current->sighand->siglock);
		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
		spin_unlock_irq(&current->sighand->siglock);
	}
}
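
/*
 * Example (hypothetical caller, for illustration only): a setrlimit()
 * style path could call this after installing a tighter RLIMIT_CPU so
 * the new limit takes effect immediately:
 *
 *	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY)
 *		update_rlimit_cpu(new_rlim.rlim_cur);
 */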

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	read_lock(&tasklist_lock);
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		    same_thread_group(p, current) : thread_group_leader(p))) {
		error = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return error;
}
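
/*
 * For reference, a CPU clockid packs three fields (a simplified sketch
 * of the macros in <linux/posix-timers.h>): bits 0-1 select the clock
 * type (CPUCLOCK_PROF, CPUCLOCK_VIRT or CPUCLOCK_SCHED), bit 2 is the
 * per-thread flag, and the high bits carry the bitwise complement of
 * the target PID, so pid == 0 always names the caller itself:
 *
 *	which     = clock & CPUCLOCK_CLOCK_MASK;	(low two bits)
 *	perthread = clock & CPUCLOCK_PERTHREAD_MASK;	(bit 2)
 *	pid       = ~(clock >> 3);
 */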

static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(cpu.sched);
	else
		cputime_to_timespec(cpu.cpu, tp);
}

static inline int cpu_time_before(const clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return cputime_lt(now.cpu, then.cpu);
	}
}
static inline void cpu_time_add(const clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu = cputime_add(acc->cpu, val.cpu);
	}
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu = cputime_sub(a.cpu, b.cpu);
	}
	return a;
}
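
/*
 * Note: because timespec_to_sample() zeroes .sched before filling .cpu,
 * a sample is all-zero exactly when .sched == 0.  That is why tests of
 * the form "expires.sched == 0" elsewhere in this file can safely mean
 * "timer not armed" for every clock type, not just CPUCLOCK_SCHED.
 */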

/*
 * Divide and limit the result to res >= 1
 *
 * This is necessary to prevent signal delivery starvation, when the result of
 * the division would be rounded down to 0.
 */
static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
{
	cputime_t res = cputime_div(time, div);

	return max_t(cputime_t, res, 1);
}
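
/*
 * Worked example: splitting one remaining tick across div == 4 users
 * gives cputime_div(1, 4) == 0; clamping the quotient to 1 keeps each
 * share nonzero, avoiding the rounded-to-zero result the comment above
 * warns about.
 */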

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
			   union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta < incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = cputime_sub(cputime_add(now.cpu, incr),
				    timer->it.cpu.expires.cpu);
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
			incr = cputime_add(incr, incr);
		for (; i >= 0; incr = cputime_halve(incr), i--) {
			if (cputime_lt(delta, incr))
				continue;
			timer->it.cpu.expires.cpu =
				cputime_add(timer->it.cpu.expires.cpu, incr);
			timer->it_overrun += 1 << i;
			delta = cputime_sub(delta, incr);
		}
	}
}
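
/*
 * Worked example for the loops above: with expires == 10, incr == 3 and
 * now == 25, delta starts at 25 + 3 - 10 == 18.  incr doubles to 12
 * (i == 2), then walks back down: 12 is taken (expires -> 22,
 * it_overrun += 4, delta == 6), 6 is taken (expires -> 28,
 * it_overrun += 2, delta == 0), 3 is skipped.  The timer lands on the
 * first multiple of incr strictly after now, having counted the six
 * elapsed periods in O(log(delta/incr)) steps instead of one by one.
 */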

static inline cputime_t prof_ticks(struct task_struct *p)
{
	return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}

int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much more than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}
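
/*
 * Example: with HZ == 1000, the PROF and VIRT clocks report a
 * resolution of (NSEC_PER_SEC + HZ - 1) / HZ == 1000000 ns (one tick),
 * while a SCHED clock always claims 1 ns.
 */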

int posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = task_sched_runtime(p);
		break;
	}
	return 0;
}

void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct sighand_struct *sighand;
	struct signal_struct *sig;
	struct task_struct *t;

	*times = INIT_CPUTIME;

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	if (!sighand)
		goto out;

	sig = tsk->signal;

	t = tsk;
	do {
		times->utime = cputime_add(times->utime, t->utime);
		times->stime = cputime_add(times->stime, t->stime);
		times->sum_exec_runtime += t->se.sum_exec_runtime;

		t = next_thread(t);
	} while (t != tsk);

	times->utime = cputime_add(times->utime, sig->utime);
	times->stime = cputime_add(times->stime, sig->stime);
	times->sum_exec_runtime += sig->sum_sched_runtime;
out:
	rcu_read_unlock();
}

static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
	if (cputime_gt(b->utime, a->utime))
		a->utime = b->utime;

	if (cputime_gt(b->stime, a->stime))
		a->stime = b->stime;

	if (b->sum_exec_runtime > a->sum_exec_runtime)
		a->sum_exec_runtime = b->sum_exec_runtime;
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	spin_lock_irqsave(&cputimer->lock, flags);
	if (!cputimer->running) {
		cputimer->running = 1;
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		update_gt_cputime(&cputimer->cputime, &sum);
	}
	*times = cputimer->cputime;
	spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = thread_group_sched_runtime(p);
		break;
	}
	return 0;
}


int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (same_thread_group(p, current)) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else {
				read_lock(&tasklist_lock);
				if (thread_group_leader(p) && p->signal) {
					error =
					    cpu_clock_sample_group(which_clock,
								   p, &rtn);
				}
				read_unlock(&tasklist_lock);
			}
		}
		rcu_read_unlock();
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}


/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
	new_timer->it.cpu.incr.sched = 0;
	new_timer->it.cpu.expires.sched = 0;

	read_lock(&tasklist_lock);
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !thread_group_leader(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	int ret = 0;

	if (likely(p != NULL)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			spin_lock(&p->sighand->siglock);
			if (timer->it.cpu.firing)
				ret = TIMER_RETRY;
			else
				list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);

		if (!ret)
			put_task_struct(p);
	}

	return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sum_exec_runtime)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = cputime_add(utime, stime);

	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, ptime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 ptime);
		}
	}

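	/*
	 * head points into an array of three lists, indexed by
	 * CPUCLOCK_PROF, CPUCLOCK_VIRT and CPUCLOCK_SCHED; each ++head
	 * below steps on to the next clock's timer list.
	 */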
	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, utime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 utime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		list_del_init(&timer->entry);
		if (timer->expires.sched < sum_exec_runtime) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sum_exec_runtime;
		}
	}
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);

}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	struct signal_struct *const sig = tsk->signal;

	cleanup_timers(tsk->signal->cpu_timers,
		       cputime_add(tsk->utime, sig->utime),
		       cputime_add(tsk->stime, sig->stime),
		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;
	unsigned long i;

	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
		p->cpu_timers : p->signal->cpu_timers);
	head += CPUCLOCK_WHICH(timer->it_clock);

	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);

	listpos = head;
	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		list_for_each_entry(next, head, entry) {
			if (next->expires.sched > nt->expires.sched)
				break;
			listpos = &next->entry;
		}
	} else {
		list_for_each_entry(next, head, entry) {
			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
				break;
			listpos = &next->entry;
		}
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		/*
		 * We are the new earliest-expiring timer.
		 * If we are a thread timer, there can always
		 * be a process timer telling us to stop earlier.
		 */

		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_PROF:
				if (cputime_eq(p->cputime_expires.prof_exp,
					       cputime_zero) ||
				    cputime_gt(p->cputime_expires.prof_exp,
					       nt->expires.cpu))
					p->cputime_expires.prof_exp =
						nt->expires.cpu;
				break;
			case CPUCLOCK_VIRT:
				if (cputime_eq(p->cputime_expires.virt_exp,
					       cputime_zero) ||
				    cputime_gt(p->cputime_expires.virt_exp,
					       nt->expires.cpu))
					p->cputime_expires.virt_exp =
						nt->expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				if (p->cputime_expires.sched_exp == 0 ||
				    p->cputime_expires.sched_exp >
							nt->expires.sched)
					p->cputime_expires.sched_exp =
						nt->expires.sched;
				break;
			}
		} else {
			/*
			 * For a process timer, set the cached expiration time.
			 */
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_VIRT:
				if (!cputime_eq(p->signal->it_virt_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_virt_expires,
					       timer->it.cpu.expires.cpu))
					break;
				p->signal->cputime_expires.virt_exp =
					timer->it.cpu.expires.cpu;
				break;
			case CPUCLOCK_PROF:
				if (!cputime_eq(p->signal->it_prof_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_prof_expires,
					       timer->it.cpu.expires.cpu))
					break;
				i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
				if (i != RLIM_INFINITY &&
				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
					break;
				p->signal->cputime_expires.prof_exp =
					timer->it.cpu.expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				p->signal->cputime_expires.sched_exp =
					timer->it.cpu.expires.sched;
				break;
			}
		}
	}

	spin_unlock(&p->sighand->siglock);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = cputime.utime;
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->signal.  If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->signal == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());

	ret = 0;
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);
	spin_unlock(&p->sighand->siglock);

763 | * We need to sample the current value to convert the new |
764 | * value from to relative and absolute, and to convert the |
765 | * old value from absolute to relative. To set a process |
766 | * timer, we need a sample to balance the thread expiry |
767 | * times (in arm_timer). With an absolute time, we must |
768 | * check if it's already passed. In short, we need a sample. |
769 | */ |
770 | if (CPUCLOCK_PERTHREAD(timer->it_clock)) { |
771 | cpu_clock_sample(timer->it_clock, p, &val); |
772 | } else { |
773 | cpu_timer_sample_group(timer->it_clock, p, &val); |
774 | } |
775 | |
776 | if (old) { |
777 | if (old_expires.sched == 0) { |
778 | old->it_value.tv_sec = 0; |
779 | old->it_value.tv_nsec = 0; |
780 | } else { |
781 | /* |
782 | * Update the timer in case it has |
783 | * overrun already. If it has, |
784 | * we'll report it as having overrun |
785 | * and with the next reloaded timer |
786 | * already ticking, though we are |
787 | * swallowing that pending |
788 | * notification here to install the |
789 | * new setting. |
790 | */ |
791 | bump_cpu_timer(timer, val); |
792 | if (cpu_time_before(timer->it_clock, val, |
793 | timer->it.cpu.expires)) { |
794 | old_expires = cpu_time_sub( |
795 | timer->it_clock, |
796 | timer->it.cpu.expires, val); |
797 | sample_to_timespec(timer->it_clock, |
798 | old_expires, |
799 | &old->it_value); |
800 | } else { |
801 | old->it_value.tv_nsec = 1; |
802 | old->it_value.tv_sec = 0; |
803 | } |
804 | } |
805 | } |
806 | |
807 | if (unlikely(ret)) { |
808 | /* |
809 | * We are colliding with the timer actually firing. |
810 | * Punt after filling in the timer's old value, and |
811 | * disable this firing since we are already reporting |
812 | * it as an overrun (thanks to bump_cpu_timer above). |
813 | */ |
814 | read_unlock(&tasklist_lock); |
815 | goto out; |
816 | } |
817 | |
818 | if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) { |
819 | cpu_time_add(timer->it_clock, &new_expires, val); |
820 | } |
821 | |
822 | /* |
823 | * Install the new expiry time (or zero). |
824 | * For a timer with no notification action, we don't actually |
825 | * arm the timer (we'll just fake it for timer_gettime). |
826 | */ |
827 | timer->it.cpu.expires = new_expires; |
828 | if (new_expires.sched != 0 && |
829 | (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE && |
830 | cpu_time_before(timer->it_clock, val, new_expires)) { |
831 | arm_timer(timer, val); |
832 | } |
833 | |
834 | read_unlock(&tasklist_lock); |
835 | |
836 | /* |
837 | * Install the new reload setting, and |
838 | * set up the signal and overrun bookkeeping. |
839 | */ |
840 | timer->it.cpu.incr = timespec_to_sample(timer->it_clock, |
841 | &new->it_interval); |
842 | |
843 | /* |
844 | * This acts as a modification timestamp for the timer, |
845 | * so any automatic reload attempt will punt on seeing |
846 | * that we have reset the timer manually. |
847 | */ |
848 | timer->it_requeue_pending = (timer->it_requeue_pending + 2) & |
849 | ~REQUEUE_PENDING; |
850 | timer->it_overrun_last = 0; |
851 | timer->it_overrun = -1; |
852 | |
853 | if (new_expires.sched != 0 && |
854 | (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE && |
855 | !cpu_time_before(timer->it_clock, val, new_expires)) { |
856 | /* |
857 | * The designated time already passed, so we notify |
858 | * immediately, even if the thread never runs to |
859 | * accumulate more time on this clock. |
860 | */ |
861 | cpu_timer_fire(timer); |
862 | } |
863 | |
864 | ret = 0; |
865 | out: |
866 | if (old) { |
867 | sample_to_timespec(timer->it_clock, |
868 | timer->it.cpu.incr, &old->it_interval); |
869 | } |
870 | return ret; |
871 | } |
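
/*
 * Callers in the generic posix-timers code are expected to retry on
 * TIMER_RETRY after dropping the timer's lock; a sketch (not the
 * literal sys_timer_settime code) of that loop:
 *
 *	for (;;) {
 *		timr = lock_timer(timer_id, &irqflags);
 *		error = posix_cpu_timer_set(timr, abs_flag, &new, &old);
 *		unlock_timer(timr, irqflags);
 *		if (error != TIMER_RETRY)
 *			break;
 *	}
 */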

void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
	dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		if (timer->it.cpu.incr.sched == 0 &&
		    cpu_time_before(timer->it_clock,
				    timer->it.cpu.expires, now)) {
			/*
			 * Do-nothing timer expired and has no reload,
			 * so it's as if it was never set.
			 */
			timer->it.cpu.expires.sched = 0;
			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
			return;
		}
		/*
		 * Account for any expirations and reloads that should
		 * have happened.
		 */
		bump_cpu_timer(timer, now);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped.  Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	int maxfire;
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;

	maxfire = 20;
	tsk->cputime_expires.prof_exp = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
			tsk->cputime_expires.prof_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.virt_exp = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
			tsk->cputime_expires.virt_exp = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	maxfire = 20;
	tsk->cputime_expires.sched_exp = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
			tsk->cputime_expires.sched_exp = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case thread timers.
	 */
	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
								USEC_PER_SEC;
			}
			printk(KERN_INFO
				"RT Watchdog Timeout: %s[%d]\n",
				tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}

static void stop_process_timers(struct task_struct *tsk)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	unsigned long flags;

	if (!cputimer->running)
		return;

	spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Check for any process-wide (thread group) CPU timers that have fired
 * and move them off the tsk->signal->cpu_timers[N] lists onto the firing
 * list.  Per-thread timers have already been taken off by
 * check_thread_timers().
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	int maxfire;
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;

	/*
	 * Don't sample the current process CPU clocks if there are no timers.
	 */
	if (list_empty(&timers[CPUCLOCK_PROF]) &&
	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
	    list_empty(&timers[CPUCLOCK_VIRT]) &&
	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
	    list_empty(&timers[CPUCLOCK_SCHED])) {
		stop_process_timers(tsk);
		return;
	}

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime.utime;
	ptime = cputime_add(utime, cputime.stime);
	sum_sched_runtime = cputime.sum_exec_runtime;
	maxfire = 20;
	prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
			prof_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
			virt_expires = tl->expires.cpu;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	++timers;
	maxfire = 20;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *tl = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
		if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
			sched_expires = tl->expires.sched;
			break;
		}
		tl->firing = 1;
		list_move_tail(&tl->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
		if (cputime_ge(ptime, sig->it_prof_expires)) {
			/* ITIMER_PROF fires and reloads.  */
			sig->it_prof_expires = sig->it_prof_incr;
			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
				sig->it_prof_expires = cputime_add(
					sig->it_prof_expires, ptime);
			}
			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
		    (cputime_eq(prof_expires, cputime_zero) ||
		     cputime_lt(sig->it_prof_expires, prof_expires))) {
			prof_expires = sig->it_prof_expires;
		}
	}
	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
		if (cputime_ge(utime, sig->it_virt_expires)) {
			/* ITIMER_VIRTUAL fires and reloads.  */
			sig->it_virt_expires = sig->it_virt_incr;
			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
				sig->it_virt_expires = cputime_add(
					sig->it_virt_expires, utime);
			}
			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
		    (cputime_eq(virt_expires, cputime_zero) ||
		     cputime_lt(sig->it_virt_expires, virt_expires))) {
			virt_expires = sig->it_virt_expires;
		}
	}
	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		cputime_t x;
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (sig->rlim[RLIMIT_CPU].rlim_cur
			    < sig->rlim[RLIMIT_CPU].rlim_max) {
				sig->rlim[RLIMIT_CPU].rlim_cur++;
			}
		}
		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		if (cputime_eq(prof_expires, cputime_zero) ||
		    cputime_lt(x, prof_expires)) {
			prof_expires = x;
		}
	}

	if (!cputime_eq(prof_expires, cputime_zero) &&
	    (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
	     cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
		sig->cputime_expires.prof_exp = prof_expires;
	if (!cputime_eq(virt_expires, cputime_zero) &&
	    (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
	     cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
		sig->cputime_expires.virt_exp = virt_expires;
	if (sched_expires != 0 &&
	    (sig->cputime_expires.sched_exp == 0 ||
	     sig->cputime_expires.sched_exp > sched_expires))
		sig->cputime_expires.sched_exp = sched_expires;
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		goto out;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			goto out;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it.  */
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			goto out_unlock;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped.  Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			goto out_unlock;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, now);

out_unlock:
	read_unlock(&tasklist_lock);

out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (cputime_eq(cputime->utime, cputime_zero) &&
	    cputime_eq(cputime->stime, cputime_zero) &&
	    cputime->sum_exec_runtime == 0)
		return 1;
	return 0;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (!cputime_eq(expires->utime, cputime_zero) &&
	    cputime_ge(sample->utime, expires->utime))
		return 1;
	if (!cputime_eq(expires->stime, cputime_zero) &&
	    cputime_ge(cputime_add(sample->utime, sample->stime),
		       expires->stime))
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;

	/* tsk == current, ensure it is safe to use ->signal/sighand */
	if (unlikely(tsk->exit_state))
		return 0;

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = tsk->utime,
			.stime = tsk->stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	if (!task_cputime_zero(&sig->cputime_expires)) {
		struct task_cputime group_sample;

		thread_group_cputimer(tsk, &group_sample);
		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;

	BUG_ON(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	spin_lock(&tsk->sighand->siglock);
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	spin_unlock(&tsk->sighand->siglock);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers.
 * The tsk->sighand->siglock must be held by the caller.
 * The *newval argument is relative and we update it to be absolute, *oldval
 * is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;
	struct list_head *head;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		if (!cputime_eq(*oldval, cputime_zero)) {
			if (cputime_le(*oldval, now.cpu)) {
				/* Just about to fire.  */
				*oldval = jiffies_to_cputime(1);
			} else {
				*oldval = cputime_sub(*oldval, now.cpu);
			}
		}

		if (cputime_eq(*newval, cputime_zero))
			return;
		*newval = cputime_add(*newval, now.cpu);

		/*
		 * If the RLIMIT_CPU timer will expire before the
		 * ITIMER_PROF timer, we have nothing else to do.
		 */
		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
		    < cputime_to_secs(*newval))
			return;
	}

	/*
	 * Check whether there are any process timers already set to fire
	 * before this one.  If so, we don't have anything more to do.
	 */
	head = &tsk->signal->cpu_timers[clock_idx];
	if (list_empty(head) ||
	    cputime_ge(list_first_entry(head,
				  struct cpu_timer_list, entry)->expires.cpu,
		       *newval)) {
		switch (clock_idx) {
		case CPUCLOCK_PROF:
			tsk->signal->cputime_expires.prof_exp = *newval;
			break;
		case CPUCLOCK_VIRT:
			tsk->signal->cputime_expires.virt_exp = *newval;
			break;
		}
	}
}
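
/*
 * update_rlimit_cpu() near the top of this file is one caller: holding
 * the siglock, it passes the new RLIMIT_CPU value as *newval with
 * oldval == NULL to pull the CPUCLOCK_PROF expiry forward.
 */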

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset.
				 */
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		posix_cpu_timer_set(&timer, 0, &zero_it, it);
		spin_unlock_irq(&timer.it_lock);

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}

int posix_cpu_nsleep(const clockid_t which_clock, int flags,
		     struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->arg0 = which_clock;
		restart_block->arg1 = (unsigned long) rmtp;
		restart_block->arg2 = rqtp->tv_sec;
		restart_block->arg3 = rqtp->tv_nsec;
	}
	return error;
}

long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;
	struct timespec __user *rmtp;
	struct timespec t;
	struct itimerspec it;
	int error;

	rmtp = (struct timespec __user *) restart_block->arg1;
	t.tv_sec = restart_block->arg2;
	t.tv_nsec = restart_block->arg3;

	restart_block->fn = do_no_restart_syscall;
	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp != NULL && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->arg0 = which_clock;
		restart_block->arg1 = (unsigned long) rmtp;
		restart_block->arg2 = t.tv_sec;
		restart_block->arg3 = t.tv_nsec;
	}
	return error;

}


#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
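
/*
 * Using the clockid encoding sketched after check_clock(): both macros
 * name the caller itself (pid == 0) with the CPUCLOCK_SCHED clock;
 * THREAD_CLOCK additionally sets the per-thread bit.  These two back
 * CLOCK_PROCESS_CPUTIME_ID and CLOCK_THREAD_CPUTIME_ID as registered
 * in init_posix_cpu_timers() below.
 */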

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(const clockid_t which_clock, int flags,
			     struct timespec *rqtp, struct timespec __user *rmtp)
{
	return -EINVAL;
}
static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres = process_cpu_clock_getres,
		.clock_get = process_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = process_cpu_timer_create,
		.nsleep = process_cpu_nsleep,
		.nsleep_restart = process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres = thread_cpu_clock_getres,
		.clock_get = thread_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = thread_cpu_timer_create,
		.nsleep = thread_cpu_nsleep,
		.nsleep_restart = thread_cpu_nsleep_restart,
	};

	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	return 0;
}
__initcall(init_posix_cpu_timers);