/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>

/*
 * Called after updating RLIMIT_CPU to run the cpu timer and update
 * the tsk->signal->cputime_expires expiration cache if necessary.
 * Needs siglock protection since other code may update the expiration
 * cache as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
        cputime_t cputime = secs_to_cputime(rlim_new);

        spin_lock_irq(&task->sighand->siglock);
        set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
        spin_unlock_irq(&task->sighand->siglock);
}

static int check_clock(const clockid_t which_clock)
{
        int error = 0;
        struct task_struct *p;
        const pid_t pid = CPUCLOCK_PID(which_clock);

        if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        if (pid == 0)
                return 0;

        rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
                    same_thread_group(p, current) : has_group_leader_pid(p))) {
                error = -EINVAL;
        }
        rcu_read_unlock();

        return error;
}
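
/*
 * Editorial note on the clockid encoding decoded above (a sketch, based
 * on the CPUCLOCK_* macros in <linux/posix-timers.h>): a CPU clockid
 * packs a PID and a clock type into a single clockid_t, roughly
 *
 *	clockid = ((~pid) << 3) | clock_type
 *
 * where the low two bits select CPUCLOCK_PROF, CPUCLOCK_VIRT or
 * CPUCLOCK_SCHED, and bit 2 marks a per-thread (rather than a
 * per-process) clock.  So pid == 0 names the caller's own clock, which
 * is why no task lookup is needed in that case.
 */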

static inline union cpu_time_count
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
        union cpu_time_count ret;
        ret.sched = 0;          /* high half always zero when .cpu used */
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
        } else {
                ret.cpu = timespec_to_cputime(tp);
        }
        return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                *tp = ns_to_timespec(cpu.sched);
        else
                cputime_to_timespec(cpu.cpu, tp);
}

static inline int cpu_time_before(const clockid_t which_clock,
                                  union cpu_time_count now,
                                  union cpu_time_count then)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                return now.sched < then.sched;
        } else {
                return now.cpu < then.cpu;
        }
}
static inline void cpu_time_add(const clockid_t which_clock,
                                union cpu_time_count *acc,
                                union cpu_time_count val)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                acc->sched += val.sched;
        } else {
                acc->cpu += val.cpu;
        }
}
static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
                                                union cpu_time_count a,
                                                union cpu_time_count b)
{
        if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                a.sched -= b.sched;
        } else {
                a.cpu -= b.cpu;
        }
        return a;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static void bump_cpu_timer(struct k_itimer *timer,
                           union cpu_time_count now)
{
        int i;

        if (timer->it.cpu.incr.sched == 0)
                return;

        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                unsigned long long delta, incr;

                if (now.sched < timer->it.cpu.expires.sched)
                        return;
                incr = timer->it.cpu.incr.sched;
                delta = now.sched + incr - timer->it.cpu.expires.sched;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr = incr << 1;
                for (; i >= 0; incr >>= 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.sched += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        } else {
                cputime_t delta, incr;

                if (now.cpu < timer->it.cpu.expires.cpu)
                        return;
                incr = timer->it.cpu.incr.cpu;
                delta = now.cpu + incr - timer->it.cpu.expires.cpu;
                /* Don't use (incr*2 < delta), incr*2 might overflow. */
                for (i = 0; incr < delta - incr; i++)
                        incr += incr;
                for (; i >= 0; incr = incr >> 1, i--) {
                        if (delta < incr)
                                continue;
                        timer->it.cpu.expires.cpu += incr;
                        timer->it_overrun += 1 << i;
                        delta -= incr;
                }
        }
}
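
/*
 * Worked example of the doubling/halving walk above (editorial, with
 * made-up numbers): with expires = 100, incr = 10 and now = 137, we get
 * delta = 137 + 10 - 100 = 47.  The first loop doubles incr while it
 * stays below delta - incr, ending with incr = 40 and i = 2.  The second
 * loop subtracts 40 (adding 1 << 2 = 4 overruns), leaving delta = 7, and
 * then skips the 20 and 10 steps.  Net effect: expires moves to 140, the
 * first period strictly after now, and it_overrun grows by 4, the number
 * of whole periods that elapsed -- in logarithmically many steps, while
 * the (delta - incr) comparison sidesteps the incr*2 overflow the
 * comments warn about.
 */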

static inline cputime_t prof_ticks(struct task_struct *p)
{
        cputime_t utime, stime;

        task_cputime(p, &utime, &stime);

        return utime + stime;
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
        cputime_t utime;

        task_cputime(p, &utime, NULL);

        return utime;
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
        int error = check_clock(which_clock);
        if (!error) {
                tp->tv_sec = 0;
                tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
                if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                        /*
                         * If sched_clock is using a cycle counter, we
                         * don't have any idea of its true resolution
                         * exported, but it is much more than 1s/HZ.
                         */
                        tp->tv_nsec = 1;
                }
        }
        return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
        /*
         * You can never reset a CPU clock, but we check for other errors
         * in the call before failing with EPERM.
         */
        int error = check_clock(which_clock);
        if (error == 0) {
                error = -EPERM;
        }
        return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
                            union cpu_time_count *cpu)
{
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = prof_ticks(p);
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = virt_ticks(p);
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = task_sched_runtime(p);
                break;
        }
        return 0;
}

static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
        if (b->utime > a->utime)
                a->utime = b->utime;

        if (b->stime > a->stime)
                a->stime = b->stime;

        if (b->sum_exec_runtime > a->sum_exec_runtime)
                a->sum_exec_runtime = b->sum_exec_runtime;
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;
        unsigned long flags;

        if (!cputimer->running) {
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
                 * to synchronize the timer to the clock every time we start
                 * it.
                 */
                thread_group_cputime(tsk, &sum);
                raw_spin_lock_irqsave(&cputimer->lock, flags);
                cputimer->running = 1;
                update_gt_cputime(&cputimer->cputime, &sum);
        } else
                raw_spin_lock_irqsave(&cputimer->lock, flags);
        *times = cputimer->cputime;
        raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                thread_group_cputime(p, &cputime);
                cpu->cpu = cputime.utime + cputime.stime;
                break;
        case CPUCLOCK_VIRT:
                thread_group_cputime(p, &cputime);
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                thread_group_cputime(p, &cputime);
                cpu->sched = cputime.sum_exec_runtime;
                break;
        }
        return 0;
}


static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
        const pid_t pid = CPUCLOCK_PID(which_clock);
        int error = -EINVAL;
        union cpu_time_count rtn;

        if (pid == 0) {
                /*
                 * Special case constant value for our own clocks.
                 * We don't have to do any lookup to find ourselves.
                 */
                if (CPUCLOCK_PERTHREAD(which_clock)) {
                        /*
                         * Sampling just ourselves we can do with no locking.
                         */
                        error = cpu_clock_sample(which_clock,
                                                 current, &rtn);
                } else {
                        read_lock(&tasklist_lock);
                        error = cpu_clock_sample_group(which_clock,
                                                       current, &rtn);
                        read_unlock(&tasklist_lock);
                }
        } else {
                /*
                 * Find the given PID, and validate that the caller
                 * should be able to see it.
                 */
                struct task_struct *p;
                rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (p) {
                        if (CPUCLOCK_PERTHREAD(which_clock)) {
                                if (same_thread_group(p, current)) {
                                        error = cpu_clock_sample(which_clock,
                                                                 p, &rtn);
                                }
                        } else {
                                read_lock(&tasklist_lock);
                                if (thread_group_leader(p) && p->sighand) {
                                        error =
                                            cpu_clock_sample_group(which_clock,
                                                                   p, &rtn);
                                }
                                read_unlock(&tasklist_lock);
                        }
                }
                rcu_read_unlock();
        }

        if (error)
                return error;
        sample_to_timespec(which_clock, rtn, tp);
        return 0;
}
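
/*
 * Illustrative userspace view of the path above (editorial sketch, not
 * kernel code): the syscalls backed by this file are reached through the
 * standard POSIX clock API, e.g.
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);	// group clock
 *	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);	// own thread
 *
 * Another thread's or process's clock can be named via
 * clock_getcpuclockid()/pthread_getcpuclockid(), which yield the encoded
 * clockid values that posix_cpu_clock_get() decodes here.
 */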


/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
        int ret = 0;
        const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
        struct task_struct *p;

        if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
                return -EINVAL;

        INIT_LIST_HEAD(&new_timer->it.cpu.entry);

        rcu_read_lock();
        if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
                if (pid == 0) {
                        p = current;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !same_thread_group(p, current))
                                p = NULL;
                }
        } else {
                if (pid == 0) {
                        p = current->group_leader;
                } else {
                        p = find_task_by_vpid(pid);
                        if (p && !has_group_leader_pid(p))
                                p = NULL;
                }
        }
        new_timer->it.cpu.task = p;
        if (p) {
                get_task_struct(p);
        } else {
                ret = -EINVAL;
        }
        rcu_read_unlock();

        return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        int ret = 0;

        if (likely(p != NULL)) {
                read_lock(&tasklist_lock);
                if (unlikely(p->sighand == NULL)) {
                        /*
                         * We raced with the reaping of the task.
                         * The deletion should have cleared us off the list.
                         */
                        BUG_ON(!list_empty(&timer->it.cpu.entry));
                } else {
                        spin_lock(&p->sighand->siglock);
                        if (timer->it.cpu.firing)
                                ret = TIMER_RETRY;
                        else
                                list_del(&timer->it.cpu.entry);
                        spin_unlock(&p->sighand->siglock);
                }
                read_unlock(&tasklist_lock);

                if (!ret)
                        put_task_struct(p);
        }

        return ret;
}

/*
 * Clean out CPU timers still ticking when a thread exited.  The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
                           cputime_t utime, cputime_t stime,
                           unsigned long long sum_exec_runtime)
{
        struct cpu_timer_list *timer, *next;
        cputime_t ptime = utime + stime;

        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.cpu < ptime) {
                        timer->expires.cpu = 0;
                } else {
                        timer->expires.cpu -= ptime;
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.cpu < utime) {
                        timer->expires.cpu = 0;
                } else {
                        timer->expires.cpu -= utime;
                }
        }

        ++head;
        list_for_each_entry_safe(timer, next, head, entry) {
                list_del_init(&timer->entry);
                if (timer->expires.sched < sum_exec_runtime) {
                        timer->expires.sched = 0;
                } else {
                        timer->expires.sched -= sum_exec_runtime;
                }
        }
}
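
/*
 * Editorial note: the bare ++head increments above work because head
 * points into a three-element array of list heads indexed by clock type
 * (CPUCLOCK_PROF = 0, CPUCLOCK_VIRT = 1, CPUCLOCK_SCHED = 2) -- the same
 * layout arm_timer() indexes below with
 * head += CPUCLOCK_WHICH(timer->it_clock).  So the three loops walk, in
 * order, the profiling, virtual and scheduler-clock timer lists, each
 * against its matching accumulated time.
 */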

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
        cputime_t utime, stime;

        add_device_randomness((const void *) &tsk->se.sum_exec_runtime,
                              sizeof(unsigned long long));
        task_cputime(tsk, &utime, &stime);
        cleanup_timers(tsk->cpu_timers,
                       utime, stime, tsk->se.sum_exec_runtime);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, stime;

        task_cputime(tsk, &utime, &stime);
        cleanup_timers(tsk->signal->cpu_timers,
                       utime + sig->utime, stime + sig->stime,
                       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
        /*
         * That's all for this thread or process.
         * We leave our residual in expires to be reported.
         */
        put_task_struct(timer->it.cpu.task);
        timer->it.cpu.task = NULL;
        timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
                                             timer->it.cpu.expires,
                                             now);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
        return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the tasklist_lock held
 * for reading, interrupts disabled and p->sighand->siglock taken.
 */
static void arm_timer(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        struct list_head *head, *listpos;
        struct task_cputime *cputime_expires;
        struct cpu_timer_list *const nt = &timer->it.cpu;
        struct cpu_timer_list *next;

        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                head = p->cpu_timers;
                cputime_expires = &p->cputime_expires;
        } else {
                head = p->signal->cpu_timers;
                cputime_expires = &p->signal->cputime_expires;
        }
        head += CPUCLOCK_WHICH(timer->it_clock);

        listpos = head;
        list_for_each_entry(next, head, entry) {
                if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
                        break;
                listpos = &next->entry;
        }
        list_add(&nt->entry, listpos);

        if (listpos == head) {
                union cpu_time_count *exp = &nt->expires;

                /*
                 * We are the new earliest-expiring POSIX 1.b timer, hence
                 * we need to update the expiration cache.  Take into
                 * account that for process timers we share the expiration
                 * cache with itimers and RLIMIT_CPU, and for thread timers
                 * with RLIMIT_RTTIME.
                 */

                switch (CPUCLOCK_WHICH(timer->it_clock)) {
                case CPUCLOCK_PROF:
                        if (expires_gt(cputime_expires->prof_exp, exp->cpu))
                                cputime_expires->prof_exp = exp->cpu;
                        break;
                case CPUCLOCK_VIRT:
                        if (expires_gt(cputime_expires->virt_exp, exp->cpu))
                                cputime_expires->virt_exp = exp->cpu;
                        break;
                case CPUCLOCK_SCHED:
                        if (cputime_expires->sched_exp == 0 ||
                            cputime_expires->sched_exp > exp->sched)
                                cputime_expires->sched_exp = exp->sched;
                        break;
                }
        }
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
                /*
                 * The user doesn't want any signal.
                 */
                timer->it.cpu.expires.sched = 0;
        } else if (unlikely(timer->sigq == NULL)) {
                /*
                 * This is a special case for clock_nanosleep,
                 * not a normal timer from sys_timer_create.
                 */
                wake_up_process(timer->it_process);
                timer->it.cpu.expires.sched = 0;
        } else if (timer->it.cpu.incr.sched == 0) {
                /*
                 * One-shot timer.  Clear it as soon as it's fired.
                 */
                posix_timer_event(timer, 0);
                timer->it.cpu.expires.sched = 0;
        } else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
                /*
                 * The signal did not get queued because the signal
                 * was ignored, so we won't get any callback to
                 * reload the timer.  But we need to keep it
                 * ticking in case the signal is deliverable next time.
                 */
                posix_cpu_timer_schedule(timer);
        }
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
                                  struct task_struct *p,
                                  union cpu_time_count *cpu)
{
        struct task_cputime cputime;

        thread_group_cputimer(p, &cputime);
        switch (CPUCLOCK_WHICH(which_clock)) {
        default:
                return -EINVAL;
        case CPUCLOCK_PROF:
                cpu->cpu = cputime.utime + cputime.stime;
                break;
        case CPUCLOCK_VIRT:
                cpu->cpu = cputime.utime;
                break;
        case CPUCLOCK_SCHED:
                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
                break;
        }
        return 0;
}
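
/*
 * Editorial note: unlike cpu_clock_sample_group() above, which iterates
 * the whole thread group via thread_group_cputime(), this helper reads
 * the cached totals through thread_group_cputimer() -- starting the
 * shared cputimer as a side effect -- so a timer being armed here is
 * guaranteed to have the per-tick group accounting running behind it,
 * which run_posix_cpu_timers() relies on below.
 */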

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                               struct itimerspec *new, struct itimerspec *old)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count old_expires, new_expires, old_incr, val;
        int ret;

        if (unlikely(p == NULL)) {
                /*
                 * Timer refers to a dead task's clock.
                 */
                return -ESRCH;
        }

        new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

        read_lock(&tasklist_lock);
        /*
         * We need the tasklist_lock to protect against reaping that
         * clears p->sighand.  If p has just been reaped, we can no
         * longer get any information about it at all.
         */
        if (unlikely(p->sighand == NULL)) {
                read_unlock(&tasklist_lock);
                put_task_struct(p);
                timer->it.cpu.task = NULL;
                return -ESRCH;
        }

        /*
         * Disarm any old timer after extracting its expiry time.
         */
        BUG_ON(!irqs_disabled());

        ret = 0;
        old_incr = timer->it.cpu.incr;
        spin_lock(&p->sighand->siglock);
        old_expires = timer->it.cpu.expires;
        if (unlikely(timer->it.cpu.firing)) {
                timer->it.cpu.firing = -1;
                ret = TIMER_RETRY;
        } else
                list_del_init(&timer->it.cpu.entry);

        /*
         * We need to sample the current value to convert the new
         * value from relative to absolute, and to convert the
         * old value from absolute to relative.  To set a process
         * timer, we need a sample to balance the thread expiry
         * times (in arm_timer).  With an absolute time, we must
         * check if it's already passed.  In short, we need a sample.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &val);
        } else {
                cpu_timer_sample_group(timer->it_clock, p, &val);
        }

        if (old) {
                if (old_expires.sched == 0) {
                        old->it_value.tv_sec = 0;
                        old->it_value.tv_nsec = 0;
                } else {
                        /*
                         * Update the timer in case it has
                         * overrun already.  If it has,
                         * we'll report it as having overrun
                         * and with the next reloaded timer
                         * already ticking, though we are
                         * swallowing that pending
                         * notification here to install the
                         * new setting.
                         */
                        bump_cpu_timer(timer, val);
                        if (cpu_time_before(timer->it_clock, val,
                                            timer->it.cpu.expires)) {
                                old_expires = cpu_time_sub(
                                        timer->it_clock,
                                        timer->it.cpu.expires, val);
                                sample_to_timespec(timer->it_clock,
                                                   old_expires,
                                                   &old->it_value);
                        } else {
                                old->it_value.tv_nsec = 1;
                                old->it_value.tv_sec = 0;
                        }
                }
        }

        if (unlikely(ret)) {
                /*
                 * We are colliding with the timer actually firing.
                 * Punt after filling in the timer's old value, and
                 * disable this firing since we are already reporting
                 * it as an overrun (thanks to bump_cpu_timer above).
                 */
                spin_unlock(&p->sighand->siglock);
                read_unlock(&tasklist_lock);
                goto out;
        }

        if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
                cpu_time_add(timer->it_clock, &new_expires, val);
        }

        /*
         * Install the new expiry time (or zero).
         * For a timer with no notification action, we don't actually
         * arm the timer (we'll just fake it for timer_gettime).
         */
        timer->it.cpu.expires = new_expires;
        if (new_expires.sched != 0 &&
            cpu_time_before(timer->it_clock, val, new_expires)) {
                arm_timer(timer);
        }

        spin_unlock(&p->sighand->siglock);
        read_unlock(&tasklist_lock);

        /*
         * Install the new reload setting, and
         * set up the signal and overrun bookkeeping.
         */
        timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
                                                &new->it_interval);

        /*
         * This acts as a modification timestamp for the timer,
         * so any automatic reload attempt will punt on seeing
         * that we have reset the timer manually.
         */
        timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
                ~REQUEUE_PENDING;
        timer->it_overrun_last = 0;
        timer->it_overrun = -1;

        if (new_expires.sched != 0 &&
            !cpu_time_before(timer->it_clock, val, new_expires)) {
                /*
                 * The designated time already passed, so we notify
                 * immediately, even if the thread never runs to
                 * accumulate more time on this clock.
                 */
                cpu_timer_fire(timer);
        }

        ret = 0;
 out:
        if (old) {
                sample_to_timespec(timer->it_clock,
                                   old_incr, &old->it_interval);
        }
        return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
        union cpu_time_count now;
        struct task_struct *p = timer->it.cpu.task;
        int clear_dead;

        /*
         * Easy part: convert the reload time.
         */
        sample_to_timespec(timer->it_clock,
                           timer->it.cpu.incr, &itp->it_interval);

        if (timer->it.cpu.expires.sched == 0) { /* Timer not armed at all.  */
                itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
                return;
        }

        if (unlikely(p == NULL)) {
                /*
                 * This task already died and the timer will never fire.
                 * In this case, expires is actually the dead value.
                 */
 dead:
                sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                   &itp->it_value);
                return;
        }

        /*
         * Sample the clock to take the difference with the expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                clear_dead = p->exit_state;
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         * Call the timer disarmed, nothing else to do.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = NULL;
                        timer->it.cpu.expires.sched = 0;
                        read_unlock(&tasklist_lock);
                        goto dead;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        clear_dead = (unlikely(p->exit_state) &&
                                      thread_group_empty(p));
                }
                read_unlock(&tasklist_lock);
        }

        if (unlikely(clear_dead)) {
                /*
                 * We've noticed that the thread is dead, but
                 * not yet reaped.  Take this opportunity to
                 * drop our task ref.
                 */
                clear_dead_task(timer, now);
                goto dead;
        }

        if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
                sample_to_timespec(timer->it_clock,
                                   cpu_time_sub(timer->it_clock,
                                                timer->it.cpu.expires, now),
                                   &itp->it_value);
        } else {
                /*
                 * The timer should have expired already, but the firing
                 * hasn't taken place yet.  Say it's just about to expire.
                 */
                itp->it_value.tv_nsec = 1;
                itp->it_value.tv_sec = 0;
        }
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
                                struct list_head *firing)
{
        int maxfire;
        struct list_head *timers = tsk->cpu_timers;
        struct signal_struct *const sig = tsk->signal;
        unsigned long soft;

        maxfire = 20;
        tsk->cputime_expires.prof_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
                        tsk->cputime_expires.prof_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.virt_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
                        tsk->cputime_expires.virt_exp = t->expires.cpu;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        ++timers;
        maxfire = 20;
        tsk->cputime_expires.sched_exp = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *t = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
                        tsk->cputime_expires.sched_exp = t->expires.sched;
                        break;
                }
                t->firing = 1;
                list_move_tail(&t->entry, firing);
        }

        /*
         * Check for the special case thread timers.
         */
        soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long hard =
                        ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        if (soft < hard) {
                                soft += USEC_PER_SEC;
                                sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
                        }
                        printk(KERN_INFO
                               "RT Watchdog Timeout: %s[%d]\n",
                               tsk->comm, task_pid_nr(tsk));
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                }
        }
}

static void stop_process_timers(struct signal_struct *sig)
{
        struct thread_group_cputimer *cputimer = &sig->cputimer;
        unsigned long flags;

        raw_spin_lock_irqsave(&cputimer->lock, flags);
        cputimer->running = 0;
        raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                             cputime_t *expires, cputime_t cur_time, int signo)
{
        if (!it->expires)
                return;

        if (cur_time >= it->expires) {
                if (it->incr) {
                        it->expires += it->incr;
                        it->error += it->incr_error;
                        if (it->error >= onecputick) {
                                it->expires -= cputime_one_jiffy;
                                it->error -= onecputick;
                        }
                } else {
                        it->expires = 0;
                }

                trace_itimer_expire(signo == SIGPROF ?
                                    ITIMER_PROF : ITIMER_VIRTUAL,
                                    tsk->signal->leader_pid, cur_time);
                __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
        }

        if (it->expires && (!*expires || it->expires < *expires)) {
                *expires = it->expires;
        }
}
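
/*
 * Editorial note on the it->error bookkeeping above: a requested itimer
 * interval rarely lands on an exact cputime tick, so the setup code is
 * assumed here to store the per-reload rounding slack in it->incr_error,
 * in the same nanosecond units as onecputick (one tick's worth of
 * nanoseconds, computed in init_posix_cpu_timers() at the bottom of this
 * file).  Each reload accumulates that slack, and once a whole tick of
 * error has built up, the next expiry is pulled back by one jiffy -- the
 * same error-diffusion idea as Bresenham's line algorithm, keeping the
 * long-run firing rate true to the requested interval.
 */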

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
        if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
                return 1;
        return 0;
}

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
                                 struct list_head *firing)
{
        int maxfire;
        struct signal_struct *const sig = tsk->signal;
        cputime_t utime, ptime, virt_expires, prof_expires;
        unsigned long long sum_sched_runtime, sched_expires;
        struct list_head *timers = sig->cpu_timers;
        struct task_cputime cputime;
        unsigned long soft;

        /*
         * Collect the current process totals.
         */
        thread_group_cputimer(tsk, &cputime);
        utime = cputime.utime;
        ptime = utime + cputime.stime;
        sum_sched_runtime = cputime.sum_exec_runtime;
        maxfire = 20;
        prof_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || ptime < tl->expires.cpu) {
                        prof_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        virt_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || utime < tl->expires.cpu) {
                        virt_expires = tl->expires.cpu;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        ++timers;
        maxfire = 20;
        sched_expires = 0;
        while (!list_empty(timers)) {
                struct cpu_timer_list *tl = list_first_entry(timers,
                                                struct cpu_timer_list,
                                                entry);
                if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
                        sched_expires = tl->expires.sched;
                        break;
                }
                tl->firing = 1;
                list_move_tail(&tl->entry, firing);
        }

        /*
         * Check for the special case process timers.
         */
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
        soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                unsigned long hard =
                        ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
                if (psecs >= hard) {
                        /*
                         * At the hard limit, we just die.
                         * No need to calculate anything else now.
                         */
                        __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
                        return;
                }
                if (psecs >= soft) {
                        /*
                         * At the soft limit, send a SIGXCPU every second.
                         */
                        __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
                        if (soft < hard) {
                                soft++;
                                sig->rlim[RLIMIT_CPU].rlim_cur = soft;
                        }
                }
                x = secs_to_cputime(soft);
                if (!prof_expires || x < prof_expires) {
                        prof_expires = x;
                }
        }

        sig->cputime_expires.prof_exp = prof_expires;
        sig->cputime_expires.virt_exp = virt_expires;
        sig->cputime_expires.sched_exp = sched_expires;
        if (task_cputime_zero(&sig->cputime_expires))
                stop_process_timers(sig);
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
        struct task_struct *p = timer->it.cpu.task;
        union cpu_time_count now;

        if (unlikely(p == NULL))
                /*
                 * The task was cleaned up already, no future firings.
                 */
                goto out;

        /*
         * Fetch the current sample and update the timer's expiry time.
         */
        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                cpu_clock_sample(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                if (unlikely(p->exit_state)) {
                        clear_dead_task(timer, now);
                        goto out;
                }
                read_lock(&tasklist_lock); /* arm_timer needs it.  */
                spin_lock(&p->sighand->siglock);
        } else {
                read_lock(&tasklist_lock);
                if (unlikely(p->sighand == NULL)) {
                        /*
                         * The process has been reaped.
                         * We can't even collect a sample any more.
                         */
                        put_task_struct(p);
                        timer->it.cpu.task = p = NULL;
                        timer->it.cpu.expires.sched = 0;
                        goto out_unlock;
                } else if (unlikely(p->exit_state) && thread_group_empty(p)) {
                        /*
                         * We've noticed that the thread is dead, but
                         * not yet reaped.  Take this opportunity to
                         * drop our task ref.
                         */
                        clear_dead_task(timer, now);
                        goto out_unlock;
                }
                spin_lock(&p->sighand->siglock);
                cpu_timer_sample_group(timer->it_clock, p, &now);
                bump_cpu_timer(timer, now);
                /* Leave the tasklist_lock locked for the call below.  */
        }

        /*
         * Now re-arm for the new expiry time.
         */
        BUG_ON(!irqs_disabled());
        arm_timer(timer);
        spin_unlock(&p->sighand->siglock);

out_unlock:
        read_unlock(&tasklist_lock);

out:
        timer->it_overrun_last = timer->it_overrun;
        timer->it_overrun = -1;
        ++timer->it_requeue_pending;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set.  Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
                                       const struct task_cputime *expires)
{
        if (expires->utime && sample->utime >= expires->utime)
                return 1;
        if (expires->stime && sample->utime + sample->stime >= expires->stime)
                return 1;
        if (expires->sum_exec_runtime != 0 &&
            sample->sum_exec_runtime >= expires->sum_exec_runtime)
                return 1;
        return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
        struct signal_struct *sig;
        cputime_t utime, stime;

        task_cputime(tsk, &utime, &stime);

        if (!task_cputime_zero(&tsk->cputime_expires)) {
                struct task_cputime task_sample = {
                        .utime = utime,
                        .stime = stime,
                        .sum_exec_runtime = tsk->se.sum_exec_runtime
                };

                if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                        return 1;
        }

        sig = tsk->signal;
        if (sig->cputimer.running) {
                struct task_cputime group_sample;

                raw_spin_lock(&sig->cputimer.lock);
                group_sample = sig->cputimer.cputime;
                raw_spin_unlock(&sig->cputimer.lock);

                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
        }

        return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
        LIST_HEAD(firing);
        struct k_itimer *timer, *next;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        /*
         * The fast path checks that there are no expired thread or thread
         * group timers.  If that's so, just return.
         */
        if (!fastpath_timer_check(tsk))
                return;

        if (!lock_task_sighand(tsk, &flags))
                return;
        /*
         * Here we take off tsk->signal->cpu_timers[N] and
         * tsk->cpu_timers[N] all the timers that are firing, and
         * put them on the firing list.
         */
        check_thread_timers(tsk, &firing);
        /*
         * If there are any active process wide timers (POSIX 1.b, itimers,
         * RLIMIT_CPU) cputimer must be running.
         */
        if (tsk->signal->cputimer.running)
                check_process_timers(tsk, &firing);

        /*
         * We must release these locks before taking any timer's lock.
         * There is a potential race with timer deletion here, as the
         * siglock now protects our private firing list.  We have set
         * the firing flag in each timer, so that a deletion attempt
         * that gets the timer lock before we do will give it up and
         * spin until we've taken care of that timer below.
         */
        unlock_task_sighand(tsk, &flags);

        /*
         * Now that all the timers on our list have the firing flag,
         * no one will touch their list entries but us.  We'll take
         * each timer's lock before clearing its firing flag, so no
         * timer call will interfere.
         */
        list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
                int cpu_firing;

                spin_lock(&timer->it_lock);
                list_del_init(&timer->it.cpu.entry);
                cpu_firing = timer->it.cpu.firing;
                timer->it.cpu.firing = 0;
                /*
                 * The firing flag is -1 if we collided with a reset
                 * of the timer, which already reported this
                 * almost-firing as an overrun.  So don't generate an event.
                 */
                if (likely(cpu_firing >= 0))
                        cpu_timer_fire(timer);
                spin_unlock(&timer->it_lock);
        }
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                           cputime_t *newval, cputime_t *oldval)
{
        union cpu_time_count now;

        BUG_ON(clock_idx == CPUCLOCK_SCHED);
        cpu_timer_sample_group(clock_idx, tsk, &now);

        if (oldval) {
                /*
                 * We are setting an itimer.  The *oldval is absolute and we
                 * update it to be relative; the *newval argument is relative
                 * and we update it to be absolute.
                 */
                if (*oldval) {
                        if (*oldval <= now.cpu) {
                                /* Just about to fire.  */
                                *oldval = cputime_one_jiffy;
                        } else {
                                *oldval -= now.cpu;
                        }
                }

                if (!*newval)
                        return;
                *newval += now.cpu;
        }

        /*
         * Update the expiration cache if we are the earliest timer, or if
         * the new RLIMIT_CPU limit is earlier than the current prof_exp
         * cpu timer expiry.
         */
        switch (clock_idx) {
        case CPUCLOCK_PROF:
                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                        tsk->signal->cputime_expires.prof_exp = *newval;
                break;
        case CPUCLOCK_VIRT:
                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                        tsk->signal->cputime_expires.virt_exp = *newval;
                break;
        }
}
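
/*
 * Editorial note: this helper is reached from update_rlimit_cpu() at the
 * top of this file (oldval == NULL, a pure RLIMIT_CPU update) and, per
 * the comment above, from the ITIMER_PROF/ITIMER_VIRTUAL setup path
 * (oldval != NULL, so the old absolute expiry is converted back to the
 * relative form userspace expects from setitimer()).
 */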

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct itimerspec *it)
{
        struct k_itimer timer;
        int error;

        /*
         * Set up a temporary timer and then wait for it to go off.
         */
        memset(&timer, 0, sizeof timer);
        spin_lock_init(&timer.it_lock);
        timer.it_clock = which_clock;
        timer.it_overrun = -1;
        error = posix_cpu_timer_create(&timer);
        timer.it_process = current;
        if (!error) {
                static struct itimerspec zero_it;

                memset(it, 0, sizeof *it);
                it->it_value = *rqtp;

                spin_lock_irq(&timer.it_lock);
                error = posix_cpu_timer_set(&timer, flags, it, NULL);
                if (error) {
                        spin_unlock_irq(&timer.it_lock);
                        return error;
                }

                while (!signal_pending(current)) {
                        if (timer.it.cpu.expires.sched == 0) {
                                /*
                                 * Our timer fired and was reset; the
                                 * deletion below cannot fail.
                                 */
                                posix_cpu_timer_del(&timer);
                                spin_unlock_irq(&timer.it_lock);
                                return 0;
                        }

                        /*
                         * Block until cpu_timer_fire (or a signal) wakes us.
                         */
                        __set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock_irq(&timer.it_lock);
                        schedule();
                        spin_lock_irq(&timer.it_lock);
                }

                /*
                 * We were interrupted by a signal.
                 */
                sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
                error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
                if (!error) {
                        /*
                         * Timer is now unarmed, deletion cannot fail.
                         */
                        posix_cpu_timer_del(&timer);
                }
                spin_unlock_irq(&timer.it_lock);

                while (error == TIMER_RETRY) {
                        /*
                         * We need to handle the case when the timer was or
                         * is in the middle of firing.  In other cases we
                         * already freed the resources.
                         */
                        spin_lock_irq(&timer.it_lock);
                        error = posix_cpu_timer_del(&timer);
                        spin_unlock_irq(&timer.it_lock);
                }

                if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
                        /*
                         * It actually did fire already.
                         */
                        return 0;
                }

                error = -ERESTART_RESTARTBLOCK;
        }

        return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
                            struct timespec *rqtp, struct timespec __user *rmtp)
{
        struct restart_block *restart_block =
                &current_thread_info()->restart_block;
        struct itimerspec it;
        int error;

        /*
         * Diagnose required errors first.
         */
        if (CPUCLOCK_PERTHREAD(which_clock) &&
            (CPUCLOCK_PID(which_clock) == 0 ||
             CPUCLOCK_PID(which_clock) == current->pid))
                return -EINVAL;

        error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

        if (error == -ERESTART_RESTARTBLOCK) {

                if (flags & TIMER_ABSTIME)
                        return -ERESTARTNOHAND;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->fn = posix_cpu_nsleep_restart;
                restart_block->nanosleep.clockid = which_clock;
                restart_block->nanosleep.rmtp = rmtp;
                restart_block->nanosleep.expires = timespec_to_ns(rqtp);
        }
        return error;
}
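
/*
 * Illustrative userspace counterpart (editorial sketch, not kernel
 * code): a one-second process-CPU-time sleep lands in the code above as
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 *
 * Note the -EINVAL check above: sleeping on your own per-thread clock is
 * rejected, since a thread blocked in nanosleep accumulates no CPU time
 * on that clock and such a sleep could never end.
 */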

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
        clockid_t which_clock = restart_block->nanosleep.clockid;
        struct timespec t;
        struct itimerspec it;
        int error;

        t = ns_to_timespec(restart_block->nanosleep.expires);

        error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

        if (error == -ERESTART_RESTARTBLOCK) {
                struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
                /*
                 * Report back to the user the time still remaining.
                 */
                if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
                        return -EFAULT;

                restart_block->nanosleep.expires = timespec_to_ns(&t);
        }
        return error;
}

#define PROCESS_CLOCK MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK  MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(const clockid_t which_clock,
                                    struct timespec *tp)
{
        return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
                                 struct timespec *tp)
{
        return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = PROCESS_CLOCK;
        return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
                              struct timespec *rqtp,
                              struct timespec __user *rmtp)
{
        return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
        return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
                                   struct timespec *tp)
{
        return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
                                struct timespec *tp)
{
        return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
        timer->it_clock = THREAD_CLOCK;
        return posix_cpu_timer_create(timer);
}

struct k_clock clock_posix_cpu = {
        .clock_getres   = posix_cpu_clock_getres,
        .clock_set      = posix_cpu_clock_set,
        .clock_get      = posix_cpu_clock_get,
        .timer_create   = posix_cpu_timer_create,
        .nsleep         = posix_cpu_nsleep,
        .nsleep_restart = posix_cpu_nsleep_restart,
        .timer_set      = posix_cpu_timer_set,
        .timer_del      = posix_cpu_timer_del,
        .timer_get      = posix_cpu_timer_get,
};

static __init int init_posix_cpu_timers(void)
{
        struct k_clock process = {
                .clock_getres   = process_cpu_clock_getres,
                .clock_get      = process_cpu_clock_get,
                .timer_create   = process_cpu_timer_create,
                .nsleep         = process_cpu_nsleep,
                .nsleep_restart = process_cpu_nsleep_restart,
        };
        struct k_clock thread = {
                .clock_getres   = thread_cpu_clock_getres,
                .clock_get      = thread_cpu_clock_get,
                .timer_create   = thread_cpu_timer_create,
        };
        struct timespec ts;

        posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
        posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

        cputime_to_timespec(cputime_one_jiffy, &ts);
        onecputick = ts.tv_nsec;
        WARN_ON(ts.tv_sec != 0);

        return 0;
}
__initcall(init_posix_cpu_timers);