/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
#include <asm/uaccess.h>
#include <linux/kernel_stat.h>
#include <trace/events/timer.h>
#include <linux/random.h>
#include <linux/tick.h>
#include <linux/workqueue.h>

/*
 * Called after updating RLIMIT_CPU to run cpu timer and update
 * tsk->signal->cputime_expires expiration cache if necessary. Needs
 * siglock protection since other code may update the expiration cache
 * as well.
 */
void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
	cputime_t cputime = secs_to_cputime(rlim_new);

	spin_lock_irq(&task->sighand->siglock);
	set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
	spin_unlock_irq(&task->sighand->siglock);
}
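
/*
 * For example (a sketch, not verbatim from kernel/sys.c): the
 * setrlimit()/prlimit() path is expected to call this after it has
 * installed a new RLIMIT_CPU soft limit, roughly as
 *
 *	if (new_rlim->rlim_cur != RLIM_INFINITY)
 *		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
 *
 * so that the prof_exp expiration cache reflects the new limit at once.
 */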

static int check_clock(const clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
		   same_thread_group(p, current) : has_group_leader_pid(p))) {
		error = -EINVAL;
	}
	rcu_read_unlock();

	return error;
}
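
/*
 * Note on the clockid encoding (see MAKE_PROCESS_CPUCLOCK() and
 * MAKE_THREAD_CPUCLOCK() in posix-timers.h): the upper bits of a CPU
 * clockid carry ~pid, bit 2 selects a per-thread clock, and the low
 * two bits pick CPUCLOCK_PROF, CPUCLOCK_VIRT or CPUCLOCK_SCHED.  A
 * pid of zero always means the caller's own thread or process.
 */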

static inline unsigned long long
timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
{
	unsigned long long ret;

	ret = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret = cputime_to_expires(timespec_to_cputime(tp));
	}
	return ret;
}

static void sample_to_timespec(const clockid_t which_clock,
			       unsigned long long expires,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
		*tp = ns_to_timespec(expires);
	else
		cputime_to_timespec((__force cputime_t)expires, tp);
}
77 | |
78 | /* |
79 | * Update expiry time from increment, and increase overrun count, |
80 | * given the current clock sample. |
81 | */ |
82 | static void bump_cpu_timer(struct k_itimer *timer, |
83 | unsigned long long now) |
84 | { |
85 | int i; |
86 | unsigned long long delta, incr; |
87 | |
88 | if (timer->it.cpu.incr == 0) |
89 | return; |
90 | |
91 | if (now < timer->it.cpu.expires) |
92 | return; |
93 | |
94 | incr = timer->it.cpu.incr; |
95 | delta = now + incr - timer->it.cpu.expires; |
96 | |
97 | /* Don't use (incr*2 < delta), incr*2 might overflow. */ |
98 | for (i = 0; incr < delta - incr; i++) |
99 | incr = incr << 1; |
100 | |
101 | for (; i >= 0; incr >>= 1, i--) { |
102 | if (delta < incr) |
103 | continue; |
104 | |
105 | timer->it.cpu.expires += incr; |
106 | timer->it_overrun += 1 << i; |
107 | delta -= incr; |
108 | } |
109 | } |
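
/*
 * Worked example (hypothetical numbers): expires = 100, incr = 10,
 * now = 135.  Then delta = now + incr - expires = 45.  The doubling
 * loop stops at incr = 40 (i = 2); the halving loop adds 40 once
 * (expires = 140, it_overrun += 1 << 2, delta = 5) and skips 20 and
 * 10.  The timer ends up armed for the first period strictly after
 * 'now', with the elapsed periods counted in O(log n) steps instead
 * of one at a time.
 */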

/**
 * task_cputime_zero - Check a task_cputime struct for all zero fields.
 *
 * @cputime:	The struct to compare.
 *
 * Checks @cputime to see if all fields are zero.  Returns true if all fields
 * are zero, false if any field is nonzero.
 */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
		return 1;
	return 0;
}

static inline unsigned long long prof_ticks(struct task_struct *p)
{
	cputime_t utime, stime;

	task_cputime(p, &utime, &stime);

	return cputime_to_expires(utime + stime);
}
static inline unsigned long long virt_ticks(struct task_struct *p)
{
	cputime_t utime;

	task_cputime(p, &utime, NULL);

	return cputime_to_expires(utime);
}

static int
posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, its
			 * true resolution is not exported, but it is
			 * certainly much finer than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}

static int
posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}

/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
			    unsigned long long *sample)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		*sample = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		*sample = task_sched_runtime(p);
		break;
	}
	return 0;
}

static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
{
	if (b->utime > a->utime)
		a->utime = b->utime;

	if (b->stime > a->stime)
		a->stime = b->stime;

	if (b->sum_exec_runtime > a->sum_exec_runtime)
		a->sum_exec_runtime = b->sum_exec_runtime;
}

void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	struct task_cputime sum;
	unsigned long flags;

	if (!cputimer->running) {
		/*
		 * The POSIX timer interface allows for absolute time expiry
		 * values through the TIMER_ABSTIME flag, therefore we have
		 * to synchronize the timer to the clock every time we start
		 * it.
		 */
		thread_group_cputime(tsk, &sum);
		raw_spin_lock_irqsave(&cputimer->lock, flags);
		cputimer->running = 1;
		update_gt_cputime(&cputimer->cputime, &sum);
	} else
		raw_spin_lock_irqsave(&cputimer->lock, flags);
	*times = cputimer->cputime;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  unsigned long long *sample)
{
	struct task_cputime cputime;

	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		thread_group_cputime(p, &cputime);
		*sample = cputime_to_expires(cputime.utime + cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		thread_group_cputime(p, &cputime);
		*sample = cputime_to_expires(cputime.utime);
		break;
	case CPUCLOCK_SCHED:
		thread_group_cputime(p, &cputime);
		*sample = cputime.sum_exec_runtime;
		break;
	}
	return 0;
}

static int posix_cpu_clock_get_task(struct task_struct *tsk,
				    const clockid_t which_clock,
				    struct timespec *tp)
{
	int err = -EINVAL;
	unsigned long long rtn;

	if (CPUCLOCK_PERTHREAD(which_clock)) {
		if (same_thread_group(tsk, current))
			err = cpu_clock_sample(which_clock, tsk, &rtn);
	} else {
		unsigned long flags;
		struct sighand_struct *sighand;

		/*
		 * while_each_thread() is not yet entirely RCU safe,
		 * keep locking the group while sampling process
		 * clock for now.
		 */
		sighand = lock_task_sighand(tsk, &flags);
		if (!sighand)
			return err;

		if (tsk == current || thread_group_leader(tsk))
			err = cpu_clock_sample_group(which_clock, tsk, &rtn);

		unlock_task_sighand(tsk, &flags);
	}

	if (!err)
		sample_to_timespec(which_clock, rtn, tp);

	return err;
}

static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int err = -EINVAL;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		err = posix_cpu_clock_get_task(current, which_clock, tp);
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		rcu_read_lock();
		p = find_task_by_vpid(pid);
		if (p)
			err = posix_cpu_clock_get_task(p, which_clock, tp);
		rcu_read_unlock();
	}

	return err;
}
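
/*
 * From user space these clocks are reached through clock_gettime(2),
 * e.g. (a sketch):
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
 *
 * or through a clockid obtained from pthread_getcpuclockid(3) or
 * clock_getcpuclockid(3) for some other thread or process.
 */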

/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create() and do_cpu_nanosleep() with the
 * new timer already all-zeros initialized.
 */
static int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);

	rcu_read_lock();
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !same_thread_group(p, current))
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_vpid(pid);
			if (p && !has_group_leader_pid(p))
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_del(struct k_itimer *timer)
{
	int ret = 0;
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Protect against sighand release/switch in exit/exec and against
	 * concurrent read/writes of the process/thread timer list entries.
	 */
	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL)) {
		/*
		 * We raced with the reaping of the task.
		 * The deletion should have cleared us off the list.
		 */
		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
	} else {
		if (timer->it.cpu.firing)
			ret = TIMER_RETRY;
		else
			list_del(&timer->it.cpu.entry);

		unlock_task_sighand(p, &flags);
	}

	if (!ret)
		put_task_struct(p);

	return ret;
}

static void cleanup_timers_list(struct list_head *head)
{
	struct cpu_timer_list *timer, *next;

	list_for_each_entry_safe(timer, next, head, entry)
		list_del_init(&timer->entry);
}

/*
 * Clean out CPU timers still armed when a thread exits.  The timers
 * are only unlinked from their lists here; nothing is freed, and the
 * eventual posix_cpu_timer_del() will find an empty list entry.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head)
{
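	/*
	 * head points at the CPUCLOCK_PROF list of a cpu_timers[]
	 * array; the CPUCLOCK_VIRT and CPUCLOCK_SCHED lists follow
	 * it in memory, so we can simply step through them.
	 */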
	cleanup_timers_list(head);
	cleanup_timers_list(++head);
	cleanup_timers_list(++head);
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped.  When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	add_device_randomness((const void *) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));
	cleanup_timers(tsk->cpu_timers);
}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers);
}

static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
	return expires == 0 || expires > new_exp;
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later.  This must be called with the sighand lock held.
 */
static void arm_timer(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct task_cputime *cputime_expires;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;

	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		head = p->cpu_timers;
		cputime_expires = &p->cputime_expires;
	} else {
		head = p->signal->cpu_timers;
		cputime_expires = &p->signal->cputime_expires;
	}
	head += CPUCLOCK_WHICH(timer->it_clock);

	listpos = head;
	list_for_each_entry(next, head, entry) {
		if (nt->expires < next->expires)
			break;
		listpos = &next->entry;
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		unsigned long long exp = nt->expires;

		/*
		 * We are the new earliest-expiring POSIX 1.b timer, hence
		 * need to update expiration cache. Take into account that
		 * for process timers we share expiration cache with itimers
		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
		 */

		switch (CPUCLOCK_WHICH(timer->it_clock)) {
		case CPUCLOCK_PROF:
			if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
				cputime_expires->prof_exp = expires_to_cputime(exp);
			break;
		case CPUCLOCK_VIRT:
			if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
				cputime_expires->virt_exp = expires_to_cputime(exp);
			break;
		case CPUCLOCK_SCHED:
			if (cputime_expires->sched_exp == 0 ||
			    cputime_expires->sched_exp > exp)
				cputime_expires->sched_exp = exp;
			break;
		}
	}
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		/*
		 * The user doesn't want any signal.
		 */
		timer->it.cpu.expires = 0;
	} else if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires = 0;
	} else if (timer->it.cpu.incr == 0) {
		/*
		 * One-shot timer.  Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer.  But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}

/*
 * Sample a process (thread group) timer for the given group_leader task.
 * Must be called with task sighand lock held for safe while_each_thread()
 * traversal.
 */
static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
				  unsigned long long *sample)
{
	struct task_cputime cputime;

	thread_group_cputimer(p, &cputime);
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		*sample = cputime_to_expires(cputime.utime + cputime.stime);
		break;
	case CPUCLOCK_VIRT:
		*sample = cputime_to_expires(cputime.utime);
		break;
	case CPUCLOCK_SCHED:
		*sample = cputime.sum_exec_runtime + task_delta_exec(p);
		break;
	}
	return 0;
}
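
/*
 * Note the contrast with cpu_clock_sample_group() above: sampling via
 * thread_group_cputimer() also sets cputimer->running, so setting a
 * process timer starts the cached group accounting that
 * run_posix_cpu_timers() and check_process_timers() rely on.
 */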

#ifdef CONFIG_NO_HZ_FULL
static void nohz_kick_work_fn(struct work_struct *work)
{
	tick_nohz_full_kick_all();
}

static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);

/*
 * We need the IPIs to be sent from sane process context.
 * The posix cpu timers are always set with irqs disabled.
 */
static void posix_cpu_timer_kick_nohz(void)
{
	if (context_tracking_is_enabled())
		schedule_work(&nohz_kick_work);
}

bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
{
	if (!task_cputime_zero(&tsk->cputime_expires))
		return false;

	if (tsk->signal->cputimer.running)
		return false;

	return true;
}
#else
static inline void posix_cpu_timer_kick_nohz(void) { }
#endif

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again.  (This happens when the timer is in the middle of firing.)
 */
static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
			       struct itimerspec *new, struct itimerspec *old)
{
	unsigned long flags;
	struct sighand_struct *sighand;
	struct task_struct *p = timer->it.cpu.task;
	unsigned long long old_expires, new_expires, old_incr, val;
	int ret;

	WARN_ON_ONCE(p == NULL);

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	/*
	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
	 * and p->signal->cpu_timers read/write in arm_timer()
	 */
	sighand = lock_task_sighand(p, &flags);
	/*
	 * If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(sighand == NULL)) {
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());

	ret = 0;
	old_incr = timer->it.cpu.incr;
	old_expires = timer->it.cpu.expires;
	if (unlikely(timer->it.cpu.firing)) {
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
	} else
		list_del_init(&timer->it.cpu.entry);

	/*
	 * We need to sample the current value to convert the new
	 * value between relative and absolute, and to convert the
	 * old value from absolute to relative.  To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer).  With an absolute time, we must
	 * check if it's already passed.  In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_timer_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already.  If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (val < timer->it.cpu.expires) {
				old_expires = timer->it.cpu.expires - val;
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(ret)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		unlock_task_sighand(p, &flags);
		goto out;
	}

	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
		new_expires += val;
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires != 0 && val < new_expires) {
		arm_timer(timer);
	}

	unlock_task_sighand(p, &flags);
	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires != 0 && !(val < new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   old_incr, &old->it_interval);
	}
	if (!ret)
		posix_cpu_timer_kick_nohz();
	return ret;
}

static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	unsigned long long now;
	struct task_struct *p = timer->it.cpu.task;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
	} else {
		struct sighand_struct *sighand;
		unsigned long flags;

		/*
		 * Protect against sighand release/switch in exit/exec and
		 * also make timer sampling safe if it ends up calling
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			timer->it.cpu.expires = 0;
			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
					   &itp->it_value);
		} else {
			cpu_timer_sample_group(timer->it_clock, p, &now);
			unlock_task_sighand(p, &flags);
		}
	}

	if (now < timer->it.cpu.expires) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.expires - now,
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet.  Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

static unsigned long long
check_timers_list(struct list_head *timers,
		  struct list_head *firing,
		  unsigned long long curr)
{
	int maxfire = 20;

	while (!list_empty(timers)) {
		struct cpu_timer_list *t;

		t = list_first_entry(timers, struct cpu_timer_list, entry);

		if (!--maxfire || curr < t->expires)
			return t->expires;

		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	return 0;
}
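
/*
 * maxfire bounds the work done per invocation: once the 20-iteration
 * budget is spent the walk stops and the head timer's expiry time is
 * returned as the new expiration-cache value, so any remaining timers
 * get picked up on a later tick.
 */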

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list.  Here we update the
 * tsk->cputime_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
	struct task_cputime *tsk_expires = &tsk->cputime_expires;
	unsigned long long expires;
	unsigned long soft;

	expires = check_timers_list(timers, firing, prof_ticks(tsk));
	tsk_expires->prof_exp = expires_to_cputime(expires);

	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
	tsk_expires->virt_exp = expires_to_cputime(expires);

	tsk_expires->sched_exp = check_timers_list(++timers, firing,
						   tsk->se.sum_exec_runtime);

	/*
	 * Check for the special case thread timers.
	 */
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);

		if (hard != RLIM_INFINITY &&
		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			if (soft < hard) {
				soft += USEC_PER_SEC;
				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
			}
			printk(KERN_INFO
			       "RT Watchdog Timeout: %s[%d]\n",
			       tsk->comm, task_pid_nr(tsk));
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
		}
	}
}

static void stop_process_timers(struct signal_struct *sig)
{
	struct thread_group_cputimer *cputimer = &sig->cputimer;
	unsigned long flags;

	raw_spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->running = 0;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}

static u32 onecputick;

static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
			     unsigned long long *expires,
			     unsigned long long cur_time, int signo)
{
	if (!it->expires)
		return;

	if (cur_time >= it->expires) {
		if (it->incr) {
			it->expires += it->incr;
			it->error += it->incr_error;
			if (it->error >= onecputick) {
				it->expires -= cputime_one_jiffy;
				it->error -= onecputick;
			}
		} else {
			it->expires = 0;
		}

		trace_itimer_expire(signo == SIGPROF ?
				    ITIMER_PROF : ITIMER_VIRTUAL,
				    tsk->signal->leader_pid, cur_time);
		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
	}

	if (it->expires && (!*expires || it->expires < *expires)) {
		*expires = it->expires;
	}
}
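
/*
 * The it->error bookkeeping above compensates for the rounding that
 * happens when an itimer interval is converted to whole cputime
 * ticks: the per-period rounding error (it->incr_error) accumulates,
 * and once it reaches one tick's worth of nanoseconds (onecputick) a
 * whole tick is taken back off the next expiry time.
 */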

/*
 * Check for any process-wide CPU timers that have fired and move them
 * off the sig->cpu_timers[N] lists onto the firing list.  Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	unsigned long long utime, ptime, virt_expires, prof_expires;
	unsigned long long sum_sched_runtime, sched_expires;
	struct list_head *timers = sig->cpu_timers;
	struct task_cputime cputime;
	unsigned long soft;

	/*
	 * Collect the current process totals.
	 */
	thread_group_cputimer(tsk, &cputime);
	utime = cputime_to_expires(cputime.utime);
	ptime = utime + cputime_to_expires(cputime.stime);
	sum_sched_runtime = cputime.sum_exec_runtime;

	prof_expires = check_timers_list(timers, firing, ptime);
	virt_expires = check_timers_list(++timers, firing, utime);
	sched_expires = check_timers_list(++timers, firing, sum_sched_runtime);

	/*
	 * Check for the special case process timers.
	 */
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
			 SIGPROF);
	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
			 SIGVTALRM);
	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
	if (soft != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		unsigned long hard =
			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
		cputime_t x;
		if (psecs >= hard) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= soft) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (soft < hard) {
				soft++;
				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
			}
		}
		x = secs_to_cputime(soft);
		if (!prof_expires || x < prof_expires) {
			prof_expires = x;
		}
	}

	sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
	sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
	sig->cputime_expires.sched_exp = sched_expires;
	if (task_cputime_zero(&sig->cputime_expires))
		stop_process_timers(sig);
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct sighand_struct *sighand;
	unsigned long flags;
	struct task_struct *p = timer->it.cpu.task;
	unsigned long long now;

	WARN_ON_ONCE(p == NULL);

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state))
			goto out;

		/* Protect timer list r/w in arm_timer() */
		sighand = lock_task_sighand(p, &flags);
		if (!sighand)
			goto out;
	} else {
		/*
		 * Protect arm_timer() and timer sampling in case of call to
		 * thread_group_cputime().
		 */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			timer->it.cpu.expires = 0;
			goto out;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			unlock_task_sighand(p, &flags);
			/* Optimization: if the process is dying, no need to rearm */
			goto out;
		}
		cpu_timer_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the sighand locked for the call below.  */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	arm_timer(timer);
	unlock_task_sighand(p, &flags);

	/* Kick full dynticks CPUs in case they need to tick on the new timer */
	posix_cpu_timer_kick_nohz();
out:
	timer->it_overrun_last = timer->it_overrun;
	timer->it_overrun = -1;
	++timer->it_requeue_pending;
}

/**
 * task_cputime_expired - Compare two task_cputime entities.
 *
 * @sample:	The task_cputime structure to be checked for expiration.
 * @expires:	Expiration times, against which @sample will be checked.
 *
 * Checks @sample against @expires to see if any field of @sample has expired.
 * Returns true if any field of @sample is greater than or equal to the
 * corresponding field of @expires, provided that field of @expires is set.
 * Otherwise returns false.
 */
static inline int task_cputime_expired(const struct task_cputime *sample,
				       const struct task_cputime *expires)
{
	if (expires->utime && sample->utime >= expires->utime)
		return 1;
	if (expires->stime && sample->utime + sample->stime >= expires->stime)
		return 1;
	if (expires->sum_exec_runtime != 0 &&
	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
		return 1;
	return 0;
}

/**
 * fastpath_timer_check - POSIX CPU timers fast path.
 *
 * @tsk:	The task (thread) being checked.
 *
 * Check the task and thread group timers.  If both are zero (there are no
 * timers set) return false.  Otherwise snapshot the task and thread group
 * timers and compare them with the corresponding expiration times.  Return
 * true if a timer has expired, else return false.
 */
static inline int fastpath_timer_check(struct task_struct *tsk)
{
	struct signal_struct *sig;
	cputime_t utime, stime;

	task_cputime(tsk, &utime, &stime);

	if (!task_cputime_zero(&tsk->cputime_expires)) {
		struct task_cputime task_sample = {
			.utime = utime,
			.stime = stime,
			.sum_exec_runtime = tsk->se.sum_exec_runtime
		};

		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
			return 1;
	}

	sig = tsk->signal;
	if (sig->cputimer.running) {
		struct task_cputime group_sample;

		raw_spin_lock(&sig->cputimer.lock);
		group_sample = sig->cputimer.cputime;
		raw_spin_unlock(&sig->cputimer.lock);

		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
			return 1;
	}

	return 0;
}

/*
 * This is called from the timer interrupt handler.  The irq handler has
 * already updated our counts.  We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;
	unsigned long flags;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * The fast path checks that there are no expired thread or thread
	 * group timers.  If that's so, just return.
	 */
	if (!fastpath_timer_check(tsk))
		return;

	if (!lock_task_sighand(tsk, &flags))
		return;
	/*
	 * Here we take off tsk->signal->cpu_timers[N] and
	 * tsk->cpu_timers[N] all the timers that are firing, and
	 * put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	/*
	 * If there are any active process wide timers (POSIX 1.b, itimers,
	 * RLIMIT_CPU) cputimer must be running.
	 */
	if (tsk->signal->cputimer.running)
		check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list.  We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	unlock_task_sighand(tsk, &flags);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us.  We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int cpu_firing;

		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		cpu_firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun.  So don't generate an event.
		 */
		if (likely(cpu_firing >= 0))
			cpu_timer_fire(timer);
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
 * The tsk->sighand->siglock must be held by the caller.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	unsigned long long now;

	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
	cpu_timer_sample_group(clock_idx, tsk, &now);

	if (oldval) {
		/*
		 * We are setting itimer.  The *oldval is absolute and we
		 * update it to be relative; the *newval argument is
		 * relative and we update it to be absolute.
		 */
		if (*oldval) {
			if (*oldval <= now) {
				/* Just about to fire. */
				*oldval = cputime_one_jiffy;
			} else {
				*oldval -= now;
			}
		}

		if (!*newval)
			goto out;
		*newval += now;
	}

	/*
	 * Update the expiration cache if we are the earliest timer, or if
	 * the RLIMIT_CPU limit is earlier than the current prof_exp cpu
	 * timer expiry.
	 */
	switch (clock_idx) {
	case CPUCLOCK_PROF:
		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
			tsk->signal->cputime_expires.prof_exp = *newval;
		break;
	case CPUCLOCK_VIRT:
		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
			tsk->signal->cputime_expires.virt_exp = *newval;
		break;
	}
out:
	posix_cpu_timer_kick_nohz();
}
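
/*
 * Two kinds of callers reach this: update_rlimit_cpu() above passes
 * oldval == NULL for RLIMIT_CPU updates, while the setitimer(2) path
 * passes the previous expiry in *oldval ("We are setting itimer"
 * above), which is why the absolute/relative conversion lives here.
 */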

static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct itimerspec *it)
{
	struct k_itimer timer;
	int error;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		static struct itimerspec zero_it;

		memset(it, 0, sizeof *it);
		it->it_value = *rqtp;

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires == 0) {
				/*
				 * Our timer fired and was reset; the
				 * deletion below cannot fail.
				 */
				posix_cpu_timer_del(&timer);
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
		if (!error) {
			/*
			 * The timer is now unarmed; deletion cannot fail.
			 */
			posix_cpu_timer_del(&timer);
		}
		spin_unlock_irq(&timer.it_lock);

		while (error == TIMER_RETRY) {
			/*
			 * We need to handle the case where the timer was,
			 * or still is, in the middle of firing.  In all
			 * other cases the resources were already freed.
			 */
			spin_lock_irq(&timer.it_lock);
			error = posix_cpu_timer_del(&timer);
			spin_unlock_irq(&timer.it_lock);
		}

		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
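
/*
 * This backs clock_nanosleep(2) on the CPU clocks; from user space,
 * e.g. (a sketch):
 *
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *	clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0, &ts, NULL);
 *
 * blocks until the calling process has consumed one more second of
 * CPU time.
 */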

static long posix_cpu_nsleep_restart(struct restart_block *restart_block);

static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
			    struct timespec *rqtp, struct timespec __user *rmtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct itimerspec it;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);

	if (error == -ERESTART_RESTARTBLOCK) {

		if (flags & TIMER_ABSTIME)
			return -ERESTARTNOHAND;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_nsleep_restart;
		restart_block->nanosleep.clockid = which_clock;
		restart_block->nanosleep.rmtp = rmtp;
		restart_block->nanosleep.expires = timespec_to_ns(rqtp);
	}
	return error;
}

static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->nanosleep.clockid;
	struct timespec t;
	struct itimerspec it;
	int error;

	t = ns_to_timespec(restart_block->nanosleep.expires);

	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);

	if (error == -ERESTART_RESTARTBLOCK) {
		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
		/*
		 * Report back to the user the time still remaining.
		 */
		if (rmtp && copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->nanosleep.expires = timespec_to_ns(&t);
	}
	return error;
}

#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)
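
/*
 * These are the clockids behind CLOCK_PROCESS_CPUTIME_ID and
 * CLOCK_THREAD_CPUTIME_ID: pid 0 (i.e. the caller itself) combined
 * with the CPUCLOCK_SCHED clock.
 */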

static int process_cpu_clock_getres(const clockid_t which_clock,
				    struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(const clockid_t which_clock,
				 struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(const clockid_t which_clock, int flags,
			      struct timespec *rqtp,
			      struct timespec __user *rmtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
}
static long process_cpu_nsleep_restart(struct restart_block *restart_block)
{
	return -EINVAL;
}
static int thread_cpu_clock_getres(const clockid_t which_clock,
				   struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(const clockid_t which_clock,
				struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}

struct k_clock clock_posix_cpu = {
	.clock_getres	= posix_cpu_clock_getres,
	.clock_set	= posix_cpu_clock_set,
	.clock_get	= posix_cpu_clock_get,
	.timer_create	= posix_cpu_timer_create,
	.nsleep		= posix_cpu_nsleep,
	.nsleep_restart	= posix_cpu_nsleep_restart,
	.timer_set	= posix_cpu_timer_set,
	.timer_del	= posix_cpu_timer_del,
	.timer_get	= posix_cpu_timer_get,
};

static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres	= process_cpu_clock_getres,
		.clock_get	= process_cpu_clock_get,
		.timer_create	= process_cpu_timer_create,
		.nsleep		= process_cpu_nsleep,
		.nsleep_restart	= process_cpu_nsleep_restart,
	};
	struct k_clock thread = {
		.clock_getres	= thread_cpu_clock_getres,
		.clock_get	= thread_cpu_clock_get,
		.timer_create	= thread_cpu_timer_create,
	};
	struct timespec ts;

	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	cputime_to_timespec(cputime_one_jiffy, &ts);
	onecputick = ts.tv_nsec;
	WARN_ON(ts.tv_sec != 0);

	return 0;
}
__initcall(init_posix_cpu_timers);