/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig,
		int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * to clear it do so themselves.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
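
/*
 * Illustrative sketch (editor's addition, not from the original source):
 * because the first word is special-cased above, a pending synchronous
 * signal is dequeued ahead of a lower-numbered asynchronous one.  Given a
 * struct sigpending "pending" with both SIGHUP (1) and SIGSEGV (11) set
 * and nothing blocked:
 *
 *	sigset_t mask;
 *
 *	sigemptyset(&mask);
 *	sig = next_signal(&pending, &mask);	(returns SIGSEGV, not SIGHUP)
 */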

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
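
/*
 * Illustrative usage sketch (editor's addition; "my_dev" and "my_notifier"
 * are hypothetical driver names, not kernel APIs).  A driver shields a
 * critical section and vetoes delivery from the notifier; note that *mask
 * is stored by pointer above, so it must stay alive until the unblock:
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;
 *		return dev->allow_signals;	(0 keeps the signal blocked)
 *	}
 *
 *	static sigset_t mask;
 *
 *	sigfillset(&mask);
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */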

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/*
	 * We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them.
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
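
/*
 * Illustrative caller sketch (editor's addition): kernel threads that have
 * enabled a few signals with allow_signal() typically drain them like this,
 * letting ->blocked double as the dequeue mask:
 *
 *	siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */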

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
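
/*
 * Editor's note: the "special" siginfo values are small integers cast to
 * pointers in <linux/sched.h> (SEND_SIG_NOINFO == 0, SEND_SIG_PRIV == 1,
 * SEND_SIG_FORCED == 2), which is why the single <= comparison in
 * is_si_special() identifies all three at once.
 */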

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred, *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	cred = current_cred();
	tcred = __task_cred(t);
	if (!same_thread_group(current, t) &&
	    (cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
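
/*
 * Usage note (editor's addition): the dump above is enabled by booting
 * with "print-fatal-signals=1" or, at run time, by writing 1 to
 * /proc/sys/kernel/print-fatal-signals.
 */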

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
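
/*
 * Illustrative sketch (editor's addition; "fault_address" is a
 * placeholder): an arch page-fault handler typically builds a siginfo and
 * forces it, so that SIG_IGN or a blocked mask cannot swallow the fault:
 *
 *	siginfo_t info;
 *
 *	info.si_signo = SIGSEGV;
 *	info.si_errno = 0;
 *	info.si_code  = SEGV_MAPERR;
 *	info.si_addr  = (void __user *)fault_address;
 *	force_sig_info(SIGSEGV, &info, current);
 */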

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
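
/*
 * Editor's note: tsk->sighand can be switched out from under us by exec
 * (de_thread()), so the loop below takes the lock and then re-checks that
 * the sighand it locked is still the task's sighand before trusting it.
 */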
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a thread group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
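
/*
 * Illustrative sketch (editor's addition): a typical in-kernel caller
 * passes priv != 0 so the signal carries SI_KERNEL rather than a spoofable
 * user-looking siginfo:
 *
 *	send_sig(SIGKILL, p, 1);	(info == SEND_SIG_PRIV)
 */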

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
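
/*
 * Illustrative sketch (editor's addition): modules prefer holding a
 * struct pid reference over a raw pid_t, so a recycled pid cannot be
 * signalled by mistake:
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	...
 *	kill_pid(pid, SIGTERM, 1);
 *	put_pid(pid);
 */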

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
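
/*
 * Illustrative lifecycle sketch (editor's addition, simplified from the
 * POSIX timer usage described above):
 *
 *	q = sigqueue_alloc();		at timer_create(); may fail -> EAGAIN
 *	...
 *	send_sigqueue(q, task, group);	at each expiry; never allocates
 *	...
 *	sigqueue_free(q);		at timer_delete()
 */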

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * The only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (task_ptrace(tsk))
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(task_ptrace(current)))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so, and our tracer is also part of the coredump, stopping
	 * is a deadlock situation and pointless, because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
1720 | |
1721 | /* |
1722 | * This performs the stopping for SIGSTOP and other stop signals. |
1723 | * We have to stop all threads in the thread group. |
1724 | * Returns nonzero if we've actually stopped and released the siglock. |
1725 | * Returns zero if we didn't stop and still hold the siglock. |
1726 | */ |
1727 | static int do_signal_stop(int signr) |
1728 | { |
1729 | struct signal_struct *sig = current->signal; |
1730 | int notify; |
1731 | |
1732 | if (!sig->group_stop_count) { |
1733 | struct task_struct *t; |
1734 | |
1735 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || |
1736 | unlikely(signal_group_exit(sig))) |
1737 | return 0; |
1738 | /* |
1739 | * There is no group stop already in progress. |
1740 | * We must initiate one now. |
1741 | */ |
1742 | sig->group_exit_code = signr; |
1743 | |
1744 | sig->group_stop_count = 1; |
1745 | for (t = next_thread(current); t != current; t = next_thread(t)) |
1746 | /* |
1747 | * Setting state to TASK_STOPPED for a group |
1748 | * stop is always done with the siglock held, |
1749 | * so this check has no races. |
1750 | */ |
1751 | if (!(t->flags & PF_EXITING) && |
1752 | !task_is_stopped_or_traced(t)) { |
1753 | sig->group_stop_count++; |
1754 | signal_wake_up(t, 0); |
1755 | } |
1756 | } |
1757 | /* |
1758 | * If there are no other threads in the group, or if there is |
1759 | * a group stop in progress and we are the last to stop, report |
1760 | * to the parent. When ptraced, every thread reports itself. |
1761 | */ |
1762 | notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0; |
1763 | notify = tracehook_notify_jctl(notify, CLD_STOPPED); |
1764 | /* |
1765 | * tracehook_notify_jctl() can drop and reacquire siglock, so |
1766 | * we keep ->group_stop_count != 0 before the call. If SIGCONT |
1767 | * or SIGKILL comes in between ->group_stop_count == 0. |
1768 | */ |
1769 | if (sig->group_stop_count) { |
1770 | if (!--sig->group_stop_count) |
1771 | sig->flags = SIGNAL_STOP_STOPPED; |
1772 | current->exit_code = sig->group_exit_code; |
1773 | __set_current_state(TASK_STOPPED); |
1774 | } |
1775 | spin_unlock_irq(&current->sighand->siglock); |
1776 | |
1777 | if (notify) { |
1778 | read_lock(&tasklist_lock); |
1779 | do_notify_parent_cldstop(current, notify); |
1780 | read_unlock(&tasklist_lock); |
1781 | } |
1782 | |
1783 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ |
1784 | do { |
1785 | schedule(); |
1786 | } while (try_to_freeze()); |
1787 | |
1788 | tracehook_finish_jctl(); |
1789 | current->exit_code = 0; |
1790 | |
1791 | return 1; |
1792 | } |
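
/*
 * Caller contract, as a sketch: do_signal_stop() is entered with
 * siglock held. A nonzero return means we actually stopped and the
 * lock was dropped, so callers such as get_signal_to_deliver() loop
 * back and re-take it:
 *
 *	if (do_signal_stop(info->si_signo))
 *		goto relock;	// siglock already released
 */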
1793 | |
1794 | static int ptrace_signal(int signr, siginfo_t *info, |
1795 | struct pt_regs *regs, void *cookie) |
1796 | { |
1797 | if (!task_ptrace(current)) |
1798 | return signr; |
1799 | |
1800 | ptrace_signal_deliver(regs, cookie); |
1801 | |
1802 | /* Let the debugger run. */ |
1803 | ptrace_stop(signr, 0, info); |
1804 | |
1805 | /* We're back. Did the debugger cancel the sig? */ |
1806 | signr = current->exit_code; |
1807 | if (signr == 0) |
1808 | return signr; |
1809 | |
1810 | current->exit_code = 0; |
1811 | |
1812 | /* Update the siginfo structure if the signal has |
1813 | changed. If the debugger wanted something |
1814 | specific in the siginfo structure then it should |
1815 | have updated *info via PTRACE_SETSIGINFO. */ |
1816 | if (signr != info->si_signo) { |
1817 | info->si_signo = signr; |
1818 | info->si_errno = 0; |
1819 | info->si_code = SI_USER; |
1820 | info->si_pid = task_pid_vnr(current->parent); |
1821 | info->si_uid = task_uid(current->parent); |
1822 | } |
1823 | |
1824 | /* If the (new) signal is now blocked, requeue it. */ |
1825 | if (sigismember(&current->blocked, signr)) { |
1826 | specific_send_sig_info(signr, info, current); |
1827 | signr = 0; |
1828 | } |
1829 | |
1830 | return signr; |
1831 | } |
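
/*
 * Debugger-side view (userspace sketch, plain ptrace(2)): the
 * "did the debugger cancel the sig?" logic above reflects the data
 * argument the tracer passes when it resumes the tracee:
 *
 *	waitpid(child, &status, 0);		// tracee sits in ptrace_stop()
 *	ptrace(PTRACE_CONT, child, 0, 0);	// 0: cancel the signal
 *	ptrace(PTRACE_CONT, child, 0, SIGTERM);	// or deliver SIGTERM instead
 */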
1832 | |
1833 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, |
1834 | struct pt_regs *regs, void *cookie) |
1835 | { |
1836 | struct sighand_struct *sighand = current->sighand; |
1837 | struct signal_struct *signal = current->signal; |
1838 | int signr; |
1839 | |
1840 | relock: |
1841 | /* |
1842 | * We'll jump back here after any time we were stopped in TASK_STOPPED. |
1843 | * While in TASK_STOPPED, we were considered "frozen enough". |
1844 | * Now that we woke up, it's crucial if we're supposed to be |
1845 | * frozen that we freeze now before running anything substantial. |
1846 | */ |
1847 | try_to_freeze(); |
1848 | |
1849 | spin_lock_irq(&sighand->siglock); |
1850 | /* |
1851 | * Every stopped thread goes here after wakeup. Check to see if |
1852 | * we should notify the parent, prepare_signal(SIGCONT) encodes |
1853 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. |
1854 | */ |
1855 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
1856 | int why = (signal->flags & SIGNAL_STOP_CONTINUED) |
1857 | ? CLD_CONTINUED : CLD_STOPPED; |
1858 | signal->flags &= ~SIGNAL_CLD_MASK; |
1859 | |
1860 | why = tracehook_notify_jctl(why, CLD_CONTINUED); |
1861 | spin_unlock_irq(&sighand->siglock); |
1862 | |
1863 | if (why) { |
1864 | read_lock(&tasklist_lock); |
1865 | do_notify_parent_cldstop(current->group_leader, why); |
1866 | read_unlock(&tasklist_lock); |
1867 | } |
1868 | goto relock; |
1869 | } |
1870 | |
1871 | for (;;) { |
1872 | struct k_sigaction *ka; |
1873 | /* |
1874 | * Tracing can induce an artificial signal and choose the sigaction. |
1875 | * The return value in @signr determines the default action, |
1876 | * but @info->si_signo is the signal number we will report. |
1877 | */ |
1878 | signr = tracehook_get_signal(current, regs, info, return_ka); |
1879 | if (unlikely(signr < 0)) |
1880 | goto relock; |
1881 | if (unlikely(signr != 0)) |
1882 | ka = return_ka; |
1883 | else { |
1884 | if (unlikely(signal->group_stop_count > 0) && |
1885 | do_signal_stop(0)) |
1886 | goto relock; |
1887 | |
1888 | signr = dequeue_signal(current, &current->blocked, |
1889 | info); |
1890 | |
1891 | if (!signr) |
1892 | break; /* will return 0 */ |
1893 | |
1894 | if (signr != SIGKILL) { |
1895 | signr = ptrace_signal(signr, info, |
1896 | regs, cookie); |
1897 | if (!signr) |
1898 | continue; |
1899 | } |
1900 | |
1901 | ka = &sighand->action[signr-1]; |
1902 | } |
1903 | |
1904 | /* Trace actually delivered signals. */ |
1905 | trace_signal_deliver(signr, info, ka); |
1906 | |
1907 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
1908 | continue; |
1909 | if (ka->sa.sa_handler != SIG_DFL) { |
1910 | /* Run the handler. */ |
1911 | *return_ka = *ka; |
1912 | |
1913 | if (ka->sa.sa_flags & SA_ONESHOT) |
1914 | ka->sa.sa_handler = SIG_DFL; |
1915 | |
1916 | break; /* will return non-zero "signr" value */ |
1917 | } |
1918 | |
1919 | /* |
1920 | * Now we are doing the default action for this signal. |
1921 | */ |
1922 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ |
1923 | continue; |
1924 | |
1925 | /* |
1926 | * Global init gets no signals it doesn't want. |
1927 | * Container-init gets no signals it doesn't want from the same |
1928 | * container. |
1929 | * |
1930 | * Note that if global/container-init sees a sig_kernel_only() |
1931 | * signal here, the signal must have been generated internally |
1932 | * or must have come from an ancestor namespace. In either |
1933 | * case, the signal cannot be dropped. |
1934 | */ |
1935 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
1936 | !sig_kernel_only(signr)) |
1937 | continue; |
1938 | |
1939 | if (sig_kernel_stop(signr)) { |
1940 | /* |
1941 | * The default action is to stop all threads in |
1942 | * the thread group. The job control signals |
1943 | * do nothing in an orphaned pgrp, but SIGSTOP |
1944 | * always works. Note that siglock needs to be |
1945 | * dropped during the call to is_orphaned_pgrp() |
1946 | * because of lock ordering with tasklist_lock. |
1947 | * This allows an intervening SIGCONT to be posted. |
1948 | * We need to check for that and bail out if necessary. |
1949 | */ |
1950 | if (signr != SIGSTOP) { |
1951 | spin_unlock_irq(&sighand->siglock); |
1952 | |
1953 | /* signals can be posted during this window */ |
1954 | |
1955 | if (is_current_pgrp_orphaned()) |
1956 | goto relock; |
1957 | |
1958 | spin_lock_irq(&sighand->siglock); |
1959 | } |
1960 | |
1961 | if (likely(do_signal_stop(info->si_signo))) { |
1962 | /* It released the siglock. */ |
1963 | goto relock; |
1964 | } |
1965 | |
1966 | /* |
1967 | * We didn't actually stop, due to a race |
1968 | * with SIGCONT or something like that. |
1969 | */ |
1970 | continue; |
1971 | } |
1972 | |
1973 | spin_unlock_irq(&sighand->siglock); |
1974 | |
1975 | /* |
1976 | * Anything else is fatal, maybe with a core dump. |
1977 | */ |
1978 | current->flags |= PF_SIGNALED; |
1979 | |
1980 | if (sig_kernel_coredump(signr)) { |
1981 | if (print_fatal_signals) |
1982 | print_fatal_signal(regs, info->si_signo); |
1983 | /* |
1984 | * If it was able to dump core, this kills all |
1985 | * other threads in the group and synchronizes with |
1986 | * their demise. If we lost the race with another |
1987 | * thread getting here, it set group_exit_code |
1988 | * first and our do_group_exit call below will use |
1989 | * that value and ignore the one we pass it. |
1990 | */ |
1991 | do_coredump(info->si_signo, info->si_signo, regs); |
1992 | } |
1993 | |
1994 | /* |
1995 | * Death signals, no core dump. |
1996 | */ |
1997 | do_group_exit(info->si_signo); |
1998 | /* NOTREACHED */ |
1999 | } |
2000 | spin_unlock_irq(&sighand->siglock); |
2001 | return signr; |
2002 | } |
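
/*
 * How this is consumed (simplified sketch; names vary per arch): each
 * architecture's return-to-user path calls this from its do_signal()
 * and only sets up a handler frame for a positive return:
 *
 *	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 *	if (signr > 0)
 *		handle_signal(signr, &info, &ka, oldset, regs);
 */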
2003 | |
2004 | void exit_signals(struct task_struct *tsk) |
2005 | { |
2006 | int group_stop = 0; |
2007 | struct task_struct *t; |
2008 | |
2009 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2010 | tsk->flags |= PF_EXITING; |
2011 | return; |
2012 | } |
2013 | |
2014 | spin_lock_irq(&tsk->sighand->siglock); |
2015 | /* |
2016 | * From now this task is not visible for group-wide signals, |
2017 | * see wants_signal(), do_signal_stop(). |
2018 | */ |
2019 | tsk->flags |= PF_EXITING; |
2020 | if (!signal_pending(tsk)) |
2021 | goto out; |
2022 | |
2023 | /* It could be that __group_complete_signal() chose us to |
2024 | * notify about a group-wide signal. Another thread should be |
2025 | * woken now to take the signal since we will not. |
2026 | */ |
2027 | for (t = tsk; (t = next_thread(t)) != tsk; ) |
2028 | if (!signal_pending(t) && !(t->flags & PF_EXITING)) |
2029 | recalc_sigpending_and_wake(t); |
2030 | |
2031 | if (unlikely(tsk->signal->group_stop_count) && |
2032 | !--tsk->signal->group_stop_count) { |
2033 | tsk->signal->flags = SIGNAL_STOP_STOPPED; |
2034 | group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED); |
2035 | } |
2036 | out: |
2037 | spin_unlock_irq(&tsk->sighand->siglock); |
2038 | |
2039 | if (unlikely(group_stop)) { |
2040 | read_lock(&tasklist_lock); |
2041 | do_notify_parent_cldstop(tsk, group_stop); |
2042 | read_unlock(&tasklist_lock); |
2043 | } |
2044 | } |
2045 | |
2046 | EXPORT_SYMBOL(recalc_sigpending); |
2047 | EXPORT_SYMBOL_GPL(dequeue_signal); |
2048 | EXPORT_SYMBOL(flush_signals); |
2049 | EXPORT_SYMBOL(force_sig); |
2050 | EXPORT_SYMBOL(send_sig); |
2051 | EXPORT_SYMBOL(send_sig_info); |
2052 | EXPORT_SYMBOL(sigprocmask); |
2053 | EXPORT_SYMBOL(block_all_signals); |
2054 | EXPORT_SYMBOL(unblock_all_signals); |
2055 | |
2056 | |
2057 | /* |
2058 | * System call entry points. |
2059 | */ |
2060 | |
2061 | SYSCALL_DEFINE0(restart_syscall) |
2062 | { |
2063 | struct restart_block *restart = &current_thread_info()->restart_block; |
2064 | return restart->fn(restart); |
2065 | } |
2066 | |
2067 | long do_no_restart_syscall(struct restart_block *param) |
2068 | { |
2069 | return -EINTR; |
2070 | } |
2071 | |
2072 | /* |
2073 | * We don't need to get the kernel lock - this is all local to this |
2074 | * particular thread. (And that's good, because this is _heavily_ |
2075 | * used by various programs.) |
2076 | */ |
2077 | |
2078 | /* |
2079 | * This is also useful for kernel threads that want to temporarily |
2080 | * (or permanently) block certain signals. |
2081 | * |
2082 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel |
2083 | * interface happily blocks "unblockable" signals like SIGKILL |
2084 | * and friends. |
2085 | */ |
2086 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) |
2087 | { |
2088 | int error; |
2089 | |
2090 | spin_lock_irq(&current->sighand->siglock); |
2091 | if (oldset) |
2092 | *oldset = current->blocked; |
2093 | |
2094 | error = 0; |
2095 | switch (how) { |
2096 | case SIG_BLOCK: |
2097 | sigorsets(&current->blocked, &current->blocked, set); |
2098 | break; |
2099 | case SIG_UNBLOCK: |
2100 | signandsets(&current->blocked, &current->blocked, set); |
2101 | break; |
2102 | case SIG_SETMASK: |
2103 | current->blocked = *set; |
2104 | break; |
2105 | default: |
2106 | error = -EINVAL; |
2107 | } |
2108 | recalc_sigpending(); |
2109 | spin_unlock_irq(&current->sighand->siglock); |
2110 | |
2111 | return error; |
2112 | } |
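
/*
 * In-kernel usage sketch (hypothetical caller): a kernel thread that
 * wants no asynchronous signal delivery at all can do
 *
 *	sigset_t all;
 *
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 *
 * Note that, unlike the userspace syscall below, this blocks even
 * SIGKILL and SIGSTOP, as the comment above warns.
 */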
2113 | |
2114 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, |
2115 | sigset_t __user *, oset, size_t, sigsetsize) |
2116 | { |
2117 | int error = -EINVAL; |
2118 | sigset_t old_set, new_set; |
2119 | |
2120 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2121 | if (sigsetsize != sizeof(sigset_t)) |
2122 | goto out; |
2123 | |
2124 | if (set) { |
2125 | error = -EFAULT; |
2126 | if (copy_from_user(&new_set, set, sizeof(*set))) |
2127 | goto out; |
2128 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2129 | |
2130 | error = sigprocmask(how, &new_set, &old_set); |
2131 | if (error) |
2132 | goto out; |
2133 | if (oset) |
2134 | goto set_old; |
2135 | } else if (oset) { |
2136 | spin_lock_irq(&current->sighand->siglock); |
2137 | old_set = current->blocked; |
2138 | spin_unlock_irq(&current->sighand->siglock); |
2139 | |
2140 | set_old: |
2141 | error = -EFAULT; |
2142 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
2143 | goto out; |
2144 | } |
2145 | error = 0; |
2146 | out: |
2147 | return error; |
2148 | } |
2149 | |
2150 | long do_sigpending(void __user *set, unsigned long sigsetsize) |
2151 | { |
2152 | long error = -EINVAL; |
2153 | sigset_t pending; |
2154 | |
2155 | if (sigsetsize > sizeof(sigset_t)) |
2156 | goto out; |
2157 | |
2158 | spin_lock_irq(&current->sighand->siglock); |
2159 | sigorsets(&pending, &current->pending.signal, |
2160 | &current->signal->shared_pending.signal); |
2161 | spin_unlock_irq(&current->sighand->siglock); |
2162 | |
2163 | /* Outside the lock because only this thread touches it. */ |
2164 | sigandsets(&pending, &current->blocked, &pending); |
2165 | |
2166 | error = -EFAULT; |
2167 | if (!copy_to_user(set, &pending, sigsetsize)) |
2168 | error = 0; |
2169 | |
2170 | out: |
2171 | return error; |
2172 | } |
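
/*
 * Note the sigandsets() with ->blocked above: an unblocked pending
 * signal would already have been delivered, so what sigpending(2)
 * reports is "pending and still blocked". Userspace sketch:
 *
 *	sigset_t pend;
 *
 *	sigpending(&pend);
 *	if (sigismember(&pend, SIGUSR1))
 *		...	// SIGUSR1 arrived while we had it blocked
 */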
2173 | |
2174 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) |
2175 | { |
2176 | return do_sigpending(set, sigsetsize); |
2177 | } |
2178 | |
2179 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER |
2180 | |
2181 | int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) |
2182 | { |
2183 | int err; |
2184 | |
2185 | if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) |
2186 | return -EFAULT; |
2187 | if (from->si_code < 0) |
2188 | return __copy_to_user(to, from, sizeof(siginfo_t)) |
2189 | ? -EFAULT : 0; |
2190 | /* |
2191 | * If you change siginfo_t structure, please be sure |
2192 | * this code is fixed accordingly. |
2193 | * Please remember to update the signalfd_copyinfo() function |
2194 | * inside fs/signalfd.c too, in case siginfo_t changes. |
2195 | * It should never copy any pad contained in the structure |
2196 | * to avoid security leaks, but must copy the generic |
2197 | * 3 ints plus the relevant union member. |
2198 | */ |
2199 | err = __put_user(from->si_signo, &to->si_signo); |
2200 | err |= __put_user(from->si_errno, &to->si_errno); |
2201 | err |= __put_user((short)from->si_code, &to->si_code); |
2202 | switch (from->si_code & __SI_MASK) { |
2203 | case __SI_KILL: |
2204 | err |= __put_user(from->si_pid, &to->si_pid); |
2205 | err |= __put_user(from->si_uid, &to->si_uid); |
2206 | break; |
2207 | case __SI_TIMER: |
2208 | err |= __put_user(from->si_tid, &to->si_tid); |
2209 | err |= __put_user(from->si_overrun, &to->si_overrun); |
2210 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2211 | break; |
2212 | case __SI_POLL: |
2213 | err |= __put_user(from->si_band, &to->si_band); |
2214 | err |= __put_user(from->si_fd, &to->si_fd); |
2215 | break; |
2216 | case __SI_FAULT: |
2217 | err |= __put_user(from->si_addr, &to->si_addr); |
2218 | #ifdef __ARCH_SI_TRAPNO |
2219 | err |= __put_user(from->si_trapno, &to->si_trapno); |
2220 | #endif |
2221 | #ifdef BUS_MCEERR_AO |
2222 | /* |
2223 | * Other callers might not initialize the si_lsb field, |
2224 | * so check explicitely for the right codes here. |
2225 | */ |
2226 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) |
2227 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); |
2228 | #endif |
2229 | break; |
2230 | case __SI_CHLD: |
2231 | err |= __put_user(from->si_pid, &to->si_pid); |
2232 | err |= __put_user(from->si_uid, &to->si_uid); |
2233 | err |= __put_user(from->si_status, &to->si_status); |
2234 | err |= __put_user(from->si_utime, &to->si_utime); |
2235 | err |= __put_user(from->si_stime, &to->si_stime); |
2236 | break; |
2237 | case __SI_RT: /* This is not generated by the kernel as of now. */ |
2238 | case __SI_MESGQ: /* But this is */ |
2239 | err |= __put_user(from->si_pid, &to->si_pid); |
2240 | err |= __put_user(from->si_uid, &to->si_uid); |
2241 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2242 | break; |
2243 | default: /* this is just in case for now ... */ |
2244 | err |= __put_user(from->si_pid, &to->si_pid); |
2245 | err |= __put_user(from->si_uid, &to->si_uid); |
2246 | break; |
2247 | } |
2248 | return err; |
2249 | } |
2250 | |
2251 | #endif |
2252 | |
2253 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
2254 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, |
2255 | size_t, sigsetsize) |
2256 | { |
2257 | int ret, sig; |
2258 | sigset_t these; |
2259 | struct timespec ts; |
2260 | siginfo_t info; |
2261 | long timeout = 0; |
2262 | |
2263 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2264 | if (sigsetsize != sizeof(sigset_t)) |
2265 | return -EINVAL; |
2266 | |
2267 | if (copy_from_user(&these, uthese, sizeof(these))) |
2268 | return -EFAULT; |
2269 | |
2270 | /* |
2271 | * Invert the set of allowed signals to get those we |
2272 | * want to block. |
2273 | */ |
2274 | sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2275 | signotset(&these); |
2276 | |
2277 | if (uts) { |
2278 | if (copy_from_user(&ts, uts, sizeof(ts))) |
2279 | return -EFAULT; |
2280 | if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 |
2281 | || ts.tv_sec < 0) |
2282 | return -EINVAL; |
2283 | } |
2284 | |
2285 | spin_lock_irq(&current->sighand->siglock); |
2286 | sig = dequeue_signal(current, &these, &info); |
2287 | if (!sig) { |
2288 | timeout = MAX_SCHEDULE_TIMEOUT; |
2289 | if (uts) |
2290 | timeout = (timespec_to_jiffies(&ts) |
2291 | + (ts.tv_sec || ts.tv_nsec)); |
2292 | |
2293 | if (timeout) { |
2294 | /* None ready -- temporarily unblock those we're |
2295 | * interested in while we sleep, so that we'll |
2296 | * be awakened when they arrive. */ |
2297 | current->real_blocked = current->blocked; |
2298 | sigandsets(&current->blocked, &current->blocked, &these); |
2299 | recalc_sigpending(); |
2300 | spin_unlock_irq(&current->sighand->siglock); |
2301 | |
2302 | timeout = schedule_timeout_interruptible(timeout); |
2303 | |
2304 | spin_lock_irq(&current->sighand->siglock); |
2305 | sig = dequeue_signal(current, &these, &info); |
2306 | current->blocked = current->real_blocked; |
2307 | siginitset(&current->real_blocked, 0); |
2308 | recalc_sigpending(); |
2309 | } |
2310 | } |
2311 | spin_unlock_irq(&current->sighand->siglock); |
2312 | |
2313 | if (sig) { |
2314 | ret = sig; |
2315 | if (uinfo) { |
2316 | if (copy_siginfo_to_user(uinfo, &info)) |
2317 | ret = -EFAULT; |
2318 | } |
2319 | } else { |
2320 | ret = -EAGAIN; |
2321 | if (timeout) |
2322 | ret = -EINTR; |
2323 | } |
2324 | |
2325 | return ret; |
2326 | } |
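
/*
 * Userspace sketch: sigtimedwait(2) is normally called with the
 * signals of interest blocked, so they stay queued for the
 * dequeue_signal() above instead of running a handler:
 *
 *	sigset_t set;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	int sig;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	sig = sigtimedwait(&set, NULL, &ts);	// -1 with EAGAIN on timeout
 */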
2327 | |
2328 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
2329 | { |
2330 | struct siginfo info; |
2331 | |
2332 | info.si_signo = sig; |
2333 | info.si_errno = 0; |
2334 | info.si_code = SI_USER; |
2335 | info.si_pid = task_tgid_vnr(current); |
2336 | info.si_uid = current_uid(); |
2337 | |
2338 | return kill_something_info(sig, &info, pid); |
2339 | } |
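
/*
 * The pid argument is decoded by kill_something_info(): pid > 0
 * targets a single process, pid == 0 the caller's process group,
 * pid == -1 everything the caller may signal, and pid < -1 the
 * process group -pid. Userspace sketch:
 *
 *	kill(1234, SIGTERM);	// one process
 *	kill(0, SIGTERM);	// our own process group
 *	kill(-1234, SIGTERM);	// process group 1234
 */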
2340 | |
2341 | static int |
2342 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) |
2343 | { |
2344 | struct task_struct *p; |
2345 | int error = -ESRCH; |
2346 | |
2347 | rcu_read_lock(); |
2348 | p = find_task_by_vpid(pid); |
2349 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { |
2350 | error = check_kill_permission(sig, info, p); |
2351 | /* |
2352 | * The null signal is a permissions and process existence |
2353 | * probe. No signal is actually delivered. |
2354 | */ |
2355 | if (!error && sig) { |
2356 | error = do_send_sig_info(sig, info, p, false); |
2357 | /* |
2358 | * If lock_task_sighand() failed we pretend the task |
2359 | * dies after receiving the signal. The window is tiny, |
2360 | * and the signal is private anyway. |
2361 | */ |
2362 | if (unlikely(error == -ESRCH)) |
2363 | error = 0; |
2364 | } |
2365 | } |
2366 | rcu_read_unlock(); |
2367 | |
2368 | return error; |
2369 | } |
2370 | |
2371 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
2372 | { |
2373 | struct siginfo info; |
2374 | |
2375 | info.si_signo = sig; |
2376 | info.si_errno = 0; |
2377 | info.si_code = SI_TKILL; |
2378 | info.si_pid = task_tgid_vnr(current); |
2379 | info.si_uid = current_uid(); |
2380 | |
2381 | return do_send_specific(tgid, pid, sig, &info); |
2382 | } |
2383 | |
2384 | /** |
2385 | * sys_tgkill - send signal to one specific thread |
2386 | * @tgid: the thread group ID of the thread |
2387 | * @pid: the PID of the thread |
2388 | * @sig: signal to be sent |
2389 | * |
2390 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
2391 | * exists but no longer belongs to the target process. This |
2392 | * method solves the problem of threads exiting and PIDs getting reused. |
2393 | */ |
2394 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
2395 | { |
2396 | /* This is only valid for single tasks */ |
2397 | if (pid <= 0 || tgid <= 0) |
2398 | return -EINVAL; |
2399 | |
2400 | return do_tkill(tgid, pid, sig); |
2401 | } |
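
/*
 * Userspace sketch: tgkill is usually reached through pthread_kill(),
 * or raw via syscall(2) when a thread id from gettid() is at hand:
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */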
2402 | |
2403 | /* |
2404 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
2405 | */ |
2406 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
2407 | { |
2408 | /* This is only valid for single tasks */ |
2409 | if (pid <= 0) |
2410 | return -EINVAL; |
2411 | |
2412 | return do_tkill(0, pid, sig); |
2413 | } |
2414 | |
2415 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
2416 | siginfo_t __user *, uinfo) |
2417 | { |
2418 | siginfo_t info; |
2419 | |
2420 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
2421 | return -EFAULT; |
2422 | |
2423 | /* Not even root can pretend to send signals from the kernel. |
2424 | Nor can they impersonate a kill(), which adds source info. */ |
2425 | if (info.si_code >= 0) |
2426 | return -EPERM; |
2427 | info.si_signo = sig; |
2428 | |
2429 | /* POSIX.1b doesn't mention process groups. */ |
2430 | return kill_proc_info(sig, &info, pid); |
2431 | } |
2432 | |
2433 | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) |
2434 | { |
2435 | /* This is only valid for single tasks */ |
2436 | if (pid <= 0 || tgid <= 0) |
2437 | return -EINVAL; |
2438 | |
2439 | /* Not even root can pretend to send signals from the kernel. |
2440 | Nor can they impersonate a kill(), which adds source info. */ |
2441 | if (info->si_code >= 0) |
2442 | return -EPERM; |
2443 | info->si_signo = sig; |
2444 | |
2445 | return do_send_specific(tgid, pid, sig, info); |
2446 | } |
2447 | |
2448 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, |
2449 | siginfo_t __user *, uinfo) |
2450 | { |
2451 | siginfo_t info; |
2452 | |
2453 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
2454 | return -EFAULT; |
2455 | |
2456 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
2457 | } |
2458 | |
2459 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
2460 | { |
2461 | struct task_struct *t = current; |
2462 | struct k_sigaction *k; |
2463 | sigset_t mask; |
2464 | |
2465 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
2466 | return -EINVAL; |
2467 | |
2468 | k = &t->sighand->action[sig-1]; |
2469 | |
2470 | spin_lock_irq(&current->sighand->siglock); |
2471 | if (oact) |
2472 | *oact = *k; |
2473 | |
2474 | if (act) { |
2475 | sigdelsetmask(&act->sa.sa_mask, |
2476 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2477 | *k = *act; |
2478 | /* |
2479 | * POSIX 3.3.1.3: |
2480 | * "Setting a signal action to SIG_IGN for a signal that is |
2481 | * pending shall cause the pending signal to be discarded, |
2482 | * whether or not it is blocked." |
2483 | * |
2484 | * "Setting a signal action to SIG_DFL for a signal that is |
2485 | * pending and whose default action is to ignore the signal |
2486 | * (for example, SIGCHLD), shall cause the pending signal to |
2487 | * be discarded, whether or not it is blocked" |
2488 | */ |
2489 | if (sig_handler_ignored(sig_handler(t, sig), sig)) { |
2490 | sigemptyset(&mask); |
2491 | sigaddset(&mask, sig); |
2492 | rm_from_queue_full(&mask, &t->signal->shared_pending); |
2493 | do { |
2494 | rm_from_queue_full(&mask, &t->pending); |
2495 | t = next_thread(t); |
2496 | } while (t != current); |
2497 | } |
2498 | } |
2499 | |
2500 | spin_unlock_irq(&current->sighand->siglock); |
2501 | return 0; |
2502 | } |
2503 | |
2504 | int |
2505 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) |
2506 | { |
2507 | stack_t oss; |
2508 | int error; |
2509 | |
2510 | oss.ss_sp = (void __user *) current->sas_ss_sp; |
2511 | oss.ss_size = current->sas_ss_size; |
2512 | oss.ss_flags = sas_ss_flags(sp); |
2513 | |
2514 | if (uss) { |
2515 | void __user *ss_sp; |
2516 | size_t ss_size; |
2517 | int ss_flags; |
2518 | |
2519 | error = -EFAULT; |
2520 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) |
2521 | goto out; |
2522 | error = __get_user(ss_sp, &uss->ss_sp) | |
2523 | __get_user(ss_flags, &uss->ss_flags) | |
2524 | __get_user(ss_size, &uss->ss_size); |
2525 | if (error) |
2526 | goto out; |
2527 | |
2528 | error = -EPERM; |
2529 | if (on_sig_stack(sp)) |
2530 | goto out; |
2531 | |
2532 | error = -EINVAL; |
2533 | /* |
2534 | * |
2535 | * Note: this code used to test ss_flags incorrectly; |
2536 | * old code may have been written using ss_flags==0 |
2537 | * to mean ss_flags==SS_ONSTACK (as this was the only |
2538 | * way that worked), so this fix preserves that older |
2539 | * mechanism. |
2540 | */ |
2541 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) |
2542 | goto out; |
2543 | |
2544 | if (ss_flags == SS_DISABLE) { |
2545 | ss_size = 0; |
2546 | ss_sp = NULL; |
2547 | } else { |
2548 | error = -ENOMEM; |
2549 | if (ss_size < MINSIGSTKSZ) |
2550 | goto out; |
2551 | } |
2552 | |
2553 | current->sas_ss_sp = (unsigned long) ss_sp; |
2554 | current->sas_ss_size = ss_size; |
2555 | } |
2556 | |
2557 | error = 0; |
2558 | if (uoss) { |
2559 | error = -EFAULT; |
2560 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) |
2561 | goto out; |
2562 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | |
2563 | __put_user(oss.ss_size, &uoss->ss_size) | |
2564 | __put_user(oss.ss_flags, &uoss->ss_flags); |
2565 | } |
2566 | |
2567 | out: |
2568 | return error; |
2569 | } |
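
/*
 * Userspace sketch of the matching sigaltstack(2) setup, paired with
 * an SA_ONSTACK handler so that e.g. a SIGSEGV caused by stack
 * overflow still has somewhere to run:
 *
 *	stack_t ss = {
 *		.ss_sp		= malloc(SIGSTKSZ),
 *		.ss_size	= SIGSTKSZ,
 *		.ss_flags	= 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 */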
2570 | |
2571 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
2572 | |
2573 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) |
2574 | { |
2575 | return do_sigpending(set, sizeof(*set)); |
2576 | } |
2577 | |
2578 | #endif |
2579 | |
2580 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK |
2581 | /* Some platforms have their own version with special arguments; others |
2582 | support only sys_rt_sigprocmask. */ |
2583 | |
2584 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, |
2585 | old_sigset_t __user *, oset) |
2586 | { |
2587 | int error; |
2588 | old_sigset_t old_set, new_set; |
2589 | |
2590 | if (set) { |
2591 | error = -EFAULT; |
2592 | if (copy_from_user(&new_set, set, sizeof(*set))) |
2593 | goto out; |
2594 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2595 | |
2596 | spin_lock_irq(&current->sighand->siglock); |
2597 | old_set = current->blocked.sig[0]; |
2598 | |
2599 | error = 0; |
2600 | switch (how) { |
2601 | default: |
2602 | error = -EINVAL; |
2603 | break; |
2604 | case SIG_BLOCK: |
2605 | sigaddsetmask(&current->blocked, new_set); |
2606 | break; |
2607 | case SIG_UNBLOCK: |
2608 | sigdelsetmask(&current->blocked, new_set); |
2609 | break; |
2610 | case SIG_SETMASK: |
2611 | current->blocked.sig[0] = new_set; |
2612 | break; |
2613 | } |
2614 | |
2615 | recalc_sigpending(); |
2616 | spin_unlock_irq(&current->sighand->siglock); |
2617 | if (error) |
2618 | goto out; |
2619 | if (oset) |
2620 | goto set_old; |
2621 | } else if (oset) { |
2622 | old_set = current->blocked.sig[0]; |
2623 | set_old: |
2624 | error = -EFAULT; |
2625 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
2626 | goto out; |
2627 | } |
2628 | error = 0; |
2629 | out: |
2630 | return error; |
2631 | } |
2632 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
2633 | |
2634 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION |
2635 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
2636 | const struct sigaction __user *, act, |
2637 | struct sigaction __user *, oact, |
2638 | size_t, sigsetsize) |
2639 | { |
2640 | struct k_sigaction new_sa, old_sa; |
2641 | int ret = -EINVAL; |
2642 | |
2643 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2644 | if (sigsetsize != sizeof(sigset_t)) |
2645 | goto out; |
2646 | |
2647 | if (act) { |
2648 | if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) |
2649 | return -EFAULT; |
2650 | } |
2651 | |
2652 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); |
2653 | |
2654 | if (!ret && oact) { |
2655 | if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) |
2656 | return -EFAULT; |
2657 | } |
2658 | out: |
2659 | return ret; |
2660 | } |
2661 | #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ |
2662 | |
2663 | #ifdef __ARCH_WANT_SYS_SGETMASK |
2664 | |
2665 | /* |
2666 | * For backwards compatibility. Functionality superseded by sigprocmask. |
2667 | */ |
2668 | SYSCALL_DEFINE0(sgetmask) |
2669 | { |
2670 | /* SMP safe */ |
2671 | return current->blocked.sig[0]; |
2672 | } |
2673 | |
2674 | SYSCALL_DEFINE1(ssetmask, int, newmask) |
2675 | { |
2676 | int old; |
2677 | |
2678 | spin_lock_irq(&current->sighand->siglock); |
2679 | old = current->blocked.sig[0]; |
2680 | |
2681 | siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)| |
2682 | sigmask(SIGSTOP))); |
2683 | recalc_sigpending(); |
2684 | spin_unlock_irq(&current->sighand->siglock); |
2685 | |
2686 | return old; |
2687 | } |
2688 | #endif /* __ARCH_WANT_SYS_SGETMASK */ |
2689 | |
2690 | #ifdef __ARCH_WANT_SYS_SIGNAL |
2691 | /* |
2692 | * For backwards compatibility. Functionality superseded by sigaction. |
2693 | */ |
2694 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) |
2695 | { |
2696 | struct k_sigaction new_sa, old_sa; |
2697 | int ret; |
2698 | |
2699 | new_sa.sa.sa_handler = handler; |
2700 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; |
2701 | sigemptyset(&new_sa.sa.sa_mask); |
2702 | |
2703 | ret = do_sigaction(sig, &new_sa, &old_sa); |
2704 | |
2705 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; |
2706 | } |
2707 | #endif /* __ARCH_WANT_SYS_SIGNAL */ |
2708 | |
2709 | #ifdef __ARCH_WANT_SYS_PAUSE |
2710 | |
2711 | SYSCALL_DEFINE0(pause) |
2712 | { |
2713 | current->state = TASK_INTERRUPTIBLE; |
2714 | schedule(); |
2715 | return -ERESTARTNOHAND; |
2716 | } |
2717 | |
2718 | #endif |
2719 | |
2720 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND |
2721 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
2722 | { |
2723 | sigset_t newset; |
2724 | |
2725 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2726 | if (sigsetsize != sizeof(sigset_t)) |
2727 | return -EINVAL; |
2728 | |
2729 | if (copy_from_user(&newset, unewset, sizeof(newset))) |
2730 | return -EFAULT; |
2731 | sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2732 | |
2733 | spin_lock_irq(&current->sighand->siglock); |
2734 | current->saved_sigmask = current->blocked; |
2735 | current->blocked = newset; |
2736 | recalc_sigpending(); |
2737 | spin_unlock_irq(&current->sighand->siglock); |
2738 | |
2739 | current->state = TASK_INTERRUPTIBLE; |
2740 | schedule(); |
2741 | set_restore_sigmask(); |
2742 | return -ERESTARTNOHAND; |
2743 | } |
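
/*
 * Userspace sketch: sigsuspend(2) exists to close the race between
 * unblocking a signal and sleeping for it; the mask swap and the
 * schedule() above are atomic with respect to delivery:
 *
 *	sigset_t waitmask;
 *
 *	sigemptyset(&waitmask);	// block nothing while we wait
 *	sigsuspend(&waitmask);	// always returns -1 with errno == EINTR
 */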
2744 | #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ |
2745 | |
2746 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) |
2747 | { |
2748 | return NULL; |
2749 | } |
2750 | |
2751 | void __init signals_init(void) |
2752 | { |
2753 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); |
2754 | } |
2755 | |
2756 | #ifdef CONFIG_KGDB_KDB |
2757 | #include <linux/kdb.h> |
2758 | /* |
2759 | * kdb_send_sig_info - Allows kdb to send signals without exposing |
2760 | * signal internals. This function checks if the required locks are |
2761 | * available before calling the main signal code, to avoid kdb |
2762 | * deadlocks. |
2763 | */ |
2764 | void |
2765 | kdb_send_sig_info(struct task_struct *t, struct siginfo *info) |
2766 | { |
2767 | static struct task_struct *kdb_prev_t; |
2768 | int sig, new_t; |
2769 | if (!spin_trylock(&t->sighand->siglock)) { |
2770 | kdb_printf("Can't do kill command now.\n" |
2771 | "The sigmask lock is held somewhere else in " |
2772 | "kernel, try again later\n"); |
2773 | return; |
2774 | } |
2775 | spin_unlock(&t->sighand->siglock); |
2776 | new_t = kdb_prev_t != t; |
2777 | kdb_prev_t = t; |
2778 | if (t->state != TASK_RUNNING && new_t) { |
2779 | kdb_printf("Process is not RUNNING, sending a signal from " |
2780 | "kdb risks deadlock\n" |
2781 | "on the run queue locks. " |
2782 | "The signal has _not_ been sent.\n" |
2783 | "Reissue the kill command if you want to risk " |
2784 | "the deadlock.\n"); |
2785 | return; |
2786 | } |
2787 | sig = info->si_signo; |
2788 | if (send_sig_info(sig, info, t)) |
2789 | kdb_printf("Fail to deliver Signal %d to process %d.\n", |
2790 | sig, t->pid); |
2791 | else |
2792 | kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); |
2793 | } |
2794 | #endif /* CONFIG_KGDB_KDB */ |
2795 |