/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

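/* Return the handler currently installed for @sig on task @t. */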
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

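/*
 * Recompute TIF_SIGPENDING for @t: the flag must be set whenever a job
 * control action is pending or an unblocked signal sits in the private
 * or shared pending set.  Returns 1 and sets the flag if so, 0 otherwise.
 */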
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know it is safe
	 * may clear it.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
	       current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->sighand->siglock guarantees that @task->parent points
 * to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

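/*
 * Free a sigqueue entry and drop the per-user accounting, unless it is
 * a preallocated entry still owned by a posix timer (see sigqueue_alloc()).
 */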
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

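/* Same as __flush_signals(), but takes and releases ->siglock itself. */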
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

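/*
 * Return true if @sig is effectively unhandled for @tsk: global init is
 * always considered unhandled; otherwise no userspace handler may be
 * installed and no ptracer may be attached to intercept the signal.
 */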
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;

	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

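/*
 * SEND_SIG_NOINFO, SEND_SIG_PRIV and SEND_SIG_FORCED are small special
 * "pointer" values rather than real siginfo structures; the two helpers
 * below distinguish them from genuine siginfo.
 */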
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid) ||
	    uid_eq(cred->uid, tcred->suid) ||
	    uid_eq(cred->uid, tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap, which is cleared on the
 * next TRAP_STOP, to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (signal->flags & SIGNAL_GROUP_COREDUMP)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

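/*
 * Legacy (non-realtime) signals coalesce: a second instance is dropped
 * while the first is still pending, which is what this check detects.
 */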
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

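/*
 * Queue @sig on the private or shared pending list of @t and pick a
 * thread to deliver it to.  This is the common path every sender goes
 * through; the caller must hold @t->sighand->siglock.
 */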
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

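/*
 * Wrapper around __send_signal() that computes from_ancestor_ns: a
 * user-sent signal comes from an ancestor pid namespace iff the sender
 * has no pid in the target's namespace.
 */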
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

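/*
 * Take ->siglock for a possibly-exiting task.  tsk->sighand may be
 * switched by exec or become NULL in release_task(), so retry until the
 * pointer is stable under the lock; returns NULL if no sighand is left.
 */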
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

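/*
 * A minimal usage sketch of the preallocated-sigqueue API, modelled on
 * what posix-timers does (error handling and locking elided):
 *
 *	struct sigqueue *q = sigqueue_alloc();	   at timer creation
 *	if (!q)
 *		return -EAGAIN;			   report failure up front
 *
 *	q->info.si_signo = sig;			   fill in siginfo once
 *	q->info.si_code = SI_TIMER;
 *	send_sigqueue(q, task, group);		   on each timer expiry
 *
 *	sigqueue_free(q);			   at timer deletion
 */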
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

1794 | static inline int may_ptrace_stop(void) |
1795 | { |
1796 | if (!likely(current->ptrace)) |
1797 | return 0; |
1798 | /* |
1799 | * Are we in the middle of do_coredump? |
1800 | * If so and our tracer is also part of the coredump, stopping |
1801 | * is a deadlock situation and pointless because our tracer |
1802 | * is dead, so don't allow us to stop. |
1803 | * If SIGKILL was already sent before the caller unlocked |
1804 | * ->siglock we must see ->core_state != NULL. Otherwise it |
1805 | * is safe to enter schedule(). |
1806 | * |
1807 | * This is almost outdated, a task with the pending SIGKILL can't |
1808 | * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported |
1809 | * after SIGKILL was already dequeued. |
1810 | */ |
1811 | if (unlikely(current->mm->core_state) && |
1812 | unlikely(current->mm == current->parent->mm)) |
1813 | return 0; |
1814 | |
1815 | return 1; |
1816 | } |
1817 | |
1818 | /* |
1819 | * Return non-zero if there is a SIGKILL that should be waking us up. |
1820 | * Called with the siglock held. |
1821 | */ |
1822 | static int sigkill_pending(struct task_struct *tsk) |
1823 | { |
1824 | return sigismember(&tsk->pending.signal, SIGKILL) || |
1825 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); |
1826 | } |
1827 | |
1828 | /* |
1829 | * This must be called with current->sighand->siglock held. |
1830 | * |
1831 | * This should be the path for all ptrace stops. |
1832 | * We always set current->last_siginfo while stopped here. |
1833 | * That makes it a way to test a stopped process for |
1834 | * being ptrace-stopped vs being job-control-stopped. |
1835 | * |
1836 | * If we actually decide not to stop at all because the tracer |
1837 | * is gone, we keep current->exit_code unless clear_code. |
1838 | */ |
1839 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) |
1840 | __releases(&current->sighand->siglock) |
1841 | __acquires(&current->sighand->siglock) |
1842 | { |
1843 | bool gstop_done = false; |
1844 | |
1845 | if (arch_ptrace_stop_needed(exit_code, info)) { |
1846 | /* |
1847 | * The arch code has something special to do before a |
1848 | * ptrace stop. This is allowed to block, e.g. for faults |
1849 | * on user stack pages. We can't keep the siglock while |
1850 | * calling arch_ptrace_stop, so we must release it now. |
1851 | * To preserve proper semantics, we must do this before |
1852 | * any signal bookkeeping like checking group_stop_count. |
1853 | * Meanwhile, a SIGKILL could come in before we retake the |
1854 | * siglock. That must prevent us from sleeping in TASK_TRACED. |
1855 | * So after regaining the lock, we must check for SIGKILL. |
1856 | */ |
1857 | spin_unlock_irq(&current->sighand->siglock); |
1858 | arch_ptrace_stop(exit_code, info); |
1859 | spin_lock_irq(&current->sighand->siglock); |
1860 | if (sigkill_pending(current)) |
1861 | return; |
1862 | } |
1863 | |
1864 | /* |
1865 | * We're committing to trapping. TRACED should be visible before |
1866 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). |
1867 | * Also, transition to TRACED and updates to ->jobctl should be |
1868 | * atomic with respect to siglock and should be done after the arch |
1869 | * hook as siglock is released and regrabbed across it. |
1870 | */ |
1871 | set_current_state(TASK_TRACED); |
1872 | |
1873 | current->last_siginfo = info; |
1874 | current->exit_code = exit_code; |
1875 | |
1876 | /* |
1877 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
1878 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered |
1879 | * across siglock relocks since INTERRUPT was scheduled, PENDING |
1880 | * could be clear now. We act as if SIGCONT is received after |
1881 | * TASK_TRACED is entered - ignore it. |
1882 | */ |
1883 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) |
1884 | gstop_done = task_participate_group_stop(current); |
1885 | |
1886 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ |
1887 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); |
1888 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) |
1889 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); |
1890 | |
1891 | /* entering a trap, clear TRAPPING */ |
1892 | task_clear_jobctl_trapping(current); |
1893 | |
1894 | spin_unlock_irq(&current->sighand->siglock); |
1895 | read_lock(&tasklist_lock); |
1896 | if (may_ptrace_stop()) { |
1897 | /* |
1898 | * Notify parents of the stop. |
1899 | * |
1900 | * While ptraced, there are two parents - the ptracer and |
1901 | * the real_parent of the group_leader. The ptracer should |
1902 | * know about every stop while the real parent is only |
1903 | * interested in the completion of group stop. The states |
1904 | * for the two don't interact with each other. Notify |
1905 | * separately unless they're gonna be duplicates. |
1906 | */ |
1907 | do_notify_parent_cldstop(current, true, why); |
1908 | if (gstop_done && ptrace_reparented(current)) |
1909 | do_notify_parent_cldstop(current, false, why); |
1910 | |
1911 | /* |
1912 | * Don't want to allow preemption here, because |
1913 | * sys_ptrace() needs this task to be inactive. |
1914 | * |
1915 | * XXX: implement read_unlock_no_resched(). |
1916 | */ |
1917 | preempt_disable(); |
1918 | read_unlock(&tasklist_lock); |
1919 | preempt_enable_no_resched(); |
1920 | freezable_schedule(); |
1921 | } else { |
1922 | /* |
1923 | * By the time we got the lock, our tracer went away. |
1924 | * Don't drop the lock yet, another tracer may come. |
1925 | * |
1926 | * If @gstop_done, the ptracer went away between group stop |
1927 | * completion and here. During detach, it would have set |
1928 | * JOBCTL_STOP_PENDING on us and we'll re-enter |
1929 | * TASK_STOPPED in do_signal_stop() on return, so notifying |
1930 | * the real parent of the group stop completion is enough. |
1931 | */ |
1932 | if (gstop_done) |
1933 | do_notify_parent_cldstop(current, false, why); |
1934 | |
1935 | /* tasklist protects us from ptrace_freeze_traced() */ |
1936 | __set_current_state(TASK_RUNNING); |
1937 | if (clear_code) |
1938 | current->exit_code = 0; |
1939 | read_unlock(&tasklist_lock); |
1940 | } |
1941 | |
1942 | /* |
1943 | * We are back. Now reacquire the siglock before touching |
1944 | * last_siginfo, so that we are sure to have synchronized with |
1945 | * any signal-sending on another CPU that wants to examine it. |
1946 | */ |
1947 | spin_lock_irq(&current->sighand->siglock); |
1948 | current->last_siginfo = NULL; |
1949 | |
1950 | /* LISTENING can be set only during STOP traps, clear it */ |
1951 | current->jobctl &= ~JOBCTL_LISTENING; |
1952 | |
1953 | /* |
1954 | * Queued signals ignored us while we were stopped for tracing. |
1955 | * So check for any that we should take before resuming user mode. |
1956 | * This sets TIF_SIGPENDING, but never clears it. |
1957 | */ |
1958 | recalc_sigpending_tsk(current); |
1959 | } |
1960 | |
1961 | static void ptrace_do_notify(int signr, int exit_code, int why) |
1962 | { |
1963 | siginfo_t info; |
1964 | |
1965 | memset(&info, 0, sizeof info); |
1966 | info.si_signo = signr; |
1967 | info.si_code = exit_code; |
1968 | info.si_pid = task_pid_vnr(current); |
1969 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1970 | |
1971 | /* Let the debugger run. */ |
1972 | ptrace_stop(exit_code, why, 1, &info); |
1973 | } |
1974 | |
1975 | void ptrace_notify(int exit_code) |
1976 | { |
1977 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); |
1978 | if (unlikely(current->task_works)) |
1979 | task_work_run(); |
1980 | |
1981 | spin_lock_irq(&current->sighand->siglock); |
1982 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); |
1983 | spin_unlock_irq(&current->sighand->siglock); |
1984 | } |
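| |
| /* |
| * Example (illustrative): callers encode a ptrace event in bits 8-15 |
| * of exit_code while keeping SIGTRAP in the low seven bits, which is |
| * exactly the layout the BUG_ON() above checks. ptrace_event() builds |
| * the value like this: |
| * |
| *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP); |
| */ |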
1985 | |
1986 | /** |
1987 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals |
1988 | * @signr: signr causing group stop if initiating |
1989 | * |
1990 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr |
1991 | * and participate in it. If already set, participate in the existing |
1992 | * group stop. If participated in a group stop (and thus slept), %true is |
1993 | * returned with siglock released. |
1994 | * |
1995 | * If ptraced, this function doesn't handle stop itself. Instead, |
1996 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock |
1997 | * untouched. The caller must ensure that INTERRUPT trap handling takes |
1998 | * place afterwards. |
1999 | * |
2000 | * CONTEXT: |
2001 | * Must be called with @current->sighand->siglock held, which is released |
2002 | * on %true return. |
2003 | * |
2004 | * RETURNS: |
2005 | * %false if group stop is already cancelled or ptrace trap is scheduled. |
2006 | * %true if participated in group stop. |
2007 | */ |
2008 | static bool do_signal_stop(int signr) |
2009 | __releases(&current->sighand->siglock) |
2010 | { |
2011 | struct signal_struct *sig = current->signal; |
2012 | |
2013 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { |
2014 | unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; |
2015 | struct task_struct *t; |
2016 | |
2017 | /* signr will be recorded in task->jobctl for retries */ |
2018 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); |
2019 | |
2020 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || |
2021 | unlikely(signal_group_exit(sig))) |
2022 | return false; |
2023 | /* |
2024 | * There is no group stop already in progress. We must |
2025 | * initiate one now. |
2026 | * |
2027 | * While ptraced, a task may be resumed while group stop is |
2028 | * still in effect and then receive a stop signal and |
2029 | * initiate another group stop. This deviates from the |
2030 | * usual behavior as two consecutive stop signals can't |
2031 | * cause two group stops when !ptraced. That is why we |
2032 | * also check !task_is_stopped(t) below. |
2033 | * |
2034 | * The condition can be distinguished by testing whether |
2035 | * SIGNAL_STOP_STOPPED is already set. Don't generate |
2036 | * group_exit_code in that case. |
2037 | * |
2038 | * This is not necessary for SIGNAL_STOP_CONTINUED because |
2039 | * an intervening stop signal is required to cause two |
2040 | * continued events regardless of ptrace. |
2041 | */ |
2042 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
2043 | sig->group_exit_code = signr; |
2044 | |
2045 | sig->group_stop_count = 0; |
2046 | |
2047 | if (task_set_jobctl_pending(current, signr | gstop)) |
2048 | sig->group_stop_count++; |
2049 | |
2050 | for (t = next_thread(current); t != current; |
2051 | t = next_thread(t)) { |
2052 | /* |
2053 | * Setting state to TASK_STOPPED for a group |
2054 | * stop is always done with the siglock held, |
2055 | * so this check has no races. |
2056 | */ |
2057 | if (!task_is_stopped(t) && |
2058 | task_set_jobctl_pending(t, signr | gstop)) { |
2059 | sig->group_stop_count++; |
2060 | if (likely(!(t->ptrace & PT_SEIZED))) |
2061 | signal_wake_up(t, 0); |
2062 | else |
2063 | ptrace_trap_notify(t); |
2064 | } |
2065 | } |
2066 | } |
2067 | |
2068 | if (likely(!current->ptrace)) { |
2069 | int notify = 0; |
2070 | |
2071 | /* |
2072 | * If there are no other threads in the group, or if there |
2073 | * is a group stop in progress and we are the last to stop, |
2074 | * report to the parent. |
2075 | */ |
2076 | if (task_participate_group_stop(current)) |
2077 | notify = CLD_STOPPED; |
2078 | |
2079 | __set_current_state(TASK_STOPPED); |
2080 | spin_unlock_irq(&current->sighand->siglock); |
2081 | |
2082 | /* |
2083 | * Notify the parent of the group stop completion. Because |
2084 | * we're not holding either the siglock or tasklist_lock |
2085 | * here, ptracer may attach in between; however, this is for |
2086 | * group stop and should always be delivered to the real |
2087 | * parent of the group leader. The new ptracer will get |
2088 | * its notification when this task transitions into |
2089 | * TASK_TRACED. |
2090 | */ |
2091 | if (notify) { |
2092 | read_lock(&tasklist_lock); |
2093 | do_notify_parent_cldstop(current, false, notify); |
2094 | read_unlock(&tasklist_lock); |
2095 | } |
2096 | |
2097 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ |
2098 | freezable_schedule(); |
2099 | return true; |
2100 | } else { |
2101 | /* |
2102 | * While ptraced, group stop is handled by STOP trap. |
2103 | * Schedule it and let the caller deal with it. |
2104 | */ |
2105 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); |
2106 | return false; |
2107 | } |
2108 | } |
2109 | |
2110 | /** |
2111 | * do_jobctl_trap - take care of ptrace jobctl traps |
2112 | * |
2113 | * When PT_SEIZED, it's used for both group stop and explicit |
2114 | * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with |
2115 | * accompanying siginfo. If stopped, lower eight bits of exit_code contain |
2116 | * the stop signal; otherwise, %SIGTRAP. |
2117 | * |
2118 | * When !PT_SEIZED, it's used only for group stop trap with stop signal |
2119 | * number as exit_code and no siginfo. |
2120 | * |
2121 | * CONTEXT: |
2122 | * Must be called with @current->sighand->siglock held, which may be |
2123 | * released and re-acquired before returning with intervening sleep. |
2124 | */ |
2125 | static void do_jobctl_trap(void) |
2126 | { |
2127 | struct signal_struct *signal = current->signal; |
2128 | int signr = current->jobctl & JOBCTL_STOP_SIGMASK; |
2129 | |
2130 | if (current->ptrace & PT_SEIZED) { |
2131 | if (!signal->group_stop_count && |
2132 | !(signal->flags & SIGNAL_STOP_STOPPED)) |
2133 | signr = SIGTRAP; |
2134 | WARN_ON_ONCE(!signr); |
2135 | ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), |
2136 | CLD_STOPPED); |
2137 | } else { |
2138 | WARN_ON_ONCE(!signr); |
2139 | ptrace_stop(signr, CLD_STOPPED, 0, NULL); |
2140 | current->exit_code = 0; |
2141 | } |
2142 | } |
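| |
| /* |
| * Example (illustrative, user-space): a PTRACE_SEIZE-based tracer can |
| * recognize the PTRACE_EVENT_STOP trap generated above by looking at |
| * the waitpid() status, where the event lands in bits 16-23. |
| * handle_group_stop() is an assumed helper: |
| * |
| *	int status; |
| * |
| *	waitpid(pid, &status, 0); |
| *	if (WIFSTOPPED(status) && (status >> 16) == PTRACE_EVENT_STOP) |
| *		handle_group_stop(WSTOPSIG(status)); |
| */ |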
2143 | |
2144 | static int ptrace_signal(int signr, siginfo_t *info) |
2145 | { |
2146 | ptrace_signal_deliver(); |
2147 | /* |
2148 | * We do not check sig_kernel_stop(signr) but set this marker |
2149 | * unconditionally because we do not know whether debugger will |
2150 | * change signr. This flag has no meaning unless we are going |
2151 | * to stop after return from ptrace_stop(). In this case it will |
2152 | * be checked in do_signal_stop(), we should only stop if it was |
2153 | * not cleared by SIGCONT while we were sleeping. See also the |
2154 | * comment in dequeue_signal(). |
2155 | */ |
2156 | current->jobctl |= JOBCTL_STOP_DEQUEUED; |
2157 | ptrace_stop(signr, CLD_TRAPPED, 0, info); |
2158 | |
2159 | /* We're back. Did the debugger cancel the sig? */ |
2160 | signr = current->exit_code; |
2161 | if (signr == 0) |
2162 | return signr; |
2163 | |
2164 | current->exit_code = 0; |
2165 | |
2166 | /* |
2167 | * Update the siginfo structure if the signal has |
2168 | * changed. If the debugger wanted something |
2169 | * specific in the siginfo structure then it should |
2170 | * have updated *info via PTRACE_SETSIGINFO. |
2171 | */ |
2172 | if (signr != info->si_signo) { |
2173 | info->si_signo = signr; |
2174 | info->si_errno = 0; |
2175 | info->si_code = SI_USER; |
2176 | rcu_read_lock(); |
2177 | info->si_pid = task_pid_vnr(current->parent); |
2178 | info->si_uid = from_kuid_munged(current_user_ns(), |
2179 | task_uid(current->parent)); |
2180 | rcu_read_unlock(); |
2181 | } |
2182 | |
2183 | /* If the (new) signal is now blocked, requeue it. */ |
2184 | if (sigismember(&current->blocked, signr)) { |
2185 | specific_send_sig_info(signr, info, current); |
2186 | signr = 0; |
2187 | } |
2188 | |
2189 | return signr; |
2190 | } |
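| |
| /* |
| * Example (illustrative, user-space): the "did the debugger cancel the |
| * sig?" check above is driven by the tracer's resume request; the data |
| * argument of the ptrace() restart becomes current->exit_code. Passing |
| * 0 suppresses the signal, any other value replaces it: |
| * |
| *	ptrace(PTRACE_CONT, pid, 0, 0);        (suppress the signal) |
| *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);  (deliver SIGTERM instead) |
| */ |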
2191 | |
2192 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, |
2193 | struct pt_regs *regs, void *cookie) |
2194 | { |
2195 | struct sighand_struct *sighand = current->sighand; |
2196 | struct signal_struct *signal = current->signal; |
2197 | int signr; |
2198 | |
2199 | if (unlikely(current->task_works)) |
2200 | task_work_run(); |
2201 | |
2202 | if (unlikely(uprobe_deny_signal())) |
2203 | return 0; |
2204 | |
2205 | /* |
2206 | * Do this once, we can't return to user-mode if freezing() == T. |
2207 | * do_signal_stop() and ptrace_stop() do freezable_schedule() and |
2208 | * thus do not need another check after return. |
2209 | */ |
2210 | try_to_freeze(); |
2211 | |
2212 | relock: |
2213 | spin_lock_irq(&sighand->siglock); |
2214 | /* |
2215 | * Every stopped thread goes here after wakeup. Check to see if |
2216 | * we should notify the parent, prepare_signal(SIGCONT) encodes |
2217 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. |
2218 | */ |
2219 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
2220 | int why; |
2221 | |
2222 | if (signal->flags & SIGNAL_CLD_CONTINUED) |
2223 | why = CLD_CONTINUED; |
2224 | else |
2225 | why = CLD_STOPPED; |
2226 | |
2227 | signal->flags &= ~SIGNAL_CLD_MASK; |
2228 | |
2229 | spin_unlock_irq(&sighand->siglock); |
2230 | |
2231 | /* |
2232 | * Notify the parent that we're continuing. This event is |
2233 | * always per-process and doesn't make a whole lot of sense |
2234 | * for ptracers, who shouldn't consume the state via |
2235 | * wait(2) either, but, for backward compatibility, notify |
2236 | * the ptracer of the group leader too unless it's gonna be |
2237 | * a duplicate. |
2238 | */ |
2239 | read_lock(&tasklist_lock); |
2240 | do_notify_parent_cldstop(current, false, why); |
2241 | |
2242 | if (ptrace_reparented(current->group_leader)) |
2243 | do_notify_parent_cldstop(current->group_leader, |
2244 | true, why); |
2245 | read_unlock(&tasklist_lock); |
2246 | |
2247 | goto relock; |
2248 | } |
2249 | |
2250 | for (;;) { |
2251 | struct k_sigaction *ka; |
2252 | |
2253 | if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && |
2254 | do_signal_stop(0)) |
2255 | goto relock; |
2256 | |
2257 | if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) { |
2258 | do_jobctl_trap(); |
2259 | spin_unlock_irq(&sighand->siglock); |
2260 | goto relock; |
2261 | } |
2262 | |
2263 | signr = dequeue_signal(current, &current->blocked, info); |
2264 | |
2265 | if (!signr) |
2266 | break; /* will return 0 */ |
2267 | |
2268 | if (unlikely(current->ptrace) && signr != SIGKILL) { |
2269 | signr = ptrace_signal(signr, info); |
2270 | if (!signr) |
2271 | continue; |
2272 | } |
2273 | |
2274 | ka = &sighand->action[signr-1]; |
2275 | |
2276 | /* Trace actually delivered signals. */ |
2277 | trace_signal_deliver(signr, info, ka); |
2278 | |
2279 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
2280 | continue; |
2281 | if (ka->sa.sa_handler != SIG_DFL) { |
2282 | /* Run the handler. */ |
2283 | *return_ka = *ka; |
2284 | |
2285 | if (ka->sa.sa_flags & SA_ONESHOT) |
2286 | ka->sa.sa_handler = SIG_DFL; |
2287 | |
2288 | break; /* will return non-zero "signr" value */ |
2289 | } |
2290 | |
2291 | /* |
2292 | * Now we are doing the default action for this signal. |
2293 | */ |
2294 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ |
2295 | continue; |
2296 | |
2297 | /* |
2298 | * Global init gets no signals it doesn't want. |
2299 | * Container-init gets no signals it doesn't want from the same |
2300 | * container. |
2301 | * |
2302 | * Note that if global/container-init sees a sig_kernel_only() |
2303 | * signal here, the signal must have been generated internally |
2304 | * or must have come from an ancestor namespace. In either |
2305 | * case, the signal cannot be dropped. |
2306 | */ |
2307 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
2308 | !sig_kernel_only(signr)) |
2309 | continue; |
2310 | |
2311 | if (sig_kernel_stop(signr)) { |
2312 | /* |
2313 | * The default action is to stop all threads in |
2314 | * the thread group. The job control signals |
2315 | * do nothing in an orphaned pgrp, but SIGSTOP |
2316 | * always works. Note that siglock needs to be |
2317 | * dropped during the call to is_orphaned_pgrp() |
2318 | * because of lock ordering with tasklist_lock. |
2319 | * This allows an intervening SIGCONT to be posted. |
2320 | * We need to check for that and bail out if necessary. |
2321 | */ |
2322 | if (signr != SIGSTOP) { |
2323 | spin_unlock_irq(&sighand->siglock); |
2324 | |
2325 | /* signals can be posted during this window */ |
2326 | |
2327 | if (is_current_pgrp_orphaned()) |
2328 | goto relock; |
2329 | |
2330 | spin_lock_irq(&sighand->siglock); |
2331 | } |
2332 | |
2333 | if (likely(do_signal_stop(info->si_signo))) { |
2334 | /* It released the siglock. */ |
2335 | goto relock; |
2336 | } |
2337 | |
2338 | /* |
2339 | * We didn't actually stop, due to a race |
2340 | * with SIGCONT or something like that. |
2341 | */ |
2342 | continue; |
2343 | } |
2344 | |
2345 | spin_unlock_irq(&sighand->siglock); |
2346 | |
2347 | /* |
2348 | * Anything else is fatal, maybe with a core dump. |
2349 | */ |
2350 | current->flags |= PF_SIGNALED; |
2351 | |
2352 | if (sig_kernel_coredump(signr)) { |
2353 | if (print_fatal_signals) |
2354 | print_fatal_signal(info->si_signo); |
2355 | proc_coredump_connector(current); |
2356 | /* |
2357 | * If it was able to dump core, this kills all |
2358 | * other threads in the group and synchronizes with |
2359 | * their demise. If we lost the race with another |
2360 | * thread getting here, it set group_exit_code |
2361 | * first and our do_group_exit call below will use |
2362 | * that value and ignore the one we pass it. |
2363 | */ |
2364 | do_coredump(info); |
2365 | } |
2366 | |
2367 | /* |
2368 | * Death signals, no core dump. |
2369 | */ |
2370 | do_group_exit(info->si_signo); |
2371 | /* NOTREACHED */ |
2372 | } |
2373 | spin_unlock_irq(&sighand->siglock); |
2374 | return signr; |
2375 | } |
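| |
| /* |
| * Example (illustrative): a minimal sketch of the per-arch do_signal() |
| * loop that drives this function; setup_rt_frame() stands in for the |
| * arch-specific frame setup and is an assumption here: |
| * |
| *	static void do_signal(struct pt_regs *regs) |
| *	{ |
| *		struct k_sigaction ka; |
| *		siginfo_t info; |
| *		int signr; |
| * |
| *		signr = get_signal_to_deliver(&info, &ka, regs, NULL); |
| *		if (signr > 0) { |
| *			if (setup_rt_frame(signr, &ka, &info, regs) < 0) |
| *				force_sigsegv(signr, current); |
| *			else |
| *				signal_delivered(signr, &info, &ka, regs, 0); |
| *			return; |
| *		} |
| *		restore_saved_sigmask(); |
| *	} |
| */ |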
2376 | |
2377 | /** |
2378 | * signal_delivered - update the blocked mask after a signal was delivered |
2379 | * @sig: number of signal being delivered |
2380 | * @info: siginfo_t of signal being delivered |
2381 | * @ka: sigaction setting that chose the handler |
2382 | * @regs: user register state |
2383 | * @stepping: nonzero if debugger single-step or block-step in use |
2384 | * |
2385 | * This function should be called when a signal has successfully been |
2386 | * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask |
2387 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER |
2388 | * is set in @ka->sa.sa_flags). Tracing is notified. |
2389 | */ |
2390 | void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, |
2391 | struct pt_regs *regs, int stepping) |
2392 | { |
2393 | sigset_t blocked; |
2394 | |
2395 | /* A signal was successfully delivered, and the |
2396 | saved sigmask was stored on the signal frame, |
2397 | and will be restored by sigreturn. So we can |
2398 | simply clear the restore sigmask flag. */ |
2399 | clear_restore_sigmask(); |
2400 | |
2401 | sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); |
2402 | if (!(ka->sa.sa_flags & SA_NODEFER)) |
2403 | sigaddset(&blocked, sig); |
2404 | set_current_blocked(&blocked); |
2405 | tracehook_signal_handler(sig, info, ka, regs, stepping); |
2406 | } |
2407 | |
2408 | void signal_setup_done(int failed, struct ksignal *ksig, int stepping) |
2409 | { |
2410 | if (failed) |
2411 | force_sigsegv(ksig->sig, current); |
2412 | else |
2413 | signal_delivered(ksig->sig, &ksig->info, &ksig->ka, |
2414 | signal_pt_regs(), stepping); |
2415 | } |
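| |
| /* |
| * Example (illustrative): with the ksignal-based helper above, an |
| * arch's handle_signal() reduces to roughly the following, where |
| * setup_rt_frame() is again an assumed per-arch helper: |
| * |
| *	int ret = setup_rt_frame(&ksig, regs); |
| * |
| *	signal_setup_done(ret, &ksig, test_thread_flag(TIF_SINGLESTEP)); |
| */ |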
2416 | |
2417 | /* |
2418 | * It could be that complete_signal() picked us to notify about the |
2419 | * group-wide signal. Other threads should be notified now to take |
2420 | * the shared signals in @which since we will not. |
2421 | */ |
2422 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) |
2423 | { |
2424 | sigset_t retarget; |
2425 | struct task_struct *t; |
2426 | |
2427 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); |
2428 | if (sigisemptyset(&retarget)) |
2429 | return; |
2430 | |
2431 | t = tsk; |
2432 | while_each_thread(tsk, t) { |
2433 | if (t->flags & PF_EXITING) |
2434 | continue; |
2435 | |
2436 | if (!has_pending_signals(&retarget, &t->blocked)) |
2437 | continue; |
2438 | /* Remove the signals this thread can handle. */ |
2439 | sigandsets(&retarget, &retarget, &t->blocked); |
2440 | |
2441 | if (!signal_pending(t)) |
2442 | signal_wake_up(t, 0); |
2443 | |
2444 | if (sigisemptyset(&retarget)) |
2445 | break; |
2446 | } |
2447 | } |
2448 | |
2449 | void exit_signals(struct task_struct *tsk) |
2450 | { |
2451 | int group_stop = 0; |
2452 | sigset_t unblocked; |
2453 | |
2454 | /* |
2455 | * @tsk is about to have PF_EXITING set - lock out users which |
2456 | * expect stable threadgroup. |
2457 | */ |
2458 | threadgroup_change_begin(tsk); |
2459 | |
2460 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2461 | tsk->flags |= PF_EXITING; |
2462 | threadgroup_change_end(tsk); |
2463 | return; |
2464 | } |
2465 | |
2466 | spin_lock_irq(&tsk->sighand->siglock); |
2467 | /* |
2468 | * From now this task is not visible for group-wide signals, |
2469 | * see wants_signal(), do_signal_stop(). |
2470 | */ |
2471 | tsk->flags |= PF_EXITING; |
2472 | |
2473 | threadgroup_change_end(tsk); |
2474 | |
2475 | if (!signal_pending(tsk)) |
2476 | goto out; |
2477 | |
2478 | unblocked = tsk->blocked; |
2479 | signotset(&unblocked); |
2480 | retarget_shared_pending(tsk, &unblocked); |
2481 | |
2482 | if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && |
2483 | task_participate_group_stop(tsk)) |
2484 | group_stop = CLD_STOPPED; |
2485 | out: |
2486 | spin_unlock_irq(&tsk->sighand->siglock); |
2487 | |
2488 | /* |
2489 | * If group stop has completed, deliver the notification. This |
2490 | * should always go to the real parent of the group leader. |
2491 | */ |
2492 | if (unlikely(group_stop)) { |
2493 | read_lock(&tasklist_lock); |
2494 | do_notify_parent_cldstop(tsk, false, group_stop); |
2495 | read_unlock(&tasklist_lock); |
2496 | } |
2497 | } |
2498 | |
2499 | EXPORT_SYMBOL(recalc_sigpending); |
2500 | EXPORT_SYMBOL_GPL(dequeue_signal); |
2501 | EXPORT_SYMBOL(flush_signals); |
2502 | EXPORT_SYMBOL(force_sig); |
2503 | EXPORT_SYMBOL(send_sig); |
2504 | EXPORT_SYMBOL(send_sig_info); |
2505 | EXPORT_SYMBOL(sigprocmask); |
2506 | EXPORT_SYMBOL(block_all_signals); |
2507 | EXPORT_SYMBOL(unblock_all_signals); |
2508 | |
2509 | |
2510 | /* |
2511 | * System call entry points. |
2512 | */ |
2513 | |
2514 | /** |
2515 | * sys_restart_syscall - restart a system call |
2516 | */ |
2517 | SYSCALL_DEFINE0(restart_syscall) |
2518 | { |
2519 | struct restart_block *restart = &current_thread_info()->restart_block; |
2520 | return restart->fn(restart); |
2521 | } |
2522 | |
2523 | long do_no_restart_syscall(struct restart_block *param) |
2524 | { |
2525 | return -EINTR; |
2526 | } |
2527 | |
2528 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) |
2529 | { |
2530 | if (signal_pending(tsk) && !thread_group_empty(tsk)) { |
2531 | sigset_t newblocked; |
2532 | /* A set of now blocked but previously unblocked signals. */ |
2533 | sigandnsets(&newblocked, newset, &current->blocked); |
2534 | retarget_shared_pending(tsk, &newblocked); |
2535 | } |
2536 | tsk->blocked = *newset; |
2537 | recalc_sigpending(); |
2538 | } |
2539 | |
2540 | /** |
2541 | * set_current_blocked - change current->blocked mask |
2542 | * @newset: new mask |
2543 | * |
2544 | * It is wrong to change ->blocked directly, this helper should be used |
2545 | * to ensure the process can't miss a shared signal we are going to block. |
2546 | */ |
2547 | void set_current_blocked(sigset_t *newset) |
2548 | { |
2549 | sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2550 | __set_current_blocked(newset); |
2551 | } |
2552 | |
2553 | void __set_current_blocked(const sigset_t *newset) |
2554 | { |
2555 | struct task_struct *tsk = current; |
2556 | |
2557 | spin_lock_irq(&tsk->sighand->siglock); |
2558 | __set_task_blocked(tsk, newset); |
2559 | spin_unlock_irq(&tsk->sighand->siglock); |
2560 | } |
2561 | |
2562 | /* |
2563 | * This is also useful for kernel threads that want to temporarily |
2564 | * (or permanently) block certain signals. |
2565 | * |
2566 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel |
2567 | * interface happily blocks "unblockable" signals like SIGKILL |
2568 | * and friends. |
2569 | */ |
2570 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) |
2571 | { |
2572 | struct task_struct *tsk = current; |
2573 | sigset_t newset; |
2574 | |
2575 | /* Lockless, only current can change ->blocked, never from irq */ |
2576 | if (oldset) |
2577 | *oldset = tsk->blocked; |
2578 | |
2579 | switch (how) { |
2580 | case SIG_BLOCK: |
2581 | sigorsets(&newset, &tsk->blocked, set); |
2582 | break; |
2583 | case SIG_UNBLOCK: |
2584 | sigandnsets(&newset, &tsk->blocked, set); |
2585 | break; |
2586 | case SIG_SETMASK: |
2587 | newset = *set; |
2588 | break; |
2589 | default: |
2590 | return -EINVAL; |
2591 | } |
2592 | |
2593 | __set_current_blocked(&newset); |
2594 | return 0; |
2595 | } |
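| |
| /* |
| * Example (illustrative): a kernel thread that only wants SIGHUP could |
| * block everything else; siginitsetinv() initializes the set to the |
| * complement of the given mask: |
| * |
| *	sigset_t blocked; |
| * |
| *	siginitsetinv(&blocked, sigmask(SIGHUP)); |
| *	sigprocmask(SIG_BLOCK, &blocked, NULL); |
| */ |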
2596 | |
2597 | /** |
2598 | * sys_rt_sigprocmask - change the list of currently blocked signals |
2599 | * @how: whether to add, remove, or set signals |
2600 | * @nset: new set of blocked signals (if non-null) |
2601 | * @oset: previous value of signal mask if non-null |
2602 | * @sigsetsize: size of sigset_t type |
2603 | */ |
2604 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, |
2605 | sigset_t __user *, oset, size_t, sigsetsize) |
2606 | { |
2607 | sigset_t old_set, new_set; |
2608 | int error; |
2609 | |
2610 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2611 | if (sigsetsize != sizeof(sigset_t)) |
2612 | return -EINVAL; |
2613 | |
2614 | old_set = current->blocked; |
2615 | |
2616 | if (nset) { |
2617 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) |
2618 | return -EFAULT; |
2619 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2620 | |
2621 | error = sigprocmask(how, &new_set, NULL); |
2622 | if (error) |
2623 | return error; |
2624 | } |
2625 | |
2626 | if (oset) { |
2627 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) |
2628 | return -EFAULT; |
2629 | } |
2630 | |
2631 | return 0; |
2632 | } |
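| |
| /* |
| * Example (illustrative, user-space): glibc's sigprocmask() wrapper |
| * ends up in this syscall; a typical critical section blocks a signal |
| * and later restores the old mask: |
| * |
| *	sigset_t set, old; |
| * |
| *	sigemptyset(&set); |
| *	sigaddset(&set, SIGINT); |
| *	sigprocmask(SIG_BLOCK, &set, &old); |
| *	... critical section ... |
| *	sigprocmask(SIG_SETMASK, &old, NULL); |
| */ |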
2633 | |
2634 | #ifdef CONFIG_COMPAT |
2635 | COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, |
2636 | compat_sigset_t __user *, oset, compat_size_t, sigsetsize) |
2637 | { |
2638 | #ifdef __BIG_ENDIAN |
2639 | sigset_t old_set = current->blocked; |
2640 | |
2641 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2642 | if (sigsetsize != sizeof(sigset_t)) |
2643 | return -EINVAL; |
2644 | |
2645 | if (nset) { |
2646 | compat_sigset_t new32; |
2647 | sigset_t new_set; |
2648 | int error; |
2649 | if (copy_from_user(&new32, nset, sizeof(compat_sigset_t))) |
2650 | return -EFAULT; |
2651 | |
2652 | sigset_from_compat(&new_set, &new32); |
2653 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2654 | |
2655 | error = sigprocmask(how, &new_set, NULL); |
2656 | if (error) |
2657 | return error; |
2658 | } |
2659 | if (oset) { |
2660 | compat_sigset_t old32; |
2661 | sigset_to_compat(&old32, &old_set); |
2662 | if (copy_to_user(oset, &old32, sizeof(compat_sigset_t))) |
2663 | return -EFAULT; |
2664 | } |
2665 | return 0; |
2666 | #else |
2667 | return sys_rt_sigprocmask(how, (sigset_t __user *)nset, |
2668 | (sigset_t __user *)oset, sigsetsize); |
2669 | #endif |
2670 | } |
2671 | #endif |
2672 | |
2673 | static int do_sigpending(void *set, unsigned long sigsetsize) |
2674 | { |
2675 | if (sigsetsize > sizeof(sigset_t)) |
2676 | return -EINVAL; |
2677 | |
2678 | spin_lock_irq(&current->sighand->siglock); |
2679 | sigorsets(set, &current->pending.signal, |
2680 | &current->signal->shared_pending.signal); |
2681 | spin_unlock_irq(&current->sighand->siglock); |
2682 | |
2683 | /* Outside the lock because only this thread touches it. */ |
2684 | sigandsets(set, &current->blocked, set); |
2685 | return 0; |
2686 | } |
2687 | |
2688 | /** |
2689 | * sys_rt_sigpending - examine a pending signal that has been raised |
2690 | * while blocked |
2691 | * @uset: stores pending signals |
2692 | * @sigsetsize: size of sigset_t type or smaller |
2693 | */ |
2694 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) |
2695 | { |
2696 | sigset_t set; |
2697 | int err = do_sigpending(&set, sigsetsize); |
2698 | if (!err && copy_to_user(uset, &set, sigsetsize)) |
2699 | err = -EFAULT; |
2700 | return err; |
2701 | } |
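| |
| /* |
| * Example (illustrative, user-space): checking whether a blocked |
| * SIGTERM arrived while it was masked: |
| * |
| *	sigset_t pending; |
| * |
| *	sigpending(&pending); |
| *	if (sigismember(&pending, SIGTERM)) |
| *		... handle it once unblocked ... |
| */ |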
2702 | |
2703 | #ifdef CONFIG_COMPAT |
2704 | COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, |
2705 | compat_size_t, sigsetsize) |
2706 | { |
2707 | #ifdef __BIG_ENDIAN |
2708 | sigset_t set; |
2709 | int err = do_sigpending(&set, sigsetsize); |
2710 | if (!err) { |
2711 | compat_sigset_t set32; |
2712 | sigset_to_compat(&set32, &set); |
2713 | /* we can get here only if sigsetsize <= sizeof(set) */ |
2714 | if (copy_to_user(uset, &set32, sigsetsize)) |
2715 | err = -EFAULT; |
2716 | } |
2717 | return err; |
2718 | #else |
2719 | return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize); |
2720 | #endif |
2721 | } |
2722 | #endif |
2723 | |
2724 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER |
2725 | |
2726 | int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) |
2727 | { |
2728 | int err; |
2729 | |
2730 | if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t))) |
2731 | return -EFAULT; |
2732 | if (from->si_code < 0) |
2733 | return __copy_to_user(to, from, sizeof(siginfo_t)) |
2734 | ? -EFAULT : 0; |
2735 | /* |
2736 | * If you change siginfo_t structure, please be sure |
2737 | * this code is fixed accordingly. |
2738 | * Please remember to update the signalfd_copyinfo() function |
2739 | * inside fs/signalfd.c too, in case siginfo_t changes. |
2740 | * It should never copy any pad contained in the structure |
2741 | * to avoid security leaks, but must copy the generic |
2742 | * 3 ints plus the relevant union member. |
2743 | */ |
2744 | err = __put_user(from->si_signo, &to->si_signo); |
2745 | err |= __put_user(from->si_errno, &to->si_errno); |
2746 | err |= __put_user((short)from->si_code, &to->si_code); |
2747 | switch (from->si_code & __SI_MASK) { |
2748 | case __SI_KILL: |
2749 | err |= __put_user(from->si_pid, &to->si_pid); |
2750 | err |= __put_user(from->si_uid, &to->si_uid); |
2751 | break; |
2752 | case __SI_TIMER: |
2753 | err |= __put_user(from->si_tid, &to->si_tid); |
2754 | err |= __put_user(from->si_overrun, &to->si_overrun); |
2755 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2756 | break; |
2757 | case __SI_POLL: |
2758 | err |= __put_user(from->si_band, &to->si_band); |
2759 | err |= __put_user(from->si_fd, &to->si_fd); |
2760 | break; |
2761 | case __SI_FAULT: |
2762 | err |= __put_user(from->si_addr, &to->si_addr); |
2763 | #ifdef __ARCH_SI_TRAPNO |
2764 | err |= __put_user(from->si_trapno, &to->si_trapno); |
2765 | #endif |
2766 | #ifdef BUS_MCEERR_AO |
2767 | /* |
2768 | * Other callers might not initialize the si_lsb field, |
2769 | * so check explicitly for the right codes here. |
2770 | */ |
2771 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) |
2772 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); |
2773 | #endif |
2774 | break; |
2775 | case __SI_CHLD: |
2776 | err |= __put_user(from->si_pid, &to->si_pid); |
2777 | err |= __put_user(from->si_uid, &to->si_uid); |
2778 | err |= __put_user(from->si_status, &to->si_status); |
2779 | err |= __put_user(from->si_utime, &to->si_utime); |
2780 | err |= __put_user(from->si_stime, &to->si_stime); |
2781 | break; |
2782 | case __SI_RT: /* This is not generated by the kernel as of now. */ |
2783 | case __SI_MESGQ: /* But this is */ |
2784 | err |= __put_user(from->si_pid, &to->si_pid); |
2785 | err |= __put_user(from->si_uid, &to->si_uid); |
2786 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2787 | break; |
2788 | #ifdef __ARCH_SIGSYS |
2789 | case __SI_SYS: |
2790 | err |= __put_user(from->si_call_addr, &to->si_call_addr); |
2791 | err |= __put_user(from->si_syscall, &to->si_syscall); |
2792 | err |= __put_user(from->si_arch, &to->si_arch); |
2793 | break; |
2794 | #endif |
2795 | default: /* this is just in case for now ... */ |
2796 | err |= __put_user(from->si_pid, &to->si_pid); |
2797 | err |= __put_user(from->si_uid, &to->si_uid); |
2798 | break; |
2799 | } |
2800 | return err; |
2801 | } |
2802 | |
2803 | #endif |
2804 | |
2805 | /** |
2806 | * do_sigtimedwait - wait for queued signals specified in @which |
2807 | * @which: queued signals to wait for |
2808 | * @info: if non-null, the signal's siginfo is returned here |
2809 | * @ts: upper bound on process time suspension |
2810 | */ |
2811 | int do_sigtimedwait(const sigset_t *which, siginfo_t *info, |
2812 | const struct timespec *ts) |
2813 | { |
2814 | struct task_struct *tsk = current; |
2815 | long timeout = MAX_SCHEDULE_TIMEOUT; |
2816 | sigset_t mask = *which; |
2817 | int sig; |
2818 | |
2819 | if (ts) { |
2820 | if (!timespec_valid(ts)) |
2821 | return -EINVAL; |
2822 | timeout = timespec_to_jiffies(ts); |
2823 | /* |
2824 | * We can be close to the next tick, add another one |
2825 | * to ensure we will wait at least the time asked for. |
2826 | */ |
2827 | if (ts->tv_sec || ts->tv_nsec) |
2828 | timeout++; |
2829 | } |
2830 | |
2831 | /* |
2832 | * Invert the set of allowed signals to get those we want to block. |
2833 | */ |
2834 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2835 | signotset(&mask); |
2836 | |
2837 | spin_lock_irq(&tsk->sighand->siglock); |
2838 | sig = dequeue_signal(tsk, &mask, info); |
2839 | if (!sig && timeout) { |
2840 | /* |
2841 | * None ready, temporarily unblock those we're interested in |
2842 | * while we are sleeping, so that we'll be awakened when |
2843 | * they arrive. Unblocking is always fine, we can avoid |
2844 | * set_current_blocked(). |
2845 | */ |
2846 | tsk->real_blocked = tsk->blocked; |
2847 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); |
2848 | recalc_sigpending(); |
2849 | spin_unlock_irq(&tsk->sighand->siglock); |
2850 | |
2851 | timeout = freezable_schedule_timeout_interruptible(timeout); |
2852 | |
2853 | spin_lock_irq(&tsk->sighand->siglock); |
2854 | __set_task_blocked(tsk, &tsk->real_blocked); |
2855 | siginitset(&tsk->real_blocked, 0); |
2856 | sig = dequeue_signal(tsk, &mask, info); |
2857 | } |
2858 | spin_unlock_irq(&tsk->sighand->siglock); |
2859 | |
2860 | if (sig) |
2861 | return sig; |
2862 | return timeout ? -EINTR : -EAGAIN; |
2863 | } |
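| |
| /* |
| * Example (illustrative, user-space): sigtimedwait(2) reaches this |
| * function. The signal must be blocked first so it stays queued |
| * rather than being delivered to a handler; EAGAIN corresponds to the |
| * timeout return above: |
| * |
| *	sigset_t set; |
| *	siginfo_t info; |
| *	struct timespec ts = { .tv_sec = 5 }; |
| * |
| *	sigemptyset(&set); |
| *	sigaddset(&set, SIGUSR1); |
| *	sigprocmask(SIG_BLOCK, &set, NULL); |
| *	if (sigtimedwait(&set, &info, &ts) < 0 && errno == EAGAIN) |
| *		... timed out ... |
| */ |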
2864 | |
2865 | /** |
2866 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified |
2867 | * in @uthese |
2868 | * @uthese: queued signals to wait for |
2869 | * @uinfo: if non-null, the signal's siginfo is returned here |
2870 | * @uts: upper bound on process time suspension |
2871 | * @sigsetsize: size of sigset_t type |
2872 | */ |
2873 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
2874 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, |
2875 | size_t, sigsetsize) |
2876 | { |
2877 | sigset_t these; |
2878 | struct timespec ts; |
2879 | siginfo_t info; |
2880 | int ret; |
2881 | |
2882 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2883 | if (sigsetsize != sizeof(sigset_t)) |
2884 | return -EINVAL; |
2885 | |
2886 | if (copy_from_user(&these, uthese, sizeof(these))) |
2887 | return -EFAULT; |
2888 | |
2889 | if (uts) { |
2890 | if (copy_from_user(&ts, uts, sizeof(ts))) |
2891 | return -EFAULT; |
2892 | } |
2893 | |
2894 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); |
2895 | |
2896 | if (ret > 0 && uinfo) { |
2897 | if (copy_siginfo_to_user(uinfo, &info)) |
2898 | ret = -EFAULT; |
2899 | } |
2900 | |
2901 | return ret; |
2902 | } |
2903 | |
2904 | /** |
2905 | * sys_kill - send a signal to a process |
2906 | * @pid: the PID of the process |
2907 | * @sig: signal to be sent |
2908 | */ |
2909 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
2910 | { |
2911 | struct siginfo info; |
2912 | |
2913 | info.si_signo = sig; |
2914 | info.si_errno = 0; |
2915 | info.si_code = SI_USER; |
2916 | info.si_pid = task_tgid_vnr(current); |
2917 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
2918 | |
2919 | return kill_something_info(sig, &info, pid); |
2920 | } |
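| |
| /* |
| * Example (illustrative, user-space): signal 0 performs only the |
| * existence and permission checks (see the null-signal comment in |
| * do_send_specific() below), which makes kill() a cheap liveness |
| * probe: |
| * |
| *	if (kill(pid, 0) == 0 || errno == EPERM) |
| *		... the process exists ... |
| */ |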
2921 | |
2922 | static int |
2923 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) |
2924 | { |
2925 | struct task_struct *p; |
2926 | int error = -ESRCH; |
2927 | |
2928 | rcu_read_lock(); |
2929 | p = find_task_by_vpid(pid); |
2930 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { |
2931 | error = check_kill_permission(sig, info, p); |
2932 | /* |
2933 | * The null signal is a permissions and process existence |
2934 | * probe. No signal is actually delivered. |
2935 | */ |
2936 | if (!error && sig) { |
2937 | error = do_send_sig_info(sig, info, p, false); |
2938 | /* |
2939 | * If lock_task_sighand() failed we pretend the task |
2940 | * dies after receiving the signal. The window is tiny, |
2941 | * and the signal is private anyway. |
2942 | */ |
2943 | if (unlikely(error == -ESRCH)) |
2944 | error = 0; |
2945 | } |
2946 | } |
2947 | rcu_read_unlock(); |
2948 | |
2949 | return error; |
2950 | } |
2951 | |
2952 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
2953 | { |
2954 | struct siginfo info = {}; |
2955 | |
2956 | info.si_signo = sig; |
2957 | info.si_errno = 0; |
2958 | info.si_code = SI_TKILL; |
2959 | info.si_pid = task_tgid_vnr(current); |
2960 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
2961 | |
2962 | return do_send_specific(tgid, pid, sig, &info); |
2963 | } |
2964 | |
2965 | /** |
2966 | * sys_tgkill - send signal to one specific thread |
2967 | * @tgid: the thread group ID of the thread |
2968 | * @pid: the PID of the thread |
2969 | * @sig: signal to be sent |
2970 | * |
2971 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
2972 | * exists but no longer belongs to the target process. This |
2973 | * method solves the problem of threads exiting and PIDs getting reused. |
2974 | */ |
2975 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
2976 | { |
2977 | /* This is only valid for single tasks */ |
2978 | if (pid <= 0 || tgid <= 0) |
2979 | return -EINVAL; |
2980 | |
2981 | return do_tkill(tgid, pid, sig); |
2982 | } |
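| |
| /* |
| * Example (illustrative, user-space): older C libraries provide no |
| * tgkill() wrapper, so callers go through syscall(2); this is in |
| * essence what NPTL's pthread_kill() does underneath: |
| * |
| *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1); |
| */ |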
2983 | |
2984 | /** |
2985 | * sys_tkill - send signal to one specific task |
2986 | * @pid: the PID of the task |
2987 | * @sig: signal to be sent |
2988 | * |
2989 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
2990 | */ |
2991 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
2992 | { |
2993 | /* This is only valid for single tasks */ |
2994 | if (pid <= 0) |
2995 | return -EINVAL; |
2996 | |
2997 | return do_tkill(0, pid, sig); |
2998 | } |
2999 | |
3000 | static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info) |
3001 | { |
3002 | /* Not even root can pretend to send signals from the kernel. |
3003 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
3004 | */ |
3005 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
3006 | (task_pid_vnr(current) != pid)) { |
3007 | /* We used to allow any < 0 si_code */ |
3008 | WARN_ON_ONCE(info->si_code < 0); |
3009 | return -EPERM; |
3010 | } |
3011 | info->si_signo = sig; |
3012 | |
3013 | /* POSIX.1b doesn't mention process groups. */ |
3014 | return kill_proc_info(sig, info, pid); |
3015 | } |
3016 | |
3017 | /** |
3018 | * sys_rt_sigqueueinfo - queue a signal and data to a process |
3019 | * @pid: the PID of the process |
3020 | * @sig: signal to be sent |
3021 | * @uinfo: signal info to be sent |
3022 | */ |
3023 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
3024 | siginfo_t __user *, uinfo) |
3025 | { |
3026 | siginfo_t info; |
3027 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
3028 | return -EFAULT; |
3029 | return do_rt_sigqueueinfo(pid, sig, &info); |
3030 | } |
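| |
| /* |
| * Example (illustrative, user-space): sigqueue(3) is the usual entry |
| * point; it queues a caller-chosen value with si_code = SI_QUEUE, |
| * which is why the check above rejects forged si_code values aimed at |
| * other processes: |
| * |
| *	union sigval v = { .sival_int = 42 }; |
| * |
| *	sigqueue(pid, SIGUSR1, v); |
| */ |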
3031 | |
3032 | #ifdef CONFIG_COMPAT |
3033 | COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, |
3034 | compat_pid_t, pid, |
3035 | int, sig, |
3036 | struct compat_siginfo __user *, uinfo) |
3037 | { |
3038 | siginfo_t info; |
3039 | int ret = copy_siginfo_from_user32(&info, uinfo); |
3040 | if (unlikely(ret)) |
3041 | return ret; |
3042 | return do_rt_sigqueueinfo(pid, sig, &info); |
3043 | } |
3044 | #endif |
3045 | |
3046 | static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) |
3047 | { |
3048 | /* This is only valid for single tasks */ |
3049 | if (pid <= 0 || tgid <= 0) |
3050 | return -EINVAL; |
3051 | |
3052 | /* Not even root can pretend to send signals from the kernel. |
3053 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
3054 | */ |
3055 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
3056 | (task_pid_vnr(current) != pid)) { |
3057 | /* We used to allow any < 0 si_code */ |
3058 | WARN_ON_ONCE(info->si_code < 0); |
3059 | return -EPERM; |
3060 | } |
3061 | info->si_signo = sig; |
3062 | |
3063 | return do_send_specific(tgid, pid, sig, info); |
3064 | } |
3065 | |
3066 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, |
3067 | siginfo_t __user *, uinfo) |
3068 | { |
3069 | siginfo_t info; |
3070 | |
3071 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
3072 | return -EFAULT; |
3073 | |
3074 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
3075 | } |
3076 | |
3077 | #ifdef CONFIG_COMPAT |
3078 | COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, |
3079 | compat_pid_t, tgid, |
3080 | compat_pid_t, pid, |
3081 | int, sig, |
3082 | struct compat_siginfo __user *, uinfo) |
3083 | { |
3084 | siginfo_t info; |
3085 | |
3086 | if (copy_siginfo_from_user32(&info, uinfo)) |
3087 | return -EFAULT; |
3088 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
3089 | } |
3090 | #endif |
3091 | |
3092 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
3093 | { |
3094 | struct task_struct *t = current; |
3095 | struct k_sigaction *k; |
3096 | sigset_t mask; |
3097 | |
3098 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
3099 | return -EINVAL; |
3100 | |
3101 | k = &t->sighand->action[sig-1]; |
3102 | |
3103 | spin_lock_irq(&current->sighand->siglock); |
3104 | if (oact) |
3105 | *oact = *k; |
3106 | |
3107 | if (act) { |
3108 | sigdelsetmask(&act->sa.sa_mask, |
3109 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
3110 | *k = *act; |
3111 | /* |
3112 | * POSIX 3.3.1.3: |
3113 | * "Setting a signal action to SIG_IGN for a signal that is |
3114 | * pending shall cause the pending signal to be discarded, |
3115 | * whether or not it is blocked." |
3116 | * |
3117 | * "Setting a signal action to SIG_DFL for a signal that is |
3118 | * pending and whose default action is to ignore the signal |
3119 | * (for example, SIGCHLD), shall cause the pending signal to |
3120 | * be discarded, whether or not it is blocked" |
3121 | */ |
3122 | if (sig_handler_ignored(sig_handler(t, sig), sig)) { |
3123 | sigemptyset(&mask); |
3124 | sigaddset(&mask, sig); |
3125 | rm_from_queue_full(&mask, &t->signal->shared_pending); |
3126 | do { |
3127 | rm_from_queue_full(&mask, &t->pending); |
3128 | t = next_thread(t); |
3129 | } while (t != current); |
3130 | } |
3131 | } |
3132 | |
3133 | spin_unlock_irq(&current->sighand->siglock); |
3134 | return 0; |
3135 | } |
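| |
| /* |
| * Example (illustrative, user-space): installing a handler through |
| * sigaction(2) lands here; on_child_exit is an assumed handler. |
| * Re-setting the action to SIG_IGN afterwards would discard any |
| * pending SIGCHLD, as required by the POSIX text quoted above: |
| * |
| *	struct sigaction sa; |
| * |
| *	memset(&sa, 0, sizeof(sa)); |
| *	sa.sa_handler = on_child_exit; |
| *	sigemptyset(&sa.sa_mask); |
| *	sa.sa_flags = SA_RESTART; |
| *	sigaction(SIGCHLD, &sa, NULL); |
| */ |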
3136 | |
3137 | static int |
3138 | do_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) |
3139 | { |
3140 | stack_t oss; |
3141 | int error; |
3142 | |
3143 | oss.ss_sp = (void __user *) current->sas_ss_sp; |
3144 | oss.ss_size = current->sas_ss_size; |
3145 | oss.ss_flags = sas_ss_flags(sp); |
3146 | |
3147 | if (uss) { |
3148 | void __user *ss_sp; |
3149 | size_t ss_size; |
3150 | int ss_flags; |
3151 | |
3152 | error = -EFAULT; |
3153 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) |
3154 | goto out; |
3155 | error = __get_user(ss_sp, &uss->ss_sp) | |
3156 | __get_user(ss_flags, &uss->ss_flags) | |
3157 | __get_user(ss_size, &uss->ss_size); |
3158 | if (error) |
3159 | goto out; |
3160 | |
3161 | error = -EPERM; |
3162 | if (on_sig_stack(sp)) |
3163 | goto out; |
3164 | |
3165 | error = -EINVAL; |
3166 | /* |
3167 | * Note - this code used to test ss_flags incorrectly: |
3168 | * old code may have been written using ss_flags==0 |
3169 | * to mean ss_flags==SS_ONSTACK (as this was the only |
3170 | * way that worked) - this fix preserves that older |
3171 | * mechanism. |
3172 | */ |
3173 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) |
3174 | goto out; |
3175 | |
3176 | if (ss_flags == SS_DISABLE) { |
3177 | ss_size = 0; |
3178 | ss_sp = NULL; |
3179 | } else { |
3180 | error = -ENOMEM; |
3181 | if (ss_size < MINSIGSTKSZ) |
3182 | goto out; |
3183 | } |
3184 | |
3185 | current->sas_ss_sp = (unsigned long) ss_sp; |
3186 | current->sas_ss_size = ss_size; |
3187 | } |
3188 | |
3189 | error = 0; |
3190 | if (uoss) { |
3191 | error = -EFAULT; |
3192 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) |
3193 | goto out; |
3194 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | |
3195 | __put_user(oss.ss_size, &uoss->ss_size) | |
3196 | __put_user(oss.ss_flags, &uoss->ss_flags); |
3197 | } |
3198 | |
3199 | out: |
3200 | return error; |
3201 | } |
3202 | SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss) |
3203 | { |
3204 | return do_sigaltstack(uss, uoss, current_user_stack_pointer()); |
3205 | } |
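| |
| /* |
| * Example (illustrative, user-space): pairing sigaltstack(2) with an |
| * SA_ONSTACK handler lets a SIGSEGV caused by stack overflow still |
| * run its handler; the sigaction() call itself is as in the example |
| * after do_sigaction() above, with SA_ONSTACK added to sa_flags: |
| * |
| *	stack_t ss; |
| * |
| *	ss.ss_sp = malloc(SIGSTKSZ); |
| *	ss.ss_size = SIGSTKSZ; |
| *	ss.ss_flags = 0; |
| *	sigaltstack(&ss, NULL); |
| */ |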
3206 | |
3207 | int restore_altstack(const stack_t __user *uss) |
3208 | { |
3209 | int err = do_sigaltstack(uss, NULL, current_user_stack_pointer()); |
3210 | /* squash all but -EFAULT for now */ |
3211 | return err == -EFAULT ? err : 0; |
3212 | } |
3213 | |
3214 | int __save_altstack(stack_t __user *uss, unsigned long sp) |
3215 | { |
3216 | struct task_struct *t = current; |
3217 | return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | |
3218 | __put_user(sas_ss_flags(sp), &uss->ss_flags) | |
3219 | __put_user(t->sas_ss_size, &uss->ss_size); |
3220 | } |
3221 | |
3222 | #ifdef CONFIG_COMPAT |
3223 | COMPAT_SYSCALL_DEFINE2(sigaltstack, |
3224 | const compat_stack_t __user *, uss_ptr, |
3225 | compat_stack_t __user *, uoss_ptr) |
3226 | { |
3227 | stack_t uss, uoss; |
3228 | int ret; |
3229 | mm_segment_t seg; |
3230 | |
3231 | if (uss_ptr) { |
3232 | compat_stack_t uss32; |
3233 | |
3234 | memset(&uss, 0, sizeof(stack_t)); |
3235 | if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) |
3236 | return -EFAULT; |
3237 | uss.ss_sp = compat_ptr(uss32.ss_sp); |
3238 | uss.ss_flags = uss32.ss_flags; |
3239 | uss.ss_size = uss32.ss_size; |
3240 | } |
3241 | seg = get_fs(); |
3242 | set_fs(KERNEL_DS); |
3243 | ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL), |
3244 | (stack_t __force __user *) &uoss, |
3245 | compat_user_stack_pointer()); |
3246 | set_fs(seg); |
3247 | if (ret >= 0 && uoss_ptr) { |
3248 | if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) || |
3249 | __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) || |
3250 | __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) || |
3251 | __put_user(uoss.ss_size, &uoss_ptr->ss_size)) |
3252 | ret = -EFAULT; |
3253 | } |
3254 | return ret; |
3255 | } |
3256 | |
3257 | int compat_restore_altstack(const compat_stack_t __user *uss) |
3258 | { |
3259 | int err = compat_sys_sigaltstack(uss, NULL); |
3260 | /* squash all but -EFAULT for now */ |
3261 | return err == -EFAULT ? err : 0; |
3262 | } |
3263 | |
3264 | int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) |
3265 | { |
3266 | struct task_struct *t = current; |
3267 | return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) | |
3268 | __put_user(sas_ss_flags(sp), &uss->ss_flags) | |
3269 | __put_user(t->sas_ss_size, &uss->ss_size); |
3270 | } |
3271 | #endif |
3272 | |
3273 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
3274 | |
3275 | /** |
3276 | * sys_sigpending - examine pending signals |
3277 | * @set: where mask of pending signal is returned |
3278 | */ |
3279 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) |
3280 | { |
3281 | return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t)); |
3282 | } |
3283 | |
3284 | #endif |
3285 | |
3286 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK |
3287 | /** |
3288 | * sys_sigprocmask - examine and change blocked signals |
3289 | * @how: whether to add, remove, or set signals |
3290 | * @nset: signals to add or remove (if non-null) |
3291 | * @oset: previous value of signal mask if non-null |
3292 | * |
3293 | * Some platforms have their own version with special arguments; |
3294 | * others support only sys_rt_sigprocmask. |
3295 | */ |
3296 | |
3297 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, |
3298 | old_sigset_t __user *, oset) |
3299 | { |
3300 | old_sigset_t old_set, new_set; |
3301 | sigset_t new_blocked; |
3302 | |
3303 | old_set = current->blocked.sig[0]; |
3304 | |
3305 | if (nset) { |
3306 | if (copy_from_user(&new_set, nset, sizeof(*nset))) |
3307 | return -EFAULT; |
3308 | |
3309 | new_blocked = current->blocked; |
3310 | |
3311 | switch (how) { |
3312 | case SIG_BLOCK: |
3313 | sigaddsetmask(&new_blocked, new_set); |
3314 | break; |
3315 | case SIG_UNBLOCK: |
3316 | sigdelsetmask(&new_blocked, new_set); |
3317 | break; |
3318 | case SIG_SETMASK: |
3319 | new_blocked.sig[0] = new_set; |
3320 | break; |
3321 | default: |
3322 | return -EINVAL; |
3323 | } |
3324 | |
3325 | set_current_blocked(&new_blocked); |
3326 | } |
3327 | |
3328 | if (oset) { |
3329 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
3330 | return -EFAULT; |
3331 | } |
3332 | |
3333 | return 0; |
3334 | } |
3335 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
3336 | |
3337 | #ifndef CONFIG_ODD_RT_SIGACTION |
3338 | /** |
3339 | * sys_rt_sigaction - alter an action taken by a process |
3340 | * @sig: signal to be sent |
3341 | * @act: new sigaction |
3342 | * @oact: used to save the previous sigaction |
3343 | * @sigsetsize: size of sigset_t type |
3344 | */ |
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */
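
/*
 * Illustrative userspace sketch (an editorial addition, not part of the
 * original source): installing a handler through the sigaction(2)
 * wrapper, which ends up in do_sigaction() via one of the entry points
 * above.
 *
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_sigusr1(int sig)
 *	{
 *		// only async-signal-safe calls belong here
 *		write(STDOUT_FILENO, "got SIGUSR1\n", 12);
 *	}
 *
 *	int install_handler(void)
 *	{
 *		struct sigaction sa = { 0 };
 *
 *		sa.sa_handler = on_sigusr1;
 *		sigemptyset(&sa.sa_mask);	// block nothing extra
 *		sa.sa_flags = SA_RESTART;	// restart slow syscalls
 *		return sigaction(SIGUSR1, &sa, NULL);
 *	}
 */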

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
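
/*
 * Illustrative userspace sketch (an editorial addition, not part of the
 * original source): because the handler is installed with SA_ONESHOT
 * (SysV semantics), it reverts to SIG_DFL after one delivery and must
 * re-install itself. Note that glibc's signal() usually layers BSD
 * semantics on top of sigaction(), so this reflects the raw syscall
 * behaviour above.
 *
 *	#include <signal.h>
 *
 *	static void on_alarm(int sig)
 *	{
 *		signal(SIGALRM, on_alarm);	// re-arm the one-shot handler
 *		// ... handle the alarm ...
 *	}
 */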

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		current->state = TASK_INTERRUPTIBLE;
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif
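
/*
 * Illustrative userspace sketch (an editorial addition, not part of the
 * original source): -ERESTARTNOHAND makes pause(2) return -1 with errno
 * set to EINTR once a handled signal arrives, rather than restarting.
 *
 *	#include <errno.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	static void on_usr1(int sig) { }
 *
 *	void wait_for_any_handled_signal(void)
 *	{
 *		signal(SIGUSR1, on_usr1);
 *		if (pause() == -1 && errno == EINTR)
 *			return;		// a signal was caught and handled
 *	}
 */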

int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
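
/*
 * Illustrative userspace sketch (an editorial addition, not part of the
 * original source): sigsuspend(2) swaps the mask and sleeps atomically,
 * closing the race window between unblocking a signal and waiting for
 * it.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	static void on_usr1(int sig) { got_usr1 = 1; }
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t block, old;
 *
 *		signal(SIGUSR1, on_usr1);
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &block, &old);	// defer SIGUSR1
 *
 *		while (!got_usr1)
 *			sigsuspend(&old);	// atomically unblock + sleep
 *
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore the mask
 *	}
 */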

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian, the bitmap layout does not depend on word granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel; try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */