Root/kernel/signal.c

1/*
2 * linux/kernel/signal.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
7 *
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/security.h>
21#include <linux/syscalls.h>
22#include <linux/ptrace.h>
23#include <linux/signal.h>
24#include <linux/signalfd.h>
25#include <linux/tracehook.h>
26#include <linux/capability.h>
27#include <linux/freezer.h>
28#include <linux/pid_namespace.h>
29#include <linux/nsproxy.h>
30#include <trace/events/sched.h>
31
32#include <asm/param.h>
33#include <asm/uaccess.h>
34#include <asm/unistd.h>
35#include <asm/siginfo.h>
36#include "audit.h" /* audit_signal_info() */
37
38/*
39 * SLAB caches for signal bits.
40 */
41
42static struct kmem_cache *sigqueue_cachep;
43
44static void __user *sig_handler(struct task_struct *t, int sig)
45{
46    return t->sighand->action[sig - 1].sa.sa_handler;
47}
48
49static int sig_handler_ignored(void __user *handler, int sig)
50{
51    /* Is it explicitly or implicitly ignored? */
52    return handler == SIG_IGN ||
53        (handler == SIG_DFL && sig_kernel_ignore(sig));
54}
55
56static int sig_task_ignored(struct task_struct *t, int sig,
57        int from_ancestor_ns)
58{
59    void __user *handler;
60
61    handler = sig_handler(t, sig);
62
63    if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
64            handler == SIG_DFL && !from_ancestor_ns)
65        return 1;
66
67    return sig_handler_ignored(handler, sig);
68}
69
70static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
71{
72    /*
73     * Blocked signals are never ignored, since the
74     * signal handler may change by the time it is
75     * unblocked.
76     */
77    if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
78        return 0;
79
80    if (!sig_task_ignored(t, sig, from_ancestor_ns))
81        return 0;
82
83    /*
84     * Tracers may want to know about even ignored signals.
85     */
86    return !tracehook_consider_ignored_signal(t, sig);
87}
88
89/*
90 * Re-calculate pending state from the set of locally pending
91 * signals, globally pending signals, and blocked signals.
92 */
93static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
94{
95    unsigned long ready;
96    long i;
97
98    switch (_NSIG_WORDS) {
99    default:
100        for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
101            ready |= signal->sig[i] &~ blocked->sig[i];
102        break;
103
104    case 4: ready = signal->sig[3] &~ blocked->sig[3];
105        ready |= signal->sig[2] &~ blocked->sig[2];
106        ready |= signal->sig[1] &~ blocked->sig[1];
107        ready |= signal->sig[0] &~ blocked->sig[0];
108        break;
109
110    case 2: ready = signal->sig[1] &~ blocked->sig[1];
111        ready |= signal->sig[0] &~ blocked->sig[0];
112        break;
113
114    case 1: ready = signal->sig[0] &~ blocked->sig[0];
115    }
116    return ready != 0;
117}
118
119#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
120
121static int recalc_sigpending_tsk(struct task_struct *t)
122{
123    if (t->signal->group_stop_count > 0 ||
124        PENDING(&t->pending, &t->blocked) ||
125        PENDING(&t->signal->shared_pending, &t->blocked)) {
126        set_tsk_thread_flag(t, TIF_SIGPENDING);
127        return 1;
128    }
129    /*
130     * We must never clear the flag in another thread, or in current
131     * when it's possible the current syscall is returning -ERESTART*.
132     * So we don't clear it here, and only callers who know they should do.
133     */
134    return 0;
135}
136
137/*
138 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
139 * This is superfluous when called on current, the wakeup is a harmless no-op.
140 */
141void recalc_sigpending_and_wake(struct task_struct *t)
142{
143    if (recalc_sigpending_tsk(t))
144        signal_wake_up(t, 0);
145}
146
147void recalc_sigpending(void)
148{
149    if (unlikely(tracehook_force_sigpending()))
150        set_thread_flag(TIF_SIGPENDING);
151    else if (!recalc_sigpending_tsk(current) && !freezing(current))
152        clear_thread_flag(TIF_SIGPENDING);
153
154}
155
156/* Given the mask, find the first available signal that should be serviced. */
157
158int next_signal(struct sigpending *pending, sigset_t *mask)
159{
160    unsigned long i, *s, *m, x;
161    int sig = 0;
162    
163    s = pending->signal.sig;
164    m = mask->sig;
165    switch (_NSIG_WORDS) {
166    default:
167        for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
168            if ((x = *s &~ *m) != 0) {
169                sig = ffz(~x) + i*_NSIG_BPW + 1;
170                break;
171            }
172        break;
173
174    case 2: if ((x = s[0] &~ m[0]) != 0)
175            sig = 1;
176        else if ((x = s[1] &~ m[1]) != 0)
177            sig = _NSIG_BPW + 1;
178        else
179            break;
180        sig += ffz(~x);
181        break;
182
183    case 1: if ((x = *s &~ *m) != 0)
184            sig = ffz(~x) + 1;
185        break;
186    }
187    
188    return sig;
189}
190
191/*
192 * allocate a new signal queue record
193 * - this may be called without locks if and only if t == current, otherwise an
194 * appopriate lock must be held to stop the target task from exiting
195 */
196static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
197                     int override_rlimit)
198{
199    struct sigqueue *q = NULL;
200    struct user_struct *user;
201
202    /*
203     * We won't get problems with the target's UID changing under us
204     * because changing it requires RCU be used, and if t != current, the
205     * caller must be holding the RCU readlock (by way of a spinlock) and
206     * we use RCU protection here
207     */
208    user = get_uid(__task_cred(t)->user);
209    atomic_inc(&user->sigpending);
210    if (override_rlimit ||
211        atomic_read(&user->sigpending) <=
212            t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
213        q = kmem_cache_alloc(sigqueue_cachep, flags);
214    if (unlikely(q == NULL)) {
215        atomic_dec(&user->sigpending);
216        free_uid(user);
217    } else {
218        INIT_LIST_HEAD(&q->list);
219        q->flags = 0;
220        q->user = user;
221    }
222
223    return q;
224}
225
226static void __sigqueue_free(struct sigqueue *q)
227{
228    if (q->flags & SIGQUEUE_PREALLOC)
229        return;
230    atomic_dec(&q->user->sigpending);
231    free_uid(q->user);
232    kmem_cache_free(sigqueue_cachep, q);
233}
234
235void flush_sigqueue(struct sigpending *queue)
236{
237    struct sigqueue *q;
238
239    sigemptyset(&queue->signal);
240    while (!list_empty(&queue->list)) {
241        q = list_entry(queue->list.next, struct sigqueue , list);
242        list_del_init(&q->list);
243        __sigqueue_free(q);
244    }
245}
246
247/*
248 * Flush all pending signals for a task.
249 */
250void __flush_signals(struct task_struct *t)
251{
252    clear_tsk_thread_flag(t, TIF_SIGPENDING);
253    flush_sigqueue(&t->pending);
254    flush_sigqueue(&t->signal->shared_pending);
255}
256
257void flush_signals(struct task_struct *t)
258{
259    unsigned long flags;
260
261    spin_lock_irqsave(&t->sighand->siglock, flags);
262    __flush_signals(t);
263    spin_unlock_irqrestore(&t->sighand->siglock, flags);
264}
265
266static void __flush_itimer_signals(struct sigpending *pending)
267{
268    sigset_t signal, retain;
269    struct sigqueue *q, *n;
270
271    signal = pending->signal;
272    sigemptyset(&retain);
273
274    list_for_each_entry_safe(q, n, &pending->list, list) {
275        int sig = q->info.si_signo;
276
277        if (likely(q->info.si_code != SI_TIMER)) {
278            sigaddset(&retain, sig);
279        } else {
280            sigdelset(&signal, sig);
281            list_del_init(&q->list);
282            __sigqueue_free(q);
283        }
284    }
285
286    sigorsets(&pending->signal, &signal, &retain);
287}
288
289void flush_itimer_signals(void)
290{
291    struct task_struct *tsk = current;
292    unsigned long flags;
293
294    spin_lock_irqsave(&tsk->sighand->siglock, flags);
295    __flush_itimer_signals(&tsk->pending);
296    __flush_itimer_signals(&tsk->signal->shared_pending);
297    spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
298}
299
300void ignore_signals(struct task_struct *t)
301{
302    int i;
303
304    for (i = 0; i < _NSIG; ++i)
305        t->sighand->action[i].sa.sa_handler = SIG_IGN;
306
307    flush_signals(t);
308}
309
310/*
311 * Flush all handlers for a task.
312 */
313
314void
315flush_signal_handlers(struct task_struct *t, int force_default)
316{
317    int i;
318    struct k_sigaction *ka = &t->sighand->action[0];
319    for (i = _NSIG ; i != 0 ; i--) {
320        if (force_default || ka->sa.sa_handler != SIG_IGN)
321            ka->sa.sa_handler = SIG_DFL;
322        ka->sa.sa_flags = 0;
323        sigemptyset(&ka->sa.sa_mask);
324        ka++;
325    }
326}
327
328int unhandled_signal(struct task_struct *tsk, int sig)
329{
330    void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
331    if (is_global_init(tsk))
332        return 1;
333    if (handler != SIG_IGN && handler != SIG_DFL)
334        return 0;
335    return !tracehook_consider_fatal_signal(tsk, sig);
336}
337
338
339/* Notify the system that a driver wants to block all signals for this
340 * process, and wants to be notified if any signals at all were to be
341 * sent/acted upon. If the notifier routine returns non-zero, then the
342 * signal will be acted upon after all. If the notifier routine returns 0,
343 * then then signal will be blocked. Only one block per process is
344 * allowed. priv is a pointer to private data that the notifier routine
345 * can use to determine if the signal should be blocked or not. */
346
347void
348block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
349{
350    unsigned long flags;
351
352    spin_lock_irqsave(&current->sighand->siglock, flags);
353    current->notifier_mask = mask;
354    current->notifier_data = priv;
355    current->notifier = notifier;
356    spin_unlock_irqrestore(&current->sighand->siglock, flags);
357}
358
359/* Notify the system that blocking has ended. */
360
361void
362unblock_all_signals(void)
363{
364    unsigned long flags;
365
366    spin_lock_irqsave(&current->sighand->siglock, flags);
367    current->notifier = NULL;
368    current->notifier_data = NULL;
369    recalc_sigpending();
370    spin_unlock_irqrestore(&current->sighand->siglock, flags);
371}
372
373static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
374{
375    struct sigqueue *q, *first = NULL;
376
377    /*
378     * Collect the siginfo appropriate to this signal. Check if
379     * there is another siginfo for the same signal.
380    */
381    list_for_each_entry(q, &list->list, list) {
382        if (q->info.si_signo == sig) {
383            if (first)
384                goto still_pending;
385            first = q;
386        }
387    }
388
389    sigdelset(&list->signal, sig);
390
391    if (first) {
392still_pending:
393        list_del_init(&first->list);
394        copy_siginfo(info, &first->info);
395        __sigqueue_free(first);
396    } else {
397        /* Ok, it wasn't in the queue. This must be
398           a fast-pathed signal or we must have been
399           out of queue space. So zero out the info.
400         */
401        info->si_signo = sig;
402        info->si_errno = 0;
403        info->si_code = 0;
404        info->si_pid = 0;
405        info->si_uid = 0;
406    }
407}
408
409static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
410            siginfo_t *info)
411{
412    int sig = next_signal(pending, mask);
413
414    if (sig) {
415        if (current->notifier) {
416            if (sigismember(current->notifier_mask, sig)) {
417                if (!(current->notifier)(current->notifier_data)) {
418                    clear_thread_flag(TIF_SIGPENDING);
419                    return 0;
420                }
421            }
422        }
423
424        collect_signal(sig, pending, info);
425    }
426
427    return sig;
428}
429
430/*
431 * Dequeue a signal and return the element to the caller, which is
432 * expected to free it.
433 *
434 * All callers have to hold the siglock.
435 */
436int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
437{
438    int signr;
439
440    /* We only dequeue private signals from ourselves, we don't let
441     * signalfd steal them
442     */
443    signr = __dequeue_signal(&tsk->pending, mask, info);
444    if (!signr) {
445        signr = __dequeue_signal(&tsk->signal->shared_pending,
446                     mask, info);
447        /*
448         * itimer signal ?
449         *
450         * itimers are process shared and we restart periodic
451         * itimers in the signal delivery path to prevent DoS
452         * attacks in the high resolution timer case. This is
453         * compliant with the old way of self restarting
454         * itimers, as the SIGALRM is a legacy signal and only
455         * queued once. Changing the restart behaviour to
456         * restart the timer in the signal dequeue path is
457         * reducing the timer noise on heavy loaded !highres
458         * systems too.
459         */
460        if (unlikely(signr == SIGALRM)) {
461            struct hrtimer *tmr = &tsk->signal->real_timer;
462
463            if (!hrtimer_is_queued(tmr) &&
464                tsk->signal->it_real_incr.tv64 != 0) {
465                hrtimer_forward(tmr, tmr->base->get_time(),
466                        tsk->signal->it_real_incr);
467                hrtimer_restart(tmr);
468            }
469        }
470    }
471
472    recalc_sigpending();
473    if (!signr)
474        return 0;
475
476    if (unlikely(sig_kernel_stop(signr))) {
477        /*
478         * Set a marker that we have dequeued a stop signal. Our
479         * caller might release the siglock and then the pending
480         * stop signal it is about to process is no longer in the
481         * pending bitmasks, but must still be cleared by a SIGCONT
482         * (and overruled by a SIGKILL). So those cases clear this
483         * shared flag after we've set it. Note that this flag may
484         * remain set after the signal we return is ignored or
485         * handled. That doesn't matter because its only purpose
486         * is to alert stop-signal processing code when another
487         * processor has come along and cleared the flag.
488         */
489        tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
490    }
491    if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
492        /*
493         * Release the siglock to ensure proper locking order
494         * of timer locks outside of siglocks. Note, we leave
495         * irqs disabled here, since the posix-timers code is
496         * about to disable them again anyway.
497         */
498        spin_unlock(&tsk->sighand->siglock);
499        do_schedule_next_timer(info);
500        spin_lock(&tsk->sighand->siglock);
501    }
502    return signr;
503}
504
505/*
506 * Tell a process that it has a new active signal..
507 *
508 * NOTE! we rely on the previous spin_lock to
509 * lock interrupts for us! We can only be called with
510 * "siglock" held, and the local interrupt must
511 * have been disabled when that got acquired!
512 *
513 * No need to set need_resched since signal event passing
514 * goes through ->blocked
515 */
516void signal_wake_up(struct task_struct *t, int resume)
517{
518    unsigned int mask;
519
520    set_tsk_thread_flag(t, TIF_SIGPENDING);
521
522    /*
523     * For SIGKILL, we want to wake it up in the stopped/traced/killable
524     * case. We don't check t->state here because there is a race with it
525     * executing another processor and just now entering stopped state.
526     * By using wake_up_state, we ensure the process will wake up and
527     * handle its death signal.
528     */
529    mask = TASK_INTERRUPTIBLE;
530    if (resume)
531        mask |= TASK_WAKEKILL;
532    if (!wake_up_state(t, mask))
533        kick_process(t);
534}
535
536/*
537 * Remove signals in mask from the pending set and queue.
538 * Returns 1 if any signals were found.
539 *
540 * All callers must be holding the siglock.
541 *
542 * This version takes a sigset mask and looks at all signals,
543 * not just those in the first mask word.
544 */
545static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
546{
547    struct sigqueue *q, *n;
548    sigset_t m;
549
550    sigandsets(&m, mask, &s->signal);
551    if (sigisemptyset(&m))
552        return 0;
553
554    signandsets(&s->signal, &s->signal, mask);
555    list_for_each_entry_safe(q, n, &s->list, list) {
556        if (sigismember(mask, q->info.si_signo)) {
557            list_del_init(&q->list);
558            __sigqueue_free(q);
559        }
560    }
561    return 1;
562}
563/*
564 * Remove signals in mask from the pending set and queue.
565 * Returns 1 if any signals were found.
566 *
567 * All callers must be holding the siglock.
568 */
569static int rm_from_queue(unsigned long mask, struct sigpending *s)
570{
571    struct sigqueue *q, *n;
572
573    if (!sigtestsetmask(&s->signal, mask))
574        return 0;
575
576    sigdelsetmask(&s->signal, mask);
577    list_for_each_entry_safe(q, n, &s->list, list) {
578        if (q->info.si_signo < SIGRTMIN &&
579            (mask & sigmask(q->info.si_signo))) {
580            list_del_init(&q->list);
581            __sigqueue_free(q);
582        }
583    }
584    return 1;
585}
586
587/*
588 * Bad permissions for sending the signal
589 * - the caller must hold at least the RCU read lock
590 */
591static int check_kill_permission(int sig, struct siginfo *info,
592                 struct task_struct *t)
593{
594    const struct cred *cred = current_cred(), *tcred;
595    struct pid *sid;
596    int error;
597
598    if (!valid_signal(sig))
599        return -EINVAL;
600
601    if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
602        return 0;
603
604    error = audit_signal_info(sig, t); /* Let audit system see the signal */
605    if (error)
606        return error;
607
608    tcred = __task_cred(t);
609    if ((cred->euid ^ tcred->suid) &&
610        (cred->euid ^ tcred->uid) &&
611        (cred->uid ^ tcred->suid) &&
612        (cred->uid ^ tcred->uid) &&
613        !capable(CAP_KILL)) {
614        switch (sig) {
615        case SIGCONT:
616            sid = task_session(t);
617            /*
618             * We don't return the error if sid == NULL. The
619             * task was unhashed, the caller must notice this.
620             */
621            if (!sid || sid == task_session(current))
622                break;
623        default:
624            return -EPERM;
625        }
626    }
627
628    return security_task_kill(t, info, sig, 0);
629}
630
631/*
632 * Handle magic process-wide effects of stop/continue signals. Unlike
633 * the signal actions, these happen immediately at signal-generation
634 * time regardless of blocking, ignoring, or handling. This does the
635 * actual continuing for SIGCONT, but not the actual stopping for stop
636 * signals. The process stop is done as a signal action for SIG_DFL.
637 *
638 * Returns true if the signal should be actually delivered, otherwise
639 * it should be dropped.
640 */
641static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
642{
643    struct signal_struct *signal = p->signal;
644    struct task_struct *t;
645
646    if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
647        /*
648         * The process is in the middle of dying, nothing to do.
649         */
650    } else if (sig_kernel_stop(sig)) {
651        /*
652         * This is a stop signal. Remove SIGCONT from all queues.
653         */
654        rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
655        t = p;
656        do {
657            rm_from_queue(sigmask(SIGCONT), &t->pending);
658        } while_each_thread(p, t);
659    } else if (sig == SIGCONT) {
660        unsigned int why;
661        /*
662         * Remove all stop signals from all queues,
663         * and wake all threads.
664         */
665        rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
666        t = p;
667        do {
668            unsigned int state;
669            rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
670            /*
671             * If there is a handler for SIGCONT, we must make
672             * sure that no thread returns to user mode before
673             * we post the signal, in case it was the only
674             * thread eligible to run the signal handler--then
675             * it must not do anything between resuming and
676             * running the handler. With the TIF_SIGPENDING
677             * flag set, the thread will pause and acquire the
678             * siglock that we hold now and until we've queued
679             * the pending signal.
680             *
681             * Wake up the stopped thread _after_ setting
682             * TIF_SIGPENDING
683             */
684            state = __TASK_STOPPED;
685            if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
686                set_tsk_thread_flag(t, TIF_SIGPENDING);
687                state |= TASK_INTERRUPTIBLE;
688            }
689            wake_up_state(t, state);
690        } while_each_thread(p, t);
691
692        /*
693         * Notify the parent with CLD_CONTINUED if we were stopped.
694         *
695         * If we were in the middle of a group stop, we pretend it
696         * was already finished, and then continued. Since SIGCHLD
697         * doesn't queue we report only CLD_STOPPED, as if the next
698         * CLD_CONTINUED was dropped.
699         */
700        why = 0;
701        if (signal->flags & SIGNAL_STOP_STOPPED)
702            why |= SIGNAL_CLD_CONTINUED;
703        else if (signal->group_stop_count)
704            why |= SIGNAL_CLD_STOPPED;
705
706        if (why) {
707            /*
708             * The first thread which returns from do_signal_stop()
709             * will take ->siglock, notice SIGNAL_CLD_MASK, and
710             * notify its parent. See get_signal_to_deliver().
711             */
712            signal->flags = why | SIGNAL_STOP_CONTINUED;
713            signal->group_stop_count = 0;
714            signal->group_exit_code = 0;
715        } else {
716            /*
717             * We are not stopped, but there could be a stop
718             * signal in the middle of being processed after
719             * being removed from the queue. Clear that too.
720             */
721            signal->flags &= ~SIGNAL_STOP_DEQUEUED;
722        }
723    }
724
725    return !sig_ignored(p, sig, from_ancestor_ns);
726}
727
728/*
729 * Test if P wants to take SIG. After we've checked all threads with this,
730 * it's equivalent to finding no threads not blocking SIG. Any threads not
731 * blocking SIG were ruled out because they are not running and already
732 * have pending signals. Such threads will dequeue from the shared queue
733 * as soon as they're available, so putting the signal on the shared queue
734 * will be equivalent to sending it to one such thread.
735 */
736static inline int wants_signal(int sig, struct task_struct *p)
737{
738    if (sigismember(&p->blocked, sig))
739        return 0;
740    if (p->flags & PF_EXITING)
741        return 0;
742    if (sig == SIGKILL)
743        return 1;
744    if (task_is_stopped_or_traced(p))
745        return 0;
746    return task_curr(p) || !signal_pending(p);
747}
748
749static void complete_signal(int sig, struct task_struct *p, int group)
750{
751    struct signal_struct *signal = p->signal;
752    struct task_struct *t;
753
754    /*
755     * Now find a thread we can wake up to take the signal off the queue.
756     *
757     * If the main thread wants the signal, it gets first crack.
758     * Probably the least surprising to the average bear.
759     */
760    if (wants_signal(sig, p))
761        t = p;
762    else if (!group || thread_group_empty(p))
763        /*
764         * There is just one thread and it does not need to be woken.
765         * It will dequeue unblocked signals before it runs again.
766         */
767        return;
768    else {
769        /*
770         * Otherwise try to find a suitable thread.
771         */
772        t = signal->curr_target;
773        while (!wants_signal(sig, t)) {
774            t = next_thread(t);
775            if (t == signal->curr_target)
776                /*
777                 * No thread needs to be woken.
778                 * Any eligible threads will see
779                 * the signal in the queue soon.
780                 */
781                return;
782        }
783        signal->curr_target = t;
784    }
785
786    /*
787     * Found a killable thread. If the signal will be fatal,
788     * then start taking the whole group down immediately.
789     */
790    if (sig_fatal(p, sig) &&
791        !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
792        !sigismember(&t->real_blocked, sig) &&
793        (sig == SIGKILL ||
794         !tracehook_consider_fatal_signal(t, sig))) {
795        /*
796         * This signal will be fatal to the whole group.
797         */
798        if (!sig_kernel_coredump(sig)) {
799            /*
800             * Start a group exit and wake everybody up.
801             * This way we don't have other threads
802             * running and doing things after a slower
803             * thread has the fatal signal pending.
804             */
805            signal->flags = SIGNAL_GROUP_EXIT;
806            signal->group_exit_code = sig;
807            signal->group_stop_count = 0;
808            t = p;
809            do {
810                sigaddset(&t->pending.signal, SIGKILL);
811                signal_wake_up(t, 1);
812            } while_each_thread(p, t);
813            return;
814        }
815    }
816
817    /*
818     * The signal is already in the shared-pending queue.
819     * Tell the chosen thread to wake up and dequeue it.
820     */
821    signal_wake_up(t, sig == SIGKILL);
822    return;
823}
824
825static inline int legacy_queue(struct sigpending *signals, int sig)
826{
827    return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
828}
829
830static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
831            int group, int from_ancestor_ns)
832{
833    struct sigpending *pending;
834    struct sigqueue *q;
835    int override_rlimit;
836
837    trace_sched_signal_send(sig, t);
838
839    assert_spin_locked(&t->sighand->siglock);
840
841    if (!prepare_signal(sig, t, from_ancestor_ns))
842        return 0;
843
844    pending = group ? &t->signal->shared_pending : &t->pending;
845    /*
846     * Short-circuit ignored signals and support queuing
847     * exactly one non-rt signal, so that we can get more
848     * detailed information about the cause of the signal.
849     */
850    if (legacy_queue(pending, sig))
851        return 0;
852    /*
853     * fast-pathed signals for kernel-internal things like SIGSTOP
854     * or SIGKILL.
855     */
856    if (info == SEND_SIG_FORCED)
857        goto out_set;
858
859    /* Real-time signals must be queued if sent by sigqueue, or
860       some other real-time mechanism. It is implementation
861       defined whether kill() does so. We attempt to do so, on
862       the principle of least surprise, but since kill is not
863       allowed to fail with EAGAIN when low on memory we just
864       make sure at least one signal gets delivered and don't
865       pass on the info struct. */
866
867    if (sig < SIGRTMIN)
868        override_rlimit = (is_si_special(info) || info->si_code >= 0);
869    else
870        override_rlimit = 0;
871
872    q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
873        override_rlimit);
874    if (q) {
875        list_add_tail(&q->list, &pending->list);
876        switch ((unsigned long) info) {
877        case (unsigned long) SEND_SIG_NOINFO:
878            q->info.si_signo = sig;
879            q->info.si_errno = 0;
880            q->info.si_code = SI_USER;
881            q->info.si_pid = task_tgid_nr_ns(current,
882                            task_active_pid_ns(t));
883            q->info.si_uid = current_uid();
884            break;
885        case (unsigned long) SEND_SIG_PRIV:
886            q->info.si_signo = sig;
887            q->info.si_errno = 0;
888            q->info.si_code = SI_KERNEL;
889            q->info.si_pid = 0;
890            q->info.si_uid = 0;
891            break;
892        default:
893            copy_siginfo(&q->info, info);
894            if (from_ancestor_ns)
895                q->info.si_pid = 0;
896            break;
897        }
898    } else if (!is_si_special(info)) {
899        if (sig >= SIGRTMIN && info->si_code != SI_USER)
900        /*
901         * Queue overflow, abort. We may abort if the signal was rt
902         * and sent by user using something other than kill().
903         */
904            return -EAGAIN;
905    }
906
907out_set:
908    signalfd_notify(t, sig);
909    sigaddset(&pending->signal, sig);
910    complete_signal(sig, t, group);
911    return 0;
912}
913
914static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
915            int group)
916{
917    int from_ancestor_ns = 0;
918
919#ifdef CONFIG_PID_NS
920    if (!is_si_special(info) && SI_FROMUSER(info) &&
921            task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
922        from_ancestor_ns = 1;
923#endif
924
925    return __send_signal(sig, info, t, group, from_ancestor_ns);
926}
927
928int print_fatal_signals;
929
930static void print_fatal_signal(struct pt_regs *regs, int signr)
931{
932    printk("%s/%d: potentially unexpected fatal signal %d.\n",
933        current->comm, task_pid_nr(current), signr);
934
935#if defined(__i386__) && !defined(__arch_um__)
936    printk("code at %08lx: ", regs->ip);
937    {
938        int i;
939        for (i = 0; i < 16; i++) {
940            unsigned char insn;
941
942            if (get_user(insn, (unsigned char *)(regs->ip + i)))
943                break;
944            printk("%02x ", insn);
945        }
946    }
947#endif
948    printk("\n");
949    preempt_disable();
950    show_regs(regs);
951    preempt_enable();
952}
953
954static int __init setup_print_fatal_signals(char *str)
955{
956    get_option (&str, &print_fatal_signals);
957
958    return 1;
959}
960
961__setup("print-fatal-signals=", setup_print_fatal_signals);
962
963int
964__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
965{
966    return send_signal(sig, info, p, 1);
967}
968
969static int
970specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
971{
972    return send_signal(sig, info, t, 0);
973}
974
975int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
976            bool group)
977{
978    unsigned long flags;
979    int ret = -ESRCH;
980
981    if (lock_task_sighand(p, &flags)) {
982        ret = send_signal(sig, info, p, group);
983        unlock_task_sighand(p, &flags);
984    }
985
986    return ret;
987}
988
989/*
990 * Force a signal that the process can't ignore: if necessary
991 * we unblock the signal and change any SIG_IGN to SIG_DFL.
992 *
993 * Note: If we unblock the signal, we always reset it to SIG_DFL,
994 * since we do not want to have a signal handler that was blocked
995 * be invoked when user space had explicitly blocked it.
996 *
997 * We don't want to have recursive SIGSEGV's etc, for example,
998 * that is why we also clear SIGNAL_UNKILLABLE.
999 */
1000int
1001force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1002{
1003    unsigned long int flags;
1004    int ret, blocked, ignored;
1005    struct k_sigaction *action;
1006
1007    spin_lock_irqsave(&t->sighand->siglock, flags);
1008    action = &t->sighand->action[sig-1];
1009    ignored = action->sa.sa_handler == SIG_IGN;
1010    blocked = sigismember(&t->blocked, sig);
1011    if (blocked || ignored) {
1012        action->sa.sa_handler = SIG_DFL;
1013        if (blocked) {
1014            sigdelset(&t->blocked, sig);
1015            recalc_sigpending_and_wake(t);
1016        }
1017    }
1018    if (action->sa.sa_handler == SIG_DFL)
1019        t->signal->flags &= ~SIGNAL_UNKILLABLE;
1020    ret = specific_send_sig_info(sig, info, t);
1021    spin_unlock_irqrestore(&t->sighand->siglock, flags);
1022
1023    return ret;
1024}
1025
1026void
1027force_sig_specific(int sig, struct task_struct *t)
1028{
1029    force_sig_info(sig, SEND_SIG_FORCED, t);
1030}
1031
1032/*
1033 * Nuke all other threads in the group.
1034 */
1035void zap_other_threads(struct task_struct *p)
1036{
1037    struct task_struct *t;
1038
1039    p->signal->group_stop_count = 0;
1040
1041    for (t = next_thread(p); t != p; t = next_thread(t)) {
1042        /*
1043         * Don't bother with already dead threads
1044         */
1045        if (t->exit_state)
1046            continue;
1047
1048        /* SIGKILL will be handled before any pending SIGSTOP */
1049        sigaddset(&t->pending.signal, SIGKILL);
1050        signal_wake_up(t, 1);
1051    }
1052}
1053
1054struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1055{
1056    struct sighand_struct *sighand;
1057
1058    rcu_read_lock();
1059    for (;;) {
1060        sighand = rcu_dereference(tsk->sighand);
1061        if (unlikely(sighand == NULL))
1062            break;
1063
1064        spin_lock_irqsave(&sighand->siglock, *flags);
1065        if (likely(sighand == tsk->sighand))
1066            break;
1067        spin_unlock_irqrestore(&sighand->siglock, *flags);
1068    }
1069    rcu_read_unlock();
1070
1071    return sighand;
1072}
1073EXPORT_SYMBOL(lock_task_sighand);
1074
1075/*
1076 * send signal info to all the members of a group
1077 * - the caller must hold the RCU read lock at least
1078 */
1079int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1080{
1081    int ret = check_kill_permission(sig, info, p);
1082
1083    if (!ret && sig)
1084        ret = do_send_sig_info(sig, info, p, true);
1085
1086    return ret;
1087}
1088
1089/*
1090 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1091 * control characters do (^C, ^Z etc)
1092 * - the caller must hold at least a readlock on tasklist_lock
1093 */
1094int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1095{
1096    struct task_struct *p = NULL;
1097    int retval, success;
1098
1099    success = 0;
1100    retval = -ESRCH;
1101    do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1102        int err = group_send_sig_info(sig, info, p);
1103        success |= !err;
1104        retval = err;
1105    } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1106    return success ? 0 : retval;
1107}
1108
1109int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1110{
1111    int error = -ESRCH;
1112    struct task_struct *p;
1113
1114    rcu_read_lock();
1115retry:
1116    p = pid_task(pid, PIDTYPE_PID);
1117    if (p) {
1118        error = group_send_sig_info(sig, info, p);
1119        if (unlikely(error == -ESRCH))
1120            /*
1121             * The task was unhashed in between, try again.
1122             * If it is dead, pid_task() will return NULL,
1123             * if we race with de_thread() it will find the
1124             * new leader.
1125             */
1126            goto retry;
1127    }
1128    rcu_read_unlock();
1129
1130    return error;
1131}
1132
1133int
1134kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1135{
1136    int error;
1137    rcu_read_lock();
1138    error = kill_pid_info(sig, info, find_vpid(pid));
1139    rcu_read_unlock();
1140    return error;
1141}
1142
1143/* like kill_pid_info(), but doesn't use uid/euid of "current" */
1144int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1145              uid_t uid, uid_t euid, u32 secid)
1146{
1147    int ret = -EINVAL;
1148    struct task_struct *p;
1149    const struct cred *pcred;
1150
1151    if (!valid_signal(sig))
1152        return ret;
1153
1154    read_lock(&tasklist_lock);
1155    p = pid_task(pid, PIDTYPE_PID);
1156    if (!p) {
1157        ret = -ESRCH;
1158        goto out_unlock;
1159    }
1160    pcred = __task_cred(p);
1161    if ((info == SEND_SIG_NOINFO ||
1162         (!is_si_special(info) && SI_FROMUSER(info))) &&
1163        euid != pcred->suid && euid != pcred->uid &&
1164        uid != pcred->suid && uid != pcred->uid) {
1165        ret = -EPERM;
1166        goto out_unlock;
1167    }
1168    ret = security_task_kill(p, info, sig, secid);
1169    if (ret)
1170        goto out_unlock;
1171    if (sig && p->sighand) {
1172        unsigned long flags;
1173        spin_lock_irqsave(&p->sighand->siglock, flags);
1174        ret = __send_signal(sig, info, p, 1, 0);
1175        spin_unlock_irqrestore(&p->sighand->siglock, flags);
1176    }
1177out_unlock:
1178    read_unlock(&tasklist_lock);
1179    return ret;
1180}
1181EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1182
1183/*
1184 * kill_something_info() interprets pid in interesting ways just like kill(2).
1185 *
1186 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1187 * is probably wrong. Should make it like BSD or SYSV.
1188 */
1189
1190static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1191{
1192    int ret;
1193
1194    if (pid > 0) {
1195        rcu_read_lock();
1196        ret = kill_pid_info(sig, info, find_vpid(pid));
1197        rcu_read_unlock();
1198        return ret;
1199    }
1200
1201    read_lock(&tasklist_lock);
1202    if (pid != -1) {
1203        ret = __kill_pgrp_info(sig, info,
1204                pid ? find_vpid(-pid) : task_pgrp(current));
1205    } else {
1206        int retval = 0, count = 0;
1207        struct task_struct * p;
1208
1209        for_each_process(p) {
1210            if (task_pid_vnr(p) > 1 &&
1211                    !same_thread_group(p, current)) {
1212                int err = group_send_sig_info(sig, info, p);
1213                ++count;
1214                if (err != -EPERM)
1215                    retval = err;
1216            }
1217        }
1218        ret = count ? retval : -ESRCH;
1219    }
1220    read_unlock(&tasklist_lock);
1221
1222    return ret;
1223}
1224
1225/*
1226 * These are for backward compatibility with the rest of the kernel source.
1227 */
1228
1229int
1230send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1231{
1232    /*
1233     * Make sure legacy kernel users don't send in bad values
1234     * (normal paths check this in check_kill_permission).
1235     */
1236    if (!valid_signal(sig))
1237        return -EINVAL;
1238
1239    return do_send_sig_info(sig, info, p, false);
1240}
1241
1242#define __si_special(priv) \
1243    ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1244
1245int
1246send_sig(int sig, struct task_struct *p, int priv)
1247{
1248    return send_sig_info(sig, __si_special(priv), p);
1249}
1250
1251void
1252force_sig(int sig, struct task_struct *p)
1253{
1254    force_sig_info(sig, SEND_SIG_PRIV, p);
1255}
1256
1257/*
1258 * When things go south during signal handling, we
1259 * will force a SIGSEGV. And if the signal that caused
1260 * the problem was already a SIGSEGV, we'll want to
1261 * make sure we don't even try to deliver the signal..
1262 */
1263int
1264force_sigsegv(int sig, struct task_struct *p)
1265{
1266    if (sig == SIGSEGV) {
1267        unsigned long flags;
1268        spin_lock_irqsave(&p->sighand->siglock, flags);
1269        p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1270        spin_unlock_irqrestore(&p->sighand->siglock, flags);
1271    }
1272    force_sig(SIGSEGV, p);
1273    return 0;
1274}
1275
1276int kill_pgrp(struct pid *pid, int sig, int priv)
1277{
1278    int ret;
1279
1280    read_lock(&tasklist_lock);
1281    ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1282    read_unlock(&tasklist_lock);
1283
1284    return ret;
1285}
1286EXPORT_SYMBOL(kill_pgrp);
1287
1288int kill_pid(struct pid *pid, int sig, int priv)
1289{
1290    return kill_pid_info(sig, __si_special(priv), pid);
1291}
1292EXPORT_SYMBOL(kill_pid);
1293
1294/*
1295 * These functions support sending signals using preallocated sigqueue
1296 * structures. This is needed "because realtime applications cannot
1297 * afford to lose notifications of asynchronous events, like timer
1298 * expirations or I/O completions". In the case of Posix Timers
1299 * we allocate the sigqueue structure from the timer_create. If this
1300 * allocation fails we are able to report the failure to the application
1301 * with an EAGAIN error.
1302 */
1303 
1304struct sigqueue *sigqueue_alloc(void)
1305{
1306    struct sigqueue *q;
1307
1308    if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1309        q->flags |= SIGQUEUE_PREALLOC;
1310    return(q);
1311}
1312
1313void sigqueue_free(struct sigqueue *q)
1314{
1315    unsigned long flags;
1316    spinlock_t *lock = &current->sighand->siglock;
1317
1318    BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1319    /*
1320     * We must hold ->siglock while testing q->list
1321     * to serialize with collect_signal() or with
1322     * __exit_signal()->flush_sigqueue().
1323     */
1324    spin_lock_irqsave(lock, flags);
1325    q->flags &= ~SIGQUEUE_PREALLOC;
1326    /*
1327     * If it is queued it will be freed when dequeued,
1328     * like the "regular" sigqueue.
1329     */
1330    if (!list_empty(&q->list))
1331        q = NULL;
1332    spin_unlock_irqrestore(lock, flags);
1333
1334    if (q)
1335        __sigqueue_free(q);
1336}
1337
1338int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1339{
1340    int sig = q->info.si_signo;
1341    struct sigpending *pending;
1342    unsigned long flags;
1343    int ret;
1344
1345    BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1346
1347    ret = -1;
1348    if (!likely(lock_task_sighand(t, &flags)))
1349        goto ret;
1350
1351    ret = 1; /* the signal is ignored */
1352    if (!prepare_signal(sig, t, 0))
1353        goto out;
1354
1355    ret = 0;
1356    if (unlikely(!list_empty(&q->list))) {
1357        /*
1358         * If an SI_TIMER entry is already queue just increment
1359         * the overrun count.
1360         */
1361        BUG_ON(q->info.si_code != SI_TIMER);
1362        q->info.si_overrun++;
1363        goto out;
1364    }
1365    q->info.si_overrun = 0;
1366
1367    signalfd_notify(t, sig);
1368    pending = group ? &t->signal->shared_pending : &t->pending;
1369    list_add_tail(&q->list, &pending->list);
1370    sigaddset(&pending->signal, sig);
1371    complete_signal(sig, t, group);
1372out:
1373    unlock_task_sighand(t, &flags);
1374ret:
1375    return ret;
1376}
1377
1378/*
1379 * Let a parent know about the death of a child.
1380 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1381 *
1382 * Returns -1 if our parent ignored us and so we've switched to
1383 * self-reaping, or else @sig.
1384 */
1385int do_notify_parent(struct task_struct *tsk, int sig)
1386{
1387    struct siginfo info;
1388    unsigned long flags;
1389    struct sighand_struct *psig;
1390    int ret = sig;
1391
1392    BUG_ON(sig == -1);
1393
1394     /* do_notify_parent_cldstop should have been called instead. */
1395     BUG_ON(task_is_stopped_or_traced(tsk));
1396
1397    BUG_ON(!task_ptrace(tsk) &&
1398           (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1399
1400    info.si_signo = sig;
1401    info.si_errno = 0;
1402    /*
1403     * we are under tasklist_lock here so our parent is tied to
1404     * us and cannot exit and release its namespace.
1405     *
1406     * the only it can is to switch its nsproxy with sys_unshare,
1407     * bu uncharing pid namespaces is not allowed, so we'll always
1408     * see relevant namespace
1409     *
1410     * write_lock() currently calls preempt_disable() which is the
1411     * same as rcu_read_lock(), but according to Oleg, this is not
1412     * correct to rely on this
1413     */
1414    rcu_read_lock();
1415    info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1416    info.si_uid = __task_cred(tsk)->uid;
1417    rcu_read_unlock();
1418
1419    info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1420                tsk->signal->utime));
1421    info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1422                tsk->signal->stime));
1423
1424    info.si_status = tsk->exit_code & 0x7f;
1425    if (tsk->exit_code & 0x80)
1426        info.si_code = CLD_DUMPED;
1427    else if (tsk->exit_code & 0x7f)
1428        info.si_code = CLD_KILLED;
1429    else {
1430        info.si_code = CLD_EXITED;
1431        info.si_status = tsk->exit_code >> 8;
1432    }
1433
1434    psig = tsk->parent->sighand;
1435    spin_lock_irqsave(&psig->siglock, flags);
1436    if (!task_ptrace(tsk) && sig == SIGCHLD &&
1437        (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1438         (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1439        /*
1440         * We are exiting and our parent doesn't care. POSIX.1
1441         * defines special semantics for setting SIGCHLD to SIG_IGN
1442         * or setting the SA_NOCLDWAIT flag: we should be reaped
1443         * automatically and not left for our parent's wait4 call.
1444         * Rather than having the parent do it as a magic kind of
1445         * signal handler, we just set this to tell do_exit that we
1446         * can be cleaned up without becoming a zombie. Note that
1447         * we still call __wake_up_parent in this case, because a
1448         * blocked sys_wait4 might now return -ECHILD.
1449         *
1450         * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1451         * is implementation-defined: we do (if you don't want
1452         * it, just use SIG_IGN instead).
1453         */
1454        ret = tsk->exit_signal = -1;
1455        if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1456            sig = -1;
1457    }
1458    if (valid_signal(sig) && sig > 0)
1459        __group_send_sig_info(sig, &info, tsk->parent);
1460    __wake_up_parent(tsk, tsk->parent);
1461    spin_unlock_irqrestore(&psig->siglock, flags);
1462
1463    return ret;
1464}
1465
1466static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1467{
1468    struct siginfo info;
1469    unsigned long flags;
1470    struct task_struct *parent;
1471    struct sighand_struct *sighand;
1472
1473    if (task_ptrace(tsk))
1474        parent = tsk->parent;
1475    else {
1476        tsk = tsk->group_leader;
1477        parent = tsk->real_parent;
1478    }
1479
1480    info.si_signo = SIGCHLD;
1481    info.si_errno = 0;
1482    /*
1483     * see comment in do_notify_parent() abot the following 3 lines
1484     */
1485    rcu_read_lock();
1486    info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1487    info.si_uid = __task_cred(tsk)->uid;
1488    rcu_read_unlock();
1489
1490    info.si_utime = cputime_to_clock_t(tsk->utime);
1491    info.si_stime = cputime_to_clock_t(tsk->stime);
1492
1493     info.si_code = why;
1494     switch (why) {
1495     case CLD_CONTINUED:
1496         info.si_status = SIGCONT;
1497         break;
1498     case CLD_STOPPED:
1499         info.si_status = tsk->signal->group_exit_code & 0x7f;
1500         break;
1501     case CLD_TRAPPED:
1502         info.si_status = tsk->exit_code & 0x7f;
1503         break;
1504     default:
1505         BUG();
1506     }
1507
1508    sighand = parent->sighand;
1509    spin_lock_irqsave(&sighand->siglock, flags);
1510    if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1511        !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1512        __group_send_sig_info(SIGCHLD, &info, parent);
1513    /*
1514     * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1515     */
1516    __wake_up_parent(tsk, parent);
1517    spin_unlock_irqrestore(&sighand->siglock, flags);
1518}
1519
1520static inline int may_ptrace_stop(void)
1521{
1522    if (!likely(task_ptrace(current)))
1523        return 0;
1524    /*
1525     * Are we in the middle of do_coredump?
1526     * If so and our tracer is also part of the coredump stopping
1527     * is a deadlock situation, and pointless because our tracer
1528     * is dead so don't allow us to stop.
1529     * If SIGKILL was already sent before the caller unlocked
1530     * ->siglock we must see ->core_state != NULL. Otherwise it
1531     * is safe to enter schedule().
1532     */
1533    if (unlikely(current->mm->core_state) &&
1534        unlikely(current->mm == current->parent->mm))
1535        return 0;
1536
1537    return 1;
1538}
1539
1540/*
1541 * Return nonzero if there is a SIGKILL that should be waking us up.
1542 * Called with the siglock held.
1543 */
1544static int sigkill_pending(struct task_struct *tsk)
1545{
1546    return sigismember(&tsk->pending.signal, SIGKILL) ||
1547        sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1548}
1549
1550/*
1551 * This must be called with current->sighand->siglock held.
1552 *
1553 * This should be the path for all ptrace stops.
1554 * We always set current->last_siginfo while stopped here.
1555 * That makes it a way to test a stopped process for
1556 * being ptrace-stopped vs being job-control-stopped.
1557 *
1558 * If we actually decide not to stop at all because the tracer
1559 * is gone, we keep current->exit_code unless clear_code.
1560 */
1561static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1562{
1563    if (arch_ptrace_stop_needed(exit_code, info)) {
1564        /*
1565         * The arch code has something special to do before a
1566         * ptrace stop. This is allowed to block, e.g. for faults
1567         * on user stack pages. We can't keep the siglock while
1568         * calling arch_ptrace_stop, so we must release it now.
1569         * To preserve proper semantics, we must do this before
1570         * any signal bookkeeping like checking group_stop_count.
1571         * Meanwhile, a SIGKILL could come in before we retake the
1572         * siglock. That must prevent us from sleeping in TASK_TRACED.
1573         * So after regaining the lock, we must check for SIGKILL.
1574         */
1575        spin_unlock_irq(&current->sighand->siglock);
1576        arch_ptrace_stop(exit_code, info);
1577        spin_lock_irq(&current->sighand->siglock);
1578        if (sigkill_pending(current))
1579            return;
1580    }
1581
1582    /*
1583     * If there is a group stop in progress,
1584     * we must participate in the bookkeeping.
1585     */
1586    if (current->signal->group_stop_count > 0)
1587        --current->signal->group_stop_count;
1588
1589    current->last_siginfo = info;
1590    current->exit_code = exit_code;
1591
1592    /* Let the debugger run. */
1593    __set_current_state(TASK_TRACED);
1594    spin_unlock_irq(&current->sighand->siglock);
1595    read_lock(&tasklist_lock);
1596    if (may_ptrace_stop()) {
1597        do_notify_parent_cldstop(current, CLD_TRAPPED);
1598        /*
1599         * Don't want to allow preemption here, because
1600         * sys_ptrace() needs this task to be inactive.
1601         *
1602         * XXX: implement read_unlock_no_resched().
1603         */
1604        preempt_disable();
1605        read_unlock(&tasklist_lock);
1606        preempt_enable_no_resched();
1607        schedule();
1608    } else {
1609        /*
1610         * By the time we got the lock, our tracer went away.
1611         * Don't drop the lock yet, another tracer may come.
1612         */
1613        __set_current_state(TASK_RUNNING);
1614        if (clear_code)
1615            current->exit_code = 0;
1616        read_unlock(&tasklist_lock);
1617    }
1618
1619    /*
1620     * While in TASK_TRACED, we were considered "frozen enough".
1621     * Now that we woke up, it's crucial if we're supposed to be
1622     * frozen that we freeze now before running anything substantial.
1623     */
1624    try_to_freeze();
1625
1626    /*
1627     * We are back. Now reacquire the siglock before touching
1628     * last_siginfo, so that we are sure to have synchronized with
1629     * any signal-sending on another CPU that wants to examine it.
1630     */
1631    spin_lock_irq(&current->sighand->siglock);
1632    current->last_siginfo = NULL;
1633
1634    /*
1635     * Queued signals ignored us while we were stopped for tracing.
1636     * So check for any that we should take before resuming user mode.
1637     * This sets TIF_SIGPENDING, but never clears it.
1638     */
1639    recalc_sigpending_tsk(current);
1640}
1641
1642void ptrace_notify(int exit_code)
1643{
1644    siginfo_t info;
1645
1646    BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1647
1648    memset(&info, 0, sizeof info);
1649    info.si_signo = SIGTRAP;
1650    info.si_code = exit_code;
1651    info.si_pid = task_pid_vnr(current);
1652    info.si_uid = current_uid();
1653
1654    /* Let the debugger run. */
1655    spin_lock_irq(&current->sighand->siglock);
1656    ptrace_stop(exit_code, 1, &info);
1657    spin_unlock_irq(&current->sighand->siglock);
1658}
1659
1660/*
1661 * This performs the stopping for SIGSTOP and other stop signals.
1662 * We have to stop all threads in the thread group.
1663 * Returns nonzero if we've actually stopped and released the siglock.
1664 * Returns zero if we didn't stop and still hold the siglock.
1665 */
1666static int do_signal_stop(int signr)
1667{
1668    struct signal_struct *sig = current->signal;
1669    int notify;
1670
1671    if (!sig->group_stop_count) {
1672        struct task_struct *t;
1673
1674        if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1675            unlikely(signal_group_exit(sig)))
1676            return 0;
1677        /*
1678         * There is no group stop already in progress.
1679         * We must initiate one now.
1680         */
1681        sig->group_exit_code = signr;
1682
1683        sig->group_stop_count = 1;
1684        for (t = next_thread(current); t != current; t = next_thread(t))
1685            /*
1686             * Setting state to TASK_STOPPED for a group
1687             * stop is always done with the siglock held,
1688             * so this check has no races.
1689             */
1690            if (!(t->flags & PF_EXITING) &&
1691                !task_is_stopped_or_traced(t)) {
1692                sig->group_stop_count++;
1693                signal_wake_up(t, 0);
1694            }
1695    }
1696    /*
1697     * If there are no other threads in the group, or if there is
1698     * a group stop in progress and we are the last to stop, report
1699     * to the parent. When ptraced, every thread reports itself.
1700     */
1701    notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
1702    notify = tracehook_notify_jctl(notify, CLD_STOPPED);
1703    /*
1704     * tracehook_notify_jctl() can drop and reacquire siglock, so
1705     * we keep ->group_stop_count != 0 before the call. If SIGCONT
1706     * or SIGKILL comes in between ->group_stop_count == 0.
1707     */
1708    if (sig->group_stop_count) {
1709        if (!--sig->group_stop_count)
1710            sig->flags = SIGNAL_STOP_STOPPED;
1711        current->exit_code = sig->group_exit_code;
1712        __set_current_state(TASK_STOPPED);
1713    }
1714    spin_unlock_irq(&current->sighand->siglock);
1715
1716    if (notify) {
1717        read_lock(&tasklist_lock);
1718        do_notify_parent_cldstop(current, notify);
1719        read_unlock(&tasklist_lock);
1720    }
1721
1722    /* Now we don't run again until woken by SIGCONT or SIGKILL */
1723    do {
1724        schedule();
1725    } while (try_to_freeze());
1726
1727    tracehook_finish_jctl();
1728    current->exit_code = 0;
1729
1730    return 1;
1731}
1732
1733static int ptrace_signal(int signr, siginfo_t *info,
1734             struct pt_regs *regs, void *cookie)
1735{
1736    if (!task_ptrace(current))
1737        return signr;
1738
1739    ptrace_signal_deliver(regs, cookie);
1740
1741    /* Let the debugger run. */
1742    ptrace_stop(signr, 0, info);
1743
1744    /* We're back. Did the debugger cancel the sig? */
1745    signr = current->exit_code;
1746    if (signr == 0)
1747        return signr;
1748
1749    current->exit_code = 0;
1750
1751    /* Update the siginfo structure if the signal has
1752       changed. If the debugger wanted something
1753       specific in the siginfo structure then it should
1754       have updated *info via PTRACE_SETSIGINFO. */
1755    if (signr != info->si_signo) {
1756        info->si_signo = signr;
1757        info->si_errno = 0;
1758        info->si_code = SI_USER;
1759        info->si_pid = task_pid_vnr(current->parent);
1760        info->si_uid = task_uid(current->parent);
1761    }
1762
1763    /* If the (new) signal is now blocked, requeue it. */
1764    if (sigismember(&current->blocked, signr)) {
1765        specific_send_sig_info(signr, info, current);
1766        signr = 0;
1767    }
1768
1769    return signr;
1770}
1771
1772int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1773              struct pt_regs *regs, void *cookie)
1774{
1775    struct sighand_struct *sighand = current->sighand;
1776    struct signal_struct *signal = current->signal;
1777    int signr;
1778
1779relock:
1780    /*
1781     * We'll jump back here after any time we were stopped in TASK_STOPPED.
1782     * While in TASK_STOPPED, we were considered "frozen enough".
1783     * Now that we woke up, it's crucial if we're supposed to be
1784     * frozen that we freeze now before running anything substantial.
1785     */
1786    try_to_freeze();
1787
1788    spin_lock_irq(&sighand->siglock);
1789    /*
1790     * Every stopped thread goes here after wakeup. Check to see if
1791     * we should notify the parent, prepare_signal(SIGCONT) encodes
1792     * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1793     */
1794    if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1795        int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1796                ? CLD_CONTINUED : CLD_STOPPED;
1797        signal->flags &= ~SIGNAL_CLD_MASK;
1798
1799        why = tracehook_notify_jctl(why, CLD_CONTINUED);
1800        spin_unlock_irq(&sighand->siglock);
1801
1802        if (why) {
1803            read_lock(&tasklist_lock);
1804            do_notify_parent_cldstop(current->group_leader, why);
1805            read_unlock(&tasklist_lock);
1806        }
1807        goto relock;
1808    }
1809
1810    for (;;) {
1811        struct k_sigaction *ka;
1812
1813        if (unlikely(signal->group_stop_count > 0) &&
1814            do_signal_stop(0))
1815            goto relock;
1816
1817        /*
1818         * Tracing can induce an artifical signal and choose sigaction.
1819         * The return value in @signr determines the default action,
1820         * but @info->si_signo is the signal number we will report.
1821         */
1822        signr = tracehook_get_signal(current, regs, info, return_ka);
1823        if (unlikely(signr < 0))
1824            goto relock;
1825        if (unlikely(signr != 0))
1826            ka = return_ka;
1827        else {
1828            signr = dequeue_signal(current, &current->blocked,
1829                           info);
1830
1831            if (!signr)
1832                break; /* will return 0 */
1833
1834            if (signr != SIGKILL) {
1835                signr = ptrace_signal(signr, info,
1836                              regs, cookie);
1837                if (!signr)
1838                    continue;
1839            }
1840
1841            ka = &sighand->action[signr-1];
1842        }
1843
1844        if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1845            continue;
1846        if (ka->sa.sa_handler != SIG_DFL) {
1847            /* Run the handler. */
1848            *return_ka = *ka;
1849
1850            if (ka->sa.sa_flags & SA_ONESHOT)
1851                ka->sa.sa_handler = SIG_DFL;
1852
1853            break; /* will return non-zero "signr" value */
1854        }
1855
1856        /*
1857         * Now we are doing the default action for this signal.
1858         */
1859        if (sig_kernel_ignore(signr)) /* Default is nothing. */
1860            continue;
1861
1862        /*
1863         * Global init gets no signals it doesn't want.
1864         * Container-init gets no signals it doesn't want from same
1865         * container.
1866         *
1867         * Note that if global/container-init sees a sig_kernel_only()
1868         * signal here, the signal must have been generated internally
1869         * or must have come from an ancestor namespace. In either
1870         * case, the signal cannot be dropped.
1871         */
1872        if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1873                !sig_kernel_only(signr))
1874            continue;
1875
1876        if (sig_kernel_stop(signr)) {
1877            /*
1878             * The default action is to stop all threads in
1879             * the thread group. The job control signals
1880             * do nothing in an orphaned pgrp, but SIGSTOP
1881             * always works. Note that siglock needs to be
1882             * dropped during the call to is_orphaned_pgrp()
1883             * because of lock ordering with tasklist_lock.
1884             * This allows an intervening SIGCONT to be posted.
1885             * We need to check for that and bail out if necessary.
1886             */
1887            if (signr != SIGSTOP) {
1888                spin_unlock_irq(&sighand->siglock);
1889
1890                /* signals can be posted during this window */
1891
1892                if (is_current_pgrp_orphaned())
1893                    goto relock;
1894
1895                spin_lock_irq(&sighand->siglock);
1896            }
1897
1898            if (likely(do_signal_stop(info->si_signo))) {
1899                /* It released the siglock. */
1900                goto relock;
1901            }
1902
1903            /*
1904             * We didn't actually stop, due to a race
1905             * with SIGCONT or something like that.
1906             */
1907            continue;
1908        }
1909
1910        spin_unlock_irq(&sighand->siglock);
1911
1912        /*
1913         * Anything else is fatal, maybe with a core dump.
1914         */
1915        current->flags |= PF_SIGNALED;
1916
1917        if (sig_kernel_coredump(signr)) {
1918            if (print_fatal_signals)
1919                print_fatal_signal(regs, info->si_signo);
1920            /*
1921             * If it was able to dump core, this kills all
1922             * other threads in the group and synchronizes with
1923             * their demise. If we lost the race with another
1924             * thread getting here, it set group_exit_code
1925             * first and our do_group_exit call below will use
1926             * that value and ignore the one we pass it.
1927             */
1928            do_coredump(info->si_signo, info->si_signo, regs);
1929        }
1930
1931        /*
1932         * Death signals, no core dump.
1933         */
1934        do_group_exit(info->si_signo);
1935        /* NOTREACHED */
1936    }
1937    spin_unlock_irq(&sighand->siglock);
1938    return signr;
1939}
1940
1941void exit_signals(struct task_struct *tsk)
1942{
1943    int group_stop = 0;
1944    struct task_struct *t;
1945
1946    if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1947        tsk->flags |= PF_EXITING;
1948        return;
1949    }
1950
1951    spin_lock_irq(&tsk->sighand->siglock);
1952    /*
1953     * From now this task is not visible for group-wide signals,
1954     * see wants_signal(), do_signal_stop().
1955     */
1956    tsk->flags |= PF_EXITING;
1957    if (!signal_pending(tsk))
1958        goto out;
1959
1960    /* It could be that __group_complete_signal() choose us to
1961     * notify about group-wide signal. Another thread should be
1962     * woken now to take the signal since we will not.
1963     */
1964    for (t = tsk; (t = next_thread(t)) != tsk; )
1965        if (!signal_pending(t) && !(t->flags & PF_EXITING))
1966            recalc_sigpending_and_wake(t);
1967
1968    if (unlikely(tsk->signal->group_stop_count) &&
1969            !--tsk->signal->group_stop_count) {
1970        tsk->signal->flags = SIGNAL_STOP_STOPPED;
1971        group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
1972    }
1973out:
1974    spin_unlock_irq(&tsk->sighand->siglock);
1975
1976    if (unlikely(group_stop)) {
1977        read_lock(&tasklist_lock);
1978        do_notify_parent_cldstop(tsk, group_stop);
1979        read_unlock(&tasklist_lock);
1980    }
1981}
1982
1983EXPORT_SYMBOL(recalc_sigpending);
1984EXPORT_SYMBOL_GPL(dequeue_signal);
1985EXPORT_SYMBOL(flush_signals);
1986EXPORT_SYMBOL(force_sig);
1987EXPORT_SYMBOL(send_sig);
1988EXPORT_SYMBOL(send_sig_info);
1989EXPORT_SYMBOL(sigprocmask);
1990EXPORT_SYMBOL(block_all_signals);
1991EXPORT_SYMBOL(unblock_all_signals);
1992
1993
1994/*
1995 * System call entry points.
1996 */
1997
1998SYSCALL_DEFINE0(restart_syscall)
1999{
2000    struct restart_block *restart = &current_thread_info()->restart_block;
2001    return restart->fn(restart);
2002}
2003
2004long do_no_restart_syscall(struct restart_block *param)
2005{
2006    return -EINTR;
2007}
2008
2009/*
2010 * We don't need to get the kernel lock - this is all local to this
2011 * particular thread.. (and that's good, because this is _heavily_
2012 * used by various programs)
2013 */
2014
2015/*
2016 * This is also useful for kernel threads that want to temporarily
2017 * (or permanently) block certain signals.
2018 *
2019 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2020 * interface happily blocks "unblockable" signals like SIGKILL
2021 * and friends.
2022 */
2023int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2024{
2025    int error;
2026
2027    spin_lock_irq(&current->sighand->siglock);
2028    if (oldset)
2029        *oldset = current->blocked;
2030
2031    error = 0;
2032    switch (how) {
2033    case SIG_BLOCK:
2034        sigorsets(&current->blocked, &current->blocked, set);
2035        break;
2036    case SIG_UNBLOCK:
2037        signandsets(&current->blocked, &current->blocked, set);
2038        break;
2039    case SIG_SETMASK:
2040        current->blocked = *set;
2041        break;
2042    default:
2043        error = -EINVAL;
2044    }
2045    recalc_sigpending();
2046    spin_unlock_irq(&current->sighand->siglock);
2047
2048    return error;
2049}
2050
2051SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2052        sigset_t __user *, oset, size_t, sigsetsize)
2053{
2054    int error = -EINVAL;
2055    sigset_t old_set, new_set;
2056
2057    /* XXX: Don't preclude handling different sized sigset_t's. */
2058    if (sigsetsize != sizeof(sigset_t))
2059        goto out;
2060
2061    if (set) {
2062        error = -EFAULT;
2063        if (copy_from_user(&new_set, set, sizeof(*set)))
2064            goto out;
2065        sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2066
2067        error = sigprocmask(how, &new_set, &old_set);
2068        if (error)
2069            goto out;
2070        if (oset)
2071            goto set_old;
2072    } else if (oset) {
2073        spin_lock_irq(&current->sighand->siglock);
2074        old_set = current->blocked;
2075        spin_unlock_irq(&current->sighand->siglock);
2076
2077    set_old:
2078        error = -EFAULT;
2079        if (copy_to_user(oset, &old_set, sizeof(*oset)))
2080            goto out;
2081    }
2082    error = 0;
2083out:
2084    return error;
2085}
2086
2087long do_sigpending(void __user *set, unsigned long sigsetsize)
2088{
2089    long error = -EINVAL;
2090    sigset_t pending;
2091
2092    if (sigsetsize > sizeof(sigset_t))
2093        goto out;
2094
2095    spin_lock_irq(&current->sighand->siglock);
2096    sigorsets(&pending, &current->pending.signal,
2097          &current->signal->shared_pending.signal);
2098    spin_unlock_irq(&current->sighand->siglock);
2099
2100    /* Outside the lock because only this thread touches it. */
2101    sigandsets(&pending, &current->blocked, &pending);
2102
2103    error = -EFAULT;
2104    if (!copy_to_user(set, &pending, sigsetsize))
2105        error = 0;
2106
2107out:
2108    return error;
2109}
2110
2111SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2112{
2113    return do_sigpending(set, sigsetsize);
2114}
2115
2116#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2117
2118int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2119{
2120    int err;
2121
2122    if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2123        return -EFAULT;
2124    if (from->si_code < 0)
2125        return __copy_to_user(to, from, sizeof(siginfo_t))
2126            ? -EFAULT : 0;
2127    /*
2128     * If you change siginfo_t structure, please be sure
2129     * this code is fixed accordingly.
2130     * Please remember to update the signalfd_copyinfo() function
2131     * inside fs/signalfd.c too, in case siginfo_t changes.
2132     * It should never copy any pad contained in the structure
2133     * to avoid security leaks, but must copy the generic
2134     * 3 ints plus the relevant union member.
2135     */
2136    err = __put_user(from->si_signo, &to->si_signo);
2137    err |= __put_user(from->si_errno, &to->si_errno);
2138    err |= __put_user((short)from->si_code, &to->si_code);
2139    switch (from->si_code & __SI_MASK) {
2140    case __SI_KILL:
2141        err |= __put_user(from->si_pid, &to->si_pid);
2142        err |= __put_user(from->si_uid, &to->si_uid);
2143        break;
2144    case __SI_TIMER:
2145         err |= __put_user(from->si_tid, &to->si_tid);
2146         err |= __put_user(from->si_overrun, &to->si_overrun);
2147         err |= __put_user(from->si_ptr, &to->si_ptr);
2148        break;
2149    case __SI_POLL:
2150        err |= __put_user(from->si_band, &to->si_band);
2151        err |= __put_user(from->si_fd, &to->si_fd);
2152        break;
2153    case __SI_FAULT:
2154        err |= __put_user(from->si_addr, &to->si_addr);
2155#ifdef __ARCH_SI_TRAPNO
2156        err |= __put_user(from->si_trapno, &to->si_trapno);
2157#endif
2158        break;
2159    case __SI_CHLD:
2160        err |= __put_user(from->si_pid, &to->si_pid);
2161        err |= __put_user(from->si_uid, &to->si_uid);
2162        err |= __put_user(from->si_status, &to->si_status);
2163        err |= __put_user(from->si_utime, &to->si_utime);
2164        err |= __put_user(from->si_stime, &to->si_stime);
2165        break;
2166    case __SI_RT: /* This is not generated by the kernel as of now. */
2167    case __SI_MESGQ: /* But this is */
2168        err |= __put_user(from->si_pid, &to->si_pid);
2169        err |= __put_user(from->si_uid, &to->si_uid);
2170        err |= __put_user(from->si_ptr, &to->si_ptr);
2171        break;
2172    default: /* this is just in case for now ... */
2173        err |= __put_user(from->si_pid, &to->si_pid);
2174        err |= __put_user(from->si_uid, &to->si_uid);
2175        break;
2176    }
2177    return err;
2178}
2179
2180#endif
2181
2182SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2183        siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2184        size_t, sigsetsize)
2185{
2186    int ret, sig;
2187    sigset_t these;
2188    struct timespec ts;
2189    siginfo_t info;
2190    long timeout = 0;
2191
2192    /* XXX: Don't preclude handling different sized sigset_t's. */
2193    if (sigsetsize != sizeof(sigset_t))
2194        return -EINVAL;
2195
2196    if (copy_from_user(&these, uthese, sizeof(these)))
2197        return -EFAULT;
2198        
2199    /*
2200     * Invert the set of allowed signals to get those we
2201     * want to block.
2202     */
2203    sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2204    signotset(&these);
2205
2206    if (uts) {
2207        if (copy_from_user(&ts, uts, sizeof(ts)))
2208            return -EFAULT;
2209        if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2210            || ts.tv_sec < 0)
2211            return -EINVAL;
2212    }
2213
2214    spin_lock_irq(&current->sighand->siglock);
2215    sig = dequeue_signal(current, &these, &info);
2216    if (!sig) {
2217        timeout = MAX_SCHEDULE_TIMEOUT;
2218        if (uts)
2219            timeout = (timespec_to_jiffies(&ts)
2220                   + (ts.tv_sec || ts.tv_nsec));
2221
2222        if (timeout) {
2223            /* None ready -- temporarily unblock those we're
2224             * interested while we are sleeping in so that we'll
2225             * be awakened when they arrive. */
2226            current->real_blocked = current->blocked;
2227            sigandsets(&current->blocked, &current->blocked, &these);
2228            recalc_sigpending();
2229            spin_unlock_irq(&current->sighand->siglock);
2230
2231            timeout = schedule_timeout_interruptible(timeout);
2232
2233            spin_lock_irq(&current->sighand->siglock);
2234            sig = dequeue_signal(current, &these, &info);
2235            current->blocked = current->real_blocked;
2236            siginitset(&current->real_blocked, 0);
2237            recalc_sigpending();
2238        }
2239    }
2240    spin_unlock_irq(&current->sighand->siglock);
2241
2242    if (sig) {
2243        ret = sig;
2244        if (uinfo) {
2245            if (copy_siginfo_to_user(uinfo, &info))
2246                ret = -EFAULT;
2247        }
2248    } else {
2249        ret = -EAGAIN;
2250        if (timeout)
2251            ret = -EINTR;
2252    }
2253
2254    return ret;
2255}
2256
2257SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2258{
2259    struct siginfo info;
2260
2261    info.si_signo = sig;
2262    info.si_errno = 0;
2263    info.si_code = SI_USER;
2264    info.si_pid = task_tgid_vnr(current);
2265    info.si_uid = current_uid();
2266
2267    return kill_something_info(sig, &info, pid);
2268}
2269
2270static int
2271do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2272{
2273    struct task_struct *p;
2274    int error = -ESRCH;
2275
2276    rcu_read_lock();
2277    p = find_task_by_vpid(pid);
2278    if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2279        error = check_kill_permission(sig, info, p);
2280        /*
2281         * The null signal is a permissions and process existence
2282         * probe. No signal is actually delivered.
2283         */
2284        if (!error && sig) {
2285            error = do_send_sig_info(sig, info, p, false);
2286            /*
2287             * If lock_task_sighand() failed we pretend the task
2288             * dies after receiving the signal. The window is tiny,
2289             * and the signal is private anyway.
2290             */
2291            if (unlikely(error == -ESRCH))
2292                error = 0;
2293        }
2294    }
2295    rcu_read_unlock();
2296
2297    return error;
2298}
2299
2300static int do_tkill(pid_t tgid, pid_t pid, int sig)
2301{
2302    struct siginfo info;
2303
2304    info.si_signo = sig;
2305    info.si_errno = 0;
2306    info.si_code = SI_TKILL;
2307    info.si_pid = task_tgid_vnr(current);
2308    info.si_uid = current_uid();
2309
2310    return do_send_specific(tgid, pid, sig, &info);
2311}
2312
2313/**
2314 * sys_tgkill - send signal to one specific thread
2315 * @tgid: the thread group ID of the thread
2316 * @pid: the PID of the thread
2317 * @sig: signal to be sent
2318 *
2319 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2320 * exists but it's not belonging to the target process anymore. This
2321 * method solves the problem of threads exiting and PIDs getting reused.
2322 */
2323SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2324{
2325    /* This is only valid for single tasks */
2326    if (pid <= 0 || tgid <= 0)
2327        return -EINVAL;
2328
2329    return do_tkill(tgid, pid, sig);
2330}
2331
2332/*
2333 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2334 */
2335SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2336{
2337    /* This is only valid for single tasks */
2338    if (pid <= 0)
2339        return -EINVAL;
2340
2341    return do_tkill(0, pid, sig);
2342}
2343
2344SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2345        siginfo_t __user *, uinfo)
2346{
2347    siginfo_t info;
2348
2349    if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2350        return -EFAULT;
2351
2352    /* Not even root can pretend to send signals from the kernel.
2353       Nor can they impersonate a kill(), which adds source info. */
2354    if (info.si_code >= 0)
2355        return -EPERM;
2356    info.si_signo = sig;
2357
2358    /* POSIX.1b doesn't mention process groups. */
2359    return kill_proc_info(sig, &info, pid);
2360}
2361
2362long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2363{
2364    /* This is only valid for single tasks */
2365    if (pid <= 0 || tgid <= 0)
2366        return -EINVAL;
2367
2368    /* Not even root can pretend to send signals from the kernel.
2369       Nor can they impersonate a kill(), which adds source info. */
2370    if (info->si_code >= 0)
2371        return -EPERM;
2372    info->si_signo = sig;
2373
2374    return do_send_specific(tgid, pid, sig, info);
2375}
2376
2377SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2378        siginfo_t __user *, uinfo)
2379{
2380    siginfo_t info;
2381
2382    if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2383        return -EFAULT;
2384
2385    return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2386}
2387
2388int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2389{
2390    struct task_struct *t = current;
2391    struct k_sigaction *k;
2392    sigset_t mask;
2393
2394    if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2395        return -EINVAL;
2396
2397    k = &t->sighand->action[sig-1];
2398
2399    spin_lock_irq(&current->sighand->siglock);
2400    if (oact)
2401        *oact = *k;
2402
2403    if (act) {
2404        sigdelsetmask(&act->sa.sa_mask,
2405                  sigmask(SIGKILL) | sigmask(SIGSTOP));
2406        *k = *act;
2407        /*
2408         * POSIX 3.3.1.3:
2409         * "Setting a signal action to SIG_IGN for a signal that is
2410         * pending shall cause the pending signal to be discarded,
2411         * whether or not it is blocked."
2412         *
2413         * "Setting a signal action to SIG_DFL for a signal that is
2414         * pending and whose default action is to ignore the signal
2415         * (for example, SIGCHLD), shall cause the pending signal to
2416         * be discarded, whether or not it is blocked"
2417         */
2418        if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2419            sigemptyset(&mask);
2420            sigaddset(&mask, sig);
2421            rm_from_queue_full(&mask, &t->signal->shared_pending);
2422            do {
2423                rm_from_queue_full(&mask, &t->pending);
2424                t = next_thread(t);
2425            } while (t != current);
2426        }
2427    }
2428
2429    spin_unlock_irq(&current->sighand->siglock);
2430    return 0;
2431}
2432
2433int
2434do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2435{
2436    stack_t oss;
2437    int error;
2438
2439    oss.ss_sp = (void __user *) current->sas_ss_sp;
2440    oss.ss_size = current->sas_ss_size;
2441    oss.ss_flags = sas_ss_flags(sp);
2442
2443    if (uss) {
2444        void __user *ss_sp;
2445        size_t ss_size;
2446        int ss_flags;
2447
2448        error = -EFAULT;
2449        if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2450            goto out;
2451        error = __get_user(ss_sp, &uss->ss_sp) |
2452            __get_user(ss_flags, &uss->ss_flags) |
2453            __get_user(ss_size, &uss->ss_size);
2454        if (error)
2455            goto out;
2456
2457        error = -EPERM;
2458        if (on_sig_stack(sp))
2459            goto out;
2460
2461        error = -EINVAL;
2462        /*
2463         *
2464         * Note - this code used to test ss_flags incorrectly
2465         * old code may have been written using ss_flags==0
2466         * to mean ss_flags==SS_ONSTACK (as this was the only
2467         * way that worked) - this fix preserves that older
2468         * mechanism
2469         */
2470        if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2471            goto out;
2472
2473        if (ss_flags == SS_DISABLE) {
2474            ss_size = 0;
2475            ss_sp = NULL;
2476        } else {
2477            error = -ENOMEM;
2478            if (ss_size < MINSIGSTKSZ)
2479                goto out;
2480        }
2481
2482        current->sas_ss_sp = (unsigned long) ss_sp;
2483        current->sas_ss_size = ss_size;
2484    }
2485
2486    error = 0;
2487    if (uoss) {
2488        error = -EFAULT;
2489        if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2490            goto out;
2491        error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2492            __put_user(oss.ss_size, &uoss->ss_size) |
2493            __put_user(oss.ss_flags, &uoss->ss_flags);
2494    }
2495
2496out:
2497    return error;
2498}
2499
2500#ifdef __ARCH_WANT_SYS_SIGPENDING
2501
2502SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2503{
2504    return do_sigpending(set, sizeof(*set));
2505}
2506
2507#endif
2508
2509#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2510/* Some platforms have their own version with special arguments others
2511   support only sys_rt_sigprocmask. */
2512
2513SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2514        old_sigset_t __user *, oset)
2515{
2516    int error;
2517    old_sigset_t old_set, new_set;
2518
2519    if (set) {
2520        error = -EFAULT;
2521        if (copy_from_user(&new_set, set, sizeof(*set)))
2522            goto out;
2523        new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2524
2525        spin_lock_irq(&current->sighand->siglock);
2526        old_set = current->blocked.sig[0];
2527
2528        error = 0;
2529        switch (how) {
2530        default:
2531            error = -EINVAL;
2532            break;
2533        case SIG_BLOCK:
2534            sigaddsetmask(&current->blocked, new_set);
2535            break;
2536        case SIG_UNBLOCK:
2537            sigdelsetmask(&current->blocked, new_set);
2538            break;
2539        case SIG_SETMASK:
2540            current->blocked.sig[0] = new_set;
2541            break;
2542        }
2543
2544        recalc_sigpending();
2545        spin_unlock_irq(&current->sighand->siglock);
2546        if (error)
2547            goto out;
2548        if (oset)
2549            goto set_old;
2550    } else if (oset) {
2551        old_set = current->blocked.sig[0];
2552    set_old:
2553        error = -EFAULT;
2554        if (copy_to_user(oset, &old_set, sizeof(*oset)))
2555            goto out;
2556    }
2557    error = 0;
2558out:
2559    return error;
2560}
2561#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2562
2563#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2564SYSCALL_DEFINE4(rt_sigaction, int, sig,
2565        const struct sigaction __user *, act,
2566        struct sigaction __user *, oact,
2567        size_t, sigsetsize)
2568{
2569    struct k_sigaction new_sa, old_sa;
2570    int ret = -EINVAL;
2571
2572    /* XXX: Don't preclude handling different sized sigset_t's. */
2573    if (sigsetsize != sizeof(sigset_t))
2574        goto out;
2575
2576    if (act) {
2577        if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2578            return -EFAULT;
2579    }
2580
2581    ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2582
2583    if (!ret && oact) {
2584        if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2585            return -EFAULT;
2586    }
2587out:
2588    return ret;
2589}
2590#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2591
2592#ifdef __ARCH_WANT_SYS_SGETMASK
2593
2594/*
2595 * For backwards compatibility. Functionality superseded by sigprocmask.
2596 */
2597SYSCALL_DEFINE0(sgetmask)
2598{
2599    /* SMP safe */
2600    return current->blocked.sig[0];
2601}
2602
2603SYSCALL_DEFINE1(ssetmask, int, newmask)
2604{
2605    int old;
2606
2607    spin_lock_irq(&current->sighand->siglock);
2608    old = current->blocked.sig[0];
2609
2610    siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2611                          sigmask(SIGSTOP)));
2612    recalc_sigpending();
2613    spin_unlock_irq(&current->sighand->siglock);
2614
2615    return old;
2616}
2617#endif /* __ARCH_WANT_SGETMASK */
2618
2619#ifdef __ARCH_WANT_SYS_SIGNAL
2620/*
2621 * For backwards compatibility. Functionality superseded by sigaction.
2622 */
2623SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2624{
2625    struct k_sigaction new_sa, old_sa;
2626    int ret;
2627
2628    new_sa.sa.sa_handler = handler;
2629    new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2630    sigemptyset(&new_sa.sa.sa_mask);
2631
2632    ret = do_sigaction(sig, &new_sa, &old_sa);
2633
2634    return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2635}
2636#endif /* __ARCH_WANT_SYS_SIGNAL */
2637
2638#ifdef __ARCH_WANT_SYS_PAUSE
2639
2640SYSCALL_DEFINE0(pause)
2641{
2642    current->state = TASK_INTERRUPTIBLE;
2643    schedule();
2644    return -ERESTARTNOHAND;
2645}
2646
2647#endif
2648
2649#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2650SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2651{
2652    sigset_t newset;
2653
2654    /* XXX: Don't preclude handling different sized sigset_t's. */
2655    if (sigsetsize != sizeof(sigset_t))
2656        return -EINVAL;
2657
2658    if (copy_from_user(&newset, unewset, sizeof(newset)))
2659        return -EFAULT;
2660    sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2661
2662    spin_lock_irq(&current->sighand->siglock);
2663    current->saved_sigmask = current->blocked;
2664    current->blocked = newset;
2665    recalc_sigpending();
2666    spin_unlock_irq(&current->sighand->siglock);
2667
2668    current->state = TASK_INTERRUPTIBLE;
2669    schedule();
2670    set_restore_sigmask();
2671    return -ERESTARTNOHAND;
2672}
2673#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2674
2675__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2676{
2677    return NULL;
2678}
2679
2680void __init signals_init(void)
2681{
2682    sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2683}
2684

Archive Download this file



interactive