ipc/sem.c

1/*
2 * linux/ipc/sem.c
3 * Copyright (C) 1992 Krishna Balasubramanian
4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
5 *
6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
7 *
8 * SMP-threaded, sysctl's added
9 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
10 * Enforced range limit on SEM_UNDO
11 * (c) 2001 Red Hat Inc
12 * Lockless wakeup
13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
14 * Further wakeup optimizations, documentation
15 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Implementation notes: (May 2010)
25 * This file implements System V semaphores.
26 *
27 * User space visible behavior:
28 * - FIFO ordering for semop() operations (just FIFO, not starvation
29 * protection)
30 * - multiple semaphore operations that alter the same semaphore in
31 * one semop() are handled.
32 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
33 * SETALL calls.
34 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
35 * - undo adjustments at process exit are limited to 0..SEMVMX.
36 * - namespaces are supported.
37 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
38 * to /proc/sys/kernel/sem.
39 * - statistics about the usage are reported in /proc/sysvipc/sem.
40 *
41 * Internals:
42 * - scalability:
43 * - all global variables are read-mostly.
44 * - semop() calls and semctl(RMID) are synchronized by RCU.
45 * - most operations do write operations (actually: spin_lock calls) to
46 * the per-semaphore array structure.
47 * Thus: Perfect SMP scaling between independent semaphore arrays.
48 * If multiple semaphores in one array are used, then cache line
49 * thrashing on the semaphore array spinlock will limit the scaling.
50 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
51 * count_semzcnt()
52 * - the task that performs a successful semop() scans the list of all
53 * sleeping tasks and completes any pending operations that can be fulfilled.
54 * Semaphores are actively given to waiting tasks (necessary for FIFO).
55 * (see update_queue())
56 * - To improve the scalability, the actual wake-up calls are performed after
57 * dropping all locks. (see wake_up_sem_queue_prepare(),
58 * wake_up_sem_queue_do())
59 * - All work is done by the waker, the woken up task does not have to do
60 * anything - not even acquiring a lock or dropping a refcount.
61 * - A woken up task may not even touch the semaphore array anymore, it may
62 * have been destroyed already by a semctl(RMID).
63 * - The synchronizations between wake-ups due to a timeout/signal and a
64 * wake-up due to a completed semaphore operation is achieved by using an
65 * intermediate state (IN_WAKEUP).
66 * - UNDO values are stored in an array (one per process and per
67 * semaphore array, lazily allocated). For backwards compatibility, multiple
68 * modes for the UNDO variables are supported (per process, per thread)
69 * (see copy_semundo, CLONE_SYSVSEM)
70 * - There are two lists of the pending operations: a per-array list
71 * and per-semaphore list (stored in the array). This allows to achieve FIFO
72 * ordering without always scanning all pending operations.
73 * The worst-case behavior is nevertheless O(N^2) for N wakeups.
74 */
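
As a quick orientation, here is a minimal userspace sketch of the behavior described above (illustrative only, not part of this file; assumes Linux with SysV IPC and omits error handling): it creates a set, performs an atomic two-operation semop() with SEM_UNDO, and removes the set.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Illustrative userspace demo, not kernel code. On Linux the caller
 * must define union semun itself (see semctl(2)). */
union semun { int val; struct semid_ds *buf; unsigned short *array; };

int main(void)
{
    int id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
    union semun arg = { .val = 1 };

    semctl(id, 0, SETVAL, arg);    /* sem 0 := 1; updates sem_ctime */
    semctl(id, 1, SETVAL, arg);    /* sem 1 := 1 */

    /* Two operations in one semop() are applied atomically. */
    struct sembuf ops[2] = {
        { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO },
        { .sem_num = 1, .sem_op = -1, .sem_flg = SEM_UNDO },
    };
    if (semop(id, ops, 2) == 0)
        printf("acquired both semaphores\n");

    semctl(id, 0, IPC_RMID, arg);  /* sleepers would get -EIDRM */
    return 0;
}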
75
76#include <linux/slab.h>
77#include <linux/spinlock.h>
78#include <linux/init.h>
79#include <linux/proc_fs.h>
80#include <linux/time.h>
81#include <linux/security.h>
82#include <linux/syscalls.h>
83#include <linux/audit.h>
84#include <linux/capability.h>
85#include <linux/seq_file.h>
86#include <linux/rwsem.h>
87#include <linux/nsproxy.h>
88#include <linux/ipc_namespace.h>
89
90#include <asm/uaccess.h>
91#include "util.h"
92
93/* One semaphore structure for each semaphore in the system. */
94struct sem {
95    int semval; /* current value */
96    int sempid; /* pid of last operation */
97    spinlock_t lock; /* spinlock for fine-grained semtimedop */
98    struct list_head pending_alter; /* pending single-sop operations */
99                    /* that alter the semaphore */
100    struct list_head pending_const; /* pending single-sop operations */
101                    /* that do not alter the semaphore*/
102    time_t sem_otime; /* candidate for sem_otime */
103} ____cacheline_aligned_in_smp;
104
105/* One queue for each sleeping process in the system. */
106struct sem_queue {
107    struct list_head list; /* queue of pending operations */
108    struct task_struct *sleeper; /* this process */
109    struct sem_undo *undo; /* undo structure */
110    int pid; /* process id of requesting process */
111    int status; /* completion status of operation */
112    struct sembuf *sops; /* array of pending operations */
113    int nsops; /* number of operations */
114    int alter; /* does *sops alter the array? */
115};
116
117/* Each task has a list of undo requests. They are executed automatically
118 * when the process exits.
119 */
120struct sem_undo {
121    struct list_head list_proc; /* per-process list: *
122                         * all undos from one process
123                         * rcu protected */
124    struct rcu_head rcu; /* rcu struct for sem_undo */
125    struct sem_undo_list *ulp; /* back ptr to sem_undo_list */
126    struct list_head list_id; /* per semaphore array list:
127                         * all undos for one array */
128    int semid; /* semaphore set identifier */
129    short *semadj; /* array of adjustments */
130                        /* one per semaphore */
131};
132
133/* sem_undo_list controls shared access to the list of sem_undo structures
134 * that may be shared by all tasks in a CLONE_SYSVSEM task group.
135 */
136struct sem_undo_list {
137    atomic_t refcnt;
138    spinlock_t lock;
139    struct list_head list_proc;
140};
141
142
143#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
144
145#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
146
147static int newary(struct ipc_namespace *, struct ipc_params *);
148static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
149#ifdef CONFIG_PROC_FS
150static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
151#endif
152
153#define SEMMSL_FAST 256 /* 512 bytes on stack */
154#define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
155
156/*
157 * Locking:
158 * sem_undo.id_next,
159 * sem_array.complex_count,
160 * sem_array.pending{_alter,_const},
161 * sem_array.sem_undo: global sem_lock() for read/write
162 * sem_undo.proc_next: only "current" is allowed to read/write that field.
163 *
164 * sem_array.sem_base[i].pending_{const,alter}:
165 * global or semaphore sem_lock() for read/write
166 */
167
168#define sc_semmsl sem_ctls[0]
169#define sc_semmns sem_ctls[1]
170#define sc_semopm sem_ctls[2]
171#define sc_semmni sem_ctls[3]
172
173void sem_init_ns(struct ipc_namespace *ns)
174{
175    ns->sc_semmsl = SEMMSL;
176    ns->sc_semmns = SEMMNS;
177    ns->sc_semopm = SEMOPM;
178    ns->sc_semmni = SEMMNI;
179    ns->used_sems = 0;
180    ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
181}
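
The four limits initialized above are exported, in this order, through /proc/sys/kernel/sem. A hedged userspace sketch reading them back (illustrative only, minimal error handling):

#include <stdio.h>

/* Illustrative demo, not kernel code: /proc/sys/kernel/sem holds
 * SEMMSL SEMMNS SEMOPM SEMMNI, matching sem_ctls[0..3] above. */
int main(void)
{
    int msl, mns, opm, mni;
    FILE *f = fopen("/proc/sys/kernel/sem", "r");

    if (f && fscanf(f, "%d %d %d %d", &msl, &mns, &opm, &mni) == 4)
        printf("SEMMSL=%d SEMMNS=%d SEMOPM=%d SEMMNI=%d\n",
               msl, mns, opm, mni);
    if (f)
        fclose(f);
    return 0;
}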
182
183#ifdef CONFIG_IPC_NS
184void sem_exit_ns(struct ipc_namespace *ns)
185{
186    free_ipcs(ns, &sem_ids(ns), freeary);
187    idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
188}
189#endif
190
191void __init sem_init(void)
192{
193    sem_init_ns(&init_ipc_ns);
194    ipc_init_proc_interface("sysvipc/sem",
195                " key semid perms nsems uid gid cuid cgid otime ctime\n",
196                IPC_SEM_IDS, sysvipc_sem_proc_show);
197}
198
199/**
200 * unmerge_queues - unmerge queues, if possible.
201 * @sma: semaphore array
202 *
203 * The function unmerges the wait queues if complex_count is 0.
204 * It must be called prior to dropping the global semaphore array lock.
205 */
206static void unmerge_queues(struct sem_array *sma)
207{
208    struct sem_queue *q, *tq;
209
210    /* complex operations still around? */
211    if (sma->complex_count)
212        return;
213    /*
214     * We will switch back to simple mode.
215     * Move all pending operation back into the per-semaphore
216     * queues.
217     */
218    list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
219        struct sem *curr;
220        curr = &sma->sem_base[q->sops[0].sem_num];
221
222        list_add_tail(&q->list, &curr->pending_alter);
223    }
224    INIT_LIST_HEAD(&sma->pending_alter);
225}
226
227/**
228 * merge_queues - merge single semop queues into global queue
229 * @sma: semaphore array
230 *
231 * This function merges all per-semaphore queues into the global queue.
232 * It is necessary to achieve FIFO ordering for the pending single-sop
233 * operations when a multi-semop operation must sleep.
234 * Only the alter operations must be moved, the const operations can stay.
235 */
236static void merge_queues(struct sem_array *sma)
237{
238    int i;
239    for (i = 0; i < sma->sem_nsems; i++) {
240        struct sem *sem = sma->sem_base + i;
241
242        list_splice_init(&sem->pending_alter, &sma->pending_alter);
243    }
244}
245
246static void sem_rcu_free(struct rcu_head *head)
247{
248    struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
249    struct sem_array *sma = ipc_rcu_to_struct(p);
250
251    security_sem_free(sma);
252    ipc_rcu_free(head);
253}
254
255/*
256 * Wait until all currently ongoing simple ops have completed.
257 * Caller must own sem_perm.lock.
258 * New simple ops cannot start, because simple ops first check that
259 * a) sem_perm.lock is free and
260 * b) complex_count is 0.
261 */
262static void sem_wait_array(struct sem_array *sma)
263{
264    int i;
265    struct sem *sem;
266
267    if (sma->complex_count) {
268        /* The thread that increased sma->complex_count waited on
269         * all sem->lock locks. Thus we don't need to wait again.
270         */
271        return;
272    }
273
274    for (i = 0; i < sma->sem_nsems; i++) {
275        sem = sma->sem_base + i;
276        spin_unlock_wait(&sem->lock);
277    }
278}
279
280/*
281 * If the request contains only one semaphore operation, and there are
282 * no complex transactions pending, lock only the semaphore involved.
283 * Otherwise, lock the entire semaphore array, since we either have
284 * multiple semaphores in our own semops, or we need to look at
285 * semaphores from other pending complex operations.
286 */
287static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
288                  int nsops)
289{
290    struct sem *sem;
291
292    if (nsops != 1) {
293        /* Complex operation - acquire a full lock */
294        ipc_lock_object(&sma->sem_perm);
295
296        /* And wait until all simple ops that are processed
297         * right now have dropped their locks.
298         */
299        sem_wait_array(sma);
300        return -1;
301    }
302
303    /*
304     * Only one semaphore affected - try to optimize locking.
305     * The rules are:
306     * - optimized locking is possible if no complex operation
307     * is either enqueued or processed right now.
308     * - The test for enqueued complex ops is simple:
309     * sma->complex_count != 0
310     * - Testing for complex ops that are processed right now is
311     * a bit more difficult. Complex ops acquire the full lock
312     * and first wait until all running simple ops have completed.
313     * (see above)
314     * Thus: If we own a simple lock and the global lock is free
315     * and complex_count is now 0, then it will stay 0 and
316     * thus just locking sem->lock is sufficient.
317     */
318    sem = sma->sem_base + sops->sem_num;
319
320    if (sma->complex_count == 0) {
321        /*
322         * It appears that no complex operation is around.
323         * Acquire the per-semaphore lock.
324         */
325        spin_lock(&sem->lock);
326
327        /* Then check that the global lock is free */
328        if (!spin_is_locked(&sma->sem_perm.lock)) {
329            /* spin_is_locked() is not a memory barrier */
330            smp_mb();
331
332            /* Now repeat the test of complex_count:
333             * It can't change anymore until we drop sem->lock.
334             * Thus: if it is now 0, then it will stay 0.
335             */
336            if (sma->complex_count == 0) {
337                /* fast path successful! */
338                return sops->sem_num;
339            }
340        }
341        spin_unlock(&sem->lock);
342    }
343
344    /* slow path: acquire the full lock */
345    ipc_lock_object(&sma->sem_perm);
346
347    if (sma->complex_count == 0) {
348        /* False alarm:
349         * There is no complex operation, thus we can switch
350         * back to the fast path.
351         */
352        spin_lock(&sem->lock);
353        ipc_unlock_object(&sma->sem_perm);
354        return sops->sem_num;
355    } else {
356        /* Not a false alarm, thus complete the sequence for a
357         * full lock.
358         */
359        sem_wait_array(sma);
360        return -1;
361    }
362}
363
364static inline void sem_unlock(struct sem_array *sma, int locknum)
365{
366    if (locknum == -1) {
367        unmerge_queues(sma);
368        ipc_unlock_object(&sma->sem_perm);
369    } else {
370        struct sem *sem = sma->sem_base + locknum;
371        spin_unlock(&sem->lock);
372    }
373}
374
375/*
376 * sem_lock_(check_) routines are called in the paths where the rwsem
377 * is not held.
378 *
379 * The caller holds the RCU read lock.
380 */
381static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
382            int id, struct sembuf *sops, int nsops, int *locknum)
383{
384    struct kern_ipc_perm *ipcp;
385    struct sem_array *sma;
386
387    ipcp = ipc_obtain_object(&sem_ids(ns), id);
388    if (IS_ERR(ipcp))
389        return ERR_CAST(ipcp);
390
391    sma = container_of(ipcp, struct sem_array, sem_perm);
392    *locknum = sem_lock(sma, sops, nsops);
393
394    /* ipc_rmid() may have already freed the ID while sem_lock
395     * was spinning: verify that the structure is still valid
396     */
397    if (ipc_valid_object(ipcp))
398        return container_of(ipcp, struct sem_array, sem_perm);
399
400    sem_unlock(sma, *locknum);
401    return ERR_PTR(-EINVAL);
402}
403
404static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
405{
406    struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
407
408    if (IS_ERR(ipcp))
409        return ERR_CAST(ipcp);
410
411    return container_of(ipcp, struct sem_array, sem_perm);
412}
413
414static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
415                            int id)
416{
417    struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
418
419    if (IS_ERR(ipcp))
420        return ERR_CAST(ipcp);
421
422    return container_of(ipcp, struct sem_array, sem_perm);
423}
424
425static inline void sem_lock_and_putref(struct sem_array *sma)
426{
427    sem_lock(sma, NULL, -1);
428    ipc_rcu_putref(sma, ipc_rcu_free);
429}
430
431static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
432{
433    ipc_rmid(&sem_ids(ns), &s->sem_perm);
434}
435
436/*
437 * Lockless wakeup algorithm:
438 * Without the check/retry algorithm a lockless wakeup is possible:
439 * - queue.status is initialized to -EINTR before blocking.
440 * - wakeup is performed by
441 * * unlinking the queue entry from the pending list
442 * * setting queue.status to IN_WAKEUP
443 * This is the notification for the blocked thread that a
444 * result value is imminent.
445 * * call wake_up_process
446 * * set queue.status to the final value.
447 * - the previously blocked thread checks queue.status:
448 * * if it's IN_WAKEUP, then it must wait until the value changes
449 * * if it's not -EINTR, then the operation was completed by
450 * update_queue. semtimedop can return queue.status without
451 * performing any operation on the sem array.
452 * * otherwise it must acquire the spinlock and check what's up.
453 *
454 * The two-stage algorithm is necessary to protect against the following
455 * races:
456 * - if queue.status is set after wake_up_process, then the woken up idle
457 * thread could race forward and try (and fail) to acquire sma->lock
458 * before update_queue had a chance to set queue.status
459 * - if queue.status is written before wake_up_process and if the
460 * blocked process is woken up by a signal between writing
461 * queue.status and the wake_up_process, then the woken up
462 * process could return from semtimedop and die by calling
463 * sys_exit before wake_up_process is called. Then wake_up_process
464 * will oops, because the task structure is already invalid.
465 * (yes, this happened on s390 with sysv msg).
466 *
467 */
468#define IN_WAKEUP 1
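
For reference, a simplified sketch of the sleeper's side of this handshake (a hypothetical reduction for illustration; the real logic lives in get_queue_result() and the sleep loop of semtimedop() further below):

/* Sketch only: how a sleeper consumes the two-stage wakeup protocol. */
static int sleeper_side(struct sem_queue *q)
{
    int error = q->status;

    /* IN_WAKEUP: a waker already unlinked us and the final status is
     * imminent; spin briefly (the waker runs with preemption disabled). */
    while (error == IN_WAKEUP) {
        cpu_relax();
        error = q->status;
    }

    if (error != -EINTR)
        return error;    /* completed by update_queue(); no locks needed */

    /* Otherwise acquire the lock and find out what happened. */
    return -EINTR;
}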
469
470/**
471 * newary - Create a new semaphore set
472 * @ns: namespace
473 * @params: ptr to the structure that contains key, semflg and nsems
474 *
475 * Called with sem_ids.rwsem held (as a writer)
476 */
477static int newary(struct ipc_namespace *ns, struct ipc_params *params)
478{
479    int id;
480    int retval;
481    struct sem_array *sma;
482    int size;
483    key_t key = params->key;
484    int nsems = params->u.nsems;
485    int semflg = params->flg;
486    int i;
487
488    if (!nsems)
489        return -EINVAL;
490    if (ns->used_sems + nsems > ns->sc_semmns)
491        return -ENOSPC;
492
493    size = sizeof(*sma) + nsems * sizeof(struct sem);
494    sma = ipc_rcu_alloc(size);
495    if (!sma)
496        return -ENOMEM;
497
498    memset(sma, 0, size);
499
500    sma->sem_perm.mode = (semflg & S_IRWXUGO);
501    sma->sem_perm.key = key;
502
503    sma->sem_perm.security = NULL;
504    retval = security_sem_alloc(sma);
505    if (retval) {
506        ipc_rcu_putref(sma, ipc_rcu_free);
507        return retval;
508    }
509
510    id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
511    if (id < 0) {
512        ipc_rcu_putref(sma, sem_rcu_free);
513        return id;
514    }
515    ns->used_sems += nsems;
516
517    sma->sem_base = (struct sem *) &sma[1];
518
519    for (i = 0; i < nsems; i++) {
520        INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
521        INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
522        spin_lock_init(&sma->sem_base[i].lock);
523    }
524
525    sma->complex_count = 0;
526    INIT_LIST_HEAD(&sma->pending_alter);
527    INIT_LIST_HEAD(&sma->pending_const);
528    INIT_LIST_HEAD(&sma->list_id);
529    sma->sem_nsems = nsems;
530    sma->sem_ctime = get_seconds();
531    sem_unlock(sma, -1);
532    rcu_read_unlock();
533
534    return sma->sem_perm.id;
535}
536
537
538/*
539 * Called with sem_ids.rwsem and ipcp locked.
540 */
541static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
542{
543    struct sem_array *sma;
544
545    sma = container_of(ipcp, struct sem_array, sem_perm);
546    return security_sem_associate(sma, semflg);
547}
548
549/*
550 * Called with sem_ids.rwsem and ipcp locked.
551 */
552static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
553                struct ipc_params *params)
554{
555    struct sem_array *sma;
556
557    sma = container_of(ipcp, struct sem_array, sem_perm);
558    if (params->u.nsems > sma->sem_nsems)
559        return -EINVAL;
560
561    return 0;
562}
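
A userspace consequence of the check above, as a hedged demo (hypothetical helper, error handling omitted): reopening an existing set while requesting more semaphores than it has fails with EINVAL.

#include <errno.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Illustrative demo, not kernel code. */
int reopen_demo(key_t key)
{
    int id = semget(key, 4, IPC_CREAT | 0600);    /* set with 4 sems */

    if (semget(key, 8, 0600) == -1 && errno == EINVAL)
        return id;    /* u.nsems > sma->sem_nsems, as coded above */
    return id;
}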
563
564SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
565{
566    struct ipc_namespace *ns;
567    struct ipc_ops sem_ops;
568    struct ipc_params sem_params;
569
570    ns = current->nsproxy->ipc_ns;
571
572    if (nsems < 0 || nsems > ns->sc_semmsl)
573        return -EINVAL;
574
575    sem_ops.getnew = newary;
576    sem_ops.associate = sem_security;
577    sem_ops.more_checks = sem_more_checks;
578
579    sem_params.key = key;
580    sem_params.flg = semflg;
581    sem_params.u.nsems = nsems;
582
583    return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
584}
585
586/**
587 * perform_atomic_semop - Perform (if possible) a semaphore operation
588 * @sma: semaphore array
589 * @sops: array with operations that should be checked
590 * @nsops: number of operations
591 * @un: undo array
592 * @pid: pid that did the change
593 *
594 * Returns 0 if the operation was possible.
595 * Returns 1 if the operation is impossible, the caller must sleep.
596 * Negative values are error codes.
597 */
598static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
599                 int nsops, struct sem_undo *un, int pid)
600{
601    int result, sem_op;
602    struct sembuf *sop;
603    struct sem *curr;
604
605    for (sop = sops; sop < sops + nsops; sop++) {
606        curr = sma->sem_base + sop->sem_num;
607        sem_op = sop->sem_op;
608        result = curr->semval;
609
610        if (!sem_op && result)
611            goto would_block;
612
613        result += sem_op;
614        if (result < 0)
615            goto would_block;
616        if (result > SEMVMX)
617            goto out_of_range;
618
619        if (sop->sem_flg & SEM_UNDO) {
620            int undo = un->semadj[sop->sem_num] - sem_op;
621            /* Exceeding the undo range is an error. */
622            if (undo < (-SEMAEM - 1) || undo > SEMAEM)
623                goto out_of_range;
624            un->semadj[sop->sem_num] = undo;
625        }
626
627        curr->semval = result;
628    }
629
630    sop--;
631    while (sop >= sops) {
632        sma->sem_base[sop->sem_num].sempid = pid;
633        sop--;
634    }
635
636    return 0;
637
638out_of_range:
639    result = -ERANGE;
640    goto undo;
641
642would_block:
643    if (sop->sem_flg & IPC_NOWAIT)
644        result = -EAGAIN;
645    else
646        result = 1;
647
648undo:
649    sop--;
650    while (sop >= sops) {
651        sem_op = sop->sem_op;
652        sma->sem_base[sop->sem_num].semval -= sem_op;
653        if (sop->sem_flg & SEM_UNDO)
654            un->semadj[sop->sem_num] += sem_op;
655        sop--;
656    }
657
658    return result;
659}
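
The all-or-nothing semantics of perform_atomic_semop() are visible from userspace; a hedged sketch (hypothetical demo function, assumes a set of two semaphores both at 0):

#include <assert.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Illustrative demo, not kernel code: op[1] cannot proceed, so the
 * increment in op[0] must be rolled back via the undo: label above. */
void atomicity_demo(int id)
{
    struct sembuf ops[2] = {
        { .sem_num = 0, .sem_op = +1, .sem_flg = IPC_NOWAIT },
        { .sem_num = 1, .sem_op = -1, .sem_flg = IPC_NOWAIT }, /* blocks */
    };

    assert(semop(id, ops, 2) == -1);       /* fails with EAGAIN */
    assert(semctl(id, 0, GETVAL) == 0);    /* op[0] was undone */
}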
660
661/**
662 * wake_up_sem_queue_prepare - prepare the wake-up of a queue entry
663 * @pt: list of tasks to be woken up, see wake_up_sem_queue_do()
664 * @q: queue entry that must be signaled
665 * @error: error value for the signal
666 */
667static void wake_up_sem_queue_prepare(struct list_head *pt,
668                struct sem_queue *q, int error)
669{
670    if (list_empty(pt)) {
671        /*
672         * Hold preempt off so that we don't get preempted and have the
673         * wakee busy-wait until we're scheduled back on.
674         */
675        preempt_disable();
676    }
677    q->status = IN_WAKEUP;
678    q->pid = error;
679
680    list_add_tail(&q->list, pt);
681}
682
683/**
684 * wake_up_sem_queue_do - do the actual wake-up
685 * @pt: list of tasks to be woken up
686 *
687 * Do the actual wake-up.
688 * The function is called without any locks held, thus the semaphore array
689 * could be destroyed already and the tasks can disappear as soon as the
690 * status is set to the actual return code.
691 */
692static void wake_up_sem_queue_do(struct list_head *pt)
693{
694    struct sem_queue *q, *t;
695    int did_something;
696
697    did_something = !list_empty(pt);
698    list_for_each_entry_safe(q, t, pt, list) {
699        wake_up_process(q->sleeper);
700        /* q can disappear immediately after writing q->status. */
701        smp_wmb();
702        q->status = q->pid;
703    }
704    if (did_something)
705        preempt_enable();
706}
707
708static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
709{
710    list_del(&q->list);
711    if (q->nsops > 1)
712        sma->complex_count--;
713}
714
715/** check_restart(sma, q)
716 * @sma: semaphore array
717 * @q: the operation that just completed
718 *
719 * update_queue is O(N^2) when it restarts scanning the whole queue of
720 * waiting operations. Therefore this function checks if the restart is
721 * really necessary. It is called after a previously waiting operation
722 * modified the array.
723 * Note that wait-for-zero operations are handled without restart.
724 */
725static int check_restart(struct sem_array *sma, struct sem_queue *q)
726{
727    /* pending complex alter operations are too difficult to analyse */
728    if (!list_empty(&sma->pending_alter))
729        return 1;
730
731    /* we were a sleeping complex operation. Too difficult */
732    if (q->nsops > 1)
733        return 1;
734
735    /* It is impossible that someone waits for the new value:
736     * - complex operations always restart.
737     * - wait-for-zero ops are handled separately.
738     * - q is a previously sleeping simple operation that
739     * altered the array. It must be a decrement, because
740     * simple increments never sleep.
741     * - If there are older (higher priority) decrements
742     * in the queue, then they have observed the original
743     * semval value and couldn't proceed. The operation decremented
744     * the value further, so they won't proceed either.
745     */
746    return 0;
747}
748
749/**
750 * wake_const_ops - wake up non-alter tasks
751 * @sma: semaphore array.
752 * @semnum: semaphore that was modified.
753 * @pt: list head for the tasks that must be woken up.
754 *
755 * wake_const_ops must be called after a semaphore in a semaphore array
756 * was set to 0. If complex const operations are pending, wake_const_ops must
757 * be called with semnum = -1, as well as with the number of each modified
758 * semaphore.
759 * The tasks that must be woken up are added to @pt. The return code
760 * is stored in q->pid.
761 * The function returns 1 if at least one operation was completed successfully.
762 */
763static int wake_const_ops(struct sem_array *sma, int semnum,
764                struct list_head *pt)
765{
766    struct sem_queue *q;
767    struct list_head *walk;
768    struct list_head *pending_list;
769    int semop_completed = 0;
770
771    if (semnum == -1)
772        pending_list = &sma->pending_const;
773    else
774        pending_list = &sma->sem_base[semnum].pending_const;
775
776    walk = pending_list->next;
777    while (walk != pending_list) {
778        int error;
779
780        q = container_of(walk, struct sem_queue, list);
781        walk = walk->next;
782
783        error = perform_atomic_semop(sma, q->sops, q->nsops,
784                         q->undo, q->pid);
785
786        if (error <= 0) {
787            /* operation completed, remove from queue & wakeup */
788
789            unlink_queue(sma, q);
790
791            wake_up_sem_queue_prepare(pt, q, error);
792            if (error == 0)
793                semop_completed = 1;
794        }
795    }
796    return semop_completed;
797}
798
799/**
800 * do_smart_wakeup_zero - wake up all wait-for-zero tasks
801 * @sma: semaphore array
802 * @sops: operations that were performed
803 * @nsops: number of operations
804 * @pt: list head of the tasks that must be woken up.
805 *
806 * Checks all required queues for wait-for-zero operations, based
807 * on the actual changes that were performed on the semaphore array.
808 * The function returns 1 if at least one operation was completed successfully.
809 */
810static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
811                    int nsops, struct list_head *pt)
812{
813    int i;
814    int semop_completed = 0;
815    int got_zero = 0;
816
817    /* first: the per-semaphore queues, if known */
818    if (sops) {
819        for (i = 0; i < nsops; i++) {
820            int num = sops[i].sem_num;
821
822            if (sma->sem_base[num].semval == 0) {
823                got_zero = 1;
824                semop_completed |= wake_const_ops(sma, num, pt);
825            }
826        }
827    } else {
828        /*
829         * No sops means modified semaphores not known.
830         * Assume all were changed.
831         */
832        for (i = 0; i < sma->sem_nsems; i++) {
833            if (sma->sem_base[i].semval == 0) {
834                got_zero = 1;
835                semop_completed |= wake_const_ops(sma, i, pt);
836            }
837        }
838    }
839    /*
840     * If one of the modified semaphores got 0,
841     * then check the global queue, too.
842     */
843    if (got_zero)
844        semop_completed |= wake_const_ops(sma, -1, pt);
845
846    return semop_completed;
847}
848
849
850/**
851 * update_queue - look for tasks that can be completed.
852 * @sma: semaphore array.
853 * @semnum: semaphore that was modified.
854 * @pt: list head for the tasks that must be woken up.
855 *
856 * update_queue must be called after a semaphore in a semaphore array
857 * was modified. If multiple semaphores were modified, update_queue must
858 * be called with semnum = -1, as well as with the number of each modified
859 * semaphore.
860 * The tasks that must be woken up are added to @pt. The return code
861 * is stored in q->pid.
862 * The function internally checks if const operations can now succeed.
863 *
864 * The function returns 1 if at least one semop was completed successfully.
865 */
866static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
867{
868    struct sem_queue *q;
869    struct list_head *walk;
870    struct list_head *pending_list;
871    int semop_completed = 0;
872
873    if (semnum == -1)
874        pending_list = &sma->pending_alter;
875    else
876        pending_list = &sma->sem_base[semnum].pending_alter;
877
878again:
879    walk = pending_list->next;
880    while (walk != pending_list) {
881        int error, restart;
882
883        q = container_of(walk, struct sem_queue, list);
884        walk = walk->next;
885
886        /* If we are scanning the per-semaphore list of single-sop
887         * operations and that semaphore's value is 0, then it is not
888         * necessary to scan further: simple increments
889         * that affect only one entry succeed immediately and cannot
890         * be in the per semaphore pending queue, and decrements
891         * cannot be successful if the value is already 0.
892         */
893        if (semnum != -1 && sma->sem_base[semnum].semval == 0)
894            break;
895
896        error = perform_atomic_semop(sma, q->sops, q->nsops,
897                     q->undo, q->pid);
898
899        /* Does q->sleeper still need to sleep? */
900        if (error > 0)
901            continue;
902
903        unlink_queue(sma, q);
904
905        if (error) {
906            restart = 0;
907        } else {
908            semop_completed = 1;
909            do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
910            restart = check_restart(sma, q);
911        }
912
913        wake_up_sem_queue_prepare(pt, q, error);
914        if (restart)
915            goto again;
916    }
917    return semop_completed;
918}
919
920/**
921 * set_semotime - set sem_otime
922 * @sma: semaphore array
923 * @sops: operations that modified the array, may be NULL
924 *
925 * sem_otime is replicated to avoid cache line thrashing.
926 * This function sets one instance to the current time.
927 */
928static void set_semotime(struct sem_array *sma, struct sembuf *sops)
929{
930    if (sops == NULL) {
931        sma->sem_base[0].sem_otime = get_seconds();
932    } else {
933        sma->sem_base[sops[0].sem_num].sem_otime =
934                            get_seconds();
935    }
936}
937
938/**
939 * do_smart_update - optimized update_queue
940 * @sma: semaphore array
941 * @sops: operations that were performed
942 * @nsops: number of operations
943 * @otime: force setting otime
944 * @pt: list head of the tasks that must be woken up.
945 *
946 * do_smart_update() performs the required calls to update_queue() and do_smart_wakeup_zero(),
947 * based on the actual changes that were performed on the semaphore array.
948 * Note that the function does not do the actual wake-up: the caller is
949 * responsible for calling wake_up_sem_queue_do(@pt).
950 * It is safe to perform this call after dropping all locks.
951 */
952static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
953            int otime, struct list_head *pt)
954{
955    int i;
956
957    otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
958
959    if (!list_empty(&sma->pending_alter)) {
960        /* semaphore array uses the global queue - just process it. */
961        otime |= update_queue(sma, -1, pt);
962    } else {
963        if (!sops) {
964            /*
965             * No sops, thus the modified semaphores are not
966             * known. Check all.
967             */
968            for (i = 0; i < sma->sem_nsems; i++)
969                otime |= update_queue(sma, i, pt);
970        } else {
971            /*
972             * Check the semaphores that were increased:
973             * - No complex ops, thus all sleeping ops are
974             * decrements.
975             * - if we decreased the value, then any sleeping
976             * semaphore ops won't be able to run: if the
977             * previous value was too small, then the new
978             * value will be too small, too.
979             */
980            for (i = 0; i < nsops; i++) {
981                if (sops[i].sem_op > 0) {
982                    otime |= update_queue(sma,
983                            sops[i].sem_num, pt);
984                }
985            }
986        }
987    }
988    if (otime)
989        set_semotime(sma, sops);
990}
991
992/* The following counts are associated to each semaphore:
993 * semncnt number of tasks waiting on semval being nonzero
994 * semzcnt number of tasks waiting on semval being zero
995 * This model assumes that a task waits on exactly one semaphore.
996 * Since semaphore operations are to be performed atomically, tasks actually
997 * wait on a whole sequence of semaphores simultaneously.
998 * The counts we return here are a rough approximation, but they still
999 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
1000 */
1001static int count_semncnt(struct sem_array *sma, ushort semnum)
1002{
1003    int semncnt;
1004    struct sem_queue *q;
1005
1006    semncnt = 0;
1007    list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
1008        struct sembuf *sops = q->sops;
1009        BUG_ON(sops->sem_num != semnum);
1010        if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
1011            semncnt++;
1012    }
1013
1014    list_for_each_entry(q, &sma->pending_alter, list) {
1015        struct sembuf *sops = q->sops;
1016        int nsops = q->nsops;
1017        int i;
1018        for (i = 0; i < nsops; i++)
1019            if (sops[i].sem_num == semnum
1020                && (sops[i].sem_op < 0)
1021                && !(sops[i].sem_flg & IPC_NOWAIT))
1022                semncnt++;
1023    }
1024    return semncnt;
1025}
1026
1027static int count_semzcnt(struct sem_array *sma, ushort semnum)
1028{
1029    int semzcnt;
1030    struct sem_queue *q;
1031
1032    semzcnt = 0;
1033    list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
1034        struct sembuf *sops = q->sops;
1035        BUG_ON(sops->sem_num != semnum);
1036        if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
1037            semzcnt++;
1038    }
1039
1040    list_for_each_entry(q, &sma->pending_const, list) {
1041        struct sembuf *sops = q->sops;
1042        int nsops = q->nsops;
1043        int i;
1044        for (i = 0; i < nsops; i++)
1045            if (sops[i].sem_num == semnum
1046                && (sops[i].sem_op == 0)
1047                && !(sops[i].sem_flg & IPC_NOWAIT))
1048                semzcnt++;
1049    }
1050    return semzcnt;
1051}
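
These counts surface in userspace through semctl(GETNCNT) and semctl(GETZCNT); a hedged demo (hypothetical, one semaphore at value 0, crude synchronization via sleep):

#include <stdio.h>
#include <unistd.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Illustrative demo, not kernel code. */
void count_demo(int id)
{
    if (fork() == 0) {
        struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
        semop(id, &op, 1);           /* sleeps: semval is 0 */
        _exit(0);
    }
    sleep(1);                        /* crude: let the child block */
    printf("ncnt=%d zcnt=%d\n",
           semctl(id, 0, GETNCNT),   /* 1: one task wants it nonzero */
           semctl(id, 0, GETZCNT));  /* 0: nobody waits for zero */
}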
1052
1053/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1054 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1055 * remains locked on exit.
1056 */
1057static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1058{
1059    struct sem_undo *un, *tu;
1060    struct sem_queue *q, *tq;
1061    struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1062    struct list_head tasks;
1063    int i;
1064
1065    /* Free the existing undo structures for this semaphore set. */
1066    ipc_assert_locked_object(&sma->sem_perm);
1067    list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1068        list_del(&un->list_id);
1069        spin_lock(&un->ulp->lock);
1070        un->semid = -1;
1071        list_del_rcu(&un->list_proc);
1072        spin_unlock(&un->ulp->lock);
1073        kfree_rcu(un, rcu);
1074    }
1075
1076    /* Wake up all pending processes and let them fail with EIDRM. */
1077    INIT_LIST_HEAD(&tasks);
1078    list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1079        unlink_queue(sma, q);
1080        wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1081    }
1082
1083    list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1084        unlink_queue(sma, q);
1085        wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1086    }
1087    for (i = 0; i < sma->sem_nsems; i++) {
1088        struct sem *sem = sma->sem_base + i;
1089        list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1090            unlink_queue(sma, q);
1091            wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1092        }
1093        list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1094            unlink_queue(sma, q);
1095            wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1096        }
1097    }
1098
1099    /* Remove the semaphore set from the IDR */
1100    sem_rmid(ns, sma);
1101    sem_unlock(sma, -1);
1102    rcu_read_unlock();
1103
1104    wake_up_sem_queue_do(&tasks);
1105    ns->used_sems -= sma->sem_nsems;
1106    ipc_rcu_putref(sma, sem_rcu_free);
1107}
1108
1109static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1110{
1111    switch (version) {
1112    case IPC_64:
1113        return copy_to_user(buf, in, sizeof(*in));
1114    case IPC_OLD:
1115        {
1116        struct semid_ds out;
1117
1118        memset(&out, 0, sizeof(out));
1119
1120        ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1121
1122        out.sem_otime = in->sem_otime;
1123        out.sem_ctime = in->sem_ctime;
1124        out.sem_nsems = in->sem_nsems;
1125
1126        return copy_to_user(buf, &out, sizeof(out));
1127        }
1128    default:
1129        return -EINVAL;
1130    }
1131}
1132
1133static time_t get_semotime(struct sem_array *sma)
1134{
1135    int i;
1136    time_t res;
1137
1138    res = sma->sem_base[0].sem_otime;
1139    for (i = 1; i < sma->sem_nsems; i++) {
1140        time_t to = sma->sem_base[i].sem_otime;
1141
1142        if (to > res)
1143            res = to;
1144    }
1145    return res;
1146}
1147
1148static int semctl_nolock(struct ipc_namespace *ns, int semid,
1149             int cmd, int version, void __user *p)
1150{
1151    int err;
1152    struct sem_array *sma;
1153
1154    switch (cmd) {
1155    case IPC_INFO:
1156    case SEM_INFO:
1157    {
1158        struct seminfo seminfo;
1159        int max_id;
1160
1161        err = security_sem_semctl(NULL, cmd);
1162        if (err)
1163            return err;
1164
1165        memset(&seminfo, 0, sizeof(seminfo));
1166        seminfo.semmni = ns->sc_semmni;
1167        seminfo.semmns = ns->sc_semmns;
1168        seminfo.semmsl = ns->sc_semmsl;
1169        seminfo.semopm = ns->sc_semopm;
1170        seminfo.semvmx = SEMVMX;
1171        seminfo.semmnu = SEMMNU;
1172        seminfo.semmap = SEMMAP;
1173        seminfo.semume = SEMUME;
1174        down_read(&sem_ids(ns).rwsem);
1175        if (cmd == SEM_INFO) {
1176            seminfo.semusz = sem_ids(ns).in_use;
1177            seminfo.semaem = ns->used_sems;
1178        } else {
1179            seminfo.semusz = SEMUSZ;
1180            seminfo.semaem = SEMAEM;
1181        }
1182        max_id = ipc_get_maxid(&sem_ids(ns));
1183        up_read(&sem_ids(ns).rwsem);
1184        if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1185            return -EFAULT;
1186        return (max_id < 0) ? 0 : max_id;
1187    }
1188    case IPC_STAT:
1189    case SEM_STAT:
1190    {
1191        struct semid64_ds tbuf;
1192        int id = 0;
1193
1194        memset(&tbuf, 0, sizeof(tbuf));
1195
1196        rcu_read_lock();
1197        if (cmd == SEM_STAT) {
1198            sma = sem_obtain_object(ns, semid);
1199            if (IS_ERR(sma)) {
1200                err = PTR_ERR(sma);
1201                goto out_unlock;
1202            }
1203            id = sma->sem_perm.id;
1204        } else {
1205            sma = sem_obtain_object_check(ns, semid);
1206            if (IS_ERR(sma)) {
1207                err = PTR_ERR(sma);
1208                goto out_unlock;
1209            }
1210        }
1211
1212        err = -EACCES;
1213        if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1214            goto out_unlock;
1215
1216        err = security_sem_semctl(sma, cmd);
1217        if (err)
1218            goto out_unlock;
1219
1220        kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
1221        tbuf.sem_otime = get_semotime(sma);
1222        tbuf.sem_ctime = sma->sem_ctime;
1223        tbuf.sem_nsems = sma->sem_nsems;
1224        rcu_read_unlock();
1225        if (copy_semid_to_user(p, &tbuf, version))
1226            return -EFAULT;
1227        return id;
1228    }
1229    default:
1230        return -EINVAL;
1231    }
1232out_unlock:
1233    rcu_read_unlock();
1234    return err;
1235}
1236
1237static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1238        unsigned long arg)
1239{
1240    struct sem_undo *un;
1241    struct sem_array *sma;
1242    struct sem *curr;
1243    int err;
1244    struct list_head tasks;
1245    int val;
1246#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1247    /* big-endian 64bit */
1248    val = arg >> 32;
1249#else
1250    /* 32bit or little-endian 64bit */
1251    val = arg;
1252#endif
1253
1254    if (val > SEMVMX || val < 0)
1255        return -ERANGE;
1256
1257    INIT_LIST_HEAD(&tasks);
1258
1259    rcu_read_lock();
1260    sma = sem_obtain_object_check(ns, semid);
1261    if (IS_ERR(sma)) {
1262        rcu_read_unlock();
1263        return PTR_ERR(sma);
1264    }
1265
1266    if (semnum < 0 || semnum >= sma->sem_nsems) {
1267        rcu_read_unlock();
1268        return -EINVAL;
1269    }
1270
1271
1272    if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1273        rcu_read_unlock();
1274        return -EACCES;
1275    }
1276
1277    err = security_sem_semctl(sma, SETVAL);
1278    if (err) {
1279        rcu_read_unlock();
1280        return -EACCES;
1281    }
1282
1283    sem_lock(sma, NULL, -1);
1284
1285    if (!ipc_valid_object(&sma->sem_perm)) {
1286        sem_unlock(sma, -1);
1287        rcu_read_unlock();
1288        return -EIDRM;
1289    }
1290
1291    curr = &sma->sem_base[semnum];
1292
1293    ipc_assert_locked_object(&sma->sem_perm);
1294    list_for_each_entry(un, &sma->list_id, list_id)
1295        un->semadj[semnum] = 0;
1296
1297    curr->semval = val;
1298    curr->sempid = task_tgid_vnr(current);
1299    sma->sem_ctime = get_seconds();
1300    /* maybe some queued-up processes were waiting for this */
1301    do_smart_update(sma, NULL, 0, 0, &tasks);
1302    sem_unlock(sma, -1);
1303    rcu_read_unlock();
1304    wake_up_sem_queue_do(&tasks);
1305    return 0;
1306}
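
The endian-sensitive extraction of val at the top of semctl_setval() exists because userspace passes a union semun by value through the variadic semctl(), so only an unsigned long arrives here. A reminder of the userspace side (Linux requires callers to define the union themselves, see semctl(2)):

#include <sys/ipc.h>
#include <sys/sem.h>

union semun {
    int val;                  /* value for SETVAL */
    struct semid_ds *buf;     /* buffer for IPC_STAT, IPC_SET */
    unsigned short *array;    /* array for GETALL, SETALL */
};

/* Illustrative helper, not kernel code. */
int set_to(int semid, int semnum, int value)
{
    union semun arg = { .val = value };

    return semctl(semid, semnum, SETVAL, arg);
}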
1307
1308static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1309        int cmd, void __user *p)
1310{
1311    struct sem_array *sma;
1312    struct sem *curr;
1313    int err, nsems;
1314    ushort fast_sem_io[SEMMSL_FAST];
1315    ushort *sem_io = fast_sem_io;
1316    struct list_head tasks;
1317
1318    INIT_LIST_HEAD(&tasks);
1319
1320    rcu_read_lock();
1321    sma = sem_obtain_object_check(ns, semid);
1322    if (IS_ERR(sma)) {
1323        rcu_read_unlock();
1324        return PTR_ERR(sma);
1325    }
1326
1327    nsems = sma->sem_nsems;
1328
1329    err = -EACCES;
1330    if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1331        goto out_rcu_wakeup;
1332
1333    err = security_sem_semctl(sma, cmd);
1334    if (err)
1335        goto out_rcu_wakeup;
1336
1337    err = -EACCES;
1338    switch (cmd) {
1339    case GETALL:
1340    {
1341        ushort __user *array = p;
1342        int i;
1343
1344        sem_lock(sma, NULL, -1);
1345        if (!ipc_valid_object(&sma->sem_perm)) {
1346            err = -EIDRM;
1347            goto out_unlock;
1348        }
1349        if (nsems > SEMMSL_FAST) {
1350            if (!ipc_rcu_getref(sma)) {
1351                err = -EIDRM;
1352                goto out_unlock;
1353            }
1354            sem_unlock(sma, -1);
1355            rcu_read_unlock();
1356            sem_io = ipc_alloc(sizeof(ushort)*nsems);
1357            if (sem_io == NULL) {
1358                ipc_rcu_putref(sma, ipc_rcu_free);
1359                return -ENOMEM;
1360            }
1361
1362            rcu_read_lock();
1363            sem_lock_and_putref(sma);
1364            if (!ipc_valid_object(&sma->sem_perm)) {
1365                err = -EIDRM;
1366                goto out_unlock;
1367            }
1368        }
1369        for (i = 0; i < sma->sem_nsems; i++)
1370            sem_io[i] = sma->sem_base[i].semval;
1371        sem_unlock(sma, -1);
1372        rcu_read_unlock();
1373        err = 0;
1374        if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1375            err = -EFAULT;
1376        goto out_free;
1377    }
1378    case SETALL:
1379    {
1380        int i;
1381        struct sem_undo *un;
1382
1383        if (!ipc_rcu_getref(sma)) {
1384            err = -EIDRM;
1385            goto out_rcu_wakeup;
1386        }
1387        rcu_read_unlock();
1388
1389        if (nsems > SEMMSL_FAST) {
1390            sem_io = ipc_alloc(sizeof(ushort)*nsems);
1391            if (sem_io == NULL) {
1392                ipc_rcu_putref(sma, ipc_rcu_free);
1393                return -ENOMEM;
1394            }
1395        }
1396
1397        if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1398            ipc_rcu_putref(sma, ipc_rcu_free);
1399            err = -EFAULT;
1400            goto out_free;
1401        }
1402
1403        for (i = 0; i < nsems; i++) {
1404            if (sem_io[i] > SEMVMX) {
1405                ipc_rcu_putref(sma, ipc_rcu_free);
1406                err = -ERANGE;
1407                goto out_free;
1408            }
1409        }
1410        rcu_read_lock();
1411        sem_lock_and_putref(sma);
1412        if (!ipc_valid_object(&sma->sem_perm)) {
1413            err = -EIDRM;
1414            goto out_unlock;
1415        }
1416
1417        for (i = 0; i < nsems; i++)
1418            sma->sem_base[i].semval = sem_io[i];
1419
1420        ipc_assert_locked_object(&sma->sem_perm);
1421        list_for_each_entry(un, &sma->list_id, list_id) {
1422            for (i = 0; i < nsems; i++)
1423                un->semadj[i] = 0;
1424        }
1425        sma->sem_ctime = get_seconds();
1426        /* maybe some queued-up processes were waiting for this */
1427        do_smart_update(sma, NULL, 0, 0, &tasks);
1428        err = 0;
1429        goto out_unlock;
1430    }
1431    /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1432    }
1433    err = -EINVAL;
1434    if (semnum < 0 || semnum >= nsems)
1435        goto out_rcu_wakeup;
1436
1437    sem_lock(sma, NULL, -1);
1438    if (!ipc_valid_object(&sma->sem_perm)) {
1439        err = -EIDRM;
1440        goto out_unlock;
1441    }
1442    curr = &sma->sem_base[semnum];
1443
1444    switch (cmd) {
1445    case GETVAL:
1446        err = curr->semval;
1447        goto out_unlock;
1448    case GETPID:
1449        err = curr->sempid;
1450        goto out_unlock;
1451    case GETNCNT:
1452        err = count_semncnt(sma, semnum);
1453        goto out_unlock;
1454    case GETZCNT:
1455        err = count_semzcnt(sma, semnum);
1456        goto out_unlock;
1457    }
1458
1459out_unlock:
1460    sem_unlock(sma, -1);
1461out_rcu_wakeup:
1462    rcu_read_unlock();
1463    wake_up_sem_queue_do(&tasks);
1464out_free:
1465    if (sem_io != fast_sem_io)
1466        ipc_free(sem_io, sizeof(ushort)*nsems);
1467    return err;
1468}
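
GETALL and SETALL move the whole value array in one call; a hedged userspace sketch (hypothetical helper, reuses the union semun definition shown earlier; note that SETALL also clears every undo adjustment, as coded above):

#include <sys/sem.h>

/* Illustrative helper, not kernel code; assumes union semun as above. */
int reset_all(int semid, int nsems)
{
    unsigned short vals[nsems];
    union semun arg = { .array = vals };
    int i;

    if (semctl(semid, 0, GETALL, arg) < 0)   /* snapshot current values */
        return -1;
    for (i = 0; i < nsems; i++)
        vals[i] = 0;
    return semctl(semid, 0, SETALL, arg);
}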
1469
1470static inline unsigned long
1471copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1472{
1473    switch (version) {
1474    case IPC_64:
1475        if (copy_from_user(out, buf, sizeof(*out)))
1476            return -EFAULT;
1477        return 0;
1478    case IPC_OLD:
1479        {
1480        struct semid_ds tbuf_old;
1481
1482        if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1483            return -EFAULT;
1484
1485        out->sem_perm.uid = tbuf_old.sem_perm.uid;
1486        out->sem_perm.gid = tbuf_old.sem_perm.gid;
1487        out->sem_perm.mode = tbuf_old.sem_perm.mode;
1488
1489        return 0;
1490        }
1491    default:
1492        return -EINVAL;
1493    }
1494}
1495
1496/*
1497 * This function handles some semctl commands which require the rwsem
1498 * to be held in write mode.
1499 * NOTE: no locks must be held, the rwsem is taken inside this function.
1500 */
1501static int semctl_down(struct ipc_namespace *ns, int semid,
1502               int cmd, int version, void __user *p)
1503{
1504    struct sem_array *sma;
1505    int err;
1506    struct semid64_ds semid64;
1507    struct kern_ipc_perm *ipcp;
1508
1509    if (cmd == IPC_SET) {
1510        if (copy_semid_from_user(&semid64, p, version))
1511            return -EFAULT;
1512    }
1513
1514    down_write(&sem_ids(ns).rwsem);
1515    rcu_read_lock();
1516
1517    ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1518                      &semid64.sem_perm, 0);
1519    if (IS_ERR(ipcp)) {
1520        err = PTR_ERR(ipcp);
1521        goto out_unlock1;
1522    }
1523
1524    sma = container_of(ipcp, struct sem_array, sem_perm);
1525
1526    err = security_sem_semctl(sma, cmd);
1527    if (err)
1528        goto out_unlock1;
1529
1530    switch (cmd) {
1531    case IPC_RMID:
1532        sem_lock(sma, NULL, -1);
1533        /* freeary unlocks the ipc object and rcu */
1534        freeary(ns, ipcp);
1535        goto out_up;
1536    case IPC_SET:
1537        sem_lock(sma, NULL, -1);
1538        err = ipc_update_perm(&semid64.sem_perm, ipcp);
1539        if (err)
1540            goto out_unlock0;
1541        sma->sem_ctime = get_seconds();
1542        break;
1543    default:
1544        err = -EINVAL;
1545        goto out_unlock1;
1546    }
1547
1548out_unlock0:
1549    sem_unlock(sma, -1);
1550out_unlock1:
1551    rcu_read_unlock();
1552out_up:
1553    up_write(&sem_ids(ns).rwsem);
1554    return err;
1555}
1556
1557SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1558{
1559    int version;
1560    struct ipc_namespace *ns;
1561    void __user *p = (void __user *)arg;
1562
1563    if (semid < 0)
1564        return -EINVAL;
1565
1566    version = ipc_parse_version(&cmd);
1567    ns = current->nsproxy->ipc_ns;
1568
1569    switch (cmd) {
1570    case IPC_INFO:
1571    case SEM_INFO:
1572    case IPC_STAT:
1573    case SEM_STAT:
1574        return semctl_nolock(ns, semid, cmd, version, p);
1575    case GETALL:
1576    case GETVAL:
1577    case GETPID:
1578    case GETNCNT:
1579    case GETZCNT:
1580    case SETALL:
1581        return semctl_main(ns, semid, semnum, cmd, p);
1582    case SETVAL:
1583        return semctl_setval(ns, semid, semnum, arg);
1584    case IPC_RMID:
1585    case IPC_SET:
1586        return semctl_down(ns, semid, cmd, version, p);
1587    default:
1588        return -EINVAL;
1589    }
1590}
1591
1592/* If the task doesn't already have an undo_list, then allocate one
1593 * here. We guarantee there is only one thread using this undo list,
1594 * and current is THE ONE
1595 *
1596 * If this allocation and assignment succeeds, but later
1597 * portions of this code fail, there is no need to free the sem_undo_list.
1598 * Just let it stay associated with the task, and it'll be freed later
1599 * at exit time.
1600 *
1601 * This can block, so callers must hold no locks.
1602 */
1603static inline int get_undo_list(struct sem_undo_list **undo_listp)
1604{
1605    struct sem_undo_list *undo_list;
1606
1607    undo_list = current->sysvsem.undo_list;
1608    if (!undo_list) {
1609        undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1610        if (undo_list == NULL)
1611            return -ENOMEM;
1612        spin_lock_init(&undo_list->lock);
1613        atomic_set(&undo_list->refcnt, 1);
1614        INIT_LIST_HEAD(&undo_list->list_proc);
1615
1616        current->sysvsem.undo_list = undo_list;
1617    }
1618    *undo_listp = undo_list;
1619    return 0;
1620}
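
The exit-time replay of these undo entries can be observed from userspace; a hedged demo (hypothetical, one semaphore at value 0): a child's SEM_UNDO adjustment is applied back when it exits.

#include <assert.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* Illustrative demo, not kernel code. */
void undo_demo(int id)
{
    if (fork() == 0) {
        struct sembuf op = { .sem_num = 0, .sem_op = +1,
                             .sem_flg = SEM_UNDO };
        semop(id, &op, 1);    /* semval: 0 -> 1, semadj: -1 */
        _exit(0);             /* exit applies semadj: semval -> 0 */
    }
    wait(NULL);
    assert(semctl(id, 0, GETVAL) == 0);    /* increment was undone */
}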
1621
1622static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1623{
1624    struct sem_undo *un;
1625
1626    list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1627        if (un->semid == semid)
1628            return un;
1629    }
1630    return NULL;
1631}
1632
1633static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1634{
1635    struct sem_undo *un;
1636
1637    assert_spin_locked(&ulp->lock);
1638
1639    un = __lookup_undo(ulp, semid);
1640    if (un) {
1641        list_del_rcu(&un->list_proc);
1642        list_add_rcu(&un->list_proc, &ulp->list_proc);
1643    }
1644    return un;
1645}
1646
1647/**
1648 * find_alloc_undo - lookup (and if not present create) undo array
1649 * @ns: namespace
1650 * @semid: semaphore array id
1651 *
1652 * The function looks up (and if not present creates) the undo structure.
1653 * The size of the undo structure depends on the size of the semaphore
1654 * array, thus the alloc path is not that straightforward.
1655 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
1656 * performs an rcu_read_lock().
1657 */
1658static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1659{
1660    struct sem_array *sma;
1661    struct sem_undo_list *ulp;
1662    struct sem_undo *un, *new;
1663    int nsems, error;
1664
1665    error = get_undo_list(&ulp);
1666    if (error)
1667        return ERR_PTR(error);
1668
1669    rcu_read_lock();
1670    spin_lock(&ulp->lock);
1671    un = lookup_undo(ulp, semid);
1672    spin_unlock(&ulp->lock);
1673    if (likely(un != NULL))
1674        goto out;
1675
1676    /* no undo structure around - allocate one. */
1677    /* step 1: figure out the size of the semaphore array */
1678    sma = sem_obtain_object_check(ns, semid);
1679    if (IS_ERR(sma)) {
1680        rcu_read_unlock();
1681        return ERR_CAST(sma);
1682    }
1683
1684    nsems = sma->sem_nsems;
1685    if (!ipc_rcu_getref(sma)) {
1686        rcu_read_unlock();
1687        un = ERR_PTR(-EIDRM);
1688        goto out;
1689    }
1690    rcu_read_unlock();
1691
1692    /* step 2: allocate new undo structure */
1693    new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1694    if (!new) {
1695        ipc_rcu_putref(sma, ipc_rcu_free);
1696        return ERR_PTR(-ENOMEM);
1697    }
1698
1699    /* step 3: Acquire the lock on semaphore array */
1700    rcu_read_lock();
1701    sem_lock_and_putref(sma);
1702    if (!ipc_valid_object(&sma->sem_perm)) {
1703        sem_unlock(sma, -1);
1704        rcu_read_unlock();
1705        kfree(new);
1706        un = ERR_PTR(-EIDRM);
1707        goto out;
1708    }
1709    spin_lock(&ulp->lock);
1710
1711    /*
1712     * step 4: check for races: did someone else allocate the undo struct?
1713     */
1714    un = lookup_undo(ulp, semid);
1715    if (un) {
1716        kfree(new);
1717        goto success;
1718    }
1719    /* step 5: initialize & link new undo structure */
1720    new->semadj = (short *) &new[1];
1721    new->ulp = ulp;
1722    new->semid = semid;
1723    assert_spin_locked(&ulp->lock);
1724    list_add_rcu(&new->list_proc, &ulp->list_proc);
1725    ipc_assert_locked_object(&sma->sem_perm);
1726    list_add(&new->list_id, &sma->list_id);
1727    un = new;
1728
1729success:
1730    spin_unlock(&ulp->lock);
1731    sem_unlock(sma, -1);
1732out:
1733    return un;
1734}
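
Steps 2-4 above are an instance of a common kernel pattern: drop the locks for a sleeping allocation, then revalidate under the lock and discard the allocation if another task won the race. A generic hedged sketch (hypothetical table/thing types and lookup/insert helpers):

/* Sketch of the allocate-then-revalidate pattern; illustrative only. */
struct thing *get_or_alloc(struct table *t, int key)
{
    struct thing *new, *found;

    spin_lock(&t->lock);
    found = lookup(t, key);
    spin_unlock(&t->lock);
    if (found)
        return found;

    new = kzalloc(sizeof(*new), GFP_KERNEL);  /* may sleep: no locks held */
    if (!new)
        return ERR_PTR(-ENOMEM);

    spin_lock(&t->lock);
    found = lookup(t, key);    /* did someone else allocate meanwhile? */
    if (found) {
        spin_unlock(&t->lock);
        kfree(new);            /* lose the race gracefully */
        return found;
    }
    insert(t, key, new);
    spin_unlock(&t->lock);
    return new;
}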

/**
 * get_queue_result - retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * signal) and in parallel the task is woken up by another task because it got
 * the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
    int error;

    error = q->status;
    while (unlikely(error == IN_WAKEUP)) {
        cpu_relax();
        error = q->status;
    }

    return error;
}
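
/*
 * Illustration (not part of the kernel source shown here): the waker side
 * of this handshake (wake_up_sem_queue_prepare()/wake_up_sem_queue_do())
 * publishes the result in two steps, roughly:
 *
 *     q->status = IN_WAKEUP;      // claim the queue entry
 *     ...                         // wake_up_process() etc., locks dropped
 *     smp_wmb();
 *     q->status = error;          // publish the final result
 *
 * Hence a woken sleeper that observes IN_WAKEUP merely spins with
 * cpu_relax() until the final value appears; it never has to take a lock.
 * This is a sketch of the protocol, not a verbatim copy of those functions.
 */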

SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        unsigned, nsops, const struct timespec __user *, timeout)
{
    int error = -EINVAL;
    struct sem_array *sma;
    struct sembuf fast_sops[SEMOPM_FAST];
    struct sembuf *sops = fast_sops, *sop;
    struct sem_undo *un;
    int undos = 0, alter = 0, max, locknum;
    struct sem_queue queue;
    unsigned long jiffies_left = 0;
    struct ipc_namespace *ns;
    struct list_head tasks;

    ns = current->nsproxy->ipc_ns;

    if (nsops < 1 || semid < 0)
        return -EINVAL;
    if (nsops > ns->sc_semopm)
        return -E2BIG;
    if (nsops > SEMOPM_FAST) {
        sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
        if (sops == NULL)
            return -ENOMEM;
    }
    if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
        error = -EFAULT;
        goto out_free;
    }
    if (timeout) {
        struct timespec _timeout;
        if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
            error = -EFAULT;
            goto out_free;
        }
        if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
            _timeout.tv_nsec >= 1000000000L) {
            error = -EINVAL;
            goto out_free;
        }
        jiffies_left = timespec_to_jiffies(&_timeout);
    }
    max = 0;
    for (sop = sops; sop < sops + nsops; sop++) {
        if (sop->sem_num >= max)
            max = sop->sem_num;
        if (sop->sem_flg & SEM_UNDO)
            undos = 1;
        if (sop->sem_op != 0)
            alter = 1;
    }
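
    /*
     * Illustration (not part of the kernel): for a classic two-operation
     * semop() call from user space, e.g.
     *
     *     struct sembuf sops[2] = {
     *         { .sem_num = 0, .sem_op = 0, .sem_flg = 0 },        // wait-for-zero
     *         { .sem_num = 1, .sem_op = 1, .sem_flg = SEM_UNDO }, // increment
     *     };
     *     semop(id, sops, 2);
     *
     * the scan above yields max = 1 (highest semaphore index touched),
     * undos = 1 (an undo structure is needed) and alter = 1 (the array is
     * modified, so write permission is required below).
     */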

    INIT_LIST_HEAD(&tasks);

    if (undos) {
        /* On success, find_alloc_undo takes the rcu_read_lock */
        un = find_alloc_undo(ns, semid);
        if (IS_ERR(un)) {
            error = PTR_ERR(un);
            goto out_free;
        }
    } else {
        un = NULL;
        rcu_read_lock();
    }

    sma = sem_obtain_object_check(ns, semid);
    if (IS_ERR(sma)) {
        rcu_read_unlock();
        error = PTR_ERR(sma);
        goto out_free;
    }

    error = -EFBIG;
    if (max >= sma->sem_nsems)
        goto out_rcu_wakeup;

    error = -EACCES;
    if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
        goto out_rcu_wakeup;

    error = security_sem_semop(sma, sops, nsops, alter);
    if (error)
        goto out_rcu_wakeup;

    error = -EIDRM;
    locknum = sem_lock(sma, sops, nsops);
    /*
     * We eventually might perform the following check in a lockless
     * fashion, considering ipc_valid_object() locking constraints.
     * If nsops == 1 and there is no contention for sem_perm.lock, then
     * only a per-semaphore lock is held and it's OK to proceed with the
     * check below. More details on the fine-grained locking scheme
     * entangled here, and on why it is RMID race safe, are in the
     * comments at sem_lock().
     */
    if (!ipc_valid_object(&sma->sem_perm))
        goto out_unlock_free;
    /*
     * semid identifiers are not unique - find_alloc_undo may have
     * allocated an undo structure, it was invalidated by an RMID
     * and now a new array received the same id. Check and fail.
     * This case can be detected by checking un->semid. The existence
     * of "un" itself is guaranteed by rcu.
     */
    if (un && un->semid == -1)
        goto out_unlock_free;

    error = perform_atomic_semop(sma, sops, nsops, un,
                    task_tgid_vnr(current));
    if (error == 0) {
        /* If the operation was successful, then do
         * the required updates.
         */
        if (alter)
            do_smart_update(sma, sops, nsops, 1, &tasks);
        else
            set_semotime(sma, sops);
    }
    if (error <= 0)
        goto out_unlock_free;

    /* We need to sleep on this operation, so we put the current
     * task into the pending queue and go to sleep.
     */

    queue.sops = sops;
    queue.nsops = nsops;
    queue.undo = un;
    queue.pid = task_tgid_vnr(current);
    queue.alter = alter;

    if (nsops == 1) {
        struct sem *curr;
        curr = &sma->sem_base[sops->sem_num];

        if (alter) {
            if (sma->complex_count) {
                list_add_tail(&queue.list,
                        &sma->pending_alter);
            } else {
                list_add_tail(&queue.list,
                        &curr->pending_alter);
            }
        } else {
            list_add_tail(&queue.list, &curr->pending_const);
        }
    } else {
        if (!sma->complex_count)
            merge_queues(sma);

        if (alter)
            list_add_tail(&queue.list, &sma->pending_alter);
        else
            list_add_tail(&queue.list, &sma->pending_const);

        sma->complex_count++;
    }

    queue.status = -EINTR;
    queue.sleeper = current;

sleep_again:
    current->state = TASK_INTERRUPTIBLE;
    sem_unlock(sma, locknum);
    rcu_read_unlock();

    if (timeout)
        jiffies_left = schedule_timeout(jiffies_left);
    else
        schedule();

    error = get_queue_result(&queue);

    if (error != -EINTR) {
        /* fast path: update_queue already obtained all requested
         * resources.
         * Perform a smp_mb(): User space could assume that semop()
         * is a memory barrier: Without the mb(), the cpu could
         * speculatively read in user space stale data that was
         * overwritten by the previous owner of the semaphore.
         */
        smp_mb();

        goto out_free;
    }

    rcu_read_lock();
    sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);

    /*
     * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
     */
    error = get_queue_result(&queue);

    /*
     * Array removed? If yes, leave without sem_unlock().
     */
    if (IS_ERR(sma)) {
        rcu_read_unlock();
        goto out_free;
    }

    /*
     * If queue.status != -EINTR, we were woken up by another process.
     * Leave without unlink_queue(), but with sem_unlock().
     */
    if (error != -EINTR)
        goto out_unlock_free;

    /*
     * If an interrupt occurred, we have to clean up the queue.
     */
    if (timeout && jiffies_left == 0)
        error = -EAGAIN;

    /*
     * If the wakeup was spurious, just retry.
     */
    if (error == -EINTR && !signal_pending(current))
        goto sleep_again;

    unlink_queue(sma, &queue);

out_unlock_free:
    sem_unlock(sma, locknum);
out_rcu_wakeup:
    rcu_read_unlock();
    wake_up_sem_queue_do(&tasks);
out_free:
    if (sops != fast_sops)
        kfree(sops);
    return error;
}
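
/*
 * Illustration (not part of the kernel): a plausible user-space call into
 * the syscall above, sketched only to show its error conventions
 * (identifiers are examples; error handling abbreviated):
 *
 *     struct sembuf sop = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *     struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *     if (semtimedop(id, &sop, 1, &ts) == -1) {
 *         // errno == EAGAIN: the 5s timeout expired
 *         // errno == EINTR:  interrupted by a signal
 *         // errno == EIDRM:  the array was removed while sleeping
 *     }
 *
 * A negative sem_op blocks until the semaphore value is large enough,
 * which is exactly the sleep path implemented above.
 */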

SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
        unsigned, nsops)
{
    return sys_semtimedop(semid, tsops, nsops, NULL);
}

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
    struct sem_undo_list *undo_list;
    int error;

    if (clone_flags & CLONE_SYSVSEM) {
        error = get_undo_list(&undo_list);
        if (error)
            return error;
        atomic_inc(&undo_list->refcnt);
        tsk->sysvsem.undo_list = undo_list;
    } else
        tsk->sysvsem.undo_list = NULL;

    return 0;
}
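
/*
 * Illustration (not part of the kernel): CLONE_SYSVSEM is what makes all
 * threads of a process share one undo list. A direct clone() user could
 * request the same sharing explicitly (sketch; worker_fn, stack_top and
 * arg are example identifiers, error handling omitted):
 *
 *     clone(worker_fn, stack_top,
 *           CLONE_VM | CLONE_SYSVSEM | SIGCHLD, arg);
 *
 * With CLONE_SYSVSEM the child reuses the parent's sem_undo_list (only the
 * refcount is bumped above); without it the child starts with no undo list
 * and lazily gets its own on first use.
 */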

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed,
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval,
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
    struct sem_undo_list *ulp;

    ulp = tsk->sysvsem.undo_list;
    if (!ulp)
        return;
    tsk->sysvsem.undo_list = NULL;

    if (!atomic_dec_and_test(&ulp->refcnt))
        return;

    for (;;) {
        struct sem_array *sma;
        struct sem_undo *un;
        struct list_head tasks;
        int semid, i;

        rcu_read_lock();
        un = list_entry_rcu(ulp->list_proc.next,
                    struct sem_undo, list_proc);
        if (&un->list_proc == &ulp->list_proc)
            semid = -1;
        else
            semid = un->semid;

        if (semid == -1) {
            rcu_read_unlock();
            break;
        }

        sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
        /* exit_sem raced with IPC_RMID, nothing to do */
        if (IS_ERR(sma)) {
            rcu_read_unlock();
            continue;
        }

        sem_lock(sma, NULL, -1);
        /* exit_sem raced with IPC_RMID, nothing to do */
        if (!ipc_valid_object(&sma->sem_perm)) {
            sem_unlock(sma, -1);
            rcu_read_unlock();
            continue;
        }
        un = __lookup_undo(ulp, semid);
        if (un == NULL) {
            /* exit_sem raced with IPC_RMID+semget() that created
             * exactly the same semid. Nothing to do.
             */
            sem_unlock(sma, -1);
            rcu_read_unlock();
            continue;
        }

        /* remove un from the linked lists */
        ipc_assert_locked_object(&sma->sem_perm);
        list_del(&un->list_id);

        spin_lock(&ulp->lock);
        list_del_rcu(&un->list_proc);
        spin_unlock(&ulp->lock);

        /* perform adjustments registered in un */
        for (i = 0; i < sma->sem_nsems; i++) {
            struct sem *semaphore = &sma->sem_base[i];
            if (un->semadj[i]) {
                semaphore->semval += un->semadj[i];
                /*
                 * Range checks of the new semaphore value,
                 * not defined by SUS:
                 * - Some unices ignore the undo entirely
                 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
                 * - some cap the value (e.g. FreeBSD caps
                 *   at 0, but doesn't enforce SEMVMX)
                 *
                 * Linux caps the semaphore value, both at 0
                 * and at SEMVMX.
                 *
                 * Manfred <manfred@colorfullife.com>
                 */
                if (semaphore->semval < 0)
                    semaphore->semval = 0;
                if (semaphore->semval > SEMVMX)
                    semaphore->semval = SEMVMX;
                semaphore->sempid = task_tgid_vnr(current);
            }
        }
        /* maybe some queued-up processes were waiting for this */
        INIT_LIST_HEAD(&tasks);
        do_smart_update(sma, NULL, 0, 1, &tasks);
        sem_unlock(sma, -1);
        rcu_read_unlock();
        wake_up_sem_queue_do(&tasks);

        kfree_rcu(un, rcu);
    }
    kfree(ulp);
}
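
/*
 * Illustration (not part of the kernel): the effect of exit_sem() as seen
 * from user space. If a process increments a semaphore with SEM_UNDO and
 * then exits without decrementing it, the kernel applies the recorded
 * semadj (-1 here) on its behalf (sketch; identifiers are examples):
 *
 *     struct sembuf sop = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };
 *     semop(id, &sop, 1);    // semval: 0 -> 1, recorded semadj: -1
 *     exit(0);               // exit_sem(): semval: 1 -> 0
 *
 * As the comment in the loop above notes, the result is clamped to the
 * range 0..SEMVMX rather than applied atomically.
 */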

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
    struct user_namespace *user_ns = seq_user_ns(s);
    struct sem_array *sma = it;
    time_t sem_otime;

    /*
     * The proc interface isn't aware of sem_lock(), it calls
     * ipc_lock_object() directly (in sysvipc_find_ipc).
     * In order to stay compatible with sem_lock(), we must wait until
     * all simple semop() calls have left their critical regions.
     */
    sem_wait_array(sma);

    sem_otime = get_semotime(sma);

    return seq_printf(s,
              "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
              sma->sem_perm.key,
              sma->sem_perm.id,
              sma->sem_perm.mode,
              sma->sem_nsems,
              from_kuid_munged(user_ns, sma->sem_perm.uid),
              from_kgid_munged(user_ns, sma->sem_perm.gid),
              from_kuid_munged(user_ns, sma->sem_perm.cuid),
              from_kgid_munged(user_ns, sma->sem_perm.cgid),
              sem_otime,
              sma->sem_ctime);
}
#endif
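
/*
 * Illustration (not part of the kernel): each semaphore array appears as
 * one line in /proc/sysvipc/sem, matching the format string above, e.g.
 * roughly:
 *
 *     $ cat /proc/sysvipc/sem
 *     key        semid  perms  nsems  uid  gid  cuid  cgid  otime  ctime
 *
 * i.e. key, id, mode, number of semaphores, the owner and creator uid/gid
 * translated into the reader's user namespace, and the otime/ctime
 * timestamps.
 */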