ipc/sem.c

Source at commit fbf123cd4cc0c097fe9a99c90109ebb2a5e94a50.
1/*
2 * linux/ipc/sem.c
3 * Copyright (C) 1992 Krishna Balasubramanian
4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
5 *
6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
7 *
8 * SMP-threaded, sysctl's added
9 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
10 * Enforced range limit on SEM_UNDO
11 * (c) 2001 Red Hat Inc
12 * Lockless wakeup
13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
14 * Further wakeup optimizations, documentation
15 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Implementation notes: (May 2010)
25 * This file implements System V semaphores.
26 *
27 * User space visible behavior:
28 * - FIFO ordering for semop() operations (just FIFO, not starvation
29 * protection)
30 * - multiple semaphore operations that alter the same semaphore in
31 * one semop() are handled.
32 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
33 * SETALL calls.
34 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
35 * - undo adjustments at process exit are limited to 0..SEMVMX.
36 * - namespaces are supported.
37 * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
38 * to /proc/sys/kernel/sem.
39 * - statistics about the usage are reported in /proc/sysvipc/sem.
40 *
41 * Internals:
42 * - scalability:
43 * - all global variables are read-mostly.
44 * - semop() calls and semctl(RMID) are synchronized by RCU.
45 * - most operations perform their write accesses (actually: spin_lock
46 * calls) only on the per-semaphore structure within the array.
47 * Thus: Perfect SMP scaling between independent semaphore arrays.
48 * If multiple semaphores in one array are used, then cache line
49 * thrashing on the semaphore array spinlock will limit the scaling.
50 * - semncnt and semzcnt are calculated on demand in count_semcnt()
51 * - the task that performs a successful semop() scans the list of all
52 * sleeping tasks and completes any pending operations that can be fulfilled.
53 * Semaphores are actively given to waiting tasks (necessary for FIFO).
54 * (see update_queue())
55 * - To improve the scalability, the actual wake-up calls are performed after
56 * dropping all locks. (see wake_up_sem_queue_prepare(),
57 * wake_up_sem_queue_do())
58 * - All work is done by the waker; the woken-up task does not have to do
59 * anything - not even acquire a lock or drop a refcount.
60 * - A woken up task may not even touch the semaphore array anymore, it may
61 * have been destroyed already by a semctl(RMID).
62 * - The synchronization between wake-ups due to a timeout/signal and a
63 * wake-up due to a completed semaphore operation is achieved by using an
64 * intermediate state (IN_WAKEUP).
65 * - UNDO values are stored in an array (one per process and per
66 * semaphore array, lazily allocated). For backwards compatibility, multiple
67 * modes for the UNDO variables are supported (per process, per thread)
68 * (see copy_semundo, CLONE_SYSVSEM)
69 * - There are two lists of the pending operations: a per-array list
70 * and a per-semaphore list (stored in the array). This makes it possible
71 * to achieve FIFO ordering without always scanning all pending operations.
72 * The worst-case behavior is nevertheless O(N^2) for N wakeups.
73 */
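/*
 * Illustrative sketch (not part of the original source): how the user
 * space behavior documented above is typically exercised. The calls are
 * the standard SysV API; the key and the values are made-up examples.
 */
#if 0
#include <sys/sem.h>

void example(void)
{
	/* made-up key; creates a set of 2 semaphores */
	int id = semget(0x1234, 2, IPC_CREAT | 0600);
	struct sembuf ops[2] = {
		{ .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO },
		{ .sem_num = 1, .sem_op = +1, .sem_flg = 0 },
	};

	/* both operations are applied atomically, in FIFO order */
	semop(id, ops, 2);
}
#endif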
74
75#include <linux/slab.h>
76#include <linux/spinlock.h>
77#include <linux/init.h>
78#include <linux/proc_fs.h>
79#include <linux/time.h>
80#include <linux/security.h>
81#include <linux/syscalls.h>
82#include <linux/audit.h>
83#include <linux/capability.h>
84#include <linux/seq_file.h>
85#include <linux/rwsem.h>
86#include <linux/nsproxy.h>
87#include <linux/ipc_namespace.h>
88
89#include <linux/uaccess.h>
90#include "util.h"
91
92/* One semaphore structure for each semaphore in the system. */
93struct sem {
94    int semval; /* current value */
95    int sempid; /* pid of last operation */
96    spinlock_t lock; /* spinlock for fine-grained semtimedop */
97    struct list_head pending_alter; /* pending single-sop operations */
98                    /* that alter the semaphore */
99    struct list_head pending_const; /* pending single-sop operations */
100                    /* that do not alter the semaphore*/
101    time_t sem_otime; /* candidate for sem_otime */
102} ____cacheline_aligned_in_smp;
103
104/* One queue for each sleeping process in the system. */
105struct sem_queue {
106    struct list_head list; /* queue of pending operations */
107    struct task_struct *sleeper; /* this process */
108    struct sem_undo *undo; /* undo structure */
109    int pid; /* process id of requesting process */
110    int status; /* completion status of operation */
111    struct sembuf *sops; /* array of pending operations */
112    struct sembuf *blocking; /* the operation that blocked */
113    int nsops; /* number of operations */
114    int alter; /* does *sops alter the array? */
115};
116
117/* Each task has a list of undo requests. They are executed automatically
118 * when the process exits.
119 */
120struct sem_undo {
121    struct list_head list_proc; /* per-process list: *
122                         * all undos from one process
123                         * rcu protected */
124    struct rcu_head rcu; /* rcu struct for sem_undo */
125    struct sem_undo_list *ulp; /* back ptr to sem_undo_list */
126    struct list_head list_id; /* per semaphore array list:
127                         * all undos for one array */
128    int semid; /* semaphore set identifier */
129    short *semadj; /* array of adjustments */
130                        /* one per semaphore */
131};
132
133/* sem_undo_list controls shared access to the list of sem_undo structures
134 * that may be shared among all tasks of a CLONE_SYSVSEM task group.
135 */
136struct sem_undo_list {
137    atomic_t refcnt;
138    spinlock_t lock;
139    struct list_head list_proc;
140};
141
142
143#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
144
145#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
146
147static int newary(struct ipc_namespace *, struct ipc_params *);
148static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
149#ifdef CONFIG_PROC_FS
150static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
151#endif
152
153#define SEMMSL_FAST 256 /* 512 bytes on stack */
154#define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
155
156/*
157 * Locking:
158 * sem_undo.id_next,
159 * sem_array.complex_count,
160 * sem_array.pending{_alter,_const},
161 * sem_array.sem_undo: global sem_lock() for read/write
162 * sem_undo.proc_next: only "current" is allowed to read/write that field.
163 *
164 * sem_array.sem_base[i].pending_{const,alter}:
165 * global or semaphore sem_lock() for read/write
166 */
167
168#define sc_semmsl sem_ctls[0]
169#define sc_semmns sem_ctls[1]
170#define sc_semopm sem_ctls[2]
171#define sc_semmni sem_ctls[3]
172
173void sem_init_ns(struct ipc_namespace *ns)
174{
175    ns->sc_semmsl = SEMMSL;
176    ns->sc_semmns = SEMMNS;
177    ns->sc_semopm = SEMOPM;
178    ns->sc_semmni = SEMMNI;
179    ns->used_sems = 0;
180    ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
181}
182
183#ifdef CONFIG_IPC_NS
184void sem_exit_ns(struct ipc_namespace *ns)
185{
186    free_ipcs(ns, &sem_ids(ns), freeary);
187    idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
188}
189#endif
190
191void __init sem_init(void)
192{
193    sem_init_ns(&init_ipc_ns);
194    ipc_init_proc_interface("sysvipc/sem",
195                " key semid perms nsems uid gid cuid cgid otime ctime\n",
196                IPC_SEM_IDS, sysvipc_sem_proc_show);
197}
198
199/**
200 * unmerge_queues - unmerge queues, if possible.
201 * @sma: semaphore array
202 *
203 * The function unmerges the wait queues if complex_count is 0.
204 * It must be called prior to dropping the global semaphore array lock.
205 */
206static void unmerge_queues(struct sem_array *sma)
207{
208    struct sem_queue *q, *tq;
209
210    /* complex operations still around? */
211    if (sma->complex_count)
212        return;
213    /*
214     * We will switch back to simple mode.
215 * Move all pending operations back into the per-semaphore
216     * queues.
217     */
218    list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
219        struct sem *curr;
220        curr = &sma->sem_base[q->sops[0].sem_num];
221
222        list_add_tail(&q->list, &curr->pending_alter);
223    }
224    INIT_LIST_HEAD(&sma->pending_alter);
225}
226
227/**
228 * merge_queues - merge single semop queues into global queue
229 * @sma: semaphore array
230 *
231 * This function merges all per-semaphore queues into the global queue.
232 * It is necessary to achieve FIFO ordering for the pending single-sop
233 * operations when a multi-semop operation must sleep.
234 * Only the alter operations must be moved, the const operations can stay.
235 */
236static void merge_queues(struct sem_array *sma)
237{
238    int i;
239    for (i = 0; i < sma->sem_nsems; i++) {
240        struct sem *sem = sma->sem_base + i;
241
242        list_splice_init(&sem->pending_alter, &sma->pending_alter);
243    }
244}
245
246static void sem_rcu_free(struct rcu_head *head)
247{
248    struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
249    struct sem_array *sma = ipc_rcu_to_struct(p);
250
251    security_sem_free(sma);
252    ipc_rcu_free(head);
253}
254
255/*
256 * Wait until all currently ongoing simple ops have completed.
257 * Caller must own sem_perm.lock.
258 * New simple ops cannot start, because simple ops first check
259 * that a) sem_perm.lock is free and
260 * b) complex_count is 0.
261 */
262static void sem_wait_array(struct sem_array *sma)
263{
264    int i;
265    struct sem *sem;
266
267    if (sma->complex_count) {
268        /* The thread that increased sma->complex_count waited on
269         * all sem->lock locks. Thus we don't need to wait again.
270         */
271        return;
272    }
273
274    for (i = 0; i < sma->sem_nsems; i++) {
275        sem = sma->sem_base + i;
276        spin_unlock_wait(&sem->lock);
277    }
278}
279
280/*
281 * If the request contains only one semaphore operation, and there are
282 * no complex transactions pending, lock only the semaphore involved.
283 * Otherwise, lock the entire semaphore array, since we either have
284 * multiple semaphores in our own semops, or we need to look at
285 * semaphores from other pending complex operations.
286 */
287static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
288                  int nsops)
289{
290    struct sem *sem;
291
292    if (nsops != 1) {
293        /* Complex operation - acquire a full lock */
294        ipc_lock_object(&sma->sem_perm);
295
296        /* And wait until all simple ops that are processed
297         * right now have dropped their locks.
298         */
299        sem_wait_array(sma);
300        return -1;
301    }
302
303    /*
304     * Only one semaphore affected - try to optimize locking.
305     * The rules are:
306     * - optimized locking is possible if no complex operation
307     * is either enqueued or processed right now.
308     * - The test for enqueued complex ops is simple:
309     * sma->complex_count != 0
310     * - Testing for complex ops that are processed right now is
311     * a bit more difficult. Complex ops acquire the full lock
312 * and first wait until the running simple ops have completed.
313     * (see above)
314     * Thus: If we own a simple lock and the global lock is free
315     * and complex_count is now 0, then it will stay 0 and
316     * thus just locking sem->lock is sufficient.
317     */
318    sem = sma->sem_base + sops->sem_num;
319
320    if (sma->complex_count == 0) {
321        /*
322         * It appears that no complex operation is around.
323         * Acquire the per-semaphore lock.
324         */
325        spin_lock(&sem->lock);
326
327        /* Then check that the global lock is free */
328        if (!spin_is_locked(&sma->sem_perm.lock)) {
329            /* spin_is_locked() is not a memory barrier */
330            smp_mb();
331
332            /* Now repeat the test of complex_count:
333             * It can't change anymore until we drop sem->lock.
334 * Thus: if it is now 0, then it will stay 0.
335             */
336            if (sma->complex_count == 0) {
337                /* fast path successful! */
338                return sops->sem_num;
339            }
340        }
341        spin_unlock(&sem->lock);
342    }
343
344    /* slow path: acquire the full lock */
345    ipc_lock_object(&sma->sem_perm);
346
347    if (sma->complex_count == 0) {
348        /* False alarm:
349         * There is no complex operation, thus we can switch
350         * back to the fast path.
351         */
352        spin_lock(&sem->lock);
353        ipc_unlock_object(&sma->sem_perm);
354        return sops->sem_num;
355    } else {
356        /* Not a false alarm, thus complete the sequence for a
357         * full lock.
358         */
359        sem_wait_array(sma);
360        return -1;
361    }
362}
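/*
 * Usage sketch (illustrative, not from the original source): sem_lock()
 * returns either the semaphore number whose per-semaphore lock was taken
 * (single-sop fast path) or -1 when the global lock was taken; the same
 * value must be handed back to sem_unlock().
 */
#if 0
	int locknum;

	locknum = sem_lock(sma, sops, nsops);
	/* ... operate on the array ... */
	sem_unlock(sma, locknum);
#endif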
363
364static inline void sem_unlock(struct sem_array *sma, int locknum)
365{
366    if (locknum == -1) {
367        unmerge_queues(sma);
368        ipc_unlock_object(&sma->sem_perm);
369    } else {
370        struct sem *sem = sma->sem_base + locknum;
371        spin_unlock(&sem->lock);
372    }
373}
374
375/*
376 * sem_lock_(check_) routines are called in the paths where the rwsem
377 * is not held.
378 *
379 * The caller holds the RCU read lock.
380 */
381static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
382            int id, struct sembuf *sops, int nsops, int *locknum)
383{
384    struct kern_ipc_perm *ipcp;
385    struct sem_array *sma;
386
387    ipcp = ipc_obtain_object(&sem_ids(ns), id);
388    if (IS_ERR(ipcp))
389        return ERR_CAST(ipcp);
390
391    sma = container_of(ipcp, struct sem_array, sem_perm);
392    *locknum = sem_lock(sma, sops, nsops);
393
394    /* ipc_rmid() may have already freed the ID while sem_lock
395     * was spinning: verify that the structure is still valid
396     */
397    if (ipc_valid_object(ipcp))
398        return container_of(ipcp, struct sem_array, sem_perm);
399
400    sem_unlock(sma, *locknum);
401    return ERR_PTR(-EINVAL);
402}
403
404static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
405{
406    struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
407
408    if (IS_ERR(ipcp))
409        return ERR_CAST(ipcp);
410
411    return container_of(ipcp, struct sem_array, sem_perm);
412}
413
414static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
415                            int id)
416{
417    struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
418
419    if (IS_ERR(ipcp))
420        return ERR_CAST(ipcp);
421
422    return container_of(ipcp, struct sem_array, sem_perm);
423}
424
425static inline void sem_lock_and_putref(struct sem_array *sma)
426{
427    sem_lock(sma, NULL, -1);
428    ipc_rcu_putref(sma, ipc_rcu_free);
429}
430
431static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
432{
433    ipc_rmid(&sem_ids(ns), &s->sem_perm);
434}
435
436/*
437 * Lockless wakeup algorithm:
438 * Without the check/retry algorithm a lockless wakeup is possible:
439 * - queue.status is initialized to -EINTR before blocking.
440 * - wakeup is performed by
441 * * unlinking the queue entry from the pending list
442 * * setting queue.status to IN_WAKEUP
443 * This is the notification for the blocked thread that a
444 * result value is imminent.
445 * * call wake_up_process
446 * * set queue.status to the final value.
447 * - the previously blocked thread checks queue.status:
448 * * if it's IN_WAKEUP, then it must wait until the value changes
449 * * if it's not -EINTR, then the operation was completed by
450 * update_queue. semtimedop can return queue.status without
451 * performing any operation on the sem array.
452 * * otherwise it must acquire the spinlock and check what's up.
453 *
454 * The two-stage algorithm is necessary to protect against the following
455 * races:
456 * - if queue.status is set after wake_up_process, then the woken up idle
457 * thread could race forward and try (and fail) to acquire sma->lock
458 * before update_queue had a chance to set queue.status
459 * - if queue.status is written before wake_up_process and if the
460 * blocked process is woken up by a signal between writing
461 * queue.status and the wake_up_process, then the woken up
462 * process could return from semtimedop and die by calling
463 * sys_exit before wake_up_process is called. Then wake_up_process
464 * will oops, because the task structure is already invalid.
465 * (yes, this happened on s390 with sysv msg).
466 *
467 */
468#define IN_WAKEUP 1
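/*
 * Illustrative sketch of the waiter side of the handshake described
 * above (get_queue_result() below implements exactly this): spin while
 * the status still holds the intermediate IN_WAKEUP value, then consume
 * the final error code.
 */
#if 0
	error = q->status;
	while (error == IN_WAKEUP) {
		cpu_relax();
		error = q->status;
	}
#endif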
469
470/**
471 * newary - Create a new semaphore set
472 * @ns: namespace
473 * @params: ptr to the structure that contains key, semflg and nsems
474 *
475 * Called with sem_ids.rwsem held (as a writer)
476 */
477static int newary(struct ipc_namespace *ns, struct ipc_params *params)
478{
479    int id;
480    int retval;
481    struct sem_array *sma;
482    int size;
483    key_t key = params->key;
484    int nsems = params->u.nsems;
485    int semflg = params->flg;
486    int i;
487
488    if (!nsems)
489        return -EINVAL;
490    if (ns->used_sems + nsems > ns->sc_semmns)
491        return -ENOSPC;
492
493    size = sizeof(*sma) + nsems * sizeof(struct sem);
494    sma = ipc_rcu_alloc(size);
495    if (!sma)
496        return -ENOMEM;
497
498    memset(sma, 0, size);
499
500    sma->sem_perm.mode = (semflg & S_IRWXUGO);
501    sma->sem_perm.key = key;
502
503    sma->sem_perm.security = NULL;
504    retval = security_sem_alloc(sma);
505    if (retval) {
506        ipc_rcu_putref(sma, ipc_rcu_free);
507        return retval;
508    }
509
510    id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
511    if (id < 0) {
512        ipc_rcu_putref(sma, sem_rcu_free);
513        return id;
514    }
515    ns->used_sems += nsems;
516
517    sma->sem_base = (struct sem *) &sma[1];
518
519    for (i = 0; i < nsems; i++) {
520        INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
521        INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
522        spin_lock_init(&sma->sem_base[i].lock);
523    }
524
525    sma->complex_count = 0;
526    INIT_LIST_HEAD(&sma->pending_alter);
527    INIT_LIST_HEAD(&sma->pending_const);
528    INIT_LIST_HEAD(&sma->list_id);
529    sma->sem_nsems = nsems;
530    sma->sem_ctime = get_seconds();
531    sem_unlock(sma, -1);
532    rcu_read_unlock();
533
534    return sma->sem_perm.id;
535}
536
537
538/*
539 * Called with sem_ids.rwsem and ipcp locked.
540 */
541static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
542{
543    struct sem_array *sma;
544
545    sma = container_of(ipcp, struct sem_array, sem_perm);
546    return security_sem_associate(sma, semflg);
547}
548
549/*
550 * Called with sem_ids.rwsem and ipcp locked.
551 */
552static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
553                struct ipc_params *params)
554{
555    struct sem_array *sma;
556
557    sma = container_of(ipcp, struct sem_array, sem_perm);
558    if (params->u.nsems > sma->sem_nsems)
559        return -EINVAL;
560
561    return 0;
562}
563
564SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
565{
566    struct ipc_namespace *ns;
567    static const struct ipc_ops sem_ops = {
568        .getnew = newary,
569        .associate = sem_security,
570        .more_checks = sem_more_checks,
571    };
572    struct ipc_params sem_params;
573
574    ns = current->nsproxy->ipc_ns;
575
576    if (nsems < 0 || nsems > ns->sc_semmsl)
577        return -EINVAL;
578
579    sem_params.key = key;
580    sem_params.flg = semflg;
581    sem_params.u.nsems = nsems;
582
583    return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
584}
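/*
 * Usage sketch (illustrative): when opening an existing set, a smaller
 * nsems succeeds while a larger one fails with EINVAL (enforced by
 * sem_more_checks() above); the key and sizes are made-up examples.
 */
#if 0
	int id = semget(key, 4, IPC_CREAT | 0600);	/* create 4 semaphores */

	semget(key, 2, 0600);	/* succeeds: 2 <= 4 */
	semget(key, 8, 0600);	/* fails with EINVAL: 8 > 4 */
#endif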
585
586/**
587 * perform_atomic_semop - Perform (if possible) a semaphore operation
588 * @sma: semaphore array
589 * @q: struct sem_queue that describes the operation
590 *
591 * Returns 0 if the operation was possible.
592 * Returns 1 if the operation is impossible, the caller must sleep.
593 * Negative values are error codes.
594 */
595static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
596{
597    int result, sem_op, nsops, pid;
598    struct sembuf *sop;
599    struct sem *curr;
600    struct sembuf *sops;
601    struct sem_undo *un;
602
603    sops = q->sops;
604    nsops = q->nsops;
605    un = q->undo;
606
607    for (sop = sops; sop < sops + nsops; sop++) {
608        curr = sma->sem_base + sop->sem_num;
609        sem_op = sop->sem_op;
610        result = curr->semval;
611
612        if (!sem_op && result)
613            goto would_block;
614
615        result += sem_op;
616        if (result < 0)
617            goto would_block;
618        if (result > SEMVMX)
619            goto out_of_range;
620
621        if (sop->sem_flg & SEM_UNDO) {
622            int undo = un->semadj[sop->sem_num] - sem_op;
623            /* Exceeding the undo range is an error. */
624            if (undo < (-SEMAEM - 1) || undo > SEMAEM)
625                goto out_of_range;
626            un->semadj[sop->sem_num] = undo;
627        }
628
629        curr->semval = result;
630    }
631
632    sop--;
633    pid = q->pid;
634    while (sop >= sops) {
635        sma->sem_base[sop->sem_num].sempid = pid;
636        sop--;
637    }
638
639    return 0;
640
641out_of_range:
642    result = -ERANGE;
643    goto undo;
644
645would_block:
646    q->blocking = sop;
647
648    if (sop->sem_flg & IPC_NOWAIT)
649        result = -EAGAIN;
650    else
651        result = 1;
652
653undo:
654    sop--;
655    while (sop >= sops) {
656        sem_op = sop->sem_op;
657        sma->sem_base[sop->sem_num].semval -= sem_op;
658        if (sop->sem_flg & SEM_UNDO)
659            un->semadj[sop->sem_num] += sem_op;
660        sop--;
661    }
662
663    return result;
664}
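/*
 * Worked example (illustrative, not from the original source): for
 * sops = { {0, -1, 0}, {1, -1, 0} } with semval[0] == 1 and
 * semval[1] == 0, the first op tentatively sets semval[0] to 0, the
 * second op would drive semval[1] negative, so the code jumps to
 * would_block and the undo loop restores semval[0] to 1. The caller
 * must then sleep (return value 1), unless IPC_NOWAIT was set, in
 * which case -EAGAIN is returned.
 */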
665
666/**
667 * wake_up_sem_queue_prepare - prepare the wake-up of a queue entry
668 * @pt: list head for the tasks that must be woken up
669 * @q: queue entry that must be signaled
670 * @error: error value for the signal
671 */
672static void wake_up_sem_queue_prepare(struct list_head *pt,
673                struct sem_queue *q, int error)
674{
675    if (list_empty(pt)) {
676        /*
677         * Hold preempt off so that we don't get preempted and have the
678         * wakee busy-wait until we're scheduled back on.
679         */
680        preempt_disable();
681    }
682    q->status = IN_WAKEUP;
683    q->pid = error;
684
685    list_add_tail(&q->list, pt);
686}
687
688/**
689 * wake_up_sem_queue_do - do the actual wake-up
690 * @pt: list of tasks to be woken up
691 *
692 * Do the actual wake-up.
693 * The function is called without any locks held, thus the semaphore array
694 * could be destroyed already and the tasks can disappear as soon as the
695 * status is set to the actual return code.
696 */
697static void wake_up_sem_queue_do(struct list_head *pt)
698{
699    struct sem_queue *q, *t;
700    int did_something;
701
702    did_something = !list_empty(pt);
703    list_for_each_entry_safe(q, t, pt, list) {
704        wake_up_process(q->sleeper);
705        /* q can disappear immediately after writing q->status. */
706        smp_wmb();
707        q->status = q->pid;
708    }
709    if (did_something)
710        preempt_enable();
711}
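/*
 * Typical waker-side sequence (illustrative sketch; semctl_setval()
 * below follows this pattern): wake-ups are collected under the lock
 * and issued only after all locks have been dropped.
 */
#if 0
	struct list_head tasks;

	INIT_LIST_HEAD(&tasks);
	/* rcu_read_lock() already held, object already looked up */
	sem_lock(sma, NULL, -1);
	do_smart_update(sma, NULL, 0, 0, &tasks);
	sem_unlock(sma, -1);
	rcu_read_unlock();
	wake_up_sem_queue_do(&tasks);	/* no locks held here */
#endif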
712
713static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
714{
715    list_del(&q->list);
716    if (q->nsops > 1)
717        sma->complex_count--;
718}
719
720/** check_restart(sma, q)
721 * @sma: semaphore array
722 * @q: the operation that just completed
723 *
724 * update_queue is O(N^2) when it restarts scanning the whole queue of
725 * waiting operations. Therefore this function checks if the restart is
726 * really necessary. It is called after a previously waiting operation
727 * modified the array.
728 * Note that wait-for-zero operations are handled without restart.
729 */
730static int check_restart(struct sem_array *sma, struct sem_queue *q)
731{
732    /* pending complex alter operations are too difficult to analyse */
733    if (!list_empty(&sma->pending_alter))
734        return 1;
735
736    /* we were a sleeping complex operation. Too difficult */
737    if (q->nsops > 1)
738        return 1;
739
740    /* It is impossible that someone waits for the new value:
741     * - complex operations always restart.
742 * - wait-for-zero ops are handled separately.
743     * - q is a previously sleeping simple operation that
744     * altered the array. It must be a decrement, because
745     * simple increments never sleep.
746     * - If there are older (higher priority) decrements
747     * in the queue, then they have observed the original
748     * semval value and couldn't proceed. The operation
749 * decremented the value - thus they won't proceed either.
750     */
751    return 0;
752}
753
754/**
755 * wake_const_ops - wake up non-alter tasks
756 * @sma: semaphore array.
757 * @semnum: semaphore that was modified.
758 * @pt: list head for the tasks that must be woken up.
759 *
760 * wake_const_ops must be called after a semaphore in a semaphore array
761 * was set to 0. If complex const operations are pending, wake_const_ops must
762 * be called with semnum = -1, as well as with the number of each modified
763 * semaphore.
764 * The tasks that must be woken up are added to @pt. The return code
765 * is stored in q->pid.
766 * The function returns 1 if at least one operation was completed successfully.
767 */
768static int wake_const_ops(struct sem_array *sma, int semnum,
769                struct list_head *pt)
770{
771    struct sem_queue *q;
772    struct list_head *walk;
773    struct list_head *pending_list;
774    int semop_completed = 0;
775
776    if (semnum == -1)
777        pending_list = &sma->pending_const;
778    else
779        pending_list = &sma->sem_base[semnum].pending_const;
780
781    walk = pending_list->next;
782    while (walk != pending_list) {
783        int error;
784
785        q = container_of(walk, struct sem_queue, list);
786        walk = walk->next;
787
788        error = perform_atomic_semop(sma, q);
789
790        if (error <= 0) {
791            /* operation completed, remove from queue & wakeup */
792
793            unlink_queue(sma, q);
794
795            wake_up_sem_queue_prepare(pt, q, error);
796            if (error == 0)
797                semop_completed = 1;
798        }
799    }
800    return semop_completed;
801}
802
803/**
804 * do_smart_wakeup_zero - wake up all wait-for-zero tasks
805 * @sma: semaphore array
806 * @sops: operations that were performed
807 * @nsops: number of operations
808 * @pt: list head of the tasks that must be woken up.
809 *
810 * Checks all required queues for wait-for-zero operations, based
811 * on the actual changes that were performed on the semaphore array.
812 * The function returns 1 if at least one operation was completed successfully.
813 */
814static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
815                    int nsops, struct list_head *pt)
816{
817    int i;
818    int semop_completed = 0;
819    int got_zero = 0;
820
821    /* first: the per-semaphore queues, if known */
822    if (sops) {
823        for (i = 0; i < nsops; i++) {
824            int num = sops[i].sem_num;
825
826            if (sma->sem_base[num].semval == 0) {
827                got_zero = 1;
828                semop_completed |= wake_const_ops(sma, num, pt);
829            }
830        }
831    } else {
832        /*
833         * No sops means modified semaphores not known.
834         * Assume all were changed.
835         */
836        for (i = 0; i < sma->sem_nsems; i++) {
837            if (sma->sem_base[i].semval == 0) {
838                got_zero = 1;
839                semop_completed |= wake_const_ops(sma, i, pt);
840            }
841        }
842    }
843    /*
844     * If one of the modified semaphores got 0,
845     * then check the global queue, too.
846     */
847    if (got_zero)
848        semop_completed |= wake_const_ops(sma, -1, pt);
849
850    return semop_completed;
851}
852
853
854/**
855 * update_queue - look for tasks that can be completed.
856 * @sma: semaphore array.
857 * @semnum: semaphore that was modified.
858 * @pt: list head for the tasks that must be woken up.
859 *
860 * update_queue must be called after a semaphore in a semaphore array
861 * was modified. If multiple semaphores were modified, update_queue must
862 * be called with semnum = -1, as well as with the number of each modified
863 * semaphore.
864 * The tasks that must be woken up are added to @pt. The return code
865 * is stored in q->pid.
866 * The function internally checks if const operations can now succeed.
867 *
868 * The function returns 1 if at least one semop was completed successfully.
869 */
870static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
871{
872    struct sem_queue *q;
873    struct list_head *walk;
874    struct list_head *pending_list;
875    int semop_completed = 0;
876
877    if (semnum == -1)
878        pending_list = &sma->pending_alter;
879    else
880        pending_list = &sma->sem_base[semnum].pending_alter;
881
882again:
883    walk = pending_list->next;
884    while (walk != pending_list) {
885        int error, restart;
886
887        q = container_of(walk, struct sem_queue, list);
888        walk = walk->next;
889
890        /* If we are scanning the single-sop, per-semaphore list of
891         * one semaphore and that semaphore is 0, then it is not
892         * necessary to scan further: simple increments
893         * that affect only one entry succeed immediately and cannot
894         * be in the per semaphore pending queue, and decrements
895         * cannot be successful if the value is already 0.
896         */
897        if (semnum != -1 && sma->sem_base[semnum].semval == 0)
898            break;
899
900        error = perform_atomic_semop(sma, q);
901
902        /* Does q->sleeper still need to sleep? */
903        if (error > 0)
904            continue;
905
906        unlink_queue(sma, q);
907
908        if (error) {
909            restart = 0;
910        } else {
911            semop_completed = 1;
912            do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
913            restart = check_restart(sma, q);
914        }
915
916        wake_up_sem_queue_prepare(pt, q, error);
917        if (restart)
918            goto again;
919    }
920    return semop_completed;
921}
922
923/**
924 * set_semotime - set sem_otime
925 * @sma: semaphore array
926 * @sops: operations that modified the array, may be NULL
927 *
928 * sem_otime is replicated to avoid cache line thrashing.
929 * This function sets one instance to the current time.
930 */
931static void set_semotime(struct sem_array *sma, struct sembuf *sops)
932{
933    if (sops == NULL) {
934        sma->sem_base[0].sem_otime = get_seconds();
935    } else {
936        sma->sem_base[sops[0].sem_num].sem_otime =
937                            get_seconds();
938    }
939}
940
941/**
942 * do_smart_update - optimized update_queue
943 * @sma: semaphore array
944 * @sops: operations that were performed
945 * @nsops: number of operations
946 * @otime: force setting otime
947 * @pt: list head of the tasks that must be woken up.
948 *
949 * do_smart_update() does the required calls to update_queue() and do_smart_wakeup_zero(),
950 * based on the actual changes that were performed on the semaphore array.
951 * Note that the function does not do the actual wake-up: the caller is
952 * responsible for calling wake_up_sem_queue_do(@pt).
953 * It is safe to perform this call after dropping all locks.
954 */
955static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
956            int otime, struct list_head *pt)
957{
958    int i;
959
960    otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
961
962    if (!list_empty(&sma->pending_alter)) {
963        /* semaphore array uses the global queue - just process it. */
964        otime |= update_queue(sma, -1, pt);
965    } else {
966        if (!sops) {
967            /*
968             * No sops, thus the modified semaphores are not
969             * known. Check all.
970             */
971            for (i = 0; i < sma->sem_nsems; i++)
972                otime |= update_queue(sma, i, pt);
973        } else {
974            /*
975             * Check the semaphores that were increased:
976 * - No complex ops, thus all sleeping ops are
977 * decrements.
978 * - if we decreased the value, then any sleeping
979 * semaphore ops won't be able to run: if the
980 * previous value was too small, then the new
981 * value will be too small, too.
982             */
983            for (i = 0; i < nsops; i++) {
984                if (sops[i].sem_op > 0) {
985                    otime |= update_queue(sma,
986                            sops[i].sem_num, pt);
987                }
988            }
989        }
990    }
991    if (otime)
992        set_semotime(sma, sops);
993}
994
995/*
996 * check_qop: Test if a queued operation sleeps on the semaphore semnum
997 */
998static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q,
999            bool count_zero)
1000{
1001    struct sembuf *sop = q->blocking;
1002
1003    /*
1004     * Linux always (since 0.99.10) reported a task as sleeping on all
1005     * semaphores. This violates SUS, therefore it was changed to the
1006     * standard compliant behavior.
1007     * Give the administrators a chance to notice that an application
1008     * might misbehave because it relies on the Linux behavior.
1009     */
1010    pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n"
1011            "The task %s (%d) triggered the difference, watch for misbehavior.\n",
1012            current->comm, task_pid_nr(current));
1013
1014    if (sop->sem_num != semnum)
1015        return 0;
1016
1017    if (count_zero && sop->sem_op == 0)
1018        return 1;
1019    if (!count_zero && sop->sem_op < 0)
1020        return 1;
1021
1022    return 0;
1023}
1024
1025/* The following counts are associated to each semaphore:
1026 * semncnt number of tasks waiting on semval being nonzero
1027 * semzcnt number of tasks waiting on semval being zero
1028 *
1029 * Per definition, a task waits only on the semaphore of the first semop
1030 * that cannot proceed, even if additional operations would block, too.
1031 */
1032static int count_semcnt(struct sem_array *sma, ushort semnum,
1033            bool count_zero)
1034{
1035    struct list_head *l;
1036    struct sem_queue *q;
1037    int semcnt;
1038
1039    semcnt = 0;
1040    /* First: check the simple operations. They are easy to evaluate */
1041    if (count_zero)
1042        l = &sma->sem_base[semnum].pending_const;
1043    else
1044        l = &sma->sem_base[semnum].pending_alter;
1045
1046    list_for_each_entry(q, l, list) {
1047 * all tasks on a per-semaphore list sleep on exactly
1048 * that semaphore.
1049         */
1050        semcnt++;
1051    }
1052
1053    /* Then: check the complex operations. */
1054    list_for_each_entry(q, &sma->pending_alter, list) {
1055        semcnt += check_qop(sma, semnum, q, count_zero);
1056    }
1057    if (count_zero) {
1058        list_for_each_entry(q, &sma->pending_const, list) {
1059            semcnt += check_qop(sma, semnum, q, count_zero);
1060        }
1061    }
1062    return semcnt;
1063}
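/*
 * Illustrative example of the counting rule above: a task blocked in
 * semop() on { {2, -1, 0}, {5, 0, 0} } is counted only in the semncnt
 * of semaphore 2 (its first blocking operation), not in the semzcnt of
 * semaphore 5, even though that operation would block as well.
 */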
1064
1065/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1066 * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1067 * remains locked on exit.
1068 */
1069static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1070{
1071    struct sem_undo *un, *tu;
1072    struct sem_queue *q, *tq;
1073    struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1074    struct list_head tasks;
1075    int i;
1076
1077    /* Free the existing undo structures for this semaphore set. */
1078    ipc_assert_locked_object(&sma->sem_perm);
1079    list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1080        list_del(&un->list_id);
1081        spin_lock(&un->ulp->lock);
1082        un->semid = -1;
1083        list_del_rcu(&un->list_proc);
1084        spin_unlock(&un->ulp->lock);
1085        kfree_rcu(un, rcu);
1086    }
1087
1088    /* Wake up all pending processes and let them fail with EIDRM. */
1089    INIT_LIST_HEAD(&tasks);
1090    list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1091        unlink_queue(sma, q);
1092        wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1093    }
1094
1095    list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1096        unlink_queue(sma, q);
1097        wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1098    }
1099    for (i = 0; i < sma->sem_nsems; i++) {
1100        struct sem *sem = sma->sem_base + i;
1101        list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1102            unlink_queue(sma, q);
1103            wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1104        }
1105        list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1106            unlink_queue(sma, q);
1107            wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1108        }
1109    }
1110
1111    /* Remove the semaphore set from the IDR */
1112    sem_rmid(ns, sma);
1113    sem_unlock(sma, -1);
1114    rcu_read_unlock();
1115
1116    wake_up_sem_queue_do(&tasks);
1117    ns->used_sems -= sma->sem_nsems;
1118    ipc_rcu_putref(sma, sem_rcu_free);
1119}
1120
1121static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1122{
1123    switch (version) {
1124    case IPC_64:
1125        return copy_to_user(buf, in, sizeof(*in));
1126    case IPC_OLD:
1127        {
1128        struct semid_ds out;
1129
1130        memset(&out, 0, sizeof(out));
1131
1132        ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1133
1134        out.sem_otime = in->sem_otime;
1135        out.sem_ctime = in->sem_ctime;
1136        out.sem_nsems = in->sem_nsems;
1137
1138        return copy_to_user(buf, &out, sizeof(out));
1139        }
1140    default:
1141        return -EINVAL;
1142    }
1143}
1144
1145static time_t get_semotime(struct sem_array *sma)
1146{
1147    int i;
1148    time_t res;
1149
1150    res = sma->sem_base[0].sem_otime;
1151    for (i = 1; i < sma->sem_nsems; i++) {
1152        time_t to = sma->sem_base[i].sem_otime;
1153
1154        if (to > res)
1155            res = to;
1156    }
1157    return res;
1158}
1159
1160static int semctl_nolock(struct ipc_namespace *ns, int semid,
1161             int cmd, int version, void __user *p)
1162{
1163    int err;
1164    struct sem_array *sma;
1165
1166    switch (cmd) {
1167    case IPC_INFO:
1168    case SEM_INFO:
1169    {
1170        struct seminfo seminfo;
1171        int max_id;
1172
1173        err = security_sem_semctl(NULL, cmd);
1174        if (err)
1175            return err;
1176
1177        memset(&seminfo, 0, sizeof(seminfo));
1178        seminfo.semmni = ns->sc_semmni;
1179        seminfo.semmns = ns->sc_semmns;
1180        seminfo.semmsl = ns->sc_semmsl;
1181        seminfo.semopm = ns->sc_semopm;
1182        seminfo.semvmx = SEMVMX;
1183        seminfo.semmnu = SEMMNU;
1184        seminfo.semmap = SEMMAP;
1185        seminfo.semume = SEMUME;
1186        down_read(&sem_ids(ns).rwsem);
1187        if (cmd == SEM_INFO) {
1188            seminfo.semusz = sem_ids(ns).in_use;
1189            seminfo.semaem = ns->used_sems;
1190        } else {
1191            seminfo.semusz = SEMUSZ;
1192            seminfo.semaem = SEMAEM;
1193        }
1194        max_id = ipc_get_maxid(&sem_ids(ns));
1195        up_read(&sem_ids(ns).rwsem);
1196        if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1197            return -EFAULT;
1198        return (max_id < 0) ? 0 : max_id;
1199    }
1200    case IPC_STAT:
1201    case SEM_STAT:
1202    {
1203        struct semid64_ds tbuf;
1204        int id = 0;
1205
1206        memset(&tbuf, 0, sizeof(tbuf));
1207
1208        rcu_read_lock();
1209        if (cmd == SEM_STAT) {
1210            sma = sem_obtain_object(ns, semid);
1211            if (IS_ERR(sma)) {
1212                err = PTR_ERR(sma);
1213                goto out_unlock;
1214            }
1215            id = sma->sem_perm.id;
1216        } else {
1217            sma = sem_obtain_object_check(ns, semid);
1218            if (IS_ERR(sma)) {
1219                err = PTR_ERR(sma);
1220                goto out_unlock;
1221            }
1222        }
1223
1224        err = -EACCES;
1225        if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1226            goto out_unlock;
1227
1228        err = security_sem_semctl(sma, cmd);
1229        if (err)
1230            goto out_unlock;
1231
1232        kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
1233        tbuf.sem_otime = get_semotime(sma);
1234        tbuf.sem_ctime = sma->sem_ctime;
1235        tbuf.sem_nsems = sma->sem_nsems;
1236        rcu_read_unlock();
1237        if (copy_semid_to_user(p, &tbuf, version))
1238            return -EFAULT;
1239        return id;
1240    }
1241    default:
1242        return -EINVAL;
1243    }
1244out_unlock:
1245    rcu_read_unlock();
1246    return err;
1247}
1248
1249static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1250        unsigned long arg)
1251{
1252    struct sem_undo *un;
1253    struct sem_array *sma;
1254    struct sem *curr;
1255    int err;
1256    struct list_head tasks;
1257    int val;
1258#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1259    /* big-endian 64bit */
1260    val = arg >> 32;
1261#else
1262    /* 32bit or little-endian 64bit */
1263    val = arg;
1264#endif
1265
1266    if (val > SEMVMX || val < 0)
1267        return -ERANGE;
1268
1269    INIT_LIST_HEAD(&tasks);
1270
1271    rcu_read_lock();
1272    sma = sem_obtain_object_check(ns, semid);
1273    if (IS_ERR(sma)) {
1274        rcu_read_unlock();
1275        return PTR_ERR(sma);
1276    }
1277
1278    if (semnum < 0 || semnum >= sma->sem_nsems) {
1279        rcu_read_unlock();
1280        return -EINVAL;
1281    }
1282
1283
1284    if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1285        rcu_read_unlock();
1286        return -EACCES;
1287    }
1288
1289    err = security_sem_semctl(sma, SETVAL);
1290    if (err) {
1291        rcu_read_unlock();
1292        return -EACCES;
1293    }
1294
1295    sem_lock(sma, NULL, -1);
1296
1297    if (!ipc_valid_object(&sma->sem_perm)) {
1298        sem_unlock(sma, -1);
1299        rcu_read_unlock();
1300        return -EIDRM;
1301    }
1302
1303    curr = &sma->sem_base[semnum];
1304
1305    ipc_assert_locked_object(&sma->sem_perm);
1306    list_for_each_entry(un, &sma->list_id, list_id)
1307        un->semadj[semnum] = 0;
1308
1309    curr->semval = val;
1310    curr->sempid = task_tgid_vnr(current);
1311    sma->sem_ctime = get_seconds();
1312    /* maybe some queued-up processes were waiting for this */
1313    do_smart_update(sma, NULL, 0, 0, &tasks);
1314    sem_unlock(sma, -1);
1315    rcu_read_unlock();
1316    wake_up_sem_queue_do(&tasks);
1317    return 0;
1318}
1319
1320static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1321        int cmd, void __user *p)
1322{
1323    struct sem_array *sma;
1324    struct sem *curr;
1325    int err, nsems;
1326    ushort fast_sem_io[SEMMSL_FAST];
1327    ushort *sem_io = fast_sem_io;
1328    struct list_head tasks;
1329
1330    INIT_LIST_HEAD(&tasks);
1331
1332    rcu_read_lock();
1333    sma = sem_obtain_object_check(ns, semid);
1334    if (IS_ERR(sma)) {
1335        rcu_read_unlock();
1336        return PTR_ERR(sma);
1337    }
1338
1339    nsems = sma->sem_nsems;
1340
1341    err = -EACCES;
1342    if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1343        goto out_rcu_wakeup;
1344
1345    err = security_sem_semctl(sma, cmd);
1346    if (err)
1347        goto out_rcu_wakeup;
1348
1349    err = -EACCES;
1350    switch (cmd) {
1351    case GETALL:
1352    {
1353        ushort __user *array = p;
1354        int i;
1355
1356        sem_lock(sma, NULL, -1);
1357        if (!ipc_valid_object(&sma->sem_perm)) {
1358            err = -EIDRM;
1359            goto out_unlock;
1360        }
1361        if (nsems > SEMMSL_FAST) {
1362            if (!ipc_rcu_getref(sma)) {
1363                err = -EIDRM;
1364                goto out_unlock;
1365            }
1366            sem_unlock(sma, -1);
1367            rcu_read_unlock();
1368            sem_io = ipc_alloc(sizeof(ushort)*nsems);
1369            if (sem_io == NULL) {
1370                ipc_rcu_putref(sma, ipc_rcu_free);
1371                return -ENOMEM;
1372            }
1373
1374            rcu_read_lock();
1375            sem_lock_and_putref(sma);
1376            if (!ipc_valid_object(&sma->sem_perm)) {
1377                err = -EIDRM;
1378                goto out_unlock;
1379            }
1380        }
1381        for (i = 0; i < sma->sem_nsems; i++)
1382            sem_io[i] = sma->sem_base[i].semval;
1383        sem_unlock(sma, -1);
1384        rcu_read_unlock();
1385        err = 0;
1386        if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1387            err = -EFAULT;
1388        goto out_free;
1389    }
1390    case SETALL:
1391    {
1392        int i;
1393        struct sem_undo *un;
1394
1395        if (!ipc_rcu_getref(sma)) {
1396            err = -EIDRM;
1397            goto out_rcu_wakeup;
1398        }
1399        rcu_read_unlock();
1400
1401        if (nsems > SEMMSL_FAST) {
1402            sem_io = ipc_alloc(sizeof(ushort)*nsems);
1403            if (sem_io == NULL) {
1404                ipc_rcu_putref(sma, ipc_rcu_free);
1405                return -ENOMEM;
1406            }
1407        }
1408
1409        if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
1410            ipc_rcu_putref(sma, ipc_rcu_free);
1411            err = -EFAULT;
1412            goto out_free;
1413        }
1414
1415        for (i = 0; i < nsems; i++) {
1416            if (sem_io[i] > SEMVMX) {
1417                ipc_rcu_putref(sma, ipc_rcu_free);
1418                err = -ERANGE;
1419                goto out_free;
1420            }
1421        }
1422        rcu_read_lock();
1423        sem_lock_and_putref(sma);
1424        if (!ipc_valid_object(&sma->sem_perm)) {
1425            err = -EIDRM;
1426            goto out_unlock;
1427        }
1428
1429        for (i = 0; i < nsems; i++)
1430            sma->sem_base[i].semval = sem_io[i];
1431
1432        ipc_assert_locked_object(&sma->sem_perm);
1433        list_for_each_entry(un, &sma->list_id, list_id) {
1434            for (i = 0; i < nsems; i++)
1435                un->semadj[i] = 0;
1436        }
1437        sma->sem_ctime = get_seconds();
1438        /* maybe some queued-up processes were waiting for this */
1439        do_smart_update(sma, NULL, 0, 0, &tasks);
1440        err = 0;
1441        goto out_unlock;
1442    }
1443    /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1444    }
1445    err = -EINVAL;
1446    if (semnum < 0 || semnum >= nsems)
1447        goto out_rcu_wakeup;
1448
1449    sem_lock(sma, NULL, -1);
1450    if (!ipc_valid_object(&sma->sem_perm)) {
1451        err = -EIDRM;
1452        goto out_unlock;
1453    }
1454    curr = &sma->sem_base[semnum];
1455
1456    switch (cmd) {
1457    case GETVAL:
1458        err = curr->semval;
1459        goto out_unlock;
1460    case GETPID:
1461        err = curr->sempid;
1462        goto out_unlock;
1463    case GETNCNT:
1464        err = count_semcnt(sma, semnum, 0);
1465        goto out_unlock;
1466    case GETZCNT:
1467        err = count_semcnt(sma, semnum, 1);
1468        goto out_unlock;
1469    }
1470
1471out_unlock:
1472    sem_unlock(sma, -1);
1473out_rcu_wakeup:
1474    rcu_read_unlock();
1475    wake_up_sem_queue_do(&tasks);
1476out_free:
1477    if (sem_io != fast_sem_io)
1478        ipc_free(sem_io, sizeof(ushort)*nsems);
1479    return err;
1480}
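/*
 * Usage sketch (illustrative; per SUSv3 the semun union must be
 * declared by the caller):
 */
#if 0
	union semun {
		int val;
		struct semid_ds *buf;
		unsigned short *array;
	};

	unsigned short vals[2] = { 1, 0 };
	union semun arg = { .array = vals };

	semctl(id, 0, SETALL, arg);	/* also clears all undo adjustments */
	semctl(id, 0, GETALL, arg);	/* reads all semvals back */
#endif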
1481
1482static inline unsigned long
1483copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1484{
1485    switch (version) {
1486    case IPC_64:
1487        if (copy_from_user(out, buf, sizeof(*out)))
1488            return -EFAULT;
1489        return 0;
1490    case IPC_OLD:
1491        {
1492        struct semid_ds tbuf_old;
1493
1494        if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1495            return -EFAULT;
1496
1497        out->sem_perm.uid = tbuf_old.sem_perm.uid;
1498        out->sem_perm.gid = tbuf_old.sem_perm.gid;
1499        out->sem_perm.mode = tbuf_old.sem_perm.mode;
1500
1501        return 0;
1502        }
1503    default:
1504        return -EINVAL;
1505    }
1506}
1507
1508/*
1509 * This function handles some semctl commands which require the rwsem
1510 * to be held in write mode.
1511 * NOTE: no locks must be held, the rwsem is taken inside this function.
1512 */
1513static int semctl_down(struct ipc_namespace *ns, int semid,
1514               int cmd, int version, void __user *p)
1515{
1516    struct sem_array *sma;
1517    int err;
1518    struct semid64_ds semid64;
1519    struct kern_ipc_perm *ipcp;
1520
1521    if (cmd == IPC_SET) {
1522        if (copy_semid_from_user(&semid64, p, version))
1523            return -EFAULT;
1524    }
1525
1526    down_write(&sem_ids(ns).rwsem);
1527    rcu_read_lock();
1528
1529    ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1530                      &semid64.sem_perm, 0);
1531    if (IS_ERR(ipcp)) {
1532        err = PTR_ERR(ipcp);
1533        goto out_unlock1;
1534    }
1535
1536    sma = container_of(ipcp, struct sem_array, sem_perm);
1537
1538    err = security_sem_semctl(sma, cmd);
1539    if (err)
1540        goto out_unlock1;
1541
1542    switch (cmd) {
1543    case IPC_RMID:
1544        sem_lock(sma, NULL, -1);
1545        /* freeary unlocks the ipc object and rcu */
1546        freeary(ns, ipcp);
1547        goto out_up;
1548    case IPC_SET:
1549        sem_lock(sma, NULL, -1);
1550        err = ipc_update_perm(&semid64.sem_perm, ipcp);
1551        if (err)
1552            goto out_unlock0;
1553        sma->sem_ctime = get_seconds();
1554        break;
1555    default:
1556        err = -EINVAL;
1557        goto out_unlock1;
1558    }
1559
1560out_unlock0:
1561    sem_unlock(sma, -1);
1562out_unlock1:
1563    rcu_read_unlock();
1564out_up:
1565    up_write(&sem_ids(ns).rwsem);
1566    return err;
1567}
1568
1569SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1570{
1571    int version;
1572    struct ipc_namespace *ns;
1573    void __user *p = (void __user *)arg;
1574
1575    if (semid < 0)
1576        return -EINVAL;
1577
1578    version = ipc_parse_version(&cmd);
1579    ns = current->nsproxy->ipc_ns;
1580
1581    switch (cmd) {
1582    case IPC_INFO:
1583    case SEM_INFO:
1584    case IPC_STAT:
1585    case SEM_STAT:
1586        return semctl_nolock(ns, semid, cmd, version, p);
1587    case GETALL:
1588    case GETVAL:
1589    case GETPID:
1590    case GETNCNT:
1591    case GETZCNT:
1592    case SETALL:
1593        return semctl_main(ns, semid, semnum, cmd, p);
1594    case SETVAL:
1595        return semctl_setval(ns, semid, semnum, arg);
1596    case IPC_RMID:
1597    case IPC_SET:
1598        return semctl_down(ns, semid, cmd, version, p);
1599    default:
1600        return -EINVAL;
1601    }
1602}
1603
1604/* If the task doesn't already have an undo_list, then allocate one
1605 * here. We guarantee there is only one thread using this undo list,
1606 * and current is THE ONE
1607 *
1608 * If this allocation and assignment succeeds, but later
1609 * portions of this code fail, there is no need to free the sem_undo_list.
1610 * Just let it stay associated with the task, and it'll be freed later
1611 * at exit time.
1612 *
1613 * This can block, so callers must hold no locks.
1614 */
1615static inline int get_undo_list(struct sem_undo_list **undo_listp)
1616{
1617    struct sem_undo_list *undo_list;
1618
1619    undo_list = current->sysvsem.undo_list;
1620    if (!undo_list) {
1621        undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1622        if (undo_list == NULL)
1623            return -ENOMEM;
1624        spin_lock_init(&undo_list->lock);
1625        atomic_set(&undo_list->refcnt, 1);
1626        INIT_LIST_HEAD(&undo_list->list_proc);
1627
1628        current->sysvsem.undo_list = undo_list;
1629    }
1630    *undo_listp = undo_list;
1631    return 0;
1632}
1633
1634static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1635{
1636    struct sem_undo *un;
1637
1638    list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1639        if (un->semid == semid)
1640            return un;
1641    }
1642    return NULL;
1643}
1644
1645static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1646{
1647    struct sem_undo *un;
1648
1649    assert_spin_locked(&ulp->lock);
1650
1651    un = __lookup_undo(ulp, semid);
1652    if (un) {
1653        list_del_rcu(&un->list_proc);
1654        list_add_rcu(&un->list_proc, &ulp->list_proc);
1655    }
1656    return un;
1657}
1658
1659/**
1660 * find_alloc_undo - lookup (and if not present create) undo array
1661 * @ns: namespace
1662 * @semid: semaphore array id
1663 *
1664 * The function looks up (and if not present creates) the undo structure.
1665 * The size of the undo structure depends on the size of the semaphore
1666 * array, thus the alloc path is not that straightforward.
1667 * Lifetime-rules: sem_undo is rcu-protected; on success, the function
1668 * performs an rcu_read_lock().
1669 */
1670static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1671{
1672    struct sem_array *sma;
1673    struct sem_undo_list *ulp;
1674    struct sem_undo *un, *new;
1675    int nsems, error;
1676
1677    error = get_undo_list(&ulp);
1678    if (error)
1679        return ERR_PTR(error);
1680
1681    rcu_read_lock();
1682    spin_lock(&ulp->lock);
1683    un = lookup_undo(ulp, semid);
1684    spin_unlock(&ulp->lock);
1685    if (likely(un != NULL))
1686        goto out;
1687
1688    /* no undo structure around - allocate one. */
1689    /* step 1: figure out the size of the semaphore array */
1690    sma = sem_obtain_object_check(ns, semid);
1691    if (IS_ERR(sma)) {
1692        rcu_read_unlock();
1693        return ERR_CAST(sma);
1694    }
1695
1696    nsems = sma->sem_nsems;
1697    if (!ipc_rcu_getref(sma)) {
1698        rcu_read_unlock();
1699        un = ERR_PTR(-EIDRM);
1700        goto out;
1701    }
1702    rcu_read_unlock();
1703
1704    /* step 2: allocate new undo structure */
1705    new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1706    if (!new) {
1707        ipc_rcu_putref(sma, ipc_rcu_free);
1708        return ERR_PTR(-ENOMEM);
1709    }
1710
1711    /* step 3: Acquire the lock on semaphore array */
1712    rcu_read_lock();
1713    sem_lock_and_putref(sma);
1714    if (!ipc_valid_object(&sma->sem_perm)) {
1715        sem_unlock(sma, -1);
1716        rcu_read_unlock();
1717        kfree(new);
1718        un = ERR_PTR(-EIDRM);
1719        goto out;
1720    }
1721    spin_lock(&ulp->lock);
1722
1723    /*
1724     * step 4: check for races: did someone else allocate the undo struct?
1725     */
1726    un = lookup_undo(ulp, semid);
1727    if (un) {
1728        kfree(new);
1729        goto success;
1730    }
1731    /* step 5: initialize & link new undo structure */
1732    new->semadj = (short *) &new[1];
1733    new->ulp = ulp;
1734    new->semid = semid;
1735    assert_spin_locked(&ulp->lock);
1736    list_add_rcu(&new->list_proc, &ulp->list_proc);
1737    ipc_assert_locked_object(&sma->sem_perm);
1738    list_add(&new->list_id, &sma->list_id);
1739    un = new;
1740
1741success:
1742    spin_unlock(&ulp->lock);
1743    sem_unlock(sma, -1);
1744out:
1745    return un;
1746}

/**
 * get_queue_result - retrieve the result code from sem_queue
 * @q: Pointer to queue structure
 *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
 * q->status, then we must loop until the value is replaced with the final
 * value: This may happen if a task is woken up by an unrelated event (e.g.
 * a signal) and in parallel the task is woken up by another task because
 * it got the requested semaphores.
 *
 * The function can be called with or without holding the semaphore spinlock.
 */
static int get_queue_result(struct sem_queue *q)
{
    int error;

    error = q->status;
    while (unlikely(error == IN_WAKEUP)) {
        cpu_relax();
        error = q->status;
    }

    return error;
}
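
/*
 * Note on the IN_WAKEUP handshake described above: the waker publishes
 * IN_WAKEUP in q->status before waking the sleeper and stores the final
 * result code only afterwards. A sleeper that observes IN_WAKEUP thus
 * knows a wakeup is in flight and spins (cpu_relax()) until the final
 * value appears, instead of acting on a half-finished wakeup.
 */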

SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
        unsigned, nsops, const struct timespec __user *, timeout)
{
    int error = -EINVAL;
    struct sem_array *sma;
    struct sembuf fast_sops[SEMOPM_FAST];
    struct sembuf *sops = fast_sops, *sop;
    struct sem_undo *un;
    int undos = 0, alter = 0, max, locknum;
    struct sem_queue queue;
    unsigned long jiffies_left = 0;
    struct ipc_namespace *ns;
    struct list_head tasks;

    ns = current->nsproxy->ipc_ns;

    if (nsops < 1 || semid < 0)
        return -EINVAL;
    if (nsops > ns->sc_semopm)
        return -E2BIG;
    if (nsops > SEMOPM_FAST) {
        sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
        if (sops == NULL)
            return -ENOMEM;
    }
    if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
        error = -EFAULT;
        goto out_free;
    }
    if (timeout) {
        struct timespec _timeout;
        if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
            error = -EFAULT;
            goto out_free;
        }
        if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
            _timeout.tv_nsec >= 1000000000L) {
            error = -EINVAL;
            goto out_free;
        }
        jiffies_left = timespec_to_jiffies(&_timeout);
    }
    max = 0;
    for (sop = sops; sop < sops + nsops; sop++) {
        if (sop->sem_num >= max)
            max = sop->sem_num;
        if (sop->sem_flg & SEM_UNDO)
            undos = 1;
        if (sop->sem_op != 0)
            alter = 1;
    }
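
    /*
     * For illustration (values are hypothetical): an operation vector
     *
     *	sops[0] = { .sem_num = 0, .sem_op =  0, .sem_flg = 0 };
     *	sops[1] = { .sem_num = 1, .sem_op = -1, .sem_flg = SEM_UNDO };
     *
     * yields max = 1, undos = 1 and alter = 1: sem_op == 0 is a
     * wait-for-zero and does not by itself mark the call as altering,
     * while the -1 on semaphore 1 does.
     */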

    INIT_LIST_HEAD(&tasks);

    if (undos) {
        /* On success, find_alloc_undo takes the rcu_read_lock */
        un = find_alloc_undo(ns, semid);
        if (IS_ERR(un)) {
            error = PTR_ERR(un);
            goto out_free;
        }
    } else {
        un = NULL;
        rcu_read_lock();
    }

    sma = sem_obtain_object_check(ns, semid);
    if (IS_ERR(sma)) {
        rcu_read_unlock();
        error = PTR_ERR(sma);
        goto out_free;
    }

    error = -EFBIG;
    if (max >= sma->sem_nsems)
        goto out_rcu_wakeup;

    error = -EACCES;
    if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
        goto out_rcu_wakeup;

    error = security_sem_semop(sma, sops, nsops, alter);
    if (error)
        goto out_rcu_wakeup;

    error = -EIDRM;
    locknum = sem_lock(sma, sops, nsops);
    /*
     * We eventually might perform the following check in a lockless
     * fashion, considering ipc_valid_object() locking constraints.
     * If nsops == 1 and there is no contention for sem_perm.lock, then
     * only a per-semaphore lock is held and it's OK to proceed with the
     * check below. More details on the fine-grained locking scheme
     * entangled here, and on why it is RMID race safe, are in the
     * comments at sem_lock().
     */
    if (!ipc_valid_object(&sma->sem_perm))
        goto out_unlock_free;
    /*
     * semid identifiers are not unique - find_alloc_undo may have
     * allocated an undo structure, it was invalidated by an RMID
     * and now a new array received the same id. Check and fail.
     * This case can be detected by checking un->semid. The existence of
     * "un" itself is guaranteed by rcu.
     */
    if (un && un->semid == -1)
        goto out_unlock_free;

    queue.sops = sops;
    queue.nsops = nsops;
    queue.undo = un;
    queue.pid = task_tgid_vnr(current);
    queue.alter = alter;

    error = perform_atomic_semop(sma, &queue);
    if (error == 0) {
        /* If the operation was successful, then do
         * the required updates.
         */
        if (alter)
            do_smart_update(sma, sops, nsops, 1, &tasks);
        else
            set_semotime(sma, sops);
    }
    if (error <= 0)
        goto out_unlock_free;

    /* We need to sleep on this operation, so we put the current
     * task into the pending queue and go to sleep.
     */

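    /*
     * Where the queue entry is linked decides who will scan it later:
     * a single-semaphore operation goes on the per-semaphore list (unless
     * complex operations are already pending), so wakeups on unrelated
     * semaphores in the same array need not look at it. Anything else is
     * "complex": it goes on the array-wide list and bumps complex_count.
     */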
    if (nsops == 1) {
        struct sem *curr;
        curr = &sma->sem_base[sops->sem_num];

        if (alter) {
            if (sma->complex_count) {
                list_add_tail(&queue.list,
                        &sma->pending_alter);
            } else {
                list_add_tail(&queue.list,
                        &curr->pending_alter);
            }
        } else {
            list_add_tail(&queue.list, &curr->pending_const);
        }
    } else {
        if (!sma->complex_count)
            merge_queues(sma);

        if (alter)
            list_add_tail(&queue.list, &sma->pending_alter);
        else
            list_add_tail(&queue.list, &sma->pending_const);

        sma->complex_count++;
    }

    queue.status = -EINTR;
    queue.sleeper = current;

sleep_again:
    current->state = TASK_INTERRUPTIBLE;
    sem_unlock(sma, locknum);
    rcu_read_unlock();

    if (timeout)
        jiffies_left = schedule_timeout(jiffies_left);
    else
        schedule();

    error = get_queue_result(&queue);

    if (error != -EINTR) {
        /* fast path: update_queue already obtained all requested
         * resources.
         * Perform an smp_mb(): user space could assume that semop()
         * is a memory barrier: without the mb(), the cpu could
         * speculatively read in user space stale data that was
         * overwritten by the previous owner of the semaphore.
         */
        smp_mb();

        goto out_free;
    }

    rcu_read_lock();
    sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);

    /*
     * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
     */
    error = get_queue_result(&queue);

    /*
     * Array removed? If yes, leave without sem_unlock().
     */
    if (IS_ERR(sma)) {
        rcu_read_unlock();
        goto out_free;
    }

    /*
     * If queue.status != -EINTR we are woken up by another process.
     * Leave without unlink_queue(), but with sem_unlock().
     */
    if (error != -EINTR)
        goto out_unlock_free;

    /*
     * If an interrupt occurred we have to clean up the queue.
     * A timeout that expired while we slept is reported as -EAGAIN
     * rather than -EINTR.
     */
    if (timeout && jiffies_left == 0)
        error = -EAGAIN;

    /*
     * If the wakeup was spurious, just retry.
     */
    if (error == -EINTR && !signal_pending(current))
        goto sleep_again;

    unlink_queue(sma, &queue);

out_unlock_free:
    sem_unlock(sma, locknum);
out_rcu_wakeup:
    rcu_read_unlock();
    wake_up_sem_queue_do(&tasks);
out_free:
    if (sops != fast_sops)
        kfree(sops);
    return error;
}
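
/*
 * Illustrative, user-space sketch (not part of this file; take_token()
 * and the values are made up): a bounded wait for one "token", undone
 * automatically if the process dies while holding it. semop() below is
 * the same call without the timeout argument.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *	#include <time.h>
 *
 *	int take_token(int semid)
 *	{
 *		struct sembuf op = {
 *			.sem_num = 0,
 *			.sem_op = -1,
 *			.sem_flg = SEM_UNDO,
 *		};
 *		struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *		return semtimedop(semid, &op, 1, &ts);
 *	}
 *
 * A timeout surfaces as -1 with errno == EAGAIN, matching the
 * jiffies_left handling above; SEM_UNDO makes exit_sem() re-add the
 * token if the caller exits without releasing it.
 */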

SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
        unsigned, nsops)
{
    return sys_semtimedop(semid, tsops, nsops, NULL);
}

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
    struct sem_undo_list *undo_list;
    int error;

    if (clone_flags & CLONE_SYSVSEM) {
        error = get_undo_list(&undo_list);
        if (error)
            return error;
        atomic_inc(&undo_list->refcnt);
        tsk->sysvsem.undo_list = undo_list;
    } else
        tsk->sysvsem.undo_list = NULL;

    return 0;
}
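
/*
 * Sketch of the user-visible effect (assumption: a glibc-style threaded
 * program, where pthread_create() passes CLONE_SYSVSEM): all threads of
 * a process then share one undo list, so a SEM_UNDO adjustment made by
 * one thread is applied only once, when the last user of the list exits.
 * A child created by plain fork() (no CLONE_SYSVSEM) starts with no undo
 * list of its own and inherits no semadj values.
 */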

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
    struct sem_undo_list *ulp;

    ulp = tsk->sysvsem.undo_list;
    if (!ulp)
        return;
    tsk->sysvsem.undo_list = NULL;

    if (!atomic_dec_and_test(&ulp->refcnt))
        return;

    for (;;) {
        struct sem_array *sma;
        struct sem_undo *un;
        struct list_head tasks;
        int semid, i;

        rcu_read_lock();
        un = list_entry_rcu(ulp->list_proc.next,
                    struct sem_undo, list_proc);
        if (&un->list_proc == &ulp->list_proc)
            semid = -1;
        else
            semid = un->semid;

        if (semid == -1) {
            rcu_read_unlock();
            break;
        }

        sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
        /* exit_sem raced with IPC_RMID, nothing to do */
        if (IS_ERR(sma)) {
            rcu_read_unlock();
            continue;
        }

        sem_lock(sma, NULL, -1);
        /* exit_sem raced with IPC_RMID, nothing to do */
        if (!ipc_valid_object(&sma->sem_perm)) {
            sem_unlock(sma, -1);
            rcu_read_unlock();
            continue;
        }
        un = __lookup_undo(ulp, semid);
        if (un == NULL) {
            /* exit_sem raced with IPC_RMID+semget() that created
             * exactly the same semid. Nothing to do.
             */
            sem_unlock(sma, -1);
            rcu_read_unlock();
            continue;
        }

        /* remove un from the linked lists */
        ipc_assert_locked_object(&sma->sem_perm);
        list_del(&un->list_id);

        spin_lock(&ulp->lock);
        list_del_rcu(&un->list_proc);
        spin_unlock(&ulp->lock);

        /* perform adjustments registered in un */
        for (i = 0; i < sma->sem_nsems; i++) {
            struct sem *semaphore = &sma->sem_base[i];
            if (un->semadj[i]) {
                semaphore->semval += un->semadj[i];
                /*
                 * Range checks of the new semaphore value,
                 * not defined by SUS:
                 * - Some unices ignore the undo entirely
                 * (e.g. HP UX 11i 11.22, Tru64 V5.1)
                 * - some cap the value (e.g. FreeBSD caps
                 * at 0, but doesn't enforce SEMVMX)
                 *
                 * Linux caps the semaphore value, both at 0
                 * and at SEMVMX.
                 *
                 * Manfred <manfred@colorfullife.com>
                 */
                if (semaphore->semval < 0)
                    semaphore->semval = 0;
                if (semaphore->semval > SEMVMX)
                    semaphore->semval = SEMVMX;
                semaphore->sempid = task_tgid_vnr(current);
            }
        }
        /* maybe some queued-up processes were waiting for this */
        INIT_LIST_HEAD(&tasks);
        do_smart_update(sma, NULL, 0, 1, &tasks);
        sem_unlock(sma, -1);
        rcu_read_unlock();
        wake_up_sem_queue_do(&tasks);

        kfree_rcu(un, rcu);
    }
    kfree(ulp);
}
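
/*
 * Worked example of the capping above (hypothetical values): a process
 * does sem_op = +1 with SEM_UNDO on a semaphore, recording semadj = -1.
 * If another process meanwhile consumes that unit (semval back to 0),
 * applying the -1 here would drive semval to -1; Linux clamps it to 0
 * instead of blocking, consistent with the note above that SUS leaves
 * this case undefined.
 */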

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
    struct user_namespace *user_ns = seq_user_ns(s);
    struct sem_array *sma = it;
    time_t sem_otime;

    /*
     * The proc interface isn't aware of sem_lock(), it calls
     * ipc_lock_object() directly (in sysvipc_find_ipc).
     * In order to stay compatible with sem_lock(), we must wait until
     * all simple semop() calls have left their critical regions.
     */
    sem_wait_array(sma);

    sem_otime = get_semotime(sma);

    return seq_printf(s,
              "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
              sma->sem_perm.key,
              sma->sem_perm.id,
              sma->sem_perm.mode,
              sma->sem_nsems,
              from_kuid_munged(user_ns, sma->sem_perm.uid),
              from_kgid_munged(user_ns, sma->sem_perm.gid),
              from_kuid_munged(user_ns, sma->sem_perm.cuid),
              from_kgid_munged(user_ns, sma->sem_perm.cgid),
              sem_otime,
              sma->sem_ctime);
}
#endif
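
/*
 * The ten conversions above produce, per array, a line like the
 * following in /proc/sysvipc/sem (sample values fabricated, spacing
 * approximate):
 *
 *	key    semid  perms  nsems   uid   gid  cuid  cgid      otime      ctime
 *	1234   32769    666      1  1000  1000  1000  1000 1432214437 1432214400
 */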