/*
 * linux/ipc/sem.c
 * Copyright (C) 1992 Krishna Balasubramanian
 * Copyright (C) 1995 Eric Schenk, Bruno Haible
 *
 * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995):
 * This code underwent a massive rewrite in order to solve some problems
 * with the original code. In particular the original code failed to
 * wake up processes that were waiting for semval to go to 0 if the
 * value went to 0 and was then incremented rapidly enough. In solving
 * this problem I have also modified the implementation so that it
 * processes pending operations in a FIFO manner, thus giving a guarantee
 * that processes waiting for a lock on the semaphore won't starve
 * unless another locking process fails to unlock.
 * In addition the following two changes in behavior have been introduced:
 * - The original implementation of semop returned the value of the
 *   last semaphore element examined on success. This does not
 *   match the manual page specifications, and effectively
 *   allows the user to read the semaphore even if they do not
 *   have read permissions. The implementation now returns 0
 *   on success as stated in the manual page.
 * - There is some confusion over whether the set of undo adjustments
 *   to be performed at exit should be done in an atomic manner.
 *   That is, if we are attempting to decrement the semval should we queue
 *   up and wait until we can do so legally?
 *   The original implementation attempted to do this.
 *   The current implementation does not do so. This is because I don't
 *   think it is the right thing (TM) to do, and because I couldn't
 *   see a clean way to get the old behavior with the new design.
 *   The POSIX standard and SVID should be consulted to determine
 *   what behavior is mandated.
 *
 * Further notes on refinement (Christoph Rohland, December 1998):
 * - The POSIX standard says that the undo adjustments should simply be
 *   redone, so the current implementation is OK.
 * - The previous code had two flaws:
 *   1) It actively gave the semaphore to the next waiting process
 *      sleeping on the semaphore. Since this process did not have the
 *      CPU, this led to many unnecessary context switches and bad
 *      performance. Now we only check which process should be able to
 *      get the semaphore, and if this process wants to reduce some
 *      semaphore value we simply wake it up without doing the
 *      operation. So it has to try to get it later. Thus e.g. the
 *      running process may reacquire the semaphore during the current
 *      time slice. If it only waits for zero or increases the semaphore,
 *      we do the operation in advance and wake it up.
 *   2) It did not wake up all processes waiting for zero. We try to do
 *      better, but only get right those semops that wait for zero or
 *      increase only. If there are decrement operations in the
 *      operations array we do the same as before.
 *
 * With the arrival of the O(1) scheduler it became unnecessary to run the
 * check/retry algorithm when waking up blocked processes, as the new
 * scheduler handles thread switches better than the old one.
 *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
 * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
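
/*
 * Illustrative userspace sketch (not part of the kernel source), showing
 * the two behaviors documented above: semop() returns 0 on success rather
 * than the last semaphore value examined, and a SEM_UNDO adjustment is
 * rolled back when the process exits. The key and mode are arbitrary
 * example values.
 *
 *	int semid = semget(0x1234, 1, IPC_CREAT | 0600);
 *	struct sembuf up = { .sem_num = 0, .sem_op = 1,
 *			     .sem_flg = SEM_UNDO };
 *	int rc = semop(semid, &up, 1);
 *
 * Here rc is 0 on success, and when the process exits, exit_sem() below
 * applies the recorded semadj of -1, so the increment does not outlive
 * the process.
 */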

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/time.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>
#include "util.h"

#define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])

#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)

static int newary(struct ipc_namespace *, struct ipc_params *);
static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
#endif

#define SEMMSL_FAST	256 /* 512 bytes on stack */
#define SEMOPM_FAST	64  /* ~ 372 bytes on stack */

/*
 * linked list protection:
 *	sem_undo.id_next,
 *	sem_array.sem_pending{,last},
 *	sem_array.sem_undo: sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
 *
 */

#define sc_semmsl	sem_ctls[0]
#define sc_semmns	sem_ctls[1]
#define sc_semopm	sem_ctls[2]
#define sc_semmni	sem_ctls[3]

void sem_init_ns(struct ipc_namespace *ns)
{
	ns->sc_semmsl = SEMMSL;
	ns->sc_semmns = SEMMNS;
	ns->sc_semopm = SEMOPM;
	ns->sc_semmni = SEMMNI;
	ns->used_sems = 0;
	ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}

#ifdef CONFIG_IPC_NS
void sem_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &sem_ids(ns), freeary);
	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
#endif

void __init sem_init (void)
{
	sem_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/sem",
				"       key      semid perms      nsems   uid   gid  cuid  cgid      otime      ctime\n",
				IPC_SEM_IDS, sysvipc_sem_proc_show);
}

/*
 * sem_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct sem_array *)ipcp;

	return container_of(ipcp, struct sem_array, sem_perm);
}

static inline void sem_lock_and_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
}

static inline void sem_getref_and_unlock(struct sem_array *sma)
{
	ipc_rcu_getref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_putref(struct sem_array *sma)
{
	ipc_lock_by_ptr(&sma->sem_perm);
	ipc_rcu_putref(sma);
	ipc_unlock(&(sma)->sem_perm);
}

static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
	ipc_rmid(&sem_ids(ns), &s->sem_perm);
}

/*
 * Lockless wakeup algorithm:
 * Without the check/retry algorithm a lockless wakeup is possible:
 * - queue.status is initialized to -EINTR before blocking.
 * - wakeup is performed by
 *	* unlinking the queue entry from sma->sem_pending
 *	* setting queue.status to IN_WAKEUP
 *	  This is the notification for the blocked thread that a
 *	  result value is imminent.
 *	* call wake_up_process
 *	* set queue.status to the final value.
 * - the previously blocked thread checks queue.status:
 *	* if it's IN_WAKEUP, then it must wait until the value changes
 *	* if it's not -EINTR, then the operation was completed by
 *	  update_queue. semtimedop can return queue.status without
 *	  performing any operation on the sem array.
 *	* otherwise it must acquire the spinlock and check what's up.
 *
 * The two-stage algorithm is necessary to protect against the following
 * races:
 * - if queue.status is set after wake_up_process, then the woken up idle
 *   thread could race forward and try (and fail) to acquire sma->lock
 *   before update_queue had a chance to set queue.status
 * - if queue.status is written before wake_up_process and if the
 *   blocked process is woken up by a signal between writing
 *   queue.status and the wake_up_process, then the woken up
 *   process could return from semtimedop and die by calling
 *   sys_exit before wake_up_process is called. Then wake_up_process
 *   will oops, because the task structure is already invalid.
 *   (yes, this happened on s390 with sysv msg).
 *
 */
#define IN_WAKEUP	1
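
/*
 * A condensed sketch of the protocol above (illustrative pseudocode; the
 * real code is wake_up_sem_queue() and the waiter loop in sys_semtimedop()
 * further down). Waker, with sma->lock held:
 *
 *	unlink_queue(sma, q);
 *	q->status = IN_WAKEUP;
 *	wake_up_process(q->sleeper);
 *	smp_wmb();
 *	q->status = final_error;
 *
 * Waiter, with no locks held:
 *
 *	error = queue.status;
 *	while (error == IN_WAKEUP) {
 *		cpu_relax();
 *		error = queue.status;
 *	}
 */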

/**
 * newary - Create a new semaphore set
 * @ns: namespace
 * @params: ptr to the structure that contains key, semflg and nsems
 *
 * Called with sem_ids.rw_mutex held (as a writer)
 */

static int newary(struct ipc_namespace *ns, struct ipc_params *params)
{
	int id;
	int retval;
	struct sem_array *sma;
	int size;
	key_t key = params->key;
	int nsems = params->u.nsems;
	int semflg = params->flg;
	int i;

	if (!nsems)
		return -EINVAL;
	if (ns->used_sems + nsems > ns->sc_semmns)
		return -ENOSPC;

	size = sizeof (*sma) + nsems * sizeof (struct sem);
	sma = ipc_rcu_alloc(size);
	if (!sma) {
		return -ENOMEM;
	}
	memset (sma, 0, size);

	sma->sem_perm.mode = (semflg & S_IRWXUGO);
	sma->sem_perm.key = key;

	sma->sem_perm.security = NULL;
	retval = security_sem_alloc(sma);
	if (retval) {
		ipc_rcu_putref(sma);
		return retval;
	}

	id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
	if (id < 0) {
		security_sem_free(sma);
		ipc_rcu_putref(sma);
		return id;
	}
	ns->used_sems += nsems;

	sma->sem_base = (struct sem *) &sma[1];

	for (i = 0; i < nsems; i++)
		INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);

	sma->complex_count = 0;
	INIT_LIST_HEAD(&sma->sem_pending);
	INIT_LIST_HEAD(&sma->list_id);
	sma->sem_nsems = nsems;
	sma->sem_ctime = get_seconds();
	sem_unlock(sma);

	return sma->sem_perm.id;
}
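
/*
 * Layout note for the allocation above: the per-semaphore array is placed
 * directly behind struct sem_array in the same ipc_rcu allocation, which
 * is why sem_base is simply &sma[1]:
 *
 *	+------------------+--------+--------+-----+--------------+
 *	| struct sem_array | sem[0] | sem[1] | ... | sem[nsems-1] |
 *	+------------------+--------+--------+-----+--------------+
 *	^ sma              ^ sma->sem_base
 */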


/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	return security_sem_associate(sma, semflg);
}

/*
 * Called with sem_ids.rw_mutex and ipcp locked.
 */
static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct sem_array *sma;

	sma = container_of(ipcp, struct sem_array, sem_perm);
	if (params->u.nsems > sma->sem_nsems)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops sem_ops;
	struct ipc_params sem_params;

	ns = current->nsproxy->ipc_ns;

	if (nsems < 0 || nsems > ns->sc_semmsl)
		return -EINVAL;

	sem_ops.getnew = newary;
	sem_ops.associate = sem_security;
	sem_ops.more_checks = sem_more_checks;

	sem_params.key = key;
	sem_params.flg = semflg;
	sem_params.u.nsems = nsems;

	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
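
/*
 * Illustrative userspace call (arbitrary example values): create a private
 * set of four semaphores. This fails with EINVAL if nsems exceeds the
 * per-set limit (semmsl) and with ENOSPC if the system-wide count (semmns)
 * would be exceeded, matching the checks in sys_semget() and newary() above.
 *
 *	int semid = semget(IPC_PRIVATE, 4, IPC_CREAT | 0600);
 *	if (semid < 0)
 *		perror("semget");
 */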

/*
 * Determine whether a sequence of semaphore operations would succeed
 * all at once. Return 0 if yes, 1 if the task must sleep, otherwise an
 * error code.
 */

static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
			     int nsops, struct sem_undo *un, int pid)
{
	int result, sem_op;
	struct sembuf *sop;
	struct sem * curr;

	for (sop = sops; sop < sops + nsops; sop++) {
		curr = sma->sem_base + sop->sem_num;
		sem_op = sop->sem_op;
		result = curr->semval;

		if (!sem_op && result)
			goto would_block;

		result += sem_op;
		if (result < 0)
			goto would_block;
		if (result > SEMVMX)
			goto out_of_range;
		if (sop->sem_flg & SEM_UNDO) {
			int undo = un->semadj[sop->sem_num] - sem_op;
			/*
			 * Exceeding the undo range is an error.
			 */
			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
				goto out_of_range;
		}
		curr->semval = result;
	}

	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].sempid = pid;
		if (sop->sem_flg & SEM_UNDO)
			un->semadj[sop->sem_num] -= sop->sem_op;
		sop--;
	}

	sma->sem_otime = get_seconds();
	return 0;

out_of_range:
	result = -ERANGE;
	goto undo;

would_block:
	if (sop->sem_flg & IPC_NOWAIT)
		result = -EAGAIN;
	else
		result = 1;

undo:
	sop--;
	while (sop >= sops) {
		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
		sop--;
	}

	return result;
}
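
/*
 * Worked example for try_atomic_semop() with hypothetical values: assume
 * semval[0] == 1, semval[1] == 0 and the two operations
 * { .sem_num = 0, .sem_op = -1 } and { .sem_num = 1, .sem_op = -1 }.
 * The loop first sets semval[0] to 0, then finds that semval[1] - 1 would
 * go negative and jumps to would_block. The rollback loop under the undo
 * label subtracts the already-applied -1 again, restoring semval[0] to 1,
 * and the caller sees 1 (sleep): the array is never left half-modified.
 */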

/*
 * Wake up a process waiting on the sem queue with a given error.
 * The queue is invalid (may not be accessed) after the function returns.
 */
static void wake_up_sem_queue(struct sem_queue *q, int error)
{
	/*
	 * Hold preempt off so that we don't get preempted and have the
	 * wakee busy-wait until we're scheduled back on. We're holding
	 * locks here so it may not strictly be needed, however if the
	 * locks become preemptible then this prevents such a problem.
	 */
	preempt_disable();
	q->status = IN_WAKEUP;
	wake_up_process(q->sleeper);
	/* hands-off: q can disappear immediately after writing q->status. */
	smp_wmb();
	q->status = error;
	preempt_enable();
}

static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
	list_del(&q->list);
	if (q->nsops == 1)
		list_del(&q->simple_list);
	else
		sma->complex_count--;
}


/**
 * update_queue(sma, semnum): Look for tasks that can be completed.
 * @sma: semaphore array.
 * @semnum: semaphore that was modified.
 *
 * update_queue must be called after a semaphore in a semaphore array
 * was modified. If multiple semaphores were modified, then @semnum
 * must be set to -1.
 */
static void update_queue(struct sem_array *sma, int semnum)
{
	struct sem_queue *q;
	struct list_head *walk;
	struct list_head *pending_list;
	int offset;

	/* if there are complex operations around, then knowing the semaphore
	 * that was modified doesn't help us. Assume that multiple semaphores
	 * were modified.
	 */
	if (sma->complex_count)
		semnum = -1;

	if (semnum == -1) {
		pending_list = &sma->sem_pending;
		offset = offsetof(struct sem_queue, list);
	} else {
		pending_list = &sma->sem_base[semnum].sem_pending;
		offset = offsetof(struct sem_queue, simple_list);
	}

again:
	walk = pending_list->next;
	while (walk != pending_list) {
		int error, alter;

		q = (struct sem_queue *)((char *)walk - offset);
		walk = walk->next;

		/* If we are scanning the single sop, per-semaphore list of
		 * one semaphore and that semaphore is 0, then it is not
		 * necessary to scan the "alter" entries: simple increments
		 * that affect only one entry succeed immediately and cannot
		 * be in the per semaphore pending queue, and decrements
		 * cannot be successful if the value is already 0.
		 */
		if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
				q->alter)
			break;

		error = try_atomic_semop(sma, q->sops, q->nsops,
					 q->undo, q->pid);

		/* Does q->sleeper still need to sleep? */
		if (error > 0)
			continue;

		unlink_queue(sma, q);

		/*
		 * The next operation that must be checked depends on the type
		 * of the completed operation:
		 * - if the operation modified the array, then restart from the
		 *   head of the queue and check for threads that might be
		 *   waiting for the new semaphore values.
		 * - if the operation didn't modify the array, then just
		 *   continue.
		 */
		alter = q->alter;
		wake_up_sem_queue(q, error);
		if (alter && !error)
			goto again;
	}
}

/* The following counts are associated with each semaphore:
 *   semncnt	number of tasks waiting on semval being nonzero
 *   semzcnt	number of tasks waiting on semval being zero
 * This model assumes that a task waits on exactly one semaphore.
 * Since semaphore operations are to be performed atomically, tasks actually
 * wait on a whole sequence of semaphores simultaneously.
 * The counts we return here are a rough approximation, but still
 * guarantee that semncnt+semzcnt>0 if the task is on the pending queue.
 */
static int count_semncnt (struct sem_array * sma, ushort semnum)
{
	int semncnt;
	struct sem_queue * q;

	semncnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op < 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semncnt++;
	}
	return semncnt;
}

static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
	int semzcnt;
	struct sem_queue * q;

	semzcnt = 0;
	list_for_each_entry(q, &sma->sem_pending, list) {
		struct sembuf * sops = q->sops;
		int nsops = q->nsops;
		int i;
		for (i = 0; i < nsops; i++)
			if (sops[i].sem_num == semnum
			    && (sops[i].sem_op == 0)
			    && !(sops[i].sem_flg & IPC_NOWAIT))
				semzcnt++;
	}
	return semzcnt;
}
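
/*
 * Illustrative userspace use of these counts (arbitrary semid): for
 * GETNCNT/GETZCNT the fourth semctl() argument is unused, so the calls
 * can be written as:
 *
 *	int ncnt = semctl(semid, 0, GETNCNT);
 *	int zcnt = semctl(semid, 0, GETZCNT);
 *
 * ncnt approximates the tasks blocked until semaphore 0 increases and
 * zcnt those blocked until it reaches zero; as the comment above notes,
 * only ncnt + zcnt > 0 is guaranteed for a queued multi-sop task.
 */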

static void free_un(struct rcu_head *head)
{
	struct sem_undo *un = container_of(head, struct sem_undo, rcu);
	kfree(un);
}

/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
 * as a writer and the spinlock for this semaphore set held. sem_ids.rw_mutex
 * remains locked on exit.
 */
static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct sem_undo *un, *tu;
	struct sem_queue *q, *tq;
	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);

	/* Free the existing undo structures for this semaphore set. */
	assert_spin_locked(&sma->sem_perm.lock);
	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
		list_del(&un->list_id);
		spin_lock(&un->ulp->lock);
		un->semid = -1;
		list_del_rcu(&un->list_proc);
		spin_unlock(&un->ulp->lock);
		call_rcu(&un->rcu, free_un);
	}

	/* Wake up all pending processes and let them fail with EIDRM. */
	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
		unlink_queue(sma, q);
		wake_up_sem_queue(q, -EIDRM);
	}

	/* Remove the semaphore set from the IDR */
	sem_rmid(ns, sma);
	sem_unlock(sma);

	ns->used_sems -= sma->sem_nsems;
	security_sem_free(sma);
	ipc_rcu_putref(sma);
}

static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
	switch(version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct semid_ds out;

		ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);

		out.sem_otime	= in->sem_otime;
		out.sem_ctime	= in->sem_ctime;
		out.sem_nsems	= in->sem_nsems;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static int semctl_nolock(struct ipc_namespace *ns, int semid,
			 int cmd, int version, union semun arg)
{
	int err;
	struct sem_array *sma;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	{
		struct seminfo seminfo;
		int max_id;

		err = security_sem_semctl(NULL, cmd);
		if (err)
			return err;

		memset(&seminfo,0,sizeof(seminfo));
		seminfo.semmni = ns->sc_semmni;
		seminfo.semmns = ns->sc_semmns;
		seminfo.semmsl = ns->sc_semmsl;
		seminfo.semopm = ns->sc_semopm;
		seminfo.semvmx = SEMVMX;
		seminfo.semmnu = SEMMNU;
		seminfo.semmap = SEMMAP;
		seminfo.semume = SEMUME;
		down_read(&sem_ids(ns).rw_mutex);
		if (cmd == SEM_INFO) {
			seminfo.semusz = sem_ids(ns).in_use;
			seminfo.semaem = ns->used_sems;
		} else {
			seminfo.semusz = SEMUSZ;
			seminfo.semaem = SEMAEM;
		}
		max_id = ipc_get_maxid(&sem_ids(ns));
		up_read(&sem_ids(ns).rw_mutex);
		if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case IPC_STAT:
	case SEM_STAT:
	{
		struct semid64_ds tbuf;
		int id;

		if (cmd == SEM_STAT) {
			sma = sem_lock(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = sma->sem_perm.id;
		} else {
			sma = sem_lock_check(ns, semid);
			if (IS_ERR(sma))
				return PTR_ERR(sma);
			id = 0;
		}

		err = -EACCES;
		if (ipcperms (&sma->sem_perm, S_IRUGO))
			goto out_unlock;

		err = security_sem_semctl(sma, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
		tbuf.sem_otime	= sma->sem_otime;
		tbuf.sem_ctime	= sma->sem_ctime;
		tbuf.sem_nsems	= sma->sem_nsems;
		sem_unlock(sma);
		if (copy_semid_to_user (arg.buf, &tbuf, version))
			return -EFAULT;
		return id;
	}
	default:
		return -EINVAL;
	}
out_unlock:
	sem_unlock(sma);
	return err;
}

static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
		int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	struct sem* curr;
	int err;
	ushort fast_sem_io[SEMMSL_FAST];
	ushort* sem_io = fast_sem_io;
	int nsems;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return PTR_ERR(sma);

	nsems = sma->sem_nsems;

	err = -EACCES;
	if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO))
		goto out_unlock;

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	err = -EACCES;
	switch (cmd) {
	case GETALL:
	{
		ushort __user *array = arg.array;
		int i;

		if(nsems > SEMMSL_FAST) {
			sem_getref_and_unlock(sma);

			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}

			sem_lock_and_putref(sma);
			if (sma->sem_perm.deleted) {
				sem_unlock(sma);
				err = -EIDRM;
				goto out_free;
			}
		}

		for (i = 0; i < sma->sem_nsems; i++)
			sem_io[i] = sma->sem_base[i].semval;
		sem_unlock(sma);
		err = 0;
		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
			err = -EFAULT;
		goto out_free;
	}
	case SETALL:
	{
		int i;
		struct sem_undo *un;

		sem_getref_and_unlock(sma);

		if(nsems > SEMMSL_FAST) {
			sem_io = ipc_alloc(sizeof(ushort)*nsems);
			if(sem_io == NULL) {
				sem_putref(sma);
				return -ENOMEM;
			}
		}

		if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
			sem_putref(sma);
			err = -EFAULT;
			goto out_free;
		}

		for (i = 0; i < nsems; i++) {
			if (sem_io[i] > SEMVMX) {
				sem_putref(sma);
				err = -ERANGE;
				goto out_free;
			}
		}
		sem_lock_and_putref(sma);
		if (sma->sem_perm.deleted) {
			sem_unlock(sma);
			err = -EIDRM;
			goto out_free;
		}

		for (i = 0; i < nsems; i++)
			sma->sem_base[i].semval = sem_io[i];

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id) {
			for (i = 0; i < nsems; i++)
				un->semadj[i] = 0;
		}
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma, -1);
		err = 0;
		goto out_unlock;
	}
	/* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */
	}
	err = -EINVAL;
	if(semnum < 0 || semnum >= nsems)
		goto out_unlock;

	curr = &sma->sem_base[semnum];

	switch (cmd) {
	case GETVAL:
		err = curr->semval;
		goto out_unlock;
	case GETPID:
		err = curr->sempid;
		goto out_unlock;
	case GETNCNT:
		err = count_semncnt(sma,semnum);
		goto out_unlock;
	case GETZCNT:
		err = count_semzcnt(sma,semnum);
		goto out_unlock;
	case SETVAL:
	{
		int val = arg.val;
		struct sem_undo *un;

		err = -ERANGE;
		if (val > SEMVMX || val < 0)
			goto out_unlock;

		assert_spin_locked(&sma->sem_perm.lock);
		list_for_each_entry(un, &sma->list_id, list_id)
			un->semadj[semnum] = 0;

		curr->semval = val;
		curr->sempid = task_tgid_vnr(current);
		sma->sem_ctime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma, semnum);
		err = 0;
		goto out_unlock;
	}
	}
out_unlock:
	sem_unlock(sma);
out_free:
	if(sem_io != fast_sem_io)
		ipc_free(sem_io, sizeof(ushort)*nsems);
	return err;
}

static inline unsigned long
copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
{
	switch(version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct semid_ds tbuf_old;

		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->sem_perm.uid	= tbuf_old.sem_perm.uid;
		out->sem_perm.gid	= tbuf_old.sem_perm.gid;
		out->sem_perm.mode	= tbuf_old.sem_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

/*
 * This function handles some semctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: the caller must hold no locks; the rw_mutex is taken inside this
 * function.
 */
static int semctl_down(struct ipc_namespace *ns, int semid,
		       int cmd, int version, union semun arg)
{
	struct sem_array *sma;
	int err;
	struct semid64_ds semid64;
	struct kern_ipc_perm *ipcp;

	if(cmd == IPC_SET) {
		if (copy_semid_from_user(&semid64, arg.buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	sma = container_of(ipcp, struct sem_array, sem_perm);

	err = security_sem_semctl(sma, cmd);
	if (err)
		goto out_unlock;

	switch(cmd){
	case IPC_RMID:
		freeary(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&semid64.sem_perm, ipcp);
		sma->sem_ctime = get_seconds();
		break;
	default:
		err = -EINVAL;
	}

out_unlock:
	sem_unlock(sma);
out_up:
	up_write(&sem_ids(ns).rw_mutex);
	return err;
}

SYSCALL_DEFINE(semctl)(int semid, int semnum, int cmd, union semun arg)
{
	int err = -EINVAL;
	int version;
	struct ipc_namespace *ns;

	if (semid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch(cmd) {
	case IPC_INFO:
	case SEM_INFO:
	case IPC_STAT:
	case SEM_STAT:
		err = semctl_nolock(ns, semid, cmd, version, arg);
		return err;
	case GETALL:
	case GETVAL:
	case GETPID:
	case GETNCNT:
	case GETZCNT:
	case SETVAL:
	case SETALL:
		err = semctl_main(ns,semid,semnum,cmd,version,arg);
		return err;
	case IPC_RMID:
	case IPC_SET:
		err = semctl_down(ns, semid, cmd, version, arg);
		return err;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_semctl(int semid, int semnum, int cmd, union semun arg)
{
	return SYSC_semctl((int) semid, (int) semnum, (int) cmd, arg);
}
SYSCALL_ALIAS(sys_semctl, SyS_semctl);
#endif
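
/*
 * Illustrative userspace sketch: per the semctl(2) manual page the calling
 * program must define union semun itself; it is not provided by the kernel
 * headers. Example values are arbitrary:
 *
 *	union semun {
 *		int val;
 *		struct semid_ds *buf;
 *		unsigned short *array;
 *		struct seminfo *__buf;
 *	};
 *
 *	union semun arg = { .val = 1 };
 *	semctl(semid, 0, SETVAL, arg);
 *	int v = semctl(semid, 0, GETVAL);
 */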

/* If the task doesn't already have an undo_list, then allocate one
 * here. We guarantee there is only one thread using this undo list,
 * and current is THE ONE
 *
 * If this allocation and assignment succeeds, but later
 * portions of this code fail, there is no need to free the sem_undo_list.
 * Just let it stay associated with the task, and it'll be freed later
 * at exit time.
 *
 * This can block, so callers must hold no locks.
 */
static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (!undo_list) {
		undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
		if (undo_list == NULL)
			return -ENOMEM;
		spin_lock_init(&undo_list->lock);
		atomic_set(&undo_list->refcnt, 1);
		INIT_LIST_HEAD(&undo_list->list_proc);

		current->sysvsem.undo_list = undo_list;
	}
	*undo_listp = undo_list;
	return 0;
}

static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
		if (un->semid == semid)
			return un;
	}
	return NULL;
}

static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
	struct sem_undo *un;

	assert_spin_locked(&ulp->lock);

	un = __lookup_undo(ulp, semid);
	if (un) {
		list_del_rcu(&un->list_proc);
		list_add_rcu(&un->list_proc, &ulp->list_proc);
	}
	return un;
}

/**
 * find_alloc_undo - Lookup (and if not present create) undo array
 * @ns: namespace
 * @semid: semaphore array id
 *
 * The function looks up (and if not present creates) the undo structure.
 * The size of the undo structure depends on the size of the semaphore
 * array, thus the alloc path is not that straightforward.
 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
 * performs an rcu_read_lock().
 */
static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
{
	struct sem_array *sma;
	struct sem_undo_list *ulp;
	struct sem_undo *un, *new;
	int nsems;
	int error;

	error = get_undo_list(&ulp);
	if (error)
		return ERR_PTR(error);

	rcu_read_lock();
	spin_lock(&ulp->lock);
	un = lookup_undo(ulp, semid);
	spin_unlock(&ulp->lock);
	if (likely(un!=NULL))
		goto out;
	rcu_read_unlock();

	/* no undo structure around - allocate one. */
	/* step 1: figure out the size of the semaphore array */
	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma))
		return ERR_PTR(PTR_ERR(sma));

	nsems = sma->sem_nsems;
	sem_getref_and_unlock(sma);

	/* step 2: allocate new undo structure */
	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
	if (!new) {
		sem_putref(sma);
		return ERR_PTR(-ENOMEM);
	}

	/* step 3: Acquire the lock on the semaphore array */
	sem_lock_and_putref(sma);
	if (sma->sem_perm.deleted) {
		sem_unlock(sma);
		kfree(new);
		un = ERR_PTR(-EIDRM);
		goto out;
	}
	spin_lock(&ulp->lock);

	/*
	 * step 4: check for races: did someone else allocate the undo struct?
	 */
	un = lookup_undo(ulp, semid);
	if (un) {
		kfree(new);
		goto success;
	}
	/* step 5: initialize & link new undo structure */
	new->semadj = (short *) &new[1];
	new->ulp = ulp;
	new->semid = semid;
	assert_spin_locked(&ulp->lock);
	list_add_rcu(&new->list_proc, &ulp->list_proc);
	assert_spin_locked(&sma->sem_perm.lock);
	list_add(&new->list_id, &sma->list_id);
	un = new;

success:
	spin_unlock(&ulp->lock);
	rcu_read_lock();
	sem_unlock(sma);
out:
	return un;
}

SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops, const struct timespec __user *, timeout)
{
	int error = -EINVAL;
	struct sem_array *sma;
	struct sembuf fast_sops[SEMOPM_FAST];
	struct sembuf* sops = fast_sops, *sop;
	struct sem_undo *un;
	int undos = 0, alter = 0, max;
	struct sem_queue queue;
	unsigned long jiffies_left = 0;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (nsops < 1 || semid < 0)
		return -EINVAL;
	if (nsops > ns->sc_semopm)
		return -E2BIG;
	if(nsops > SEMOPM_FAST) {
		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
		if(sops==NULL)
			return -ENOMEM;
	}
	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
		error=-EFAULT;
		goto out_free;
	}
	if (timeout) {
		struct timespec _timeout;
		if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
			error = -EFAULT;
			goto out_free;
		}
		if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
			_timeout.tv_nsec >= 1000000000L) {
			error = -EINVAL;
			goto out_free;
		}
		jiffies_left = timespec_to_jiffies(&_timeout);
	}
	max = 0;
	for (sop = sops; sop < sops + nsops; sop++) {
		if (sop->sem_num >= max)
			max = sop->sem_num;
		if (sop->sem_flg & SEM_UNDO)
			undos = 1;
		if (sop->sem_op != 0)
			alter = 1;
	}

	if (undos) {
		un = find_alloc_undo(ns, semid);
		if (IS_ERR(un)) {
			error = PTR_ERR(un);
			goto out_free;
		}
	} else
		un = NULL;

	sma = sem_lock_check(ns, semid);
	if (IS_ERR(sma)) {
		if (un)
			rcu_read_unlock();
		error = PTR_ERR(sma);
		goto out_free;
	}

	/*
	 * semid identifiers are not unique - find_alloc_undo may have
	 * allocated an undo structure, it was invalidated by an RMID,
	 * and now a new array received the same id. Check and fail.
	 * This case can be detected by checking un->semid. The existence
	 * of "un" itself is guaranteed by rcu.
	 */
	error = -EIDRM;
	if (un) {
		if (un->semid == -1) {
			rcu_read_unlock();
			goto out_unlock_free;
		} else {
			/*
			 * rcu lock can be released, "un" cannot disappear:
			 * - sem_lock is acquired, thus IPC_RMID is
			 *   impossible.
			 * - exit_sem is impossible, it always operates on
			 *   current (or a dead task).
			 */

			rcu_read_unlock();
		}
	}

	error = -EFBIG;
	if (max >= sma->sem_nsems)
		goto out_unlock_free;

	error = -EACCES;
	if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
		goto out_unlock_free;

	error = security_sem_semop(sma, sops, nsops, alter);
	if (error)
		goto out_unlock_free;

	error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
	if (error <= 0) {
		if (alter && error == 0)
			update_queue(sma, (nsops == 1) ? sops[0].sem_num : -1);

		goto out_unlock_free;
	}

	/* We need to sleep on this operation, so we put the current
	 * task into the pending queue and go to sleep.
	 */

	queue.sops = sops;
	queue.nsops = nsops;
	queue.undo = un;
	queue.pid = task_tgid_vnr(current);
	queue.alter = alter;
	if (alter)
		list_add_tail(&queue.list, &sma->sem_pending);
	else
		list_add(&queue.list, &sma->sem_pending);

	if (nsops == 1) {
		struct sem *curr;
		curr = &sma->sem_base[sops->sem_num];

		if (alter)
			list_add_tail(&queue.simple_list, &curr->sem_pending);
		else
			list_add(&queue.simple_list, &curr->sem_pending);
	} else {
		INIT_LIST_HEAD(&queue.simple_list);
		sma->complex_count++;
	}

	queue.status = -EINTR;
	queue.sleeper = current;
	current->state = TASK_INTERRUPTIBLE;
	sem_unlock(sma);

	if (timeout)
		jiffies_left = schedule_timeout(jiffies_left);
	else
		schedule();

	error = queue.status;
	while(unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = queue.status;
	}

	if (error != -EINTR) {
		/* fast path: update_queue already obtained all requested
		 * resources */
		goto out_free;
	}

	sma = sem_lock(ns, semid);
	if (IS_ERR(sma)) {
		error = -EIDRM;
		goto out_free;
	}

	/*
	 * If queue.status != -EINTR we are woken up by another process
	 */
	error = queue.status;
	if (error != -EINTR) {
		goto out_unlock_free;
	}

	/*
	 * If an interrupt occurred we have to clean up the queue
	 */
	if (timeout && jiffies_left == 0)
		error = -EAGAIN;
	unlink_queue(sma, &queue);

out_unlock_free:
	sem_unlock(sma);
out_free:
	if(sops != fast_sops)
		kfree(sops);
	return error;
}
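
/*
 * Illustrative userspace call (arbitrary values; glibc exposes
 * semtimedop() as a GNU extension): decrement semaphore 0 but give up
 * after at most half a second. On timeout the path above converts the
 * expired sleep into -EAGAIN.
 *
 *	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
 *	struct timespec ts = { .tv_sec = 0, .tv_nsec = 500000000L };
 *	if (semtimedop(semid, &op, 1, &ts) < 0 && errno == EAGAIN)
 *		handle_timeout();
 */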

SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
		unsigned, nsops)
{
	return sys_semtimedop(semid, tsops, nsops, NULL);
}

/* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
 * parent and child tasks.
 */

int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sem_undo_list *undo_list;
	int error;

	if (clone_flags & CLONE_SYSVSEM) {
		error = get_undo_list(&undo_list);
		if (error)
			return error;
		atomic_inc(&undo_list->refcnt);
		tsk->sysvsem.undo_list = undo_list;
	} else
		tsk->sysvsem.undo_list = NULL;

	return 0;
}

/*
 * add semadj values to semaphores, free undo structures.
 * undo structures are not freed when semaphore arrays are destroyed
 * so some of them may be out of date.
 * IMPLEMENTATION NOTE: There is some confusion over whether the
 * set of adjustments that needs to be done should be done in an atomic
 * manner or not. That is, if we are attempting to decrement the semval
 * should we queue up and wait until we can do so legally?
 * The original implementation attempted to do this (queue and wait).
 * The current implementation does not do so. The POSIX standard
 * and SVID should be consulted to determine what behavior is mandated.
 */
void exit_sem(struct task_struct *tsk)
{
	struct sem_undo_list *ulp;

	ulp = tsk->sysvsem.undo_list;
	if (!ulp)
		return;
	tsk->sysvsem.undo_list = NULL;

	if (!atomic_dec_and_test(&ulp->refcnt))
		return;

	for (;;) {
		struct sem_array *sma;
		struct sem_undo *un;
		int semid;
		int i;

		rcu_read_lock();
		un = list_entry_rcu(ulp->list_proc.next,
				    struct sem_undo, list_proc);
		if (&un->list_proc == &ulp->list_proc)
			semid = -1;
		else
			semid = un->semid;
		rcu_read_unlock();

		if (semid == -1)
			break;

		sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);

		/* exit_sem raced with IPC_RMID, nothing to do */
		if (IS_ERR(sma))
			continue;

		un = __lookup_undo(ulp, semid);
		if (un == NULL) {
			/* exit_sem raced with IPC_RMID+semget() that created
			 * exactly the same semid. Nothing to do.
			 */
			sem_unlock(sma);
			continue;
		}

		/* remove un from the linked lists */
		assert_spin_locked(&sma->sem_perm.lock);
		list_del(&un->list_id);

		spin_lock(&ulp->lock);
		list_del_rcu(&un->list_proc);
		spin_unlock(&ulp->lock);

		/* perform adjustments registered in un */
		for (i = 0; i < sma->sem_nsems; i++) {
			struct sem * semaphore = &sma->sem_base[i];
			if (un->semadj[i]) {
				semaphore->semval += un->semadj[i];
				/*
				 * Range checks of the new semaphore value,
				 * not defined by SUS:
				 * - Some unices ignore the undo entirely
				 *   (e.g. HP UX 11i 11.22, Tru64 V5.1)
				 * - some cap the value (e.g. FreeBSD caps
				 *   at 0, but doesn't enforce SEMVMX)
				 *
				 * Linux caps the semaphore value, both at 0
				 * and at SEMVMX.
				 *
				 *	Manfred <manfred@colorfullife.com>
				 */
				if (semaphore->semval < 0)
					semaphore->semval = 0;
				if (semaphore->semval > SEMVMX)
					semaphore->semval = SEMVMX;
				semaphore->sempid = task_tgid_vnr(current);
			}
		}
		sma->sem_otime = get_seconds();
		/* maybe some queued-up processes were waiting for this */
		update_queue(sma, -1);
		sem_unlock(sma);

		call_rcu(&un->rcu, free_un);
	}
	kfree(ulp);
}
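
/*
 * Worked example for the capping above (hypothetical values): a task that
 * performed three SEM_UNDO decrements has semadj == +3 for that semaphore
 * (semadj accumulates -sem_op). If semval is 1 at exit, exit_sem()
 * computes 1 + 3 = 4 and stores it unchanged. With semadj == -3 and
 * semval == 1 the raw result would be -2, which is capped to 0, just as
 * a result above SEMVMX would be capped to SEMVMX.
 */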

#ifdef CONFIG_PROC_FS
static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
	struct sem_array *sma = it;

	return seq_printf(s,
			  "%10d %10d  %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
			  sma->sem_perm.key,
			  sma->sem_perm.id,
			  sma->sem_perm.mode,
			  sma->sem_nsems,
			  sma->sem_perm.uid,
			  sma->sem_perm.gid,
			  sma->sem_perm.cuid,
			  sma->sem_perm.cgid,
			  sma->sem_otime,
			  sma->sem_ctime);
}
#endif