/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2
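
/*
 * The three STATE_* values implement the lockless hand-off described in
 * the "Pipelined send and receive" comment further down: a waiter queues
 * itself with STATE_NONE, the peer moves it to STATE_PENDING while the
 * wake-up is in flight, and STATE_READY (published after an smp_wmb())
 * tells the waiter that its msg pointer is valid without retaking the
 * queue spinlock.
 */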

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, int mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->notify_owner = NULL;
			info->qsize = 0;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = ipc_ns->mq_msg_max;
			info->attr.mq_msgsize = ipc_ns->mq_msgsize_max;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages)
				goto out_inode;

			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    task_rlimit(p, RLIMIT_MSGQUEUE)) {
				spin_unlock(&mq_lock);
				kfree(info->messages);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			/* all is ok */
			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;
out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}
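
/*
 * Accounting sketch (illustrative numbers, not defaults from this file):
 * with mq_maxmsg = 10 and mq_msgsize = 8192 on a 64-bit kernel,
 * mq_bytes = 10 * sizeof(struct msg_msg *) + 10 * 8192 = 80 + 81920
 * bytes, all charged against the creator's RLIMIT_MSGQUEUE above and
 * given back in mqueue_delete_inode() below.
 */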

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;
	int error;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO,
				NULL);
	if (!inode) {
		error = -ENOMEM;
		goto out;
	}

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		error = -ENOMEM;
		goto out;
	}
	error = 0;

out:
	return error;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return get_sb_ns(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;
	struct ipc_namespace *ipc_ns;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	/* Total amount of bytes accounted for the mqueue */
	mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *)
	    + info->attr.mq_msgsize);
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}
	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
			!capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine implements a read from a queue file. To avoid turning it
 * into a flavour of mq_receive, we only allow reading the queue size and
 * the notification info -- the only values that are interesting from the
 * user's point of view and aren't accessible through the standard
 * routines.
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime =
		filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}
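
/*
 * For example, cat(1) on a queue holding 3 bytes of messages with no
 * notification registered would print something like (illustrative
 * output only):
 *
 *	QSIZE:3          NOTIFY:0     SIGNO:0     NOTIFY_PID:0
 */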

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * on return the lock is no longer held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}
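
/*
 * Note how this pairs with wq_add() above: waiters are kept sorted so
 * that tasks with a lower static_prio value (i.e. a higher priority)
 * end up towards the tail of the list, which is why taking list.prev
 * yields the waiter to service first in O(1).
 */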

/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}
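
/*
 * The messages[] array is kept sorted by priority (stored in m_type),
 * lowest first, so msg_get() can pop the highest-priority message off
 * the end in O(1). Insertion shifts entries up, e.g. inserting priority
 * 5 into [1, 3, 7] gives [1, 3, 5, 7]; an equal priority is inserted
 * *below* existing equals (the >= in the loop), so older messages of
 * the same priority are received first, as POSIX requires.
 */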

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * The notification is invoked when a process is registered, no
	 * process is waiting synchronously for a message AND the state
	 * of the queue changed from empty to not empty. Here we are sure
	 * that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = current_uid();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}

static long prepare_timeout(struct timespec *p)
{
	struct timespec nowts;
	long timeout;

	if (p) {
		if (unlikely(p->tv_nsec < 0 || p->tv_sec < 0
			|| p->tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		p->tv_sec -= nowts.tv_sec;
		if (p->tv_nsec < nowts.tv_nsec) {
			p->tv_nsec += NSEC_PER_SEC;
			p->tv_sec--;
		}
		p->tv_nsec -= nowts.tv_nsec;
		if (p->tv_sec < 0)
			return 0;

		timeout = timespec_to_jiffies(p) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}
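
/*
 * Worked example (made-up numbers): with an absolute timeout of
 * { tv_sec = 1000, tv_nsec = 100000000 } and a CURRENT_TIME of
 * { 999, 600000000 }, the borrow path above yields a relative
 * { 0, 500000000 }, i.e. half a second, which timespec_to_jiffies()
 * rounds up (the +1) so we never sleep shorter than requested. An
 * absolute time already in the past falls out as 0, and a NULL pointer
 * means "wait forever" (MAX_SCHEDULE_TIMEOUT).
 */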

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize
	    + sizeof (struct msg_msg *))) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}
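
/*
 * The two overflow checks above guard the mq_bytes computation in
 * mqueue_get_inode(): the first rejects attributes where
 * mq_maxmsg * mq_msgsize alone would wrap an unsigned long, the second
 * catches the case where adding the sizeof(struct msg_msg *) per-slot
 * overhead wraps the total even though the plain product did not.
 */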

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
			struct dentry *dentry, int oflag, mode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	struct file *result;
	int ret;

	if (attr) {
		if (!mq_attr_ok(ipc_ns, attr)) {
			ret = -EINVAL;
			goto out;
		}
		/* store for use during create */
		dentry->d_fsdata = attr;
	}

	mode &= ~current_umask();
	ret = mnt_want_write(ipc_ns->mq_mnt);
	if (ret)
		goto out;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out_drop_write;

	result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(ipc_ns->mq_mnt);
	return result;

out_drop_write:
	mnt_drop_write(ipc_ns->mq_mnt);
out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

/* Opens existing queue */
static struct file *do_open(struct ipc_namespace *ipc_ns,
				struct dentry *dentry, int oflag)
{
	int ret;
	const struct cred *cred = current_cred();

	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		ret = -EINVAL;
		goto err;
	}

	if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
		ret = -EACCES;
		goto err;
	}

	return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);

err:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
	return ERR_PTR(ret);
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, mode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_putfd;
	}
	mntget(ipc_ns->mq_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(ipc_ns, dentry, oflag);
		} else {
			filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
						dentry, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (!dentry->d_inode) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, dentry);
		filp = do_open(ipc_ns, dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(ipc_ns->mq_mnt);
out_putfd:
	put_unused_fd(fd);
	fd = error;
out_upsem:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);
	err = mnt_want_write(ipc_ns->mq_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(ipc_ns->mq_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */
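
/*
 * Hand-off timeline, sketched (sender wakes a blocked receiver; the
 * sender-blocked case is symmetric):
 *
 *	receiver			sender
 *	--------			------
 *	wq_sleep(): queue self,		spin_lock(&info->lock)
 *	  drop lock, sleep		pipelined_send():
 *					  receiver->msg = message
 *					  state = STATE_PENDING
 *	woken up, sees PENDING,		  wake_up_process()
 *	  spins in cpu_relax()		  smp_wmb()
 *	sees STATE_READY, returns	  state = STATE_READY
 *	  with ->msg, lock not taken	spin_unlock(&info->lock)
 */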

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and put it into the queue (there is guaranteed to be
 * a free slot for it). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	struct timespec ts, *p = NULL;
	long timeout;
	int ret;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
					sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, p);
	timeout = prepare_timeout(p);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * First try to allocate memory, before doing anything with
	 * existing queues.
	 */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	spin_lock(&info->lock);

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
		}
		if (ret < 0)
			free_msg(msg_ptr);
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			msg_insert(msg_ptr, info);
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
		spin_unlock(&info->lock);
		ret = 0;
	}
out_fput:
	fput(filp);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	struct timespec ts, *p = NULL;

	if (u_abs_timeout) {
		if (copy_from_user(&ts, u_abs_timeout,
					sizeof(struct timespec)))
			return -EFAULT;
		p = &ts;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, p);
	timeout = prepare_timeout(p);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
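
/*
 * Userspace view of the two syscalls above, as a rough sketch (queue
 * name and sizes are made up; build with -lrt):
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
 *		char buf[64];
 *		unsigned int prio;
 *
 *		mqd_t mq = mq_open("/demo", O_CREAT | O_RDWR, 0600, &attr);
 *		if (mq == (mqd_t)-1)
 *			return 1;
 *		mq_send(mq, "hi", 3, 5);	// priority 5, may block
 *		mq_receive(mq, buf, sizeof(buf), &prio);
 *		printf("%s (prio %u)\n", buf, prio);
 *		mq_close(mq);
 *		mq_unlink("/demo");
 *		return 0;
 *	}
 *
 * Note the buffer rule enforced by the -EMSGSIZE check above: the
 * receive buffer must be at least mq_msgsize bytes, even for short
 * messages.
 */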

/*
 * Note: if a task registers a NULL pointer to deregister while it is
 * not the current owner of the notification, the request is silently
 * discarded. This case is not explicitly defined by POSIX.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			if (!filp) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}
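
/*
 * Registration is exclusive: only one process may own the notification
 * at a time (-EBUSY otherwise), and __do_notify() drops ownership after
 * firing, so a process that wants to be notified again has to
 * re-register after each notification -- e.g. from its signal handler
 * when using SIGEV_SIGNAL.
 */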

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
					sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.statfs = simple_statfs,
	.delete_inode = mqueue_delete_inode,
	.drop_inode = generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.get_sb = mqueue_get_sb,
	.kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	mntput(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
	if (IS_ERR(init_ipc_ns.mq_mnt)) {
		error = PTR_ERR(init_ipc_ns.mq_mnt);
		goto out_filesystem;
	}

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);