/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak   (golbi@mat.uni.torun.pl)
 *                          Michal Wronski         (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas          (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul         (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson          (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}
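
/*
 * MQUEUE_I() is the usual container_of() pattern: the VFS hands us a
 * pointer to the embedded vfs_inode, and we recover the enclosing
 * mqueue_inode_info. A minimal, self-contained sketch of the same idiom
 * (hypothetical names, compiles as plain C):
 *
 *	#include <stddef.h>
 *
 *	struct outer {
 *		int extra;
 *		struct inner { int x; } member;
 *	};
 *
 *	#define to_outer(p) ((struct outer *) \
 *		((char *)(p) - offsetof(struct outer, member)))
 */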

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);
	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		rb_init_node(&leaf->rb_node);
		INIT_LIST_HEAD(&leaf->msg_list);
		info->qsize += sizeof(*leaf);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to the
		 * right. On receive, we want the highest priorities first, so
		 * walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			info->qsize -= sizeof(*leaf);
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				info->qsize -= sizeof(*leaf);
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
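
/*
 * Walking all the way right gives POSIX semantics: mq_receive() returns
 * the oldest message of the highest priority. A minimal userspace sketch
 * that would exercise this ordering (hypothetical queue name "/demo",
 * buf sized to the queue's mq_msgsize, link with -lrt):
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	mqd_t q = mq_open("/demo", O_RDWR);
 *	mq_send(q, "low", 3, 1);
 *	mq_send(q, "high", 4, 5);
 *	char buf[8192];
 *	unsigned int prio;
 *	mq_receive(q, buf, sizeof(buf), &prio);
 *
 * The receive returns "high" with prio == 5 even though "low" was sent
 * first.
 */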

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns. We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities. However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);
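
		/*
		 * Worked example of the accounting above (struct sizes are
		 * illustrative; they vary by architecture and config): with
		 * mq_maxmsg = 10, mq_msgsize = 8192, sizeof(struct msg_msg)
		 * = 48 and sizeof(struct posix_msg_tree_node) = 40,
		 *
		 *	mq_treesize = 10 * 48 + min(10, MQ_PRIO_MAX) * 40
		 *		    = 480 + 400 = 880
		 *	mq_bytes    = 880 + 10 * 8192 = 82800
		 *
		 * and those 82800 bytes are what is charged against
		 * RLIMIT_MSGQUEUE just below.
		 */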
		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = data;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	if (!(flags & MS_KERNMOUNT))
		data = current->nsproxy->ipc_ns;
	return mount_ns(fs_type, flags, data, mqueue_fill_super);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *)foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, bool excl)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}
	if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
	    (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	     !capable(CAP_SYS_RESOURCE))) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine handles a read(2) on the queue file. To avoid open-coding
 * some form of mq_receive here, we only allow reading the queue size and
 * the notification info (the only values that are interesting from the
 * user's point of view and aren't accessible through the standard
 * routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	filp->f_path.dentry->d_inode->i_atime =
		filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return ret;
}
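
/*
 * A read(2) on the queue file thus yields a single formatted line; with
 * no notification registered it would look roughly like (values and
 * padding illustrative):
 *
 *	QSIZE:129        NOTIFY:0     SIGNO:0     NOTIFY_PID:0
 */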

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
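
/*
 * Because a mqd_t is a real file descriptor on Linux, the poll hook above
 * lets userspace select/poll/epoll on a queue, which POSIX itself does
 * not promise. A minimal sketch (q as returned by mq_open()):
 *
 *	#include <poll.h>
 *
 *	struct pollfd pfd = { .fd = (int)q, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 * POLLIN is reported while mq_curmsgs > 0, POLLOUT while there is still
 * room for another message.
 */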

/* Adds the current task to info->e_wait_q[sr] before the first element
 * with a smaller priority */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * the lock is no longer held after return.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns the waiting task that should be serviced first or NULL
 * if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN - 1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process, no process is waiting
	 * synchronously for a message, AND the state of the queue changed
	 * from empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns,
						current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification the registration is removed */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}
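
/*
 * The userspace counterpart of the SIGEV_SIGNAL case above, as a minimal
 * sketch (q as returned by mq_open(), a handler already installed for
 * SIGUSR1):
 *
 *	#include <mqueue.h>
 *	#include <signal.h>
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(q, &sev);
 *
 * The signal fires once, when the queue goes from empty to non-empty and
 * nobody is blocked in mq_receive(); the registration is then dropped,
 * which is what the put_pid()/put_user_ns() pair above implements.
 */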

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   ktime_t *expires, struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;

	*expires = timespec_to_ktime(*ts);
	return 0;
}
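
/*
 * Note the timeout is an absolute CLOCK_REALTIME instant, not a relative
 * delay. A userspace sketch building a "five seconds from now" deadline
 * (q, buf, len and prio as set up by the caller):
 *
 *	#include <time.h>
 *	#include <mqueue.h>
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 5;
 *	mq_timedsend(q, buf, len, prio, &ts);
 */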

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
		    attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}
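
/*
 * The ULONG_MAX / mq_maxmsg test above is the standard idiom for catching
 * multiplication overflow before it happens: a * b overflows exactly when
 * b > ULONG_MAX / a (integer division). For example, on a 32-bit machine
 * (ULONG_MAX = 4294967295), mq_maxmsg = 65536 with mq_msgsize = 65536 is
 * rejected, since 4294967295 / 65536 = 65535 and 65536 * 65536 = 2^32
 * does not fit in an unsigned long.
 */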

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
			struct path *path, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	struct file *result;
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			return ERR_PTR(ret);
		/* store for use during create */
		path->dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			return ERR_PTR(ret);
	}

	mode &= ~current_umask();
	ret = mnt_want_write(path->mnt);
	if (ret)
		return ERR_PTR(ret);
	ret = vfs_create(dir, path->dentry, mode, true);
	path->dentry->d_fsdata = NULL;
	if (!ret)
		result = dentry_open(path, oflag, cred);
	else
		result = ERR_PTR(ret);
	/*
	 * dentry_open() took a persistent mnt_want_write(),
	 * so we can now drop this one.
	 */
	mnt_drop_write(path->mnt);
	return result;
}

/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;
	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);
	acc = oflag2acc[oflag & O_ACCMODE];
	if (inode_permission(path->dentry->d_inode, acc))
		return ERR_PTR(-EACCES);
	return dentry_open(path, oflag, current_cred());
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct path path;
	struct file *filp;
	char *name;
	struct mq_attr attr;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct dentry *root = ipc_ns->mq_mnt->mnt_root;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	error = 0;
	mutex_lock(&root->d_inode->i_mutex);
	path.dentry = lookup_one_len(name, root, strlen(name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(ipc_ns->mq_mnt);

	if (oflag & O_CREAT) {
		if (path.dentry->d_inode) {	/* entry already exists */
			audit_inode(name, path.dentry);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(&path, oflag);
		} else {
			filp = do_create(ipc_ns, root->d_inode,
						&path, oflag, mode,
						u_attr ? &attr : NULL);
		}
	} else {
		if (!path.dentry->d_inode) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, path.dentry);
		filp = do_open(&path, oflag);
	}

	if (!IS_ERR(filp))
		fd_install(fd, filp);
	else
		error = PTR_ERR(filp);
out:
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	mutex_unlock(&root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}
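
/*
 * For reference, the typical userspace sequence that lands in this
 * syscall, as a minimal sketch (name and sizes are illustrative; link
 * with -lrt):
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t q = mq_open("/demo", O_CREAT | O_RDWR, 0600, &attr);
 *
 * POSIX requires the name to begin with '/', which glibc strips before
 * invoking the syscall, so lookup_one_len() above only ever sees the
 * bare queue name. Note the descriptor is created O_CLOEXEC here.
 */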

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		ihold(inode);
	err = mnt_want_write(ipc_ns->mq_mnt);
	if (err)
		goto out_err;
	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
	mnt_drop_write(ipc_ns->mq_mnt);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message tree. If there is a waiting receiver, then it
 * bypasses the message tree and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */
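
/*
 * A condensed timeline of the handoff (R = receiver blocked in
 * wq_sleep(), S = sender holding info->lock):
 *
 *	S: receiver->msg = message;
 *	S: receiver->state = STATE_PENDING;
 *	S: wake_up_process(receiver->task);
 *	R: wakes up; while (state == STATE_PENDING) cpu_relax();
 *	S: smp_wmb();
 *	S: receiver->state = STATE_READY;
 *	R: sees STATE_READY and returns with ->msg, never retaking the lock
 */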

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and insert it into the queue (a message was just
 * removed, so a free slot is guaranteed). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct file *filp;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		rb_init_node(&new_leaf->rb_node);
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		info->qsize += sizeof(*new_leaf);
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;
	}
out_unlock:
	spin_unlock(&info->lock);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fput(filp);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct timespec ts;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &expires, &ts);
		if (res)
			return res;
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

	filp = fget(mqdes);
	if (unlikely(!filp)) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_inode(NULL, filp->f_path.dentry);

	if (unlikely(!(filp->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		rb_init_node(&new_leaf->rb_node);
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		info->qsize += sizeof(*new_leaf);
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
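
/*
 * The matching userspace receive, as a minimal sketch (q as returned by
 * mq_open(); buf must be at least mq_msgsize bytes or the -EMSGSIZE check
 * above fires):
 *
 *	char buf[8192];
 *	unsigned int prio;
 *	ssize_t n = mq_receive(q, buf, sizeof(buf), &prio);
 *
 * On success, n is the message length, i.e. the msg_ptr->m_ts value
 * returned above.
 */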

/*
 * Note: if the user asks us to deregister (passing a NULL pointer as
 * u_notification) but is not the current owner of the notification, the
 * request is silently ignored. POSIX does not explicitly define this case.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	if (u_notification) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;
	}

	audit_mq_notify(mqdes, u_notification ? &notification : NULL);

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}
		if (notification.sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			if (!filp) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	filp = fget(mqdes);
	if (!filp) {
		ret = -EBADF;
		goto out;
	}

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		audit_mq_getsetattr(mqdes, &mqstat);
		spin_lock(&filp->f_lock);
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;
		spin_unlock(&filp->f_lock);

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
					sizeof(struct mq_attr)))
		ret = -EFAULT;

out_fput:
	fput(filp);
out:
	return ret;
}

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count = 0;
	ns->mq_queues_max = DFLT_QUEUESMAX;
	ns->mq_msg_max = DFLT_MSGMAX;
	ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
	ns->mq_msg_default = DFLT_MSG;
	ns->mq_msgsize_default = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);