/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them. Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets, and SOCK_DGRAM sockets will always remain in
 * that list. The bound table is used solely for lookup of sockets when packets
 * are received; that's not necessary for SOCK_DGRAM sockets since we create
 * a datagram handle for each and need not perform a lookup. Keeping SOCK_DGRAM
 * sockets out of the bound hash buckets will reduce the chance of collisions
 * when looking for SOCK_STREAM sockets and prevents us from having to check
 * the socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the SS_LISTEN state. When a connection
 * request is received (the second kind of socket mentioned above), we create a
 * new socket and refer to it as a pending socket. These pending sockets are
 * placed on the pending connection list of the listener socket. When future
 * packets are received for the address the listener socket is bound to, we
 * check if the source of the packet is from one that has an existing pending
 * connection. If it does, we process the packet for the pending socket. When
 * that socket reaches the connected state, it is removed from the listener
 * socket's pending list and enqueued in the listener socket's accept queue.
 * Callers of accept(2) will accept connected sockets from the listener
 * socket's accept queue. If the socket cannot be accepted for some reason then
 * it is marked rejected. Once the connection is accepted, it is owned by the
 * user process and the responsibility for cleanup falls with that user
 * process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request. Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established. This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is
 * cleaned up (sock_put() -> sk_free() -> our sk_destruct implementation).
 * Note that this function will also clean up rejected sockets, those that
 * reach the connected state but leave it before they have been accepted.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked. Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed. Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and connected queue) ensures a
 * reference. When we defer work until process context and pass a socket as our
 * argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 */
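
/* For orientation, a minimal, hedged userspace sketch of the two kinds of
 * user-created sockets described above. Illustrative only: it assumes the
 * vm_sockets.h UAPI header and a loaded transport that has assigned this
 * context a CID; error handling is omitted.
 *
 *	struct sockaddr_vm addr = { .svm_family = AF_VSOCK,
 *				    .svm_cid = VMADDR_CID_ANY,
 *				    .svm_port = 1234 };
 *	int srv = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	bind(srv, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(srv, 8);				// listener, SS_LISTEN
 *	int peer = accept(srv, NULL, NULL);	// taken from the accept queue
 *
 * A "client" socket instead fills in the peer's CID and port and calls
 * connect(2), which drives the SS_CONNECTING -> SS_CONNECTED transitions
 * implemented below.
 */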

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define SS_LISTEN 255

static const struct vsock_transport *transport;
static DEFINE_MUTEX(vsock_register_mutex);

/**** EXPORTS ****/

/* Get the ID of the local context. This is transport dependent. */

int vm_sockets_get_local_cid(void)
{
	return transport->get_local_cid();
}
EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets). Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets. The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
#define VSOCK_HASH_SIZE		251
#define MAX_PORT_RETRIES	24

#define VSOCK_HASH(addr)	((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets	  (&vsock_bind_table[VSOCK_HASH_SIZE])

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst) \
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst) \
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk) \
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)
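
/* Worked example of the scheme above (illustrative arithmetic only): a
 * socket bound to port 1024 hashes to bucket 1024 % 251 == 20 of
 * vsock_bind_table, while every unbound socket sits in the extra list at
 * vsock_bind_table[251], i.e. vsock_bind_table[VSOCK_HASH_SIZE].
 */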

static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
static DEFINE_SPINLOCK(vsock_table_lock);

/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
		if (addr->svm_port == vsk->local_addr.svm_port)
			return sk_vsock(vsk);

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static bool __vsock_in_bound_table(struct vsock_sock *vsk)
{
	return !list_empty(&vsk->bound_table);
}

static bool __vsock_in_connected_table(struct vsock_sock *vsk)
{
	return !list_empty(&vsk->connected_table);
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

static bool vsock_in_bound_table(struct vsock_sock *vsk)
{
	bool ret;

	spin_lock_bh(&vsock_table_lock);
	ret = __vsock_in_bound_table(vsk);
	spin_unlock_bh(&vsock_table_lock);

	return ret;
}

static bool vsock_in_connected_table(struct vsock_sock *vsk)
{
	bool ret;

	spin_lock_bh(&vsock_table_lock);
	ret = __vsock_in_connected_table(vsk);
	spin_unlock_bh(&vsock_table_lock);

	return ret;
}

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;
		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table)
			fn(sk_vsock(vsk));
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	return transport->shutdown(vsock_sk(sk), mode);
}

void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, dwork.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock(sk);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process.  We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	listener->sk_ack_backlog--;

	/* We need to remove ourselves from the global connected sockets list
	 * so incoming packets can't find this socket, and to reduce the
	 * reference count.
	 */
	if (vsock_in_connected_table(vsk))
		vsock_remove_connected(vsk);

	sk->sk_state = SS_FREE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}
EXPORT_SYMBOL_GPL(vsock_pending_work);

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_stream(struct vsock_sock *vsk,
			       struct sockaddr_vm *addr)
{
	static u32 port = LAST_RESERVED_PORT + 1;
	struct sockaddr_vm new_addr;

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove stream sockets from the unbound list and add them to the hash
	 * table for easy lookup by their address.  The unbound list is simply
	 * an extra entry at the end of the hash table, a trick used by
	 * AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}

static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	u32 cid;
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that,
	 * just as AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to the local CID.
	 */
	cid = transport->get_local_cid();
	if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_stream(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}

struct sock *__vsock_create(struct net *net,
			    struct socket *sock,
			    struct sock *parent,
			    gfp_t priority,
			    unsigned short type)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL. We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sk->sk_state = 0;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
	} else {
		vsk->trusted = capable(CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
	}

	if (transport->init(vsk, psk) < 0) {
		sk_free(sk);
		return NULL;
	}

	if (sock)
		vsock_insert_unbound(vsk);

	return sk;
}
EXPORT_SYMBOL_GPL(__vsock_create);

static void __vsock_release(struct sock *sk)
{
	if (sk) {
		struct sk_buff *skb;
		struct sock *pending;
		struct vsock_sock *vsk;

		vsk = vsock_sk(sk);
		pending = NULL;	/* Compiler warning. */

		if (vsock_in_bound_table(vsk))
			vsock_remove_bound(vsk);

		if (vsock_in_connected_table(vsk))
			vsock_remove_connected(vsk);

		transport->release(vsk);

		lock_sock(sk);
		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;

		while ((skb = skb_dequeue(&sk->sk_receive_queue)))
			kfree_skb(skb);

		/* Clean up any sockets that never were accepted. */
		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
			__vsock_release(pending);
			sock_put(pending);
		}

		release_sock(sk);
		sock_put(sk);
	}
}

static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	transport->destruct(vsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	return transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	return transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

static int vsock_release(struct socket *sock)
{
	__vsock_release(sock->sk);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}

static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int *addr_len, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	if (!vm_addr) {
		err = -EINVAL;
		goto out;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	*addr_len = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do.  Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a STREAM socket and it is not connected then bail out
	 * immediately.  If it is a DGRAM socket then we must first kick the
	 * socket so that it wakes up from any sleeping calls, for example
	 * recv(), and then afterwards return the error.
	 */

	sk = sock->sk;
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sk->sk_type == SOCK_STREAM)
			return err;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		lock_sock(sk);
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);
		release_sock(sk);

		if (sk->sk_type == SOCK_STREAM) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

	return err;
}
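
/* Worked example of the mode mapping above (illustrative arithmetic only): a
 * userspace shutdown(fd, SHUT_WR) arrives here with mode == 1 and the
 * increment turns it into SEND_SHUTDOWN (2); shutdown(fd, SHUT_RDWR) arrives
 * with mode == 2 and becomes RCV_SHUTDOWN | SEND_SHUTDOWN (3).
 */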

static unsigned int vsock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk;
	unsigned int mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= POLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of POLLHUP set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= POLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= POLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= POLLIN | POLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	} else if (sock->type == SOCK_STREAM) {
		lock_sock(sk);

		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == SS_LISTEN
		    && !vsock_is_accept_queue_empty(sk))
			mask |= POLLIN | POLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int ret = transport->notify_poll_in(
					vsk, 1, &data_ready_now);
			if (ret < 0) {
				mask |= POLLERR;
			} else {
				if (data_ready_now)
					mask |= POLLIN | POLLRDNORM;
			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered read, and we check the
		 * shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= POLLIN | POLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (sk->sk_state == SS_CONNECTED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= POLLERR;
				} else {
					if (space_avail_now)
						/* Remove POLLWRBAND since INET
						 * sockets are not setting it.
						 */
						mask |= POLLOUT | POLLWRNORM;
				}
			}
		}

		/* Simulate INET socket poll behavior, which sets
		 * POLLOUT|POLLWRNORM when the peer is closed and there is
		 * nothing left to read, but local send is not shutdown.
		 */
		if (sk->sk_state == SS_UNCONNECTED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= POLLOUT | POLLWRNORM;
		}

		release_sock(sk);
	}

	return mask;
}

static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* For now, MSG_DONTWAIT is always assumed... */
	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	/* If the provided message contains an address, use that.  Otherwise
	 * fall back on the socket's remote handle (if it has been connected).
	 */
	if (msg->msg_name &&
	    vsock_addr_cast(msg->msg_name, msg->msg_namelen,
			    &remote_addr) == 0) {
		/* Ensure this address is of the right type and is a valid
		 * destination.
		 */

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		if (!vsock_addr_bound(remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len);

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_connect(struct socket *sock,
			       struct sockaddr *addr, int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	err = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
				VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (err != 0)
		return -EINVAL;

	lock_sock(sk);

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
	sock->state = SS_CONNECTED;

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len, int flags)
{
	return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len,
					flags);
}

static const struct proto_ops vsock_dgram_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_dgram_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = vsock_dgram_sendmsg,
	.recvmsg = vsock_dgram_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static void vsock_connect_timeout(struct work_struct *work)
{
	struct sock *sk;
	struct vsock_sock *vsk;

	vsk = container_of(work, struct vsock_sock, dwork.work);
	sk = sk_vsock(vsk);

	lock_sock(sk);
	if (sk->sk_state == SS_CONNECTING &&
	    (sk->sk_shutdown != SHUTDOWN_MASK)) {
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ETIMEDOUT;
		sk->sk_error_report(sk);
	}
	release_sock(sk);

	sock_put(sk);
}

static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
				int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
	switch (sock->state) {
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
		err = -EINVAL;
		goto out;
	case SS_CONNECTING:
		/* This continues on so we can move sock into the SS_CONNECTED
		 * state once the connection has completed (at which point err
		 * will be set to zero also).  Otherwise, we will either wait
		 * for the connection or return -EALREADY should this be a
		 * non-blocking call.
		 */
		err = -EALREADY;
		break;
	default:
		if ((sk->sk_state == SS_LISTEN) ||
		    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
			err = -EINVAL;
			goto out;
		}

		/* The hypervisor and well-known contexts do not have socket
		 * endpoints.
		 */
		if (!transport->stream_allow(remote_addr->svm_cid,
					     remote_addr->svm_port)) {
			err = -ENETUNREACH;
			goto out;
		}

		/* Set the remote address that we are connecting to. */
		memcpy(&vsk->remote_addr, remote_addr,
		       sizeof(vsk->remote_addr));

		err = vsock_auto_bind(vsk);
		if (err)
			goto out;

		sk->sk_state = SS_CONNECTING;

		err = transport->connect(vsk);
		if (err < 0)
			goto out;

		/* Mark sock as connecting and set the error code to in
		 * progress in case this is a non-blocking connect.
		 */
		sock->state = SS_CONNECTING;
		err = -EINPROGRESS;
	}

	/* The receive path will handle all communication until we are able to
	 * enter the connected state.  Here we wait for the connection to be
	 * completed or a notification of an error.
	 */
	timeout = vsk->connect_timeout;
	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
		if (flags & O_NONBLOCK) {
			/* If we're not going to block, we schedule a timeout
			 * function to generate a timeout on the connection
			 * attempt, in case the peer doesn't respond in a
			 * timely manner.  We hold on to the socket until the
			 * timeout fires.
			 */
			sock_hold(sk);
			INIT_DELAYED_WORK(&vsk->dwork,
					  vsock_connect_timeout);
			schedule_delayed_work(&vsk->dwork, timeout);

			/* Skip ahead to preserve error code set above. */
			goto out_wait;
		}

		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			goto out_wait_error;
		} else if (timeout == 0) {
			err = -ETIMEDOUT;
			goto out_wait_error;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}

	if (sk->sk_err) {
		err = -sk->sk_err;
		goto out_wait_error;
	} else
		err = 0;

out_wait:
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;

out_wait_error:
	sk->sk_state = SS_UNCONNECTED;
	sock->state = SS_UNCONNECTED;
	goto out_wait;
}

static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *listener;
	int err;
	struct sock *connected;
	struct vsock_sock *vconnected;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	listener = sock->sk;

	lock_sock(listener);

	if (sock->type != SOCK_STREAM) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (listener->sk_state != SS_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/* Wait for child sockets to appear; these are the new sockets created
	 * upon connection establishment.
	 */
	timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
	prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);

	while ((connected = vsock_dequeue_accept(listener)) == NULL &&
	       listener->sk_err == 0) {
		release_sock(listener);
		timeout = schedule_timeout(timeout);
		lock_sock(listener);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			goto out_wait;
		} else if (timeout == 0) {
			err = -EAGAIN;
			goto out_wait;
		}

		prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
	}

	if (listener->sk_err)
		err = -listener->sk_err;

	if (connected) {
		listener->sk_ack_backlog--;

		lock_sock(connected);
		vconnected = vsock_sk(connected);

		/* If the listener socket has received an error, then we should
		 * reject this socket and return.  Note that we simply mark the
		 * socket rejected, drop our reference, and let the cleanup
		 * function handle the cleanup; the fact that we found it in
		 * the listener's accept queue guarantees that the cleanup
		 * function hasn't run yet.
		 */
		if (err) {
			vconnected->rejected = true;
			release_sock(connected);
			sock_put(connected);
			goto out_wait;
		}

		newsock->state = SS_CONNECTED;
		sock_graft(connected, newsock);
		release_sock(connected);
		sock_put(connected);
	}

out_wait:
	finish_wait(sk_sleep(listener), &wait);
out:
	release_sock(listener);
	return err;
}

static int vsock_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;

	sk = sock->sk;

	lock_sock(sk);

	if (sock->type != SOCK_STREAM) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sock->state != SS_UNCONNECTED) {
		err = -EINVAL;
		goto out;
	}

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr)) {
		err = -EINVAL;
		goto out;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_state = SS_LISTEN;

	err = 0;

out:
	release_sock(sk);
	return err;
}

static int vsock_stream_setsockopt(struct socket *sock,
				   int level,
				   int optname,
				   char __user *optval,
				   unsigned int optlen)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

#define COPY_IN(_v)						\
	do {							\
		if (optlen < sizeof(_v)) {			\
			err = -EINVAL;				\
			goto exit;				\
		}						\
		if (copy_from_user(&_v, optval, sizeof(_v)) != 0) {	\
			err = -EFAULT;					\
			goto exit;					\
		}							\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		COPY_IN(val);
		transport->set_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		COPY_IN(val);
		transport->set_max_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		COPY_IN(val);
		transport->set_min_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct timeval tv;
		COPY_IN(tv);
		if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
			vsk->connect_timeout = tv.tv_sec * HZ +
			    DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
			if (vsk->connect_timeout == 0)
				vsk->connect_timeout =
				    VSOCK_DEFAULT_CONNECT_TIMEOUT;

		} else {
			err = -ERANGE;
		}
		break;
	}

	default:
		err = -ENOPROTOOPT;
		break;
	}

#undef COPY_IN

exit:
	release_sock(sk);
	return err;
}
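
/* A hedged userspace sketch of the connect-timeout option handled above
 * (assumes the SO_VM_SOCKETS_CONNECT_TIMEOUT definition from the
 * vm_sockets.h UAPI header; error handling omitted):
 *
 *	struct timeval tv = { .tv_sec = 5, .tv_usec = 0 };
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_CONNECT_TIMEOUT,
 *		   &tv, sizeof(tv));
 *
 * The timeval is converted to jiffies, e.g. 5 s at HZ == 250 is stored as
 * 1250 jiffies in vsk->connect_timeout.
 */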

static int vsock_stream_getsockopt(struct socket *sock,
				   int level, int optname,
				   char __user *optval,
				   int __user *optlen)
{
	int err;
	int len;
	struct sock *sk;
	struct vsock_sock *vsk;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

	err = get_user(len, optlen);
	if (err != 0)
		return err;

#define COPY_OUT(_v)				\
	do {					\
		if (len < sizeof(_v))		\
			return -EINVAL;		\
						\
		len = sizeof(_v);		\
		if (copy_to_user(optval, &_v, len) != 0)	\
			return -EFAULT;				\
								\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		val = transport->get_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		val = transport->get_max_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		val = transport->get_min_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct timeval tv;
		tv.tv_sec = vsk->connect_timeout / HZ;
		tv.tv_usec =
		    (vsk->connect_timeout -
		     tv.tv_sec * HZ) * (1000000 / HZ);
		COPY_OUT(tv);
		break;
	}
	default:
		return -ENOPROTOOPT;
	}

	err = put_user(len, optlen);
	if (err != 0)
		return -EFAULT;

#undef COPY_OUT

	return 0;
}

static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
				struct msghdr *msg, size_t len)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	ssize_t total_written;
	long timeout;
	int err;
	struct vsock_transport_send_notify_data send_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	total_written = 0;
	err = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	/* Callers should not provide a destination with stream sockets. */
	if (msg->msg_namelen) {
		err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
		goto out;
	}

	/* Send data only if both sides are not shutdown in the direction. */
	if (sk->sk_shutdown & SEND_SHUTDOWN ||
	    vsk->peer_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state != SS_CONNECTED ||
	    !vsock_addr_bound(&vsk->local_addr)) {
		err = -ENOTCONN;
		goto out;
	}

	if (!vsock_addr_bound(&vsk->remote_addr)) {
		err = -EDESTADDRREQ;
		goto out;
	}

	/* Wait for room in the produce queue to enqueue our user's data. */
	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	err = transport->notify_send_init(vsk, &send_data);
	if (err < 0)
		goto out;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (total_written < len) {
		ssize_t written;

		while (vsock_stream_has_space(vsk) == 0 &&
		       sk->sk_err == 0 &&
		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				goto out_wait;
			}

			err = transport->notify_send_pre_block(vsk,
							       &send_data);
			if (err < 0)
				goto out_wait;

			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				goto out_wait;
			} else if (timeout == 0) {
				err = -EAGAIN;
				goto out_wait;
			}

			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
		}

		/* These checks occur both as part of and after the loop
		 * conditional since we need to check before and after
		 * sleeping.
		 */
		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_wait;
		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
			err = -EPIPE;
			goto out_wait;
		}

		err = transport->notify_send_pre_enqueue(vsk, &send_data);
		if (err < 0)
			goto out_wait;

		/* Note that enqueue will only write as many bytes as are free
		 * in the produce queue, so we don't need to ensure len is
		 * smaller than the queue size.  It is the caller's
		 * responsibility to check how many bytes we were able to send.
		 */

		written = transport->stream_enqueue(
				vsk, msg->msg_iov,
				len - total_written);
		if (written < 0) {
			err = -ENOMEM;
			goto out_wait;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_wait;

	}

out_wait:
	if (total_written > 0)
		err = total_written;
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;
}


static int
vsock_stream_recvmsg(struct kiocb *kiocb,
		     struct socket *sock,
		     struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	int err;
	size_t target;
	ssize_t copied;
	long timeout;
	struct vsock_transport_recv_notify_data recv_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (sk->sk_state != SS_CONNECTED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown.  Differentiate between that case and when
		 * a peer has not connected or a local shutdown occurred with
		 * the SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check peer_shutdown flag here since peer may actually shut
	 * down, but there can be data in the queue that a local socket can
	 * receive.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer.  This
	 * is not an error.  We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing.  Note that this
	 * makes it impossible to handle cases where target is greater than the
	 * queue size.
	 */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (1) {
		s64 ready = vsock_stream_has_data(vsk);

		if (ready < 0) {
			/* Invalid queue pair content. XXX This should be
			 * changed to a connection reset in a later change.
			 */

			err = -ENOMEM;
			goto out_wait;
		} else if (ready > 0) {
			ssize_t read;

			err = transport->notify_recv_pre_dequeue(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			read = transport->stream_dequeue(
					vsk, msg->msg_iov,
					len - copied, flags);
			if (read < 0) {
				err = -ENOMEM;
				break;
			}

			copied += read;

			err = transport->notify_recv_post_dequeue(
					vsk, target, read,
					!(flags & MSG_PEEK), &recv_data);
			if (err < 0)
				goto out_wait;

			if (read >= target || flags & MSG_PEEK)
				break;

			target -= read;
		} else {
			if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
			    || (vsk->peer_shutdown & SEND_SHUTDOWN)) {
				break;
			}
			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				break;
			}

			err = transport->notify_recv_pre_block(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				break;
			} else if (timeout == 0) {
				err = -EAGAIN;
				break;
			}

			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
		}
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0) {
		/* We only do these additional bookkeeping/notification steps
		 * if we actually copied something out of the queue pair
		 * instead of just peeking ahead.
		 */

		if (!(flags & MSG_PEEK)) {
			/* If the other side has shutdown for sending and there
			 * is nothing more to read, then modify the socket
			 * state.
			 */
			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
				if (vsock_stream_has_data(vsk) <= 0) {
					sk->sk_state = SS_UNCONNECTED;
					sock_set_flag(sk, SOCK_DONE);
					sk->sk_state_change(sk);
				}
			}
		}
		err = copied;
	}

out_wait:
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;
}

static const struct proto_ops vsock_stream_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_stream_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_stream_setsockopt,
	.getsockopt = vsock_stream_getsockopt,
	.sendmsg = vsock_stream_sendmsg,
	.recvmsg = vsock_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static int vsock_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_VSOCK)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &vsock_dgram_ops;
		break;
	case SOCK_STREAM:
		sock->ops = &vsock_stream_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
}

static const struct net_proto_family vsock_family_ops = {
	.family = AF_VSOCK,
	.create = vsock_create,
	.owner = THIS_MODULE,
};

static long vsock_dev_do_ioctl(struct file *filp,
			       unsigned int cmd, void __user *ptr)
{
	u32 __user *p = ptr;
	int retval = 0;

	switch (cmd) {
	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
		if (put_user(transport->get_local_cid(), p) != 0)
			retval = -EFAULT;
		break;

	default:
		pr_err("Unknown ioctl %d\n", cmd);
		retval = -EINVAL;
	}

	return retval;
}

static long vsock_dev_ioctl(struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
				   unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif

static const struct file_operations vsock_device_ops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vsock_dev_compat_ioctl,
#endif
	.open		= nonseekable_open,
};

static struct miscdevice vsock_device = {
	.name		= "vsock",
	.fops		= &vsock_device_ops,
};
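
/* Likely userspace use of the misc device above (a sketch; error handling
 * omitted): query the local CID without creating a socket.
 *
 *	int fd = open("/dev/vsock", O_RDONLY);
 *	unsigned int cid;
 *	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 *	close(fd);
 */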

int __vsock_core_init(const struct vsock_transport *t, struct module *owner)
{
	int err = mutex_lock_interruptible(&vsock_register_mutex);

	if (err)
		return err;

	if (transport) {
		err = -EBUSY;
		goto err_busy;
	}

	/* Transport must be the owner of the protocol so that it can't
	 * unload while there are open sockets.
	 */
	vsock_proto.owner = owner;
	transport = t;

	vsock_init_tables();

	vsock_device.minor = MISC_DYNAMIC_MINOR;
	err = misc_register(&vsock_device);
	if (err) {
		pr_err("Failed to register misc device\n");
		/* Don't return directly here: we must drop the mutex and
		 * reset the transport pointer on this failure path too.
		 */
		err = -ENOENT;
		goto err_reset_transport;
	}

	err = proto_register(&vsock_proto, 1);	/* we want our slab */
	if (err) {
		pr_err("Cannot register vsock protocol\n");
		goto err_misc_deregister;
	}

	err = sock_register(&vsock_family_ops);
	if (err) {
		pr_err("could not register af_vsock (%d) address family: %d\n",
		       AF_VSOCK, err);
		goto err_unregister_proto;
	}

	mutex_unlock(&vsock_register_mutex);
	return 0;

err_unregister_proto:
	proto_unregister(&vsock_proto);
err_misc_deregister:
	misc_deregister(&vsock_device);
err_reset_transport:
	transport = NULL;
err_busy:
	mutex_unlock(&vsock_register_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(__vsock_core_init);

void vsock_core_exit(void)
{
	mutex_lock(&vsock_register_mutex);

	misc_deregister(&vsock_device);
	sock_unregister(AF_VSOCK);
	proto_unregister(&vsock_proto);

	/* We do not want the assignment below re-ordered. */
	mb();
	transport = NULL;

	mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.1.0-k");
MODULE_LICENSE("GPL v2");