/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic socket support routines. Memory allocators, socket lock/release
 *		handler for protocols to use and generic option handler.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Alan Cox, <A.Cox@swansea.ac.uk>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() problems
 *		Alan Cox	:	Connecting on a connecting socket
 *					now returns an error for tcp.
 *		Alan Cox	:	sock->protocol is set correctly.
 *					and is not sometimes left as 0.
 *		Alan Cox	:	connect handles icmp errors on a
 *					connect properly. Unfortunately there
 *					is a restart syscall nasty there. I
 *					can't match BSD without hacking the C
 *					library. Ideas urgently sought!
 *		Alan Cox	:	Disallow bind() to addresses that are
 *					not ours - especially broadcast ones!!
 *		Alan Cox	:	Socket 1024 _IS_ ok for users. (fencepost)
 *		Alan Cox	:	sock_wfree/sock_rfree don't destroy sockets,
 *					instead they leave that for the DESTROY timer.
 *		Alan Cox	:	Clean up error flag in accept
 *		Alan Cox	:	TCP ack handling is buggy, the DESTROY timer
 *					was buggy. Put a remove_sock() in the handler
 *					for memory when we hit 0. Also altered the timer
 *					code. The ACK stuff can wait and needs major
 *					TCP layer surgery.
 *		Alan Cox	:	Fixed TCP ack bug, removed remove sock
 *					and fixed timer/inet_bh race.
 *		Alan Cox	:	Added zapped flag for TCP
 *		Alan Cox	:	Move kfree_skb into skbuff.c and tidied up surplus code
 *		Alan Cox	:	for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
 *		Alan Cox	:	kfree_s calls now are kfree_skbmem so we can track skb resources
 *		Alan Cox	:	Supports socket option broadcast now as does udp. Packet and raw need fixing.
 *		Alan Cox	:	Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
 *		Rick Sladkey	:	Relaxed UDP rules for matching packets.
 *		C.E.Hawkins	:	IFF_PROMISC/SIOCGHWADDR support
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Fixed connect() taking signals I think.
 *		Alan Cox	:	SO_LINGER supported
 *		Alan Cox	:	Error reporting fixes
 *		Anonymous	:	inet_create tidied up (sk->reuse setting)
 *		Alan Cox	:	inet sockets don't set sk->type!
 *		Alan Cox	:	Split socket option code
 *		Alan Cox	:	Callbacks
 *		Alan Cox	:	Nagle flag for Charles & Johannes stuff
 *		Alex		:	Removed restriction on inet fioctl
 *		Alan Cox	:	Splitting INET from NET core
 *		Alan Cox	:	Fixed bogus SO_TYPE handling in getsockopt()
 *		Adam Caldwell	:	Missing return in SO_DONTROUTE/SO_DEBUG code
 *		Alan Cox	:	Split IP from generic code
 *		Alan Cox	:	New kfree_skbmem()
 *		Alan Cox	:	Make SO_DEBUG superuser only.
 *		Alan Cox	:	Allow anyone to clear SO_DEBUG
 *					(compatibility fix)
 *		Alan Cox	:	Added optimistic memory grabbing for AF_UNIX throughput.
 *		Alan Cox	:	Allocator for a socket is settable.
 *		Alan Cox	:	SO_ERROR includes soft errors.
 *		Alan Cox	:	Allow NULL arguments on some SO_ opts
 *		Alan Cox	:	Generic socket allocation to make hooks
 *					easier (suggested by Craig Metz).
 *		Michael Pall	:	SO_ERROR returns positive errno again
 *		Steve Whitehouse:	Added default destructor to free
 *					protocol private data.
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
 *		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *		Andi Kleen	:	Fix write_space callback
 *		Chris Evans	:	Security fixes - signedness again
 *		Arnaldo C. Melo	:	cleanups, use skb_queue_purge
 *
 * To Fix:
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/netdevice.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <linux/net_tstamp.h>
#include <net/xfrm.h>
#include <linux/ipsec.h>

#include <linux/filter.h>

#ifdef CONFIG_INET
#include <net/tcp.h>
#endif

/*
 * Each address family might have different locking rules, so we have
 * one slock key per address family:
 */
static struct lock_class_key af_family_keys[AF_MAX];
static struct lock_class_key af_family_slock_keys[AF_MAX];

/*
 * Make lock validator output more readable. (we pre-construct these
 * strings build-time, so that runtime initialization of socket
 * locks is fast):
 */
static const char *const af_family_key_strings[AF_MAX+1] = {
  "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX"     , "sk_lock-AF_INET"     ,
  "sk_lock-AF_AX25"  , "sk_lock-AF_IPX"      , "sk_lock-AF_APPLETALK",
  "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE"   , "sk_lock-AF_ATMPVC"   ,
  "sk_lock-AF_X25"   , "sk_lock-AF_INET6"    , "sk_lock-AF_ROSE"     ,
  "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI"  , "sk_lock-AF_SECURITY" ,
  "sk_lock-AF_KEY"   , "sk_lock-AF_NETLINK"  , "sk_lock-AF_PACKET"   ,
  "sk_lock-AF_ASH"   , "sk_lock-AF_ECONET"   , "sk_lock-AF_ATMSVC"   ,
  "sk_lock-AF_RDS"   , "sk_lock-AF_SNA"      , "sk_lock-AF_IRDA"     ,
  "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE"  , "sk_lock-AF_LLC"      ,
  "sk_lock-27"       , "sk_lock-28"          , "sk_lock-AF_CAN"      ,
  "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-AF_IUCV"     ,
  "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
  "sk_lock-AF_IEEE802154",
  "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
  "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
  "slock-AF_AX25"  , "slock-AF_IPX"      , "slock-AF_APPLETALK",
  "slock-AF_NETROM", "slock-AF_BRIDGE"   , "slock-AF_ATMPVC"   ,
  "slock-AF_X25"   , "slock-AF_INET6"    , "slock-AF_ROSE"     ,
  "slock-AF_DECnet", "slock-AF_NETBEUI"  , "slock-AF_SECURITY" ,
  "slock-AF_KEY"   , "slock-AF_NETLINK"  , "slock-AF_PACKET"   ,
  "slock-AF_ASH"   , "slock-AF_ECONET"   , "slock-AF_ATMSVC"   ,
  "slock-AF_RDS"   , "slock-AF_SNA"      , "slock-AF_IRDA"     ,
  "slock-AF_PPPOX" , "slock-AF_WANPIPE"  , "slock-AF_LLC"      ,
  "slock-27"       , "slock-28"          , "slock-AF_CAN"      ,
  "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
  "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
  "slock-AF_IEEE802154",
  "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
  "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
  "clock-AF_AX25"  , "clock-AF_IPX"      , "clock-AF_APPLETALK",
  "clock-AF_NETROM", "clock-AF_BRIDGE"   , "clock-AF_ATMPVC"   ,
  "clock-AF_X25"   , "clock-AF_INET6"    , "clock-AF_ROSE"     ,
  "clock-AF_DECnet", "clock-AF_NETBEUI"  , "clock-AF_SECURITY" ,
  "clock-AF_KEY"   , "clock-AF_NETLINK"  , "clock-AF_PACKET"   ,
  "clock-AF_ASH"   , "clock-AF_ECONET"   , "clock-AF_ATMSVC"   ,
  "clock-AF_RDS"   , "clock-AF_SNA"      , "clock-AF_IRDA"     ,
  "clock-AF_PPPOX" , "clock-AF_WANPIPE"  , "clock-AF_LLC"      ,
  "clock-27"       , "clock-28"          , "clock-AF_CAN"      ,
  "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
  "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
  "clock-AF_IEEE802154",
  "clock-AF_MAX"
};

/*
 * sk_callback_lock locking rules are per-address-family,
 * so split the lock classes by using a per-AF key:
 */
static struct lock_class_key af_callback_keys[AF_MAX];

/* Take into consideration the size of the struct sk_buff overhead in the
 * determination of these values, since that is non-constant across
 * platforms. This makes socket queueing behavior and performance
 * not depend upon such differences.
 */
#define _SK_MEM_PACKETS		256
#define _SK_MEM_OVERHEAD	(sizeof(struct sk_buff) + 256)
#define SK_WMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
#define SK_RMEM_MAX		(_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
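
/*
 * Worked example (hypothetical numbers, for illustration only): if
 * sizeof(struct sk_buff) were 200 bytes on a given platform, then
 * _SK_MEM_OVERHEAD = 200 + 256 = 456 bytes per packet, and
 * SK_WMEM_MAX = SK_RMEM_MAX = 456 * 256 = 116736 bytes (~114 KiB).
 * On a platform with a larger sk_buff the defaults grow accordingly,
 * which is exactly the point of folding the overhead into the formula.
 */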

/* Run time adjustable parameters. */
__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;

/* Maximal space eaten by iovec or ancillary data plus some space */
int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
EXPORT_SYMBOL(sysctl_optmem_max);

static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
{
	struct timeval tv;

	if (optlen < sizeof(tv))
		return -EINVAL;
	if (copy_from_user(&tv, optval, sizeof(tv)))
		return -EFAULT;
	if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
		return -EDOM;

	if (tv.tv_sec < 0) {
		static int warned __read_mostly;

		*timeo_p = 0;
		if (warned < 10 && net_ratelimit()) {
			warned++;
			printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
			       "tries to set negative timeout\n",
			       current->comm, task_pid_nr(current));
		}
		return 0;
	}
	*timeo_p = MAX_SCHEDULE_TIMEOUT;
	if (tv.tv_sec == 0 && tv.tv_usec == 0)
		return 0;
	if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
		*timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
	return 0;
}
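
/*
 * A minimal userspace sketch of the path above (illustrative only, not
 * part of this file's API): setting a 2.5 second receive timeout ends
 * up here via sock_setsockopt(SO_RCVTIMEO).
 *
 *	struct timeval tv = { .tv_sec = 2, .tv_usec = 500000 };
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
 *		perror("setsockopt");
 *
 * A tv_usec outside [0, 1000000) gets -EDOM back, and a negative
 * tv_sec is clamped to a zero timeout with a rate-limited warning,
 * as implemented above.
 */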

static void sock_warn_obsolete_bsdism(const char *name)
{
	static int warned;
	static char warncomm[TASK_COMM_LEN];
	if (strcmp(warncomm, current->comm) && warned < 5) {
		strcpy(warncomm, current->comm);
		printk(KERN_WARNING "process `%s' is using obsolete "
		       "%s SO_BSDCOMPAT\n", warncomm, name);
		warned++;
	}
}

static void sock_disable_timestamp(struct sock *sk, int flag)
{
	if (sock_flag(sk, flag)) {
		sock_reset_flag(sk, flag);
		if (!sock_flag(sk, SOCK_TIMESTAMP) &&
		    !sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE)) {
			net_disable_timestamp();
		}
	}
}

int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		atomic_inc(&sk->sk_drops);
		return -ENOMEM;
	}

	err = sk_filter(sk, skb);
	if (err)
		return err;

	if (!sk_rmem_schedule(sk, skb->truesize)) {
		atomic_inc(&sk->sk_drops);
		return -ENOBUFS;
	}

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	spin_lock_irqsave(&list->lock, flags);
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
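
/*
 * A minimal sketch of how a datagram protocol might feed this helper
 * (hypothetical handler, for illustration): the caller still owns the
 * skb on failure and typically drops it, since sock_queue_rcv_skb()
 * does not free the skb when it returns an error.
 *
 *	static int toy_proto_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (sock_queue_rcv_skb(sk, skb) < 0) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		return NET_RX_SUCCESS;
 *	}
 */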

int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else if (sk_add_backlog(sk, skb)) {
		bh_unlock_sock(sk);
		atomic_inc(&sk->sk_drops);
		goto discard_and_relse;
	}

	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

void sk_reset_txq(struct sock *sk)
{
	sk_tx_queue_clear(sk);
}
EXPORT_SYMBOL(sk_reset_txq);

struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_tx_queue_clear(sk);
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);

static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
{
	int ret = -ENOPROTOOPT;
#ifdef CONFIG_NETDEVICES
	struct net *net = sock_net(sk);
	char devname[IFNAMSIZ];
	int index;

	/* Sorry... */
	ret = -EPERM;
	if (!capable(CAP_NET_RAW))
		goto out;

	ret = -EINVAL;
	if (optlen < 0)
		goto out;

	/* Bind this socket to a particular device like "eth0",
	 * as specified in the passed interface name. If the
	 * name is "" or the option length is zero the socket
	 * is not bound.
	 */
	if (optlen > IFNAMSIZ - 1)
		optlen = IFNAMSIZ - 1;
	memset(devname, 0, sizeof(devname));

	ret = -EFAULT;
	if (copy_from_user(devname, optval, optlen))
		goto out;

	index = 0;
	if (devname[0] != '\0') {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_name_rcu(net, devname);
		if (dev)
			index = dev->ifindex;
		rcu_read_unlock();
		ret = -ENODEV;
		if (!dev)
			goto out;
	}

	lock_sock(sk);
	sk->sk_bound_dev_if = index;
	sk_dst_reset(sk);
	release_sock(sk);

	ret = 0;

out:
#endif

	return ret;
}

static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
{
	if (valbool)
		sock_set_flag(sk, bit);
	else
		sock_reset_flag(sk, bit);
}

/*
 *	This is meant for all protocols to use and covers goings on
 *	at the socket level. Everything here is generic.
 */

int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

	if (optname == SO_BINDTODEVICE)
		return sock_bindtodevice(sk, optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else
			sock_valbool_flag(sk, SOCK_DBG, valbool);
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;
	case SO_TYPE:
	case SO_PROTOCOL:
	case SO_DOMAIN:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;
	case SO_DONTROUTE:
		sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
		break;
	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;
	case SO_SNDBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/*
		 *	Wake up sending tasks if we
		 *	upped the value.
		 */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't error on this; BSD doesn't, and if you think
		   about it this is right. Otherwise apps have to
		   play 'guess the biggest size' games. RCVBUF/SNDBUF
		   are treated in BSD as hints */

		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/*
		 * We double it on the way in to account for
		 * "struct sk_buff" etc. overhead. Applications
		 * assume that the SO_RCVBUF setting they make will
		 * allow that much actual data to be received on that
		 * socket.
		 *
		 * Applications are unaware that "struct sk_buff" and
		 * other overheads allocate from the receive buffer
		 * during socket buffer allocation.
		 *
		 * And after considering the possible alternatives,
		 * returning the value we actually used in getsockopt
		 * is the most desirable behavior.
		 */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
	case SO_TIMESTAMPNS:
		if (valbool) {
			if (optname == SO_TIMESTAMP)
				sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
			else
				sock_set_flag(sk, SOCK_RCVTSTAMPNS);
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk, SOCK_TIMESTAMP);
		} else {
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
			sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
		}
		break;

	case SO_TIMESTAMPING:
		if (val & ~SOF_TIMESTAMPING_MASK) {
			ret = -EINVAL;
			break;
		}
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE,
				  val & SOF_TIMESTAMPING_TX_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE,
				  val & SOF_TIMESTAMPING_TX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE,
				  val & SOF_TIMESTAMPING_RX_HARDWARE);
		if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
			sock_enable_timestamp(sk,
					      SOCK_TIMESTAMPING_RX_SOFTWARE);
		else
			sock_disable_timestamp(sk,
					       SOCK_TIMESTAMPING_RX_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
				  val & SOF_TIMESTAMPING_SOFTWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
				  val & SOF_TIMESTAMPING_SYS_HARDWARE);
		sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
				  val & SOF_TIMESTAMPING_RAW_HARDWARE);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		ret = sk_detach_filter(sk);
		break;

	case SO_PASSSEC:
		if (valbool)
			set_bit(SOCK_PASSSEC, &sock->flags);
		else
			clear_bit(SOCK_PASSSEC, &sock->flags);
		break;
	case SO_MARK:
		if (!capable(CAP_NET_ADMIN))
			ret = -EPERM;
		else
			sk->sk_mark = val;
		break;

		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
	case SO_RXQ_OVFL:
		if (valbool)
			sock_set_flag(sk, SOCK_RXQ_OVFL);
		else
			sock_reset_flag(sk, SOCK_RXQ_OVFL);
		break;
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
EXPORT_SYMBOL(sock_setsockopt);
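
/*
 * Illustrative userspace round-trip (not part of this file): the value
 * passed to SO_SNDBUF/SO_RCVBUF is doubled on the way in to cover the
 * struct sk_buff overhead, and getsockopt() reports the doubled value.
 *
 *	int val = 65536, out;
 *	socklen_t len = sizeof(out);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
 *	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &out, &len);
 *	// out is now 131072 (2 * 65536), assuming 65536 <= sysctl_wmem_max
 */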

int sock_getsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;

	union {
		int val;
		struct linger ling;
		struct timeval tm;
	} v;

	int lv = sizeof(int);
	int len;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	memset(&v, 0, sizeof(v));

	switch (optname) {
	case SO_DEBUG:
		v.val = sock_flag(sk, SOCK_DBG);
		break;

	case SO_DONTROUTE:
		v.val = sock_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		v.val = !!sock_flag(sk, SOCK_BROADCAST);
		break;

	case SO_SNDBUF:
		v.val = sk->sk_sndbuf;
		break;

	case SO_RCVBUF:
		v.val = sk->sk_rcvbuf;
		break;

	case SO_REUSEADDR:
		v.val = sk->sk_reuse;
		break;

	case SO_KEEPALIVE:
		v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
		break;

	case SO_TYPE:
		v.val = sk->sk_type;
		break;

	case SO_PROTOCOL:
		v.val = sk->sk_protocol;
		break;

	case SO_DOMAIN:
		v.val = sk->sk_family;
		break;

	case SO_ERROR:
		v.val = -sock_error(sk);
		if (v.val == 0)
			v.val = xchg(&sk->sk_err_soft, 0);
		break;

	case SO_OOBINLINE:
		v.val = !!sock_flag(sk, SOCK_URGINLINE);
		break;

	case SO_NO_CHECK:
		v.val = sk->sk_no_check;
		break;

	case SO_PRIORITY:
		v.val = sk->sk_priority;
		break;

	case SO_LINGER:
		lv = sizeof(v.ling);
		v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
		v.ling.l_linger = sk->sk_lingertime / HZ;
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("getsockopt");
		break;

	case SO_TIMESTAMP:
		v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
				!sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPNS:
		v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
		break;

	case SO_TIMESTAMPING:
		v.val = 0;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_TX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_TX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RX_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
			v.val |= SOF_TIMESTAMPING_SOFTWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
			v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
		if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
			v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
		break;

	case SO_RCVTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_SNDTIMEO:
		lv = sizeof(struct timeval);
		if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
			v.tm.tv_sec = 0;
			v.tm.tv_usec = 0;
		} else {
			v.tm.tv_sec = sk->sk_sndtimeo / HZ;
			v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
		}
		break;

	case SO_RCVLOWAT:
		v.val = sk->sk_rcvlowat;
		break;

	case SO_SNDLOWAT:
		v.val = 1;
		break;

	case SO_PASSCRED:
		v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERCRED:
		if (len > sizeof(sk->sk_peercred))
			len = sizeof(sk->sk_peercred);
		if (copy_to_user(optval, &sk->sk_peercred, len))
			return -EFAULT;
		goto lenout;

	case SO_PEERNAME:
	{
		char address[128];

		if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
			return -ENOTCONN;
		if (lv < len)
			return -EINVAL;
		if (copy_to_user(optval, address, len))
			return -EFAULT;
		goto lenout;
	}

	/* Dubious BSD thing... Probably nobody even uses it, but
	 * the UNIX standard wants it for whatever reason... -DaveM
	 */
	case SO_ACCEPTCONN:
		v.val = sk->sk_state == TCP_LISTEN;
		break;

	case SO_PASSSEC:
		v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
		break;

	case SO_PEERSEC:
		return security_socket_getpeersec_stream(sock, optval, optlen, len);

	case SO_MARK:
		v.val = sk->sk_mark;
		break;

	case SO_RXQ_OVFL:
		v.val = !!sock_flag(sk, SOCK_RXQ_OVFL);
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (copy_to_user(optval, &v, len))
		return -EFAULT;
lenout:
	if (put_user(len, optlen))
		return -EFAULT;
	return 0;
}
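
/*
 * Illustrative only: SO_ERROR is read-and-clear. After a failed
 * non-blocking connect(), userspace typically retrieves the pending
 * error once via getsockopt(); a second call returns 0, because
 * sock_error()/xchg() above consume it.
 *
 *	int err;
 *	socklen_t len = sizeof(err);
 *
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *	// err holds the positive errno (e.g. ECONNREFUSED) or 0
 */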

/*
 * Initialize an sk_lock.
 *
 * (We also register the sk_lock with the lock validator.)
 */
static inline void sock_lock_init(struct sock *sk)
{
	sock_lock_init_class_and_name(sk,
			af_family_slock_key_strings[sk->sk_family],
			af_family_slock_keys + sk->sk_family,
			af_family_key_strings[sk->sk_family],
			af_family_keys + sk->sk_family);
}

/*
 * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
 * even temporarily, because of RCU lookups. sk_node should also be left as is.
 */
static void sock_copy(struct sock *nsk, const struct sock *osk)
{
#ifdef CONFIG_SECURITY_NETWORK
	void *sptr = nsk->sk_security;
#endif
	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
		     sizeof(osk->sk_tx_queue_mapping));
	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
#ifdef CONFIG_SECURITY_NETWORK
	nsk->sk_security = sptr;
	security_sk_clone(osk, nsk);
#endif
}

static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
		int family)
{
	struct sock *sk;
	struct kmem_cache *slab;

	slab = prot->slab;
	if (slab != NULL) {
		sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
		if (!sk)
			return sk;
		if (priority & __GFP_ZERO) {
			/*
			 * caches using SLAB_DESTROY_BY_RCU should leave
			 * sk_node.next unmodified. Special care is taken
			 * when initializing the object to zero.
			 */
			if (offsetof(struct sock, sk_node.next) != 0)
				memset(sk, 0, offsetof(struct sock, sk_node.next));
			memset(&sk->sk_node.pprev, 0,
			       prot->obj_size - offsetof(struct sock,
							 sk_node.pprev));
		}
	} else
		sk = kmalloc(prot->obj_size, priority);

	if (sk != NULL) {
		kmemcheck_annotate_bitfield(sk, flags);

		if (security_sk_alloc(sk, family, priority))
			goto out_free;

		if (!try_module_get(prot->owner))
			goto out_free_sec;
		sk_tx_queue_clear(sk);
	}

	return sk;

out_free_sec:
	security_sk_free(sk);
out_free:
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	return NULL;
}

static void sk_prot_free(struct proto *prot, struct sock *sk)
{
	struct kmem_cache *slab;
	struct module *owner;

	owner = prot->owner;
	slab = prot->slab;

	security_sk_free(sk);
	if (slab != NULL)
		kmem_cache_free(slab, sk);
	else
		kfree(sk);
	module_put(owner);
}

/**
 *	sk_alloc - All socket objects are allocated here
 *	@net: the applicable net namespace
 *	@family: protocol family
 *	@priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *	@prot: struct proto associated with this new sock instance
 */
struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
		      struct proto *prot)
{
	struct sock *sk;

	sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
	if (sk) {
		sk->sk_family = family;
		/*
		 * See comment in struct sock definition to understand
		 * why we need sk_prot_creator -acme
		 */
		sk->sk_prot = sk->sk_prot_creator = prot;
		sock_lock_init(sk);
		sock_net_set(sk, get_net(net));
		atomic_set(&sk->sk_wmem_alloc, 1);
	}

	return sk;
}
EXPORT_SYMBOL(sk_alloc);

static void __sk_free(struct sock *sk)
{
	struct sk_filter *filter;

	if (sk->sk_destruct)
		sk->sk_destruct(sk);

	filter = rcu_dereference_check(sk->sk_filter,
				       atomic_read(&sk->sk_wmem_alloc) == 0);
	if (filter) {
		sk_filter_uncharge(sk, filter);
		rcu_assign_pointer(sk->sk_filter, NULL);
	}

	sock_disable_timestamp(sk, SOCK_TIMESTAMP);
	sock_disable_timestamp(sk, SOCK_TIMESTAMPING_RX_SOFTWARE);

	if (atomic_read(&sk->sk_omem_alloc))
		printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
		       __func__, atomic_read(&sk->sk_omem_alloc));

	put_net(sock_net(sk));
	sk_prot_free(sk->sk_prot_creator, sk);
}

void sk_free(struct sock *sk)
{
	/*
	 * We subtract one from sk_wmem_alloc and can tell whether
	 * some packets are still in some tx queue.
	 * If not null, sock_wfree() will call __sk_free(sk) later
	 */
	if (atomic_dec_and_test(&sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);

/*
 * The last sock_put should drop the reference to sk->sk_net. It has
 * already been dropped in sk_change_net. Taking a reference to the
 * stopping namespace is not an option.
 * Take a reference to the socket to remove it from the hash _alive_ and
 * after that destroy it in the context of init_net.
 */
void sk_release_kernel(struct sock *sk)
{
	if (sk == NULL || sk->sk_socket == NULL)
		return;

	sock_hold(sk);
	sock_release(sk->sk_socket);
	release_net(sock_net(sk));
	sock_net_set(sk, get_net(&init_net));
	sock_put(sk);
}
EXPORT_SYMBOL(sk_release_kernel);

struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
{
	struct sock *newsk;

	newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
	if (newsk != NULL) {
		struct sk_filter *filter;

		sock_copy(newsk, sk);

		/* SANITY */
		get_net(sock_net(newsk));
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_backlog.len = 0;

		atomic_set(&newsk->sk_rmem_alloc, 0);
		/*
		 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
		 */
		atomic_set(&newsk->sk_wmem_alloc, 1);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);
#ifdef CONFIG_NET_DMA
		skb_queue_head_init(&newsk->sk_async_wait_queue);
#endif

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);
		lockdep_set_class_and_name(&newsk->sk_callback_lock,
				af_callback_keys + newsk->sk_family,
				af_family_clock_key_strings[newsk->sk_family]);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still a raw copy of the parent, so
			 * invalidate the destructor and do a plain
			 * sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		/*
		 * Before updating sk_refcnt, we must commit prior changes to memory
		 * (Documentation/RCU/rculist_nulls.txt for details)
		 */
		smp_wmb();
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		sk_set_socket(newsk, NULL);
		newsk->sk_sleep = NULL;

		if (newsk->sk_prot->sockets_allocated)
			percpu_counter_inc(newsk->sk_prot->sockets_allocated);

		if (sock_flag(newsk, SOCK_TIMESTAMP) ||
		    sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
			net_enable_timestamp();
	}
out:
	return newsk;
}
EXPORT_SYMBOL_GPL(sk_clone);

void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);
	sk->sk_route_caps = dst->dev->features;
	if (sk->sk_route_caps & NETIF_F_GSO)
		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
	if (sk_can_gso(sk)) {
		if (dst->header_len) {
			sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
		} else {
			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
			sk->sk_gso_max_size = dst->dev->gso_max_size;
		}
	}
}
EXPORT_SYMBOL_GPL(sk_setup_caps);

void __init sk_init(void)
{
	if (totalram_pages <= 4096) {
		sysctl_wmem_max = 32767;
		sysctl_rmem_max = 32767;
		sysctl_wmem_default = 32767;
		sysctl_rmem_default = 32767;
	} else if (totalram_pages >= 131072) {
		sysctl_wmem_max = 131071;
		sysctl_rmem_max = 131071;
	}
}

/*
 *	Simple resource managers for sockets.
 */

/*
 * Write buffer destructor automatically called from kfree_skb.
 */
void sock_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	unsigned int len = skb->truesize;

	if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE)) {
		/*
		 * Keep a reference on sk_wmem_alloc; it will be released
		 * after the sk_write_space() call
		 */
		atomic_sub(len - 1, &sk->sk_wmem_alloc);
		sk->sk_write_space(sk);
		len = 1;
	}
	/*
	 * if sk_wmem_alloc reaches 0, we must finish what sk_free()
	 * could not do because of in-flight packets
	 */
	if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
		__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);

/*
 * Read buffer destructor automatically called from kfree_skb.
 */
void sock_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_uncharge(skb->sk, skb->truesize);
}
EXPORT_SYMBOL(sock_rfree);


int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->sk_callback_lock);
	uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
	read_unlock(&sk->sk_callback_lock);
	return uid;
}
EXPORT_SYMBOL(sock_i_uid);

unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->sk_callback_lock);
	ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
	read_unlock(&sk->sk_callback_lock);
	return ino;
}
EXPORT_SYMBOL(sock_i_ino);
/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(sock_wmalloc);

/*
 * Allocate a skb from the socket's receive buffer.
 */
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
			     gfp_t priority)
{
	if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}

/*
 * Allocate a memory block from the socket's option memory buffer.
 */
void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
{
	if ((unsigned)size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		void *mem;
		/* First do the add, to avoid the race if kmalloc
		 * might sleep.
		 */
		atomic_add(size, &sk->sk_omem_alloc);
		mem = kmalloc(size, priority);
		if (mem)
			return mem;
		atomic_sub(size, &sk->sk_omem_alloc);
	}
	return NULL;
}
EXPORT_SYMBOL(sock_kmalloc);

/*
 * Free an option memory block.
 */
void sock_kfree_s(struct sock *sk, void *mem, int size)
{
	kfree(mem);
	atomic_sub(size, &sk->sk_omem_alloc);
}
EXPORT_SYMBOL(sock_kfree_s);
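
/*
 * A minimal in-kernel usage sketch (hypothetical option handler, for
 * illustration): sock_kmalloc()/sock_kfree_s() must be paired with the
 * same size so sk_omem_alloc balances back to zero; otherwise
 * __sk_free() reports "optmem leakage" at socket teardown.
 *
 *	u32 *opt = sock_kmalloc(sk, sizeof(*opt), GFP_KERNEL);
 *	if (!opt)
 *		return -ENOBUFS;
 *	...
 *	sock_kfree_s(sk, opt, sizeof(*opt));
 */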

/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
   I think these locks should be removed for datagram sockets.
 */
static long sock_wait_for_wmem(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
	for (;;) {
		if (!timeo)
			break;
		if (signal_pending(current))
			break;
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
			break;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			break;
		if (sk->sk_err)
			break;
		timeo = schedule_timeout(timeo);
	}
	finish_wait(sk->sk_sleep, &wait);
	return timeo;
}

/*
 *	Generic send/receive buffer handlers
 */

struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
				     unsigned long data_len, int noblock,
				     int *errcode)
{
	struct sk_buff *skb;
	gfp_t gfp_mask;
	long timeo;
	int err;

	gfp_mask = sk->sk_allocation;
	if (gfp_mask & __GFP_WAIT)
		gfp_mask |= __GFP_REPEAT;

	timeo = sock_sndtimeo(sk, noblock);
	while (1) {
		err = sock_error(sk);
		if (err != 0)
			goto failure;

		err = -EPIPE;
		if (sk->sk_shutdown & SEND_SHUTDOWN)
			goto failure;

		if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
			skb = alloc_skb(header_len, gfp_mask);
			if (skb) {
				int npages;
				int i;

				/* No pages, we're done... */
				if (!data_len)
					break;

				npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
				skb->truesize += data_len;
				skb_shinfo(skb)->nr_frags = npages;
				for (i = 0; i < npages; i++) {
					struct page *page;
					skb_frag_t *frag;

					page = alloc_pages(sk->sk_allocation, 0);
					if (!page) {
						err = -ENOBUFS;
						skb_shinfo(skb)->nr_frags = i;
						kfree_skb(skb);
						goto failure;
					}

					frag = &skb_shinfo(skb)->frags[i];
					frag->page = page;
					frag->page_offset = 0;
					frag->size = (data_len >= PAGE_SIZE ?
						      PAGE_SIZE :
						      data_len);
					data_len -= PAGE_SIZE;
				}

				/* Full success... */
				break;
			}
			err = -ENOBUFS;
			goto failure;
		}
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = -EAGAIN;
		if (!timeo)
			goto failure;
		if (signal_pending(current))
			goto interrupted;
		timeo = sock_wait_for_wmem(sk, timeo);
	}

	skb_set_owner_w(skb, sk);
	return skb;

interrupted:
	err = sock_intr_errno(timeo);
failure:
	*errcode = err;
	return NULL;
}
EXPORT_SYMBOL(sock_alloc_send_pskb);

struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
				    int noblock, int *errcode)
{
	return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
}
EXPORT_SYMBOL(sock_alloc_send_skb);

static void __lock_sock(struct sock *sk)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&sk->sk_lock.slock);
		schedule();
		spin_lock_bh(&sk->sk_lock.slock);
		if (!sock_owned_by_user(sk))
			break;
	}
	finish_wait(&sk->sk_lock.wq, &wait);
}

static void __release_sock(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk_backlog_rcv(sk, skb);

			/*
			 * We are in process context here with softirqs
			 * disabled, use cond_resched_softirq() to preempt.
			 * This is safe to do because we've taken the backlog
			 * queue private:
			 */
			cond_resched_softirq();

			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/*
	 * Doing the zeroing here guarantees we cannot loop forever
	 * while a wild producer attempts to flood us.
	 */
	sk->sk_backlog.len = 0;
}

/**
 * sk_wait_data - wait for data to arrive at sk_receive_queue
 * @sk:    sock to wait on
 * @timeo: for how long
 *
 * Now socket state including sk->sk_err is changed only under lock,
 * hence we may omit checks after joining the wait queue.
 * We check the receive queue before schedule() only as an optimization;
 * it is very likely that release_sock() added new data.
 */
int sk_wait_data(struct sock *sk, long *timeo)
{
	int rc;
	DEFINE_WAIT(wait);

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	finish_wait(sk->sk_sleep, &wait);
	return rc;
}
EXPORT_SYMBOL(sk_wait_data);

/**
 *	__sk_mem_schedule - increase sk_forward_alloc and memory_allocated
 *	@sk: socket
 *	@size: memory size to allocate
 *	@kind: allocation type
 *
 *	If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
 *	rmem allocation. This function assumes that protocols which have
 *	memory_pressure use sk_wmem_queued as write buffer accounting.
 */
int __sk_mem_schedule(struct sock *sk, int size, int kind)
{
	struct proto *prot = sk->sk_prot;
	int amt = sk_mem_pages(size);
	int allocated;

	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
	allocated = atomic_add_return(amt, prot->memory_allocated);

	/* Under limit. */
	if (allocated <= prot->sysctl_mem[0]) {
		if (prot->memory_pressure && *prot->memory_pressure)
			*prot->memory_pressure = 0;
		return 1;
	}

	/* Under pressure. */
	if (allocated > prot->sysctl_mem[1])
		if (prot->enter_memory_pressure)
			prot->enter_memory_pressure(sk);

	/* Over hard limit. */
	if (allocated > prot->sysctl_mem[2])
		goto suppress_allocation;

	/* guarantee minimum buffer size under pressure */
	if (kind == SK_MEM_RECV) {
		if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
			return 1;
	} else { /* SK_MEM_SEND */
		if (sk->sk_type == SOCK_STREAM) {
			if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
				return 1;
		} else if (atomic_read(&sk->sk_wmem_alloc) <
			   prot->sysctl_wmem[0])
			return 1;
	}

	if (prot->memory_pressure) {
		int alloc;

		if (!*prot->memory_pressure)
			return 1;
		alloc = percpu_counter_read_positive(prot->sockets_allocated);
		if (prot->sysctl_mem[2] > alloc *
		    sk_mem_pages(sk->sk_wmem_queued +
				 atomic_read(&sk->sk_rmem_alloc) +
				 sk->sk_forward_alloc))
			return 1;
	}

suppress_allocation:

	if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
		sk_stream_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so we have to fail.
		 */
		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
	atomic_sub(amt, prot->memory_allocated);
	return 0;
}
EXPORT_SYMBOL(__sk_mem_schedule);
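
/*
 * Worked example (illustrative numbers): with SK_MEM_QUANTUM equal to
 * PAGE_SIZE (4096 on most platforms), a request to schedule 6000 bytes
 * gives sk_mem_pages(6000) = 2 quanta, so sk_forward_alloc grows by
 * 8192 bytes and prot->memory_allocated by 2 pages. The unused
 * 8192 - 6000 = 2192 bytes stay in sk_forward_alloc for later
 * allocations until __sk_mem_reclaim() returns whole quanta.
 */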

/**
 *	__sk_mem_reclaim - reclaim memory_allocated
 *	@sk: socket
 */
void __sk_mem_reclaim(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
		   prot->memory_allocated);
	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;

	if (prot->memory_pressure && *prot->memory_pressure &&
	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
		*prot->memory_pressure = 0;
}
EXPORT_SYMBOL(__sk_mem_reclaim);


/*
 * Set of default routines for initialising struct proto_ops when
 * the protocol does not support a particular function. In certain
 * cases where it makes no sense for a protocol to have a "do nothing"
 * function, some default processing is provided.
 */

int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_bind);

int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
                    int len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_connect);

int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_socketpair);

int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_accept);

int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
                    int *len, int peer)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getname);

unsigned int sock_no_poll(struct file *file, struct socket *sock, poll_table *pt)
{
        return 0;
}
EXPORT_SYMBOL(sock_no_poll);

int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_ioctl);

int sock_no_listen(struct socket *sock, int backlog)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_listen);

int sock_no_shutdown(struct socket *sock, int how)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_shutdown);

int sock_no_setsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, unsigned int optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_setsockopt);

int sock_no_getsockopt(struct socket *sock, int level, int optname,
                       char __user *optval, int __user *optlen)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_getsockopt);

int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_sendmsg);

int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
                    size_t len, int flags)
{
        return -EOPNOTSUPP;
}
EXPORT_SYMBOL(sock_no_recvmsg);

int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
{
        /* Mirror missing mmap method error code */
        return -ENODEV;
}
EXPORT_SYMBOL(sock_no_mmap);

ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
        ssize_t res;
        struct msghdr msg = {.msg_flags = flags};
        struct kvec iov;
        char *kaddr = kmap(page);

        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
        res = kernel_sendmsg(sock, &msg, &iov, 1, size);
        kunmap(page);
        return res;
}
EXPORT_SYMBOL(sock_no_sendpage);
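
/*
 * A minimal sketch of how these stubs are meant to be used, with a
 * hypothetical protocol (example_ops and PF_EXAMPLE are illustrative
 * names, not real kernel symbols):
 *
 *      static const struct proto_ops example_ops = {
 *              .family     = PF_EXAMPLE,
 *              .owner      = THIS_MODULE,
 *              .bind       = sock_no_bind,
 *              .connect    = sock_no_connect,
 *              .socketpair = sock_no_socketpair,
 *              .accept     = sock_no_accept,
 *              .getname    = sock_no_getname,
 *              .ioctl      = sock_no_ioctl,
 *              .listen     = sock_no_listen,
 *              .shutdown   = sock_no_shutdown,
 *              .setsockopt = sock_no_setsockopt,
 *              .getsockopt = sock_no_getsockopt,
 *              .mmap       = sock_no_mmap,
 *              .sendpage   = sock_no_sendpage,
 *      };
 *
 * Unsupported calls then fail cleanly with -EOPNOTSUPP (-ENODEV for
 * mmap) instead of dereferencing a NULL method pointer.
 */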

/*
 *      Default Socket Callbacks
 */

static void sock_def_wakeup(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);
        if (sk_has_sleeper(sk))
                wake_up_interruptible_all(sk->sk_sleep);
        read_unlock(&sk->sk_callback_lock);
}

static void sock_def_error_report(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);
        if (sk_has_sleeper(sk))
                wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
        sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
        read_unlock(&sk->sk_callback_lock);
}

static void sock_def_readable(struct sock *sk, int len)
{
        read_lock(&sk->sk_callback_lock);
        if (sk_has_sleeper(sk))
                wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
                                                POLLRDNORM | POLLRDBAND);
        sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        read_unlock(&sk->sk_callback_lock);
}

static void sock_def_write_space(struct sock *sk)
{
        read_lock(&sk->sk_callback_lock);

        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
        if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
                if (sk_has_sleeper(sk))
                        wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
                                                        POLLWRNORM | POLLWRBAND);

                /* Should agree with poll, otherwise some programs break */
                if (sock_writeable(sk))
                        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }

        read_unlock(&sk->sk_callback_lock);
}
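
/*
 * Worked example of the threshold above, assuming a 64 KiB send
 * buffer: with sk_sndbuf == 65536, sleeping writers are only woken
 * once sk_wmem_alloc has drained to 32768 bytes or less, i.e. once at
 * least half the send buffer is free again. Waking on every freed skb
 * would just bounce writers between wakeup and immediate re-sleep.
 */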

static void sock_def_destruct(struct sock *sk)
{
        kfree(sk->sk_protinfo);
}

void sk_send_sigurg(struct sock *sk)
{
        if (sk->sk_socket && sk->sk_socket->file)
                if (send_sigurg(&sk->sk_socket->file->f_owner))
                        sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
}
EXPORT_SYMBOL(sk_send_sigurg);

void sk_reset_timer(struct sock *sk, struct timer_list *timer,
                    unsigned long expires)
{
        if (!mod_timer(timer, expires))
                sock_hold(sk);
}
EXPORT_SYMBOL(sk_reset_timer);

void sk_stop_timer(struct sock *sk, struct timer_list *timer)
{
        if (timer_pending(timer) && del_timer(timer))
                __sock_put(sk);
}
EXPORT_SYMBOL(sk_stop_timer);
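
/*
 * Usage sketch for the two timer helpers above (EXAMPLE_TIMEOUT is a
 * hypothetical constant):
 *
 *      sk_reset_timer(sk, &sk->sk_timer, jiffies + EXAMPLE_TIMEOUT);
 *      ...
 *      sk_stop_timer(sk, &sk->sk_timer);
 *
 * sk_reset_timer() takes a socket reference only when mod_timer()
 * reports that the timer was not already pending, and sk_stop_timer()
 * drops one only when it actually deleted a pending timer, so the
 * refcount stays balanced; a handler that does fire is expected to
 * drop its own reference with sock_put() when it is done.
 */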

void sock_init_data(struct socket *sock, struct sock *sk)
{
        skb_queue_head_init(&sk->sk_receive_queue);
        skb_queue_head_init(&sk->sk_write_queue);
        skb_queue_head_init(&sk->sk_error_queue);
#ifdef CONFIG_NET_DMA
        skb_queue_head_init(&sk->sk_async_wait_queue);
#endif

        sk->sk_send_head = NULL;

        init_timer(&sk->sk_timer);

        sk->sk_allocation = GFP_KERNEL;
        sk->sk_rcvbuf = sysctl_rmem_default;
        sk->sk_sndbuf = sysctl_wmem_default;
        sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
        sk->sk_state = TCP_CLOSE;
        sk_set_socket(sk, sock);

        sock_set_flag(sk, SOCK_ZAPPED);

        if (sock) {
                sk->sk_type = sock->type;
                sk->sk_sleep = &sock->wait;
                sock->sk = sk;
        } else
                sk->sk_sleep = NULL;

        rwlock_init(&sk->sk_dst_lock);
        rwlock_init(&sk->sk_callback_lock);
        lockdep_set_class_and_name(&sk->sk_callback_lock,
                                   af_callback_keys + sk->sk_family,
                                   af_family_clock_key_strings[sk->sk_family]);

        sk->sk_state_change = sock_def_wakeup;
        sk->sk_data_ready = sock_def_readable;
        sk->sk_write_space = sock_def_write_space;
        sk->sk_error_report = sock_def_error_report;
        sk->sk_destruct = sock_def_destruct;

        sk->sk_sndmsg_page = NULL;
        sk->sk_sndmsg_off = 0;

        sk->sk_peercred.pid = 0;
        sk->sk_peercred.uid = -1;
        sk->sk_peercred.gid = -1;
        sk->sk_write_pending = 0;
        sk->sk_rcvlowat = 1;
        sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
        sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;

        sk->sk_stamp = ktime_set(-1L, 0);

        /*
         * Before updating sk_refcnt, we must commit prior changes to memory
         * (Documentation/RCU/rculist_nulls.txt for details)
         */
        smp_wmb();
        atomic_set(&sk->sk_refcnt, 1);
        atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
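
/*
 * Typical call sequence, sketched with a hypothetical protocol
 * (PF_EXAMPLE, example_proto and example_destruct are illustrative
 * names): a protocol's create routine allocates the sock, installs
 * the defaults above, then overrides only what it needs:
 *
 *      sk = sk_alloc(net, PF_EXAMPLE, GFP_KERNEL, &example_proto);
 *      if (sk == NULL)
 *              return -ENOMEM;
 *      sock_init_data(sock, sk);
 *      sk->sk_destruct = example_destruct;
 *
 * Everything left untouched (wakeup callbacks, buffer sizes,
 * timeouts) keeps the generic behaviour initialised here.
 */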

void lock_sock_nested(struct sock *sk, int subclass)
{
        might_sleep();
        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_lock.owned)
                __lock_sock(sk);
        sk->sk_lock.owned = 1;
        spin_unlock(&sk->sk_lock.slock);
        /*
         * The sk_lock has mutex_lock() semantics here:
         */
        mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
        local_bh_enable();
}
EXPORT_SYMBOL(lock_sock_nested);

void release_sock(struct sock *sk)
{
        /*
         * The sk_lock has mutex_unlock() semantics:
         */
        mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);

        spin_lock_bh(&sk->sk_lock.slock);
        if (sk->sk_backlog.tail)
                __release_sock(sk);
        sk->sk_lock.owned = 0;
        if (waitqueue_active(&sk->sk_lock.wq))
                wake_up(&sk->sk_lock.wq);
        spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(release_sock);
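
/*
 * The canonical pairing, as used throughout the protocol code
 * (sketch):
 *
 *      lock_sock(sk);
 *      ... modify socket state; softirq input is queued to the
 *          backlog while the lock is owned ...
 *      release_sock(sk);
 *
 * release_sock() first processes any packets queued on the backlog
 * via __release_sock(), so the next lock holder sees up-to-date
 * protocol state.
 */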

int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
{
        struct timeval tv;

        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        tv = ktime_to_timeval(sk->sk_stamp);
        if (tv.tv_sec == -1)
                return -ENOENT;
        if (tv.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                tv = ktime_to_timeval(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestamp);

int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
{
        struct timespec ts;

        if (!sock_flag(sk, SOCK_TIMESTAMP))
                sock_enable_timestamp(sk, SOCK_TIMESTAMP);
        ts = ktime_to_timespec(sk->sk_stamp);
        if (ts.tv_sec == -1)
                return -ENOENT;
        if (ts.tv_sec == 0) {
                sk->sk_stamp = ktime_get_real();
                ts = ktime_to_timespec(sk->sk_stamp);
        }
        return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
}
EXPORT_SYMBOL(sock_get_timestampns);
void sock_enable_timestamp(struct sock *sk, int flag)
{
        if (!sock_flag(sk, flag)) {
                sock_set_flag(sk, flag);
                /*
                 * We just set one of the two flags which require net
                 * time stamping, but time stamping might already have
                 * been enabled because of the other one.
                 */
                if (!sock_flag(sk,
                               flag == SOCK_TIMESTAMP ?
                               SOCK_TIMESTAMPING_RX_SOFTWARE :
                               SOCK_TIMESTAMP))
                        net_enable_timestamp();
        }
}

/*
 *      Get a socket option on a socket.
 *
 *      FIX: POSIX 1003.1g is very ambiguous here. It states that
 *      asynchronous errors should be reported by getsockopt. We assume
 *      this means if you specify SO_ERROR (otherwise what's the point of it).
 */
int sock_common_getsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_getsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_getsockopt != NULL)
                return sk->sk_prot->compat_getsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_getsockopt);
#endif

int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
                        struct msghdr *msg, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        int addr_len = 0;
        int err;

        err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
                                   flags & ~MSG_DONTWAIT, &addr_len);
        if (err >= 0)
                msg->msg_namelen = addr_len;
        return err;
}
EXPORT_SYMBOL(sock_common_recvmsg);

/*
 *      Set socket options on an inet socket.
 */
int sock_common_setsockopt(struct socket *sock, int level, int optname,
                           char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(sock_common_setsockopt);

#ifdef CONFIG_COMPAT
int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
                                  char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;

        if (sk->sk_prot->compat_setsockopt != NULL)
                return sk->sk_prot->compat_setsockopt(sk, level, optname,
                                                      optval, optlen);
        return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
}
EXPORT_SYMBOL(compat_sock_common_setsockopt);
#endif

void sk_common_release(struct sock *sk)
{
        if (sk->sk_prot->destroy)
                sk->sk_prot->destroy(sk);

        /*
         * Observation: when sk_common_release() is called, processes no
         * longer have access to the socket, but the network stack still
         * does. Step one, detach it from networking:
         *
         * A. Remove it from the hash tables.
         */

        sk->sk_prot->unhash(sk);

        /*
         * At this point the socket cannot receive new packets, but some
         * packets may still be in flight, because another CPU's receiver
         * may have done its hash-table lookup before we unhashed the
         * socket. Those packets will reach the receive queue and be
         * purged by the socket destructor.
         *
         * We may also still have packets pending on the receive queue,
         * and probably our own packets waiting in device queues. The
         * destructor will drain the receive queue, but transmitted
         * packets delay socket destruction until the last reference is
         * released.
         */

        sock_orphan(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);
        sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
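
/*
 * Sketch of a typical caller: a simple datagram protocol's close
 * routine can delegate everything to the helper above (example_close
 * is a hypothetical name; UDP's udp_lib_close() follows this exact
 * pattern):
 *
 *      static void example_close(struct sock *sk, long timeout)
 *      {
 *              sk_common_release(sk);
 *      }
 */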

static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);

#ifdef CONFIG_PROC_FS
#define PROTO_INUSE_NR  64      /* should be enough for the first time */
struct prot_inuse {
        int val[PROTO_INUSE_NR];
};

static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);

#ifdef CONFIG_NET_NS
void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        int cpu = smp_processor_id();

        per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);

static int __net_init sock_inuse_init_net(struct net *net)
{
        net->core.inuse = alloc_percpu(struct prot_inuse);
        return net->core.inuse ? 0 : -ENOMEM;
}

static void __net_exit sock_inuse_exit_net(struct net *net)
{
        free_percpu(net->core.inuse);
}

static struct pernet_operations net_inuse_ops = {
        .init = sock_inuse_init_net,
        .exit = sock_inuse_exit_net,
};

static __init int net_inuse_init(void)
{
        if (register_pernet_subsys(&net_inuse_ops))
                panic("Cannot initialize net inuse counters");

        return 0;
}

core_initcall(net_inuse_init);
#else
static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);

void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
{
        __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_add);

int sock_prot_inuse_get(struct net *net, struct proto *prot)
{
        int cpu, idx = prot->inuse_idx;
        int res = 0;

        for_each_possible_cpu(cpu)
                res += per_cpu(prot_inuse, cpu).val[idx];

        return res >= 0 ? res : 0;
}
EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
#endif

static void assign_proto_idx(struct proto *prot)
{
        prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);

        if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
                printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
                return;
        }

        set_bit(prot->inuse_idx, proto_inuse_idx);
}

static void release_proto_idx(struct proto *prot)
{
        if (prot->inuse_idx != PROTO_INUSE_NR - 1)
                clear_bit(prot->inuse_idx, proto_inuse_idx);
}
#else
static inline void assign_proto_idx(struct proto *prot)
{
}

static inline void release_proto_idx(struct proto *prot)
{
}
#endif
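
/*
 * Usage sketch for the counters above: protocols adjust them as
 * sockets enter and leave their lookup tables, typically from their
 * hash/unhash implementations:
 *
 *      sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);      (hash)
 *      sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);     (unhash)
 *
 * sock_prot_inuse_get() then sums the per-cpu deltas for the
 * "sockets" column of /proc/net/protocols; the result is clamped to
 * zero because the unsynchronised per-cpu sums can transiently appear
 * negative.
 */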

int proto_register(struct proto *prot, int alloc_slab)
{
        if (alloc_slab) {
                prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
                                               SLAB_HWCACHE_ALIGN | prot->slab_flags,
                                               NULL);

                if (prot->slab == NULL) {
                        printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
                               prot->name);
                        goto out;
                }

                if (prot->rsk_prot != NULL) {
                        prot->rsk_prot->slab_name = kasprintf(GFP_KERNEL, "request_sock_%s", prot->name);
                        if (prot->rsk_prot->slab_name == NULL)
                                goto out_free_sock_slab;

                        prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
                                                                 prot->rsk_prot->obj_size, 0,
                                                                 SLAB_HWCACHE_ALIGN, NULL);

                        if (prot->rsk_prot->slab == NULL) {
                                printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
                                       prot->name);
                                goto out_free_request_sock_slab_name;
                        }
                }

                if (prot->twsk_prot != NULL) {
                        prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);

                        if (prot->twsk_prot->twsk_slab_name == NULL)
                                goto out_free_request_sock_slab;

                        prot->twsk_prot->twsk_slab =
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
                                                  SLAB_HWCACHE_ALIGN |
                                                        prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
                                goto out_free_timewait_sock_slab_name;
                }
        }

        write_lock(&proto_list_lock);
        list_add(&prot->node, &proto_list);
        assign_proto_idx(prot);
        write_unlock(&proto_list_lock);
        return 0;

out_free_timewait_sock_slab_name:
        kfree(prot->twsk_prot->twsk_slab_name);
out_free_request_sock_slab:
        if (prot->rsk_prot && prot->rsk_prot->slab) {
                kmem_cache_destroy(prot->rsk_prot->slab);
                prot->rsk_prot->slab = NULL;
        }
out_free_request_sock_slab_name:
        if (prot->rsk_prot)
                kfree(prot->rsk_prot->slab_name);
out_free_sock_slab:
        kmem_cache_destroy(prot->slab);
        prot->slab = NULL;
out:
        return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
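
/*
 * Minimal registration sketch (EXAMPLE, example_proto and
 * example_sock are hypothetical names):
 *
 *      static struct proto example_proto = {
 *              .name     = "EXAMPLE",
 *              .owner    = THIS_MODULE,
 *              .obj_size = sizeof(struct example_sock),
 *      };
 *
 *      err = proto_register(&example_proto, 1);
 *
 * With alloc_slab == 1 sockets of this protocol are carved from a
 * dedicated kmem cache; with 0 they fall back to plain kmalloc-backed
 * allocation. proto_unregister() below undoes both the listing and
 * the caches.
 */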

void proto_unregister(struct proto *prot)
{
        write_lock(&proto_list_lock);
        release_proto_idx(prot);
        list_del(&prot->node);
        write_unlock(&proto_list_lock);

        if (prot->slab != NULL) {
                kmem_cache_destroy(prot->slab);
                prot->slab = NULL;
        }

        if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
                kmem_cache_destroy(prot->rsk_prot->slab);
                kfree(prot->rsk_prot->slab_name);
                prot->rsk_prot->slab = NULL;
        }

        if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
                kmem_cache_destroy(prot->twsk_prot->twsk_slab);
                kfree(prot->twsk_prot->twsk_slab_name);
                prot->twsk_prot->twsk_slab = NULL;
        }
}
EXPORT_SYMBOL(proto_unregister);

#ifdef CONFIG_PROC_FS
static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(proto_list_lock)
{
        read_lock(&proto_list_lock);
        return seq_list_start_head(&proto_list, *pos);
}

static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        return seq_list_next(v, &proto_list, pos);
}

static void proto_seq_stop(struct seq_file *seq, void *v)
        __releases(proto_list_lock)
{
        read_unlock(&proto_list_lock);
}

static char proto_method_implemented(const void *method)
{
        return method == NULL ? 'n' : 'y';
}

static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
{
        seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
                        "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
                   proto->name,
                   proto->obj_size,
                   sock_prot_inuse_get(seq_file_net(seq), proto),
                   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
                   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
                   proto->max_header,
                   proto->slab == NULL ? "no" : "yes",
                   module_name(proto->owner),
                   proto_method_implemented(proto->close),
                   proto_method_implemented(proto->connect),
                   proto_method_implemented(proto->disconnect),
                   proto_method_implemented(proto->accept),
                   proto_method_implemented(proto->ioctl),
                   proto_method_implemented(proto->init),
                   proto_method_implemented(proto->destroy),
                   proto_method_implemented(proto->shutdown),
                   proto_method_implemented(proto->setsockopt),
                   proto_method_implemented(proto->getsockopt),
                   proto_method_implemented(proto->sendmsg),
                   proto_method_implemented(proto->recvmsg),
                   proto_method_implemented(proto->sendpage),
                   proto_method_implemented(proto->bind),
                   proto_method_implemented(proto->backlog_rcv),
                   proto_method_implemented(proto->hash),
                   proto_method_implemented(proto->unhash),
                   proto_method_implemented(proto->get_port),
                   proto_method_implemented(proto->enter_memory_pressure));
}

static int proto_seq_show(struct seq_file *seq, void *v)
{
        if (v == &proto_list)
                seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
                           "protocol",
                           "size",
                           "sockets",
                           "memory",
                           "press",
                           "maxhdr",
                           "slab",
                           "module",
                           "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
        else
                proto_seq_printf(seq, list_entry(v, struct proto, node));
        return 0;
}
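
/*
 * For reference, the abbreviated header printed above maps one column
 * per method, in the order used by proto_seq_printf():
 *
 *      cl close        co connect      di disconnect   ac accept
 *      io ioctl        in init         de destroy      sh shutdown
 *      ss setsockopt   gs getsockopt   se sendmsg      re recvmsg
 *      sp sendpage     bi bind         br backlog_rcv  ha hash
 *      uh unhash       gp get_port     em enter_memory_pressure
 *
 * Each cell shows 'y' or 'n' depending on whether the protocol
 * implements the method.
 */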

static const struct seq_operations proto_seq_ops = {
        .start  = proto_seq_start,
        .next   = proto_seq_next,
        .stop   = proto_seq_stop,
        .show   = proto_seq_show,
};

static int proto_seq_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &proto_seq_ops,
                            sizeof(struct seq_net_private));
}

static const struct file_operations proto_seq_fops = {
        .owner          = THIS_MODULE,
        .open           = proto_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release_net,
};

static __net_init int proto_init_net(struct net *net)
{
        if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
                return -ENOMEM;

        return 0;
}

static __net_exit void proto_exit_net(struct net *net)
{
        proc_net_remove(net, "protocols");
}

static __net_initdata struct pernet_operations proto_net_ops = {
        .init = proto_init_net,
        .exit = proto_exit_net,
};

static int __init proto_init(void)
{
        return register_pernet_subsys(&proto_net_ops);
}

subsys_initcall(proto_init);

#endif /* PROC_FS */