1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
3 | * operating system. INET is implemented using the BSD Socket |
4 | * interface as the means of communication with the user level. |
5 | * |
6 | * PF_INET protocol family socket handler. |
7 | * |
8 | * Authors: Ross Biro |
9 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
10 | * Florian La Roche, <flla@stud.uni-sb.de> |
11 | * Alan Cox, <A.Cox@swansea.ac.uk> |
12 | * |
13 | * Changes (see also sock.c) |
14 | * |
15 | * piggy, |
16 | * Karl Knutson : Socket protocol table |
17 | * A.N.Kuznetsov : Socket death error in accept(). |
18 | * John Richardson : Fix non blocking error in connect() |
19 | * so sockets that fail to connect |
20 | * don't return -EINPROGRESS. |
21 | * Alan Cox : Asynchronous I/O support |
22 | * Alan Cox : Keep correct socket pointer on sock |
23 | * structures |
24 | * when accept() ed |
25 | * Alan Cox : Semantics of SO_LINGER aren't state |
26 | * moved to close when you look carefully. |
27 | * With this fixed and the accept bug fixed |
28 | * some RPC stuff seems happier. |
29 | * Niibe Yutaka : 4.4BSD style write async I/O |
30 | * Alan Cox, |
31 | * Tony Gale : Fixed reuse semantics. |
32 | * Alan Cox : bind() shouldn't abort existing but dead |
33 | * sockets. Stops FTP netin:.. I hope. |
34 | * Alan Cox : bind() works correctly for RAW sockets. |
35 | * Note that FreeBSD at least was broken |
36 | * in this respect so be careful with |
37 | * compatibility tests... |
38 | * Alan Cox : routing cache support |
39 | * Alan Cox : memzero the socket structure for |
40 | * compactness. |
41 | * Matt Day : nonblock connect error handler |
42 | * Alan Cox : Allow large numbers of pending sockets |
43 | * (eg for big web sites), but only if |
44 | * specifically application requested. |
45 | * Alan Cox : New buffering throughout IP. Used |
46 | * dumbly. |
47 | * Alan Cox : New buffering now used smartly. |
48 | * Alan Cox : BSD rather than common sense |
49 | * interpretation of listen. |
50 | * Germano Caronni : Assorted small races. |
51 | * Alan Cox : sendmsg/recvmsg basic support. |
52 | * Alan Cox : Only sendmsg/recvmsg now supported. |
53 | * Alan Cox : Locked down bind (see security list). |
54 | * Alan Cox : Loosened bind a little. |
55 | * Mike McLagan : ADD/DEL DLCI Ioctls |
56 | * Willy Konynenberg : Transparent proxying support. |
57 | * David S. Miller : New socket lookup architecture. |
58 | * Some other random speedups. |
59 | * Cyrus Durgin : Cleaned up file for kmod hacks. |
60 | * Andi Kleen : Fix inet_stream_connect TCP race. |
61 | * |
62 | * This program is free software; you can redistribute it and/or |
63 | * modify it under the terms of the GNU General Public License |
64 | * as published by the Free Software Foundation; either version |
65 | * 2 of the License, or (at your option) any later version. |
66 | */ |
67 | |
68 | #define pr_fmt(fmt) "IPv4: " fmt |
69 | |
70 | #include <linux/err.h> |
71 | #include <linux/errno.h> |
72 | #include <linux/types.h> |
73 | #include <linux/socket.h> |
74 | #include <linux/in.h> |
75 | #include <linux/kernel.h> |
76 | #include <linux/module.h> |
77 | #include <linux/sched.h> |
78 | #include <linux/timer.h> |
79 | #include <linux/string.h> |
80 | #include <linux/sockios.h> |
81 | #include <linux/net.h> |
82 | #include <linux/capability.h> |
83 | #include <linux/fcntl.h> |
84 | #include <linux/mm.h> |
85 | #include <linux/interrupt.h> |
86 | #include <linux/stat.h> |
87 | #include <linux/init.h> |
88 | #include <linux/poll.h> |
89 | #include <linux/netfilter_ipv4.h> |
90 | #include <linux/random.h> |
91 | #include <linux/slab.h> |
92 | |
93 | #include <asm/uaccess.h> |
94 | |
95 | #include <linux/inet.h> |
96 | #include <linux/igmp.h> |
97 | #include <linux/inetdevice.h> |
98 | #include <linux/netdevice.h> |
99 | #include <net/checksum.h> |
100 | #include <net/ip.h> |
101 | #include <net/protocol.h> |
102 | #include <net/arp.h> |
103 | #include <net/route.h> |
104 | #include <net/ip_fib.h> |
105 | #include <net/inet_connection_sock.h> |
106 | #include <net/tcp.h> |
107 | #include <net/udp.h> |
108 | #include <net/udplite.h> |
109 | #include <net/ping.h> |
110 | #include <linux/skbuff.h> |
111 | #include <net/sock.h> |
112 | #include <net/raw.h> |
113 | #include <net/icmp.h> |
114 | #include <net/ipip.h> |
115 | #include <net/inet_common.h> |
116 | #include <net/xfrm.h> |
117 | #include <net/net_namespace.h> |
118 | #ifdef CONFIG_IP_MROUTE |
119 | #include <linux/mroute.h> |
120 | #endif |
121 | |
122 | |
123 | /* The inetsw table contains everything that inet_create needs to |
124 | * build a new socket. |
125 | */ |
126 | static struct list_head inetsw[SOCK_MAX]; |
127 | static DEFINE_SPINLOCK(inetsw_lock); |
128 | |
129 | struct ipv4_config ipv4_config; |
130 | EXPORT_SYMBOL(ipv4_config); |
131 | |
132 | /* New destruction routine */ |
133 | |
134 | void inet_sock_destruct(struct sock *sk) |
135 | { |
136 | struct inet_sock *inet = inet_sk(sk); |
137 | |
138 | __skb_queue_purge(&sk->sk_receive_queue); |
139 | __skb_queue_purge(&sk->sk_error_queue); |
140 | |
141 | sk_mem_reclaim(sk); |
142 | |
143 | if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) { |
144 | pr_err("Attempt to release TCP socket in state %d %p\n", |
145 | sk->sk_state, sk); |
146 | return; |
147 | } |
148 | if (!sock_flag(sk, SOCK_DEAD)) { |
149 | pr_err("Attempt to release alive inet socket %p\n", sk); |
150 | return; |
151 | } |
152 | |
153 | WARN_ON(atomic_read(&sk->sk_rmem_alloc)); |
154 | WARN_ON(atomic_read(&sk->sk_wmem_alloc)); |
155 | WARN_ON(sk->sk_wmem_queued); |
156 | WARN_ON(sk->sk_forward_alloc); |
157 | |
158 | kfree(rcu_dereference_protected(inet->inet_opt, 1)); |
159 | dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); |
160 | dst_release(sk->sk_rx_dst); |
161 | sk_refcnt_debug_dec(sk); |
162 | } |
163 | EXPORT_SYMBOL(inet_sock_destruct); |
164 | |
165 | /* |
166 | * The routines beyond this point handle the behaviour of an AF_INET |
167 | * socket object. Mostly it punts to the subprotocols of IP to do |
168 | * the work. |
169 | */ |
170 | |
171 | /* |
172 | * Automatically bind an unbound socket. |
173 | */ |
174 | |
175 | static int inet_autobind(struct sock *sk) |
176 | { |
177 | struct inet_sock *inet; |
178 | /* We may need to bind the socket. */ |
179 | lock_sock(sk); |
180 | inet = inet_sk(sk); |
181 | if (!inet->inet_num) { |
182 | if (sk->sk_prot->get_port(sk, 0)) { |
183 | release_sock(sk); |
184 | return -EAGAIN; |
185 | } |
186 | inet->inet_sport = htons(inet->inet_num); |
187 | } |
188 | release_sock(sk); |
189 | return 0; |
190 | } |
191 | |
192 | /* |
193 | * Move a socket into listening state. |
194 | */ |
195 | int inet_listen(struct socket *sock, int backlog) |
196 | { |
197 | struct sock *sk = sock->sk; |
198 | unsigned char old_state; |
199 | int err; |
200 | |
201 | lock_sock(sk); |
202 | |
203 | err = -EINVAL; |
204 | if (sock->state != SS_UNCONNECTED || sock->type != SOCK_STREAM) |
205 | goto out; |
206 | |
207 | old_state = sk->sk_state; |
208 | if (!((1 << old_state) & (TCPF_CLOSE | TCPF_LISTEN))) |
209 | goto out; |
210 | |
211 | /* Really, if the socket is already in listen state |
212 | * we can only allow the backlog to be adjusted. |
213 | */ |
214 | if (old_state != TCP_LISTEN) { |
215 | /* Check special setups for testing purpose to enable TFO w/o |
216 | * requiring TCP_FASTOPEN sockopt. |
217 | * Note that only TCP sockets (SOCK_STREAM) will reach here. |
218 | * Also fastopenq may already been allocated because this |
219 | * socket was in TCP_LISTEN state previously but was |
220 | * shutdown() (rather than close()). |
221 | */ |
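		/* A note on the two test-only modes checked below:
		 * TFO_SERVER_WO_SOCKOPT1 reuses the listen() backlog for the
		 * fastopen queue, while TFO_SERVER_WO_SOCKOPT2 takes the queue
		 * length from the upper 16 bits of the tcp_fastopen sysctl.
		 */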
222 | if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0 && |
223 | inet_csk(sk)->icsk_accept_queue.fastopenq == NULL) { |
224 | if ((sysctl_tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) != 0) |
225 | err = fastopen_init_queue(sk, backlog); |
226 | else if ((sysctl_tcp_fastopen & |
227 | TFO_SERVER_WO_SOCKOPT2) != 0) |
228 | err = fastopen_init_queue(sk, |
229 | ((uint)sysctl_tcp_fastopen) >> 16); |
230 | else |
231 | err = 0; |
232 | if (err) |
233 | goto out; |
234 | } |
235 | err = inet_csk_listen_start(sk, backlog); |
236 | if (err) |
237 | goto out; |
238 | } |
239 | sk->sk_max_ack_backlog = backlog; |
240 | err = 0; |
241 | |
242 | out: |
243 | release_sock(sk); |
244 | return err; |
245 | } |
246 | EXPORT_SYMBOL(inet_listen); |
247 | |
248 | u32 inet_ehash_secret __read_mostly; |
249 | EXPORT_SYMBOL(inet_ehash_secret); |
250 | |
251 | u32 ipv6_hash_secret __read_mostly; |
252 | EXPORT_SYMBOL(ipv6_hash_secret); |
253 | |
254 | /* |
255 |  * inet_ehash_secret must be set exactly once, to a non-zero value; |
256 |  * ipv6_hash_secret must likewise be set exactly once. |
257 | */ |
258 | void build_ehash_secret(void) |
259 | { |
260 | u32 rnd; |
261 | |
262 | do { |
263 | get_random_bytes(&rnd, sizeof(rnd)); |
264 | } while (rnd == 0); |
265 | |
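	/* Only the caller that wins the cmpxchg() race publishes
	 * inet_ehash_secret; that same winner also seeds ipv6_hash_secret,
	 * so both secrets are written exactly once.
	 */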
266 | if (cmpxchg(&inet_ehash_secret, 0, rnd) == 0) |
267 | get_random_bytes(&ipv6_hash_secret, sizeof(ipv6_hash_secret)); |
268 | } |
269 | EXPORT_SYMBOL(build_ehash_secret); |
270 | |
271 | /* |
272 | * Create an inet socket. |
273 | */ |
274 | |
275 | static int inet_create(struct net *net, struct socket *sock, int protocol, |
276 | int kern) |
277 | { |
278 | struct sock *sk; |
279 | struct inet_protosw *answer; |
280 | struct inet_sock *inet; |
281 | struct proto *answer_prot; |
282 | unsigned char answer_flags; |
283 | char answer_no_check; |
284 | int try_loading_module = 0; |
285 | int err; |
286 | |
287 | if (unlikely(!inet_ehash_secret)) |
288 | if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) |
289 | build_ehash_secret(); |
290 | |
291 | sock->state = SS_UNCONNECTED; |
292 | |
293 | /* Look for the requested type/protocol pair. */ |
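	/* e.g. socket(AF_INET, SOCK_STREAM, 0) arrives here with the wild-card
	 * protocol IPPROTO_IP (0) and matches the first SOCK_STREAM entry
	 * (TCP) via the wild-card rules in the loop below.
	 */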
294 | lookup_protocol: |
295 | err = -ESOCKTNOSUPPORT; |
296 | rcu_read_lock(); |
297 | list_for_each_entry_rcu(answer, &inetsw[sock->type], list) { |
298 | |
299 | err = 0; |
300 | /* Check the non-wild match. */ |
301 | if (protocol == answer->protocol) { |
302 | if (protocol != IPPROTO_IP) |
303 | break; |
304 | } else { |
305 | /* Check for the two wild cases. */ |
306 | if (IPPROTO_IP == protocol) { |
307 | protocol = answer->protocol; |
308 | break; |
309 | } |
310 | if (IPPROTO_IP == answer->protocol) |
311 | break; |
312 | } |
313 | err = -EPROTONOSUPPORT; |
314 | } |
315 | |
316 | if (unlikely(err)) { |
317 | if (try_loading_module < 2) { |
318 | rcu_read_unlock(); |
319 | /* |
320 | * Be more specific, e.g. net-pf-2-proto-132-type-1 |
321 | * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM) |
322 | */ |
323 | if (++try_loading_module == 1) |
324 | request_module("net-pf-%d-proto-%d-type-%d", |
325 | PF_INET, protocol, sock->type); |
326 | /* |
327 | * Fall back to generic, e.g. net-pf-2-proto-132 |
328 | * (net-pf-PF_INET-proto-IPPROTO_SCTP) |
329 | */ |
330 | else |
331 | request_module("net-pf-%d-proto-%d", |
332 | PF_INET, protocol); |
333 | goto lookup_protocol; |
334 | } else |
335 | goto out_rcu_unlock; |
336 | } |
337 | |
338 | err = -EPERM; |
339 | if (sock->type == SOCK_RAW && !kern && |
340 | !ns_capable(net->user_ns, CAP_NET_RAW)) |
341 | goto out_rcu_unlock; |
342 | |
343 | sock->ops = answer->ops; |
344 | answer_prot = answer->prot; |
345 | answer_no_check = answer->no_check; |
346 | answer_flags = answer->flags; |
347 | rcu_read_unlock(); |
348 | |
349 | WARN_ON(answer_prot->slab == NULL); |
350 | |
351 | err = -ENOBUFS; |
352 | sk = sk_alloc(net, PF_INET, GFP_KERNEL, answer_prot); |
353 | if (sk == NULL) |
354 | goto out; |
355 | |
356 | err = 0; |
357 | sk->sk_no_check = answer_no_check; |
358 | if (INET_PROTOSW_REUSE & answer_flags) |
359 | sk->sk_reuse = SK_CAN_REUSE; |
360 | |
361 | inet = inet_sk(sk); |
362 | inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; |
363 | |
364 | inet->nodefrag = 0; |
365 | |
366 | if (SOCK_RAW == sock->type) { |
367 | inet->inet_num = protocol; |
368 | if (IPPROTO_RAW == protocol) |
369 | inet->hdrincl = 1; |
370 | } |
371 | |
372 | if (ipv4_config.no_pmtu_disc) |
373 | inet->pmtudisc = IP_PMTUDISC_DONT; |
374 | else |
375 | inet->pmtudisc = IP_PMTUDISC_WANT; |
376 | |
377 | inet->inet_id = 0; |
378 | |
379 | sock_init_data(sock, sk); |
380 | |
381 | sk->sk_destruct = inet_sock_destruct; |
382 | sk->sk_protocol = protocol; |
383 | sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; |
384 | |
385 | inet->uc_ttl = -1; |
386 | inet->mc_loop = 1; |
387 | inet->mc_ttl = 1; |
388 | inet->mc_all = 1; |
389 | inet->mc_index = 0; |
390 | inet->mc_list = NULL; |
391 | inet->rcv_tos = 0; |
392 | |
393 | sk_refcnt_debug_inc(sk); |
394 | |
395 | if (inet->inet_num) { |
396 | /* It assumes that any protocol which allows |
397 | * the user to assign a number at socket |
398 | * creation time automatically |
399 | * shares. |
400 | */ |
401 | inet->inet_sport = htons(inet->inet_num); |
402 | /* Add to protocol hash chains. */ |
403 | sk->sk_prot->hash(sk); |
404 | } |
405 | |
406 | if (sk->sk_prot->init) { |
407 | err = sk->sk_prot->init(sk); |
408 | if (err) |
409 | sk_common_release(sk); |
410 | } |
411 | out: |
412 | return err; |
413 | out_rcu_unlock: |
414 | rcu_read_unlock(); |
415 | goto out; |
416 | } |
417 | |
418 | |
419 | /* |
420 | * The peer socket should always be NULL (or else). When we call this |
421 | * function we are destroying the object and from then on nobody |
422 | * should refer to it. |
423 | */ |
424 | int inet_release(struct socket *sock) |
425 | { |
426 | struct sock *sk = sock->sk; |
427 | |
428 | if (sk) { |
429 | long timeout; |
430 | |
431 | sock_rps_reset_flow(sk); |
432 | |
433 | /* Applications forget to leave groups before exiting */ |
434 | ip_mc_drop_socket(sk); |
435 | |
436 | /* If linger is set, we don't return until the close |
437 | * is complete. Otherwise we return immediately. The |
438 | * actually closing is done the same either way. |
439 | * |
440 | * If the close is due to the process exiting, we never |
441 | * linger.. |
442 | */ |
443 | timeout = 0; |
444 | if (sock_flag(sk, SOCK_LINGER) && |
445 | !(current->flags & PF_EXITING)) |
446 | timeout = sk->sk_lingertime; |
447 | sock->sk = NULL; |
448 | sk->sk_prot->close(sk, timeout); |
449 | } |
450 | return 0; |
451 | } |
452 | EXPORT_SYMBOL(inet_release); |
453 | |
454 | /* It is off by default, see below. */ |
455 | int sysctl_ip_nonlocal_bind __read_mostly; |
456 | EXPORT_SYMBOL(sysctl_ip_nonlocal_bind); |
457 | |
458 | int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
459 | { |
460 | struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; |
461 | struct sock *sk = sock->sk; |
462 | struct inet_sock *inet = inet_sk(sk); |
463 | struct net *net = sock_net(sk); |
464 | unsigned short snum; |
465 | int chk_addr_ret; |
466 | int err; |
467 | |
468 | /* If the socket has its own bind function then use it. (RAW) */ |
469 | if (sk->sk_prot->bind) { |
470 | err = sk->sk_prot->bind(sk, uaddr, addr_len); |
471 | goto out; |
472 | } |
473 | err = -EINVAL; |
474 | if (addr_len < sizeof(struct sockaddr_in)) |
475 | goto out; |
476 | |
477 | if (addr->sin_family != AF_INET) { |
478 | /* Compatibility games : accept AF_UNSPEC (mapped to AF_INET) |
479 | * only if s_addr is INADDR_ANY. |
480 | */ |
481 | err = -EAFNOSUPPORT; |
482 | if (addr->sin_family != AF_UNSPEC || |
483 | addr->sin_addr.s_addr != htonl(INADDR_ANY)) |
484 | goto out; |
485 | } |
486 | |
487 | chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr); |
488 | |
489 | /* Not specified by any standard per-se, however it breaks too |
490 | * many applications when removed. It is unfortunate since |
491 | * allowing applications to make a non-local bind solves |
492 | * several problems with systems using dynamic addressing. |
493 | * (ie. your servers still start up even if your ISDN link |
494 | * is temporarily down) |
495 | */ |
496 | err = -EADDRNOTAVAIL; |
497 | if (!sysctl_ip_nonlocal_bind && |
498 | !(inet->freebind || inet->transparent) && |
499 | addr->sin_addr.s_addr != htonl(INADDR_ANY) && |
500 | chk_addr_ret != RTN_LOCAL && |
501 | chk_addr_ret != RTN_MULTICAST && |
502 | chk_addr_ret != RTN_BROADCAST) |
503 | goto out; |
504 | |
505 | snum = ntohs(addr->sin_port); |
506 | err = -EACCES; |
507 | if (snum && snum < PROT_SOCK && |
508 | !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) |
509 | goto out; |
510 | |
511 | /* We keep a pair of addresses. rcv_saddr is the one |
512 | * used by hash lookups, and saddr is used for transmit. |
513 | * |
514 | * In the BSD API these are the same except where it |
515 | * would be illegal to use them (multicast/broadcast) in |
516 | * which case the sending device address is used. |
517 | */ |
518 | lock_sock(sk); |
519 | |
520 | /* Check these errors (active socket, double bind). */ |
521 | err = -EINVAL; |
522 | if (sk->sk_state != TCP_CLOSE || inet->inet_num) |
523 | goto out_release_sock; |
524 | |
525 | inet->inet_rcv_saddr = inet->inet_saddr = addr->sin_addr.s_addr; |
526 | if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) |
527 | inet->inet_saddr = 0; /* Use device */ |
528 | |
529 | /* Make sure we are allowed to bind here. */ |
530 | if (sk->sk_prot->get_port(sk, snum)) { |
531 | inet->inet_saddr = inet->inet_rcv_saddr = 0; |
532 | err = -EADDRINUSE; |
533 | goto out_release_sock; |
534 | } |
535 | |
536 | if (inet->inet_rcv_saddr) |
537 | sk->sk_userlocks |= SOCK_BINDADDR_LOCK; |
538 | if (snum) |
539 | sk->sk_userlocks |= SOCK_BINDPORT_LOCK; |
540 | inet->inet_sport = htons(inet->inet_num); |
541 | inet->inet_daddr = 0; |
542 | inet->inet_dport = 0; |
543 | sk_dst_reset(sk); |
544 | err = 0; |
545 | out_release_sock: |
546 | release_sock(sk); |
547 | out: |
548 | return err; |
549 | } |
550 | EXPORT_SYMBOL(inet_bind); |
551 | |
552 | int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr, |
553 | int addr_len, int flags) |
554 | { |
555 | struct sock *sk = sock->sk; |
556 | |
557 | if (addr_len < sizeof(uaddr->sa_family)) |
558 | return -EINVAL; |
559 | if (uaddr->sa_family == AF_UNSPEC) |
560 | return sk->sk_prot->disconnect(sk, flags); |
561 | |
562 | if (!inet_sk(sk)->inet_num && inet_autobind(sk)) |
563 | return -EAGAIN; |
564 | return sk->sk_prot->connect(sk, uaddr, addr_len); |
565 | } |
566 | EXPORT_SYMBOL(inet_dgram_connect); |
567 | |
568 | static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) |
569 | { |
570 | DEFINE_WAIT(wait); |
571 | |
572 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
573 | sk->sk_write_pending += writebias; |
574 | |
575 | /* Basic assumption: if someone sets sk->sk_err, he _must_ |
576 | * change state of the socket from TCP_SYN_*. |
577 | * Connect() does not allow to get error notifications |
578 | * without closing the socket. |
579 | */ |
580 | while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
581 | release_sock(sk); |
582 | timeo = schedule_timeout(timeo); |
583 | lock_sock(sk); |
584 | if (signal_pending(current) || !timeo) |
585 | break; |
586 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); |
587 | } |
588 | finish_wait(sk_sleep(sk), &wait); |
589 | sk->sk_write_pending -= writebias; |
590 | return timeo; |
591 | } |
592 | |
593 | /* |
594 | * Connect to a remote host. There is regrettably still a little |
595 | * TCP 'magic' in here. |
596 | */ |
597 | int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, |
598 | int addr_len, int flags) |
599 | { |
600 | struct sock *sk = sock->sk; |
601 | int err; |
602 | long timeo; |
603 | |
604 | if (addr_len < sizeof(uaddr->sa_family)) |
605 | return -EINVAL; |
606 | |
607 | if (uaddr->sa_family == AF_UNSPEC) { |
608 | err = sk->sk_prot->disconnect(sk, flags); |
609 | sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED; |
610 | goto out; |
611 | } |
612 | |
613 | switch (sock->state) { |
614 | default: |
615 | err = -EINVAL; |
616 | goto out; |
617 | case SS_CONNECTED: |
618 | err = -EISCONN; |
619 | goto out; |
620 | case SS_CONNECTING: |
621 | err = -EALREADY; |
622 | /* Fall out of switch with err, set for this state */ |
623 | break; |
624 | case SS_UNCONNECTED: |
625 | err = -EISCONN; |
626 | if (sk->sk_state != TCP_CLOSE) |
627 | goto out; |
628 | |
629 | err = sk->sk_prot->connect(sk, uaddr, addr_len); |
630 | if (err < 0) |
631 | goto out; |
632 | |
633 | sock->state = SS_CONNECTING; |
634 | |
635 | /* Just entered SS_CONNECTING state; the only |
636 | * difference is that return value in non-blocking |
637 | * case is EINPROGRESS, rather than EALREADY. |
638 | */ |
639 | err = -EINPROGRESS; |
640 | break; |
641 | } |
642 | |
643 | timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); |
644 | |
645 | if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
646 | int writebias = (sk->sk_protocol == IPPROTO_TCP) && |
647 | tcp_sk(sk)->fastopen_req && |
648 | tcp_sk(sk)->fastopen_req->data ? 1 : 0; |
649 | |
650 | /* Error code is set above */ |
651 | if (!timeo || !inet_wait_for_connect(sk, timeo, writebias)) |
652 | goto out; |
653 | |
654 | err = sock_intr_errno(timeo); |
655 | if (signal_pending(current)) |
656 | goto out; |
657 | } |
658 | |
659 | /* Connection was closed by RST, timeout, ICMP error |
660 | * or another process disconnected us. |
661 | */ |
662 | if (sk->sk_state == TCP_CLOSE) |
663 | goto sock_error; |
664 | |
665 | /* sk->sk_err may be not zero now, if RECVERR was ordered by user |
666 | * and error was received after socket entered established state. |
667 | * Hence, it is handled normally after connect() return successfully. |
668 | */ |
669 | |
670 | sock->state = SS_CONNECTED; |
671 | err = 0; |
672 | out: |
673 | return err; |
674 | |
675 | sock_error: |
676 | err = sock_error(sk) ? : -ECONNABORTED; |
677 | sock->state = SS_UNCONNECTED; |
678 | if (sk->sk_prot->disconnect(sk, flags)) |
679 | sock->state = SS_DISCONNECTING; |
680 | goto out; |
681 | } |
682 | EXPORT_SYMBOL(__inet_stream_connect); |
683 | |
684 | int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, |
685 | int addr_len, int flags) |
686 | { |
687 | int err; |
688 | |
689 | lock_sock(sock->sk); |
690 | err = __inet_stream_connect(sock, uaddr, addr_len, flags); |
691 | release_sock(sock->sk); |
692 | return err; |
693 | } |
694 | EXPORT_SYMBOL(inet_stream_connect); |
695 | |
696 | /* |
697 | * Accept a pending connection. The TCP layer now gives BSD semantics. |
698 | */ |
699 | |
700 | int inet_accept(struct socket *sock, struct socket *newsock, int flags) |
701 | { |
702 | struct sock *sk1 = sock->sk; |
703 | int err = -EINVAL; |
704 | struct sock *sk2 = sk1->sk_prot->accept(sk1, flags, &err); |
705 | |
706 | if (!sk2) |
707 | goto do_err; |
708 | |
709 | lock_sock(sk2); |
710 | |
711 | sock_rps_record_flow(sk2); |
712 | WARN_ON(!((1 << sk2->sk_state) & |
713 | (TCPF_ESTABLISHED | TCPF_SYN_RECV | |
714 | TCPF_CLOSE_WAIT | TCPF_CLOSE))); |
715 | |
716 | sock_graft(sk2, newsock); |
717 | |
718 | newsock->state = SS_CONNECTED; |
719 | err = 0; |
720 | release_sock(sk2); |
721 | do_err: |
722 | return err; |
723 | } |
724 | EXPORT_SYMBOL(inet_accept); |
725 | |
726 | |
727 | /* |
728 | * This does both peername and sockname. |
729 | */ |
730 | int inet_getname(struct socket *sock, struct sockaddr *uaddr, |
731 | int *uaddr_len, int peer) |
732 | { |
733 | struct sock *sk = sock->sk; |
734 | struct inet_sock *inet = inet_sk(sk); |
735 | DECLARE_SOCKADDR(struct sockaddr_in *, sin, uaddr); |
736 | |
737 | sin->sin_family = AF_INET; |
738 | if (peer) { |
739 | if (!inet->inet_dport || |
740 | (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)) && |
741 | peer == 1)) |
742 | return -ENOTCONN; |
743 | sin->sin_port = inet->inet_dport; |
744 | sin->sin_addr.s_addr = inet->inet_daddr; |
745 | } else { |
746 | __be32 addr = inet->inet_rcv_saddr; |
747 | if (!addr) |
748 | addr = inet->inet_saddr; |
749 | sin->sin_port = inet->inet_sport; |
750 | sin->sin_addr.s_addr = addr; |
751 | } |
752 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); |
753 | *uaddr_len = sizeof(*sin); |
754 | return 0; |
755 | } |
756 | EXPORT_SYMBOL(inet_getname); |
757 | |
758 | int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, |
759 | size_t size) |
760 | { |
761 | struct sock *sk = sock->sk; |
762 | |
763 | sock_rps_record_flow(sk); |
764 | |
765 | /* We may need to bind the socket. */ |
766 | if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind && |
767 | inet_autobind(sk)) |
768 | return -EAGAIN; |
769 | |
770 | return sk->sk_prot->sendmsg(iocb, sk, msg, size); |
771 | } |
772 | EXPORT_SYMBOL(inet_sendmsg); |
773 | |
774 | ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, |
775 | size_t size, int flags) |
776 | { |
777 | struct sock *sk = sock->sk; |
778 | |
779 | sock_rps_record_flow(sk); |
780 | |
781 | /* We may need to bind the socket. */ |
782 | if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind && |
783 | inet_autobind(sk)) |
784 | return -EAGAIN; |
785 | |
786 | if (sk->sk_prot->sendpage) |
787 | return sk->sk_prot->sendpage(sk, page, offset, size, flags); |
788 | return sock_no_sendpage(sock, page, offset, size, flags); |
789 | } |
790 | EXPORT_SYMBOL(inet_sendpage); |
791 | |
792 | int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, |
793 | size_t size, int flags) |
794 | { |
795 | struct sock *sk = sock->sk; |
796 | int addr_len = 0; |
797 | int err; |
798 | |
799 | sock_rps_record_flow(sk); |
800 | |
801 | err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT, |
802 | flags & ~MSG_DONTWAIT, &addr_len); |
803 | if (err >= 0) |
804 | msg->msg_namelen = addr_len; |
805 | return err; |
806 | } |
807 | EXPORT_SYMBOL(inet_recvmsg); |
808 | |
809 | int inet_shutdown(struct socket *sock, int how) |
810 | { |
811 | struct sock *sk = sock->sk; |
812 | int err = 0; |
813 | |
814 | /* This should really check to make sure |
815 | * the socket is a TCP socket. (WHY AC...) |
816 | */ |
817 | how++; /* maps 0->1 has the advantage of making bit 1 rcvs and |
818 | 1->2 bit 2 snds. |
819 | 2->3 */ |
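	/* i.e. SHUT_RD(0) -> RCV_SHUTDOWN(1), SHUT_WR(1) -> SEND_SHUTDOWN(2),
	 * SHUT_RDWR(2) -> 3 (both bits set).
	 */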
820 | if ((how & ~SHUTDOWN_MASK) || !how) /* MAXINT->0 */ |
821 | return -EINVAL; |
822 | |
823 | lock_sock(sk); |
824 | if (sock->state == SS_CONNECTING) { |
825 | if ((1 << sk->sk_state) & |
826 | (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)) |
827 | sock->state = SS_DISCONNECTING; |
828 | else |
829 | sock->state = SS_CONNECTED; |
830 | } |
831 | |
832 | switch (sk->sk_state) { |
833 | case TCP_CLOSE: |
834 | err = -ENOTCONN; |
835 | /* Hack to wake up other listeners, who can poll for |
836 | POLLHUP, even on eg. unconnected UDP sockets -- RR */ |
837 | default: |
838 | sk->sk_shutdown |= how; |
839 | if (sk->sk_prot->shutdown) |
840 | sk->sk_prot->shutdown(sk, how); |
841 | break; |
842 | |
843 | /* Remaining two branches are temporary solution for missing |
844 | * close() in multithreaded environment. It is _not_ a good idea, |
845 | * but we have no choice until close() is repaired at VFS level. |
846 | */ |
847 | case TCP_LISTEN: |
848 | if (!(how & RCV_SHUTDOWN)) |
849 | break; |
850 | /* Fall through */ |
851 | case TCP_SYN_SENT: |
852 | err = sk->sk_prot->disconnect(sk, O_NONBLOCK); |
853 | sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED; |
854 | break; |
855 | } |
856 | |
857 | /* Wake up anyone sleeping in poll. */ |
858 | sk->sk_state_change(sk); |
859 | release_sock(sk); |
860 | return err; |
861 | } |
862 | EXPORT_SYMBOL(inet_shutdown); |
863 | |
864 | /* |
865 | * ioctl() calls you can issue on an INET socket. Most of these are |
866 | * device configuration and stuff and very rarely used. Some ioctls |
867 | * pass on to the socket itself. |
868 | * |
869 | * NOTE: I like the idea of a module for the config stuff. ie ifconfig |
870 | * loads the devconfigure module does its configuring and unloads it. |
871 | * There's a good 20K of config code hanging around the kernel. |
872 | */ |
873 | |
874 | int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
875 | { |
876 | struct sock *sk = sock->sk; |
877 | int err = 0; |
878 | struct net *net = sock_net(sk); |
879 | |
880 | switch (cmd) { |
881 | case SIOCGSTAMP: |
882 | err = sock_get_timestamp(sk, (struct timeval __user *)arg); |
883 | break; |
884 | case SIOCGSTAMPNS: |
885 | err = sock_get_timestampns(sk, (struct timespec __user *)arg); |
886 | break; |
887 | case SIOCADDRT: |
888 | case SIOCDELRT: |
889 | case SIOCRTMSG: |
890 | err = ip_rt_ioctl(net, cmd, (void __user *)arg); |
891 | break; |
892 | case SIOCDARP: |
893 | case SIOCGARP: |
894 | case SIOCSARP: |
895 | err = arp_ioctl(net, cmd, (void __user *)arg); |
896 | break; |
897 | case SIOCGIFADDR: |
898 | case SIOCSIFADDR: |
899 | case SIOCGIFBRDADDR: |
900 | case SIOCSIFBRDADDR: |
901 | case SIOCGIFNETMASK: |
902 | case SIOCSIFNETMASK: |
903 | case SIOCGIFDSTADDR: |
904 | case SIOCSIFDSTADDR: |
905 | case SIOCSIFPFLAGS: |
906 | case SIOCGIFPFLAGS: |
907 | case SIOCSIFFLAGS: |
908 | err = devinet_ioctl(net, cmd, (void __user *)arg); |
909 | break; |
910 | default: |
911 | if (sk->sk_prot->ioctl) |
912 | err = sk->sk_prot->ioctl(sk, cmd, arg); |
913 | else |
914 | err = -ENOIOCTLCMD; |
915 | break; |
916 | } |
917 | return err; |
918 | } |
919 | EXPORT_SYMBOL(inet_ioctl); |
920 | |
921 | #ifdef CONFIG_COMPAT |
922 | static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) |
923 | { |
924 | struct sock *sk = sock->sk; |
925 | int err = -ENOIOCTLCMD; |
926 | |
927 | if (sk->sk_prot->compat_ioctl) |
928 | err = sk->sk_prot->compat_ioctl(sk, cmd, arg); |
929 | |
930 | return err; |
931 | } |
932 | #endif |
933 | |
934 | const struct proto_ops inet_stream_ops = { |
935 | .family = PF_INET, |
936 | .owner = THIS_MODULE, |
937 | .release = inet_release, |
938 | .bind = inet_bind, |
939 | .connect = inet_stream_connect, |
940 | .socketpair = sock_no_socketpair, |
941 | .accept = inet_accept, |
942 | .getname = inet_getname, |
943 | .poll = tcp_poll, |
944 | .ioctl = inet_ioctl, |
945 | .listen = inet_listen, |
946 | .shutdown = inet_shutdown, |
947 | .setsockopt = sock_common_setsockopt, |
948 | .getsockopt = sock_common_getsockopt, |
949 | .sendmsg = inet_sendmsg, |
950 | .recvmsg = inet_recvmsg, |
951 | .mmap = sock_no_mmap, |
952 | .sendpage = inet_sendpage, |
953 | .splice_read = tcp_splice_read, |
954 | #ifdef CONFIG_COMPAT |
955 | .compat_setsockopt = compat_sock_common_setsockopt, |
956 | .compat_getsockopt = compat_sock_common_getsockopt, |
957 | .compat_ioctl = inet_compat_ioctl, |
958 | #endif |
959 | }; |
960 | EXPORT_SYMBOL(inet_stream_ops); |
961 | |
962 | const struct proto_ops inet_dgram_ops = { |
963 | .family = PF_INET, |
964 | .owner = THIS_MODULE, |
965 | .release = inet_release, |
966 | .bind = inet_bind, |
967 | .connect = inet_dgram_connect, |
968 | .socketpair = sock_no_socketpair, |
969 | .accept = sock_no_accept, |
970 | .getname = inet_getname, |
971 | .poll = udp_poll, |
972 | .ioctl = inet_ioctl, |
973 | .listen = sock_no_listen, |
974 | .shutdown = inet_shutdown, |
975 | .setsockopt = sock_common_setsockopt, |
976 | .getsockopt = sock_common_getsockopt, |
977 | .sendmsg = inet_sendmsg, |
978 | .recvmsg = inet_recvmsg, |
979 | .mmap = sock_no_mmap, |
980 | .sendpage = inet_sendpage, |
981 | #ifdef CONFIG_COMPAT |
982 | .compat_setsockopt = compat_sock_common_setsockopt, |
983 | .compat_getsockopt = compat_sock_common_getsockopt, |
984 | .compat_ioctl = inet_compat_ioctl, |
985 | #endif |
986 | }; |
987 | EXPORT_SYMBOL(inet_dgram_ops); |
988 | |
989 | /* |
990 | * For SOCK_RAW sockets; should be the same as inet_dgram_ops but without |
991 | * udp_poll |
992 | */ |
993 | static const struct proto_ops inet_sockraw_ops = { |
994 | .family = PF_INET, |
995 | .owner = THIS_MODULE, |
996 | .release = inet_release, |
997 | .bind = inet_bind, |
998 | .connect = inet_dgram_connect, |
999 | .socketpair = sock_no_socketpair, |
1000 | .accept = sock_no_accept, |
1001 | .getname = inet_getname, |
1002 | .poll = datagram_poll, |
1003 | .ioctl = inet_ioctl, |
1004 | .listen = sock_no_listen, |
1005 | .shutdown = inet_shutdown, |
1006 | .setsockopt = sock_common_setsockopt, |
1007 | .getsockopt = sock_common_getsockopt, |
1008 | .sendmsg = inet_sendmsg, |
1009 | .recvmsg = inet_recvmsg, |
1010 | .mmap = sock_no_mmap, |
1011 | .sendpage = inet_sendpage, |
1012 | #ifdef CONFIG_COMPAT |
1013 | .compat_setsockopt = compat_sock_common_setsockopt, |
1014 | .compat_getsockopt = compat_sock_common_getsockopt, |
1015 | .compat_ioctl = inet_compat_ioctl, |
1016 | #endif |
1017 | }; |
1018 | |
1019 | static const struct net_proto_family inet_family_ops = { |
1020 | .family = PF_INET, |
1021 | .create = inet_create, |
1022 | .owner = THIS_MODULE, |
1023 | }; |
1024 | |
1025 | /* Upon startup we insert all the elements in inetsw_array[] into |
1026 | * the linked list inetsw. |
1027 | */ |
1028 | static struct inet_protosw inetsw_array[] = |
1029 | { |
1030 | { |
1031 | .type = SOCK_STREAM, |
1032 | .protocol = IPPROTO_TCP, |
1033 | .prot = &tcp_prot, |
1034 | .ops = &inet_stream_ops, |
1035 | .no_check = 0, |
1036 | .flags = INET_PROTOSW_PERMANENT | |
1037 | INET_PROTOSW_ICSK, |
1038 | }, |
1039 | |
1040 | { |
1041 | .type = SOCK_DGRAM, |
1042 | .protocol = IPPROTO_UDP, |
1043 | .prot = &udp_prot, |
1044 | .ops = &inet_dgram_ops, |
1045 | .no_check = UDP_CSUM_DEFAULT, |
1046 | .flags = INET_PROTOSW_PERMANENT, |
1047 | }, |
1048 | |
1049 | { |
1050 | .type = SOCK_DGRAM, |
1051 | .protocol = IPPROTO_ICMP, |
1052 | .prot = &ping_prot, |
1053 | .ops = &inet_dgram_ops, |
1054 | .no_check = UDP_CSUM_DEFAULT, |
1055 | .flags = INET_PROTOSW_REUSE, |
1056 | }, |
1057 | |
1058 | { |
1059 | .type = SOCK_RAW, |
1060 | .protocol = IPPROTO_IP, /* wild card */ |
1061 | .prot = &raw_prot, |
1062 | .ops = &inet_sockraw_ops, |
1063 | .no_check = UDP_CSUM_DEFAULT, |
1064 | .flags = INET_PROTOSW_REUSE, |
1065 | } |
1066 | }; |
1067 | |
1068 | #define INETSW_ARRAY_LEN ARRAY_SIZE(inetsw_array) |
1069 | |
1070 | void inet_register_protosw(struct inet_protosw *p) |
1071 | { |
1072 | struct list_head *lh; |
1073 | struct inet_protosw *answer; |
1074 | int protocol = p->protocol; |
1075 | struct list_head *last_perm; |
1076 | |
1077 | spin_lock_bh(&inetsw_lock); |
1078 | |
1079 | if (p->type >= SOCK_MAX) |
1080 | goto out_illegal; |
1081 | |
1082 | /* If we are trying to override a permanent protocol, bail. */ |
1083 | answer = NULL; |
1084 | last_perm = &inetsw[p->type]; |
1085 | list_for_each(lh, &inetsw[p->type]) { |
1086 | answer = list_entry(lh, struct inet_protosw, list); |
1087 | |
1088 | /* Check only the non-wild match. */ |
1089 | if (INET_PROTOSW_PERMANENT & answer->flags) { |
1090 | if (protocol == answer->protocol) |
1091 | break; |
1092 | last_perm = lh; |
1093 | } |
1094 | |
1095 | answer = NULL; |
1096 | } |
1097 | if (answer) |
1098 | goto out_permanent; |
1099 | |
1100 | /* Add the new entry after the last permanent entry if any, so that |
1101 | * the new entry does not override a permanent entry when matched with |
1102 | * a wild-card protocol. But it is allowed to override any existing |
1103 | * non-permanent entry. This means that when we remove this entry, the |
1104 | * system automatically returns to the old behavior. |
1105 | */ |
1106 | list_add_rcu(&p->list, last_perm); |
1107 | out: |
1108 | spin_unlock_bh(&inetsw_lock); |
1109 | |
1110 | return; |
1111 | |
1112 | out_permanent: |
1113 | pr_err("Attempt to override permanent protocol %d\n", protocol); |
1114 | goto out; |
1115 | |
1116 | out_illegal: |
1117 | pr_err("Ignoring attempt to register invalid socket type %d\n", |
1118 | p->type); |
1119 | goto out; |
1120 | } |
1121 | EXPORT_SYMBOL(inet_register_protosw); |
1122 | |
1123 | void inet_unregister_protosw(struct inet_protosw *p) |
1124 | { |
1125 | if (INET_PROTOSW_PERMANENT & p->flags) { |
1126 | pr_err("Attempt to unregister permanent protocol %d\n", |
1127 | p->protocol); |
1128 | } else { |
1129 | spin_lock_bh(&inetsw_lock); |
1130 | list_del_rcu(&p->list); |
1131 | spin_unlock_bh(&inetsw_lock); |
1132 | |
1133 | synchronize_net(); |
1134 | } |
1135 | } |
1136 | EXPORT_SYMBOL(inet_unregister_protosw); |
1137 | |
1138 | /* |
1139 | * Shall we try to damage output packets if routing dev changes? |
1140 | */ |
1141 | |
1142 | int sysctl_ip_dynaddr __read_mostly; |
1143 | |
1144 | static int inet_sk_reselect_saddr(struct sock *sk) |
1145 | { |
1146 | struct inet_sock *inet = inet_sk(sk); |
1147 | __be32 old_saddr = inet->inet_saddr; |
1148 | __be32 daddr = inet->inet_daddr; |
1149 | struct flowi4 *fl4; |
1150 | struct rtable *rt; |
1151 | __be32 new_saddr; |
1152 | struct ip_options_rcu *inet_opt; |
1153 | |
1154 | inet_opt = rcu_dereference_protected(inet->inet_opt, |
1155 | sock_owned_by_user(sk)); |
1156 | if (inet_opt && inet_opt->opt.srr) |
1157 | daddr = inet_opt->opt.faddr; |
1158 | |
1159 | /* Query new route. */ |
1160 | fl4 = &inet->cork.fl.u.ip4; |
1161 | rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk), |
1162 | sk->sk_bound_dev_if, sk->sk_protocol, |
1163 | inet->inet_sport, inet->inet_dport, sk, false); |
1164 | if (IS_ERR(rt)) |
1165 | return PTR_ERR(rt); |
1166 | |
1167 | sk_setup_caps(sk, &rt->dst); |
1168 | |
1169 | new_saddr = fl4->saddr; |
1170 | |
1171 | if (new_saddr == old_saddr) |
1172 | return 0; |
1173 | |
1174 | if (sysctl_ip_dynaddr > 1) { |
1175 | pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n", |
1176 | __func__, &old_saddr, &new_saddr); |
1177 | } |
1178 | |
1179 | inet->inet_saddr = inet->inet_rcv_saddr = new_saddr; |
1180 | |
1181 | /* |
1182 | * XXX The only one ugly spot where we need to |
1183 | * XXX really change the sockets identity after |
1184 | * XXX it has entered the hashes. -DaveM |
1185 | * |
1186 | * Besides that, it does not check for connection |
1187 | * uniqueness. Wait for troubles. |
1188 | */ |
1189 | __sk_prot_rehash(sk); |
1190 | return 0; |
1191 | } |
1192 | |
1193 | int inet_sk_rebuild_header(struct sock *sk) |
1194 | { |
1195 | struct inet_sock *inet = inet_sk(sk); |
1196 | struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); |
1197 | __be32 daddr; |
1198 | struct ip_options_rcu *inet_opt; |
1199 | struct flowi4 *fl4; |
1200 | int err; |
1201 | |
1202 | /* Route is OK, nothing to do. */ |
1203 | if (rt) |
1204 | return 0; |
1205 | |
1206 | /* Reroute. */ |
1207 | rcu_read_lock(); |
1208 | inet_opt = rcu_dereference(inet->inet_opt); |
1209 | daddr = inet->inet_daddr; |
1210 | if (inet_opt && inet_opt->opt.srr) |
1211 | daddr = inet_opt->opt.faddr; |
1212 | rcu_read_unlock(); |
1213 | fl4 = &inet->cork.fl.u.ip4; |
1214 | rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr, |
1215 | inet->inet_dport, inet->inet_sport, |
1216 | sk->sk_protocol, RT_CONN_FLAGS(sk), |
1217 | sk->sk_bound_dev_if); |
1218 | if (!IS_ERR(rt)) { |
1219 | err = 0; |
1220 | sk_setup_caps(sk, &rt->dst); |
1221 | } else { |
1222 | err = PTR_ERR(rt); |
1223 | |
1224 | /* Routing failed... */ |
1225 | sk->sk_route_caps = 0; |
1226 | /* |
1227 |  * Other protocols have to map their equivalent state to TCP_SYN_SENT. |
1228 | * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. -acme |
1229 | */ |
1230 | if (!sysctl_ip_dynaddr || |
1231 | sk->sk_state != TCP_SYN_SENT || |
1232 | (sk->sk_userlocks & SOCK_BINDADDR_LOCK) || |
1233 | (err = inet_sk_reselect_saddr(sk)) != 0) |
1234 | sk->sk_err_soft = -err; |
1235 | } |
1236 | |
1237 | return err; |
1238 | } |
1239 | EXPORT_SYMBOL(inet_sk_rebuild_header); |
1240 | |
1241 | static int inet_gso_send_check(struct sk_buff *skb) |
1242 | { |
1243 | const struct net_offload *ops; |
1244 | const struct iphdr *iph; |
1245 | int proto; |
1246 | int ihl; |
1247 | int err = -EINVAL; |
1248 | |
1249 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) |
1250 | goto out; |
1251 | |
1252 | iph = ip_hdr(skb); |
1253 | ihl = iph->ihl * 4; |
1254 | if (ihl < sizeof(*iph)) |
1255 | goto out; |
1256 | |
1257 | if (unlikely(!pskb_may_pull(skb, ihl))) |
1258 | goto out; |
1259 | |
1260 | __skb_pull(skb, ihl); |
1261 | skb_reset_transport_header(skb); |
1262 | iph = ip_hdr(skb); |
1263 | proto = iph->protocol; |
1264 | err = -EPROTONOSUPPORT; |
1265 | |
1266 | rcu_read_lock(); |
1267 | ops = rcu_dereference(inet_offloads[proto]); |
1268 | if (likely(ops && ops->callbacks.gso_send_check)) |
1269 | err = ops->callbacks.gso_send_check(skb); |
1270 | rcu_read_unlock(); |
1271 | |
1272 | out: |
1273 | return err; |
1274 | } |
1275 | |
1276 | static struct sk_buff *inet_gso_segment(struct sk_buff *skb, |
1277 | netdev_features_t features) |
1278 | { |
1279 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
1280 | const struct net_offload *ops; |
1281 | struct iphdr *iph; |
1282 | int proto; |
1283 | int ihl; |
1284 | int id; |
1285 | unsigned int offset = 0; |
1286 | |
1287 | if (!(features & NETIF_F_V4_CSUM)) |
1288 | features &= ~NETIF_F_SG; |
1289 | |
1290 | if (unlikely(skb_shinfo(skb)->gso_type & |
1291 | ~(SKB_GSO_TCPV4 | |
1292 | SKB_GSO_UDP | |
1293 | SKB_GSO_DODGY | |
1294 | SKB_GSO_TCP_ECN | |
1295 | SKB_GSO_GRE | |
1296 | 0))) |
1297 | goto out; |
1298 | |
1299 | if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) |
1300 | goto out; |
1301 | |
1302 | iph = ip_hdr(skb); |
1303 | ihl = iph->ihl * 4; |
1304 | if (ihl < sizeof(*iph)) |
1305 | goto out; |
1306 | |
1307 | if (unlikely(!pskb_may_pull(skb, ihl))) |
1308 | goto out; |
1309 | |
1310 | __skb_pull(skb, ihl); |
1311 | skb_reset_transport_header(skb); |
1312 | iph = ip_hdr(skb); |
1313 | id = ntohs(iph->id); |
1314 | proto = iph->protocol; |
1315 | segs = ERR_PTR(-EPROTONOSUPPORT); |
1316 | |
1317 | rcu_read_lock(); |
1318 | ops = rcu_dereference(inet_offloads[proto]); |
1319 | if (likely(ops && ops->callbacks.gso_segment)) |
1320 | segs = ops->callbacks.gso_segment(skb, features); |
1321 | rcu_read_unlock(); |
1322 | |
1323 | if (IS_ERR_OR_NULL(segs)) |
1324 | goto out; |
1325 | |
1326 | skb = segs; |
1327 | do { |
1328 | iph = ip_hdr(skb); |
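		/* UFO emits IP fragments of a single datagram (same id,
		 * increasing frag_off); other protocols get one full IP header
		 * per segment with consecutive ids.
		 */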
1329 | if (proto == IPPROTO_UDP) { |
1330 | iph->id = htons(id); |
1331 | iph->frag_off = htons(offset >> 3); |
1332 | if (skb->next != NULL) |
1333 | iph->frag_off |= htons(IP_MF); |
1334 | offset += (skb->len - skb->mac_len - iph->ihl * 4); |
1335 | } else { |
1336 | iph->id = htons(id++); |
1337 | } |
1338 | iph->tot_len = htons(skb->len - skb->mac_len); |
1339 | iph->check = 0; |
1340 | iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); |
1341 | } while ((skb = skb->next)); |
1342 | |
1343 | out: |
1344 | return segs; |
1345 | } |
1346 | |
1347 | static struct sk_buff **inet_gro_receive(struct sk_buff **head, |
1348 | struct sk_buff *skb) |
1349 | { |
1350 | const struct net_offload *ops; |
1351 | struct sk_buff **pp = NULL; |
1352 | struct sk_buff *p; |
1353 | const struct iphdr *iph; |
1354 | unsigned int hlen; |
1355 | unsigned int off; |
1356 | unsigned int id; |
1357 | int flush = 1; |
1358 | int proto; |
1359 | |
1360 | off = skb_gro_offset(skb); |
1361 | hlen = off + sizeof(*iph); |
1362 | iph = skb_gro_header_fast(skb, off); |
1363 | if (skb_gro_header_hard(skb, hlen)) { |
1364 | iph = skb_gro_header_slow(skb, hlen, off); |
1365 | if (unlikely(!iph)) |
1366 | goto out; |
1367 | } |
1368 | |
1369 | proto = iph->protocol; |
1370 | |
1371 | rcu_read_lock(); |
1372 | ops = rcu_dereference(inet_offloads[proto]); |
1373 | if (!ops || !ops->callbacks.gro_receive) |
1374 | goto out_unlock; |
1375 | |
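	/* 0x45: IPv4 with a 5-word (20 byte) header; GRO only merges packets
	 * that carry no IP options.
	 */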
1376 | if (*(u8 *)iph != 0x45) |
1377 | goto out_unlock; |
1378 | |
1379 | if (unlikely(ip_fast_csum((u8 *)iph, 5))) |
1380 | goto out_unlock; |
1381 | |
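	/* Force a flush unless tot_len matches the GRO length and frag_off is
	 * exactly IP_DF (DF set, not a fragment).  The upper 16 bits left in
	 * 'id' after the shift are the IP identification field.
	 */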
1382 | id = ntohl(*(__be32 *)&iph->id); |
1383 | flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF)); |
1384 | id >>= 16; |
1385 | |
1386 | for (p = *head; p; p = p->next) { |
1387 | struct iphdr *iph2; |
1388 | |
1389 | if (!NAPI_GRO_CB(p)->same_flow) |
1390 | continue; |
1391 | |
1392 | iph2 = ip_hdr(p); |
1393 | |
1394 | if ((iph->protocol ^ iph2->protocol) | |
1395 | ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) | |
1396 | ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) { |
1397 | NAPI_GRO_CB(p)->same_flow = 0; |
1398 | continue; |
1399 | } |
1400 | |
1401 | /* All fields must match except length and checksum. */ |
1402 | NAPI_GRO_CB(p)->flush |= |
1403 | (iph->ttl ^ iph2->ttl) | |
1404 | (iph->tos ^ iph2->tos) | |
1405 | ((u16)(ntohs(iph2->id) + NAPI_GRO_CB(p)->count) ^ id); |
1406 | |
1407 | NAPI_GRO_CB(p)->flush |= flush; |
1408 | } |
1409 | |
1410 | NAPI_GRO_CB(skb)->flush |= flush; |
1411 | skb_gro_pull(skb, sizeof(*iph)); |
1412 | skb_set_transport_header(skb, skb_gro_offset(skb)); |
1413 | |
1414 | pp = ops->callbacks.gro_receive(head, skb); |
1415 | |
1416 | out_unlock: |
1417 | rcu_read_unlock(); |
1418 | |
1419 | out: |
1420 | NAPI_GRO_CB(skb)->flush |= flush; |
1421 | |
1422 | return pp; |
1423 | } |
1424 | |
1425 | static int inet_gro_complete(struct sk_buff *skb) |
1426 | { |
1427 | __be16 newlen = htons(skb->len - skb_network_offset(skb)); |
1428 | struct iphdr *iph = ip_hdr(skb); |
1429 | const struct net_offload *ops; |
1430 | int proto = iph->protocol; |
1431 | int err = -ENOSYS; |
1432 | |
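	/* tot_len becomes the merged skb's length; patch the header checksum
	 * incrementally for the changed 16-bit field.
	 */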
1433 | csum_replace2(&iph->check, iph->tot_len, newlen); |
1434 | iph->tot_len = newlen; |
1435 | |
1436 | rcu_read_lock(); |
1437 | ops = rcu_dereference(inet_offloads[proto]); |
1438 | if (WARN_ON(!ops || !ops->callbacks.gro_complete)) |
1439 | goto out_unlock; |
1440 | |
1441 | err = ops->callbacks.gro_complete(skb); |
1442 | |
1443 | out_unlock: |
1444 | rcu_read_unlock(); |
1445 | |
1446 | return err; |
1447 | } |
1448 | |
1449 | int inet_ctl_sock_create(struct sock **sk, unsigned short family, |
1450 | unsigned short type, unsigned char protocol, |
1451 | struct net *net) |
1452 | { |
1453 | struct socket *sock; |
1454 | int rc = sock_create_kern(family, type, protocol, &sock); |
1455 | |
1456 | if (rc == 0) { |
1457 | *sk = sock->sk; |
1458 | (*sk)->sk_allocation = GFP_ATOMIC; |
1459 | /* |
1460 | * Unhash it so that IP input processing does not even see it, |
1461 | * we do not wish this socket to see incoming packets. |
1462 | */ |
1463 | (*sk)->sk_prot->unhash(*sk); |
1464 | |
1465 | sk_change_net(*sk, net); |
1466 | } |
1467 | return rc; |
1468 | } |
1469 | EXPORT_SYMBOL_GPL(inet_ctl_sock_create); |
1470 | |
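/* Fold one MIB counter across all possible CPUs.  'offt' is the index of the
 * counter within the per-cpu array of unsigned longs, and all SNMP_ARRAY_SZ
 * copies of the mib are summed together.
 */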
1471 | unsigned long snmp_fold_field(void __percpu *mib[], int offt) |
1472 | { |
1473 | unsigned long res = 0; |
1474 | int i, j; |
1475 | |
1476 | for_each_possible_cpu(i) { |
1477 | for (j = 0; j < SNMP_ARRAY_SZ; j++) |
1478 | res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt); |
1479 | } |
1480 | return res; |
1481 | } |
1482 | EXPORT_SYMBOL_GPL(snmp_fold_field); |
1483 | |
1484 | #if BITS_PER_LONG==32 |
1485 | |
1486 | u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset) |
1487 | { |
1488 | u64 res = 0; |
1489 | int cpu; |
1490 | |
1491 | for_each_possible_cpu(cpu) { |
1492 | void *bhptr; |
1493 | struct u64_stats_sync *syncp; |
1494 | u64 v; |
1495 | unsigned int start; |
1496 | |
1497 | bhptr = per_cpu_ptr(mib[0], cpu); |
1498 | syncp = (struct u64_stats_sync *)(bhptr + syncp_offset); |
1499 | do { |
1500 | start = u64_stats_fetch_begin_bh(syncp); |
1501 | v = *(((u64 *) bhptr) + offt); |
1502 | } while (u64_stats_fetch_retry_bh(syncp, start)); |
1503 | |
1504 | res += v; |
1505 | } |
1506 | return res; |
1507 | } |
1508 | EXPORT_SYMBOL_GPL(snmp_fold_field64); |
1509 | #endif |
1510 | |
1511 | int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align) |
1512 | { |
1513 | BUG_ON(ptr == NULL); |
1514 | ptr[0] = __alloc_percpu(mibsize, align); |
1515 | if (!ptr[0]) |
1516 | return -ENOMEM; |
1517 | #if SNMP_ARRAY_SZ == 2 |
1518 | ptr[1] = __alloc_percpu(mibsize, align); |
1519 | if (!ptr[1]) { |
1520 | free_percpu(ptr[0]); |
1521 | ptr[0] = NULL; |
1522 | return -ENOMEM; |
1523 | } |
1524 | #endif |
1525 | return 0; |
1526 | } |
1527 | EXPORT_SYMBOL_GPL(snmp_mib_init); |
1528 | |
1529 | void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ]) |
1530 | { |
1531 | int i; |
1532 | |
1533 | BUG_ON(ptr == NULL); |
1534 | for (i = 0; i < SNMP_ARRAY_SZ; i++) { |
1535 | free_percpu(ptr[i]); |
1536 | ptr[i] = NULL; |
1537 | } |
1538 | } |
1539 | EXPORT_SYMBOL_GPL(snmp_mib_free); |
1540 | |
1541 | #ifdef CONFIG_IP_MULTICAST |
1542 | static const struct net_protocol igmp_protocol = { |
1543 | .handler = igmp_rcv, |
1544 | .netns_ok = 1, |
1545 | }; |
1546 | #endif |
1547 | |
1548 | static const struct net_protocol tcp_protocol = { |
1549 | .early_demux = tcp_v4_early_demux, |
1550 | .handler = tcp_v4_rcv, |
1551 | .err_handler = tcp_v4_err, |
1552 | .no_policy = 1, |
1553 | .netns_ok = 1, |
1554 | }; |
1555 | |
1556 | static const struct net_offload tcp_offload = { |
1557 | .callbacks = { |
1558 | .gso_send_check = tcp_v4_gso_send_check, |
1559 | .gso_segment = tcp_tso_segment, |
1560 | .gro_receive = tcp4_gro_receive, |
1561 | .gro_complete = tcp4_gro_complete, |
1562 | }, |
1563 | }; |
1564 | |
1565 | static const struct net_protocol udp_protocol = { |
1566 | .handler = udp_rcv, |
1567 | .err_handler = udp_err, |
1568 | .no_policy = 1, |
1569 | .netns_ok = 1, |
1570 | }; |
1571 | |
1572 | static const struct net_offload udp_offload = { |
1573 | .callbacks = { |
1574 | .gso_send_check = udp4_ufo_send_check, |
1575 | .gso_segment = udp4_ufo_fragment, |
1576 | }, |
1577 | }; |
1578 | |
1579 | static const struct net_protocol icmp_protocol = { |
1580 | .handler = icmp_rcv, |
1581 | .err_handler = icmp_err, |
1582 | .no_policy = 1, |
1583 | .netns_ok = 1, |
1584 | }; |
1585 | |
1586 | static __net_init int ipv4_mib_init_net(struct net *net) |
1587 | { |
1588 | if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics, |
1589 | sizeof(struct tcp_mib), |
1590 | __alignof__(struct tcp_mib)) < 0) |
1591 | goto err_tcp_mib; |
1592 | if (snmp_mib_init((void __percpu **)net->mib.ip_statistics, |
1593 | sizeof(struct ipstats_mib), |
1594 | __alignof__(struct ipstats_mib)) < 0) |
1595 | goto err_ip_mib; |
1596 | if (snmp_mib_init((void __percpu **)net->mib.net_statistics, |
1597 | sizeof(struct linux_mib), |
1598 | __alignof__(struct linux_mib)) < 0) |
1599 | goto err_net_mib; |
1600 | if (snmp_mib_init((void __percpu **)net->mib.udp_statistics, |
1601 | sizeof(struct udp_mib), |
1602 | __alignof__(struct udp_mib)) < 0) |
1603 | goto err_udp_mib; |
1604 | if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics, |
1605 | sizeof(struct udp_mib), |
1606 | __alignof__(struct udp_mib)) < 0) |
1607 | goto err_udplite_mib; |
1608 | if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics, |
1609 | sizeof(struct icmp_mib), |
1610 | __alignof__(struct icmp_mib)) < 0) |
1611 | goto err_icmp_mib; |
1612 | net->mib.icmpmsg_statistics = kzalloc(sizeof(struct icmpmsg_mib), |
1613 | GFP_KERNEL); |
1614 | if (!net->mib.icmpmsg_statistics) |
1615 | goto err_icmpmsg_mib; |
1616 | |
1617 | tcp_mib_init(net); |
1618 | return 0; |
1619 | |
1620 | err_icmpmsg_mib: |
1621 | snmp_mib_free((void __percpu **)net->mib.icmp_statistics); |
1622 | err_icmp_mib: |
1623 | snmp_mib_free((void __percpu **)net->mib.udplite_statistics); |
1624 | err_udplite_mib: |
1625 | snmp_mib_free((void __percpu **)net->mib.udp_statistics); |
1626 | err_udp_mib: |
1627 | snmp_mib_free((void __percpu **)net->mib.net_statistics); |
1628 | err_net_mib: |
1629 | snmp_mib_free((void __percpu **)net->mib.ip_statistics); |
1630 | err_ip_mib: |
1631 | snmp_mib_free((void __percpu **)net->mib.tcp_statistics); |
1632 | err_tcp_mib: |
1633 | return -ENOMEM; |
1634 | } |
1635 | |
1636 | static __net_exit void ipv4_mib_exit_net(struct net *net) |
1637 | { |
1638 | kfree(net->mib.icmpmsg_statistics); |
1639 | snmp_mib_free((void __percpu **)net->mib.icmp_statistics); |
1640 | snmp_mib_free((void __percpu **)net->mib.udplite_statistics); |
1641 | snmp_mib_free((void __percpu **)net->mib.udp_statistics); |
1642 | snmp_mib_free((void __percpu **)net->mib.net_statistics); |
1643 | snmp_mib_free((void __percpu **)net->mib.ip_statistics); |
1644 | snmp_mib_free((void __percpu **)net->mib.tcp_statistics); |
1645 | } |
1646 | |
1647 | static __net_initdata struct pernet_operations ipv4_mib_ops = { |
1648 | .init = ipv4_mib_init_net, |
1649 | .exit = ipv4_mib_exit_net, |
1650 | }; |
1651 | |
1652 | static int __init init_ipv4_mibs(void) |
1653 | { |
1654 | return register_pernet_subsys(&ipv4_mib_ops); |
1655 | } |
1656 | |
1657 | static int ipv4_proc_init(void); |
1658 | |
1659 | /* |
1660 | * IP protocol layer initialiser |
1661 | */ |
1662 | |
1663 | static struct packet_offload ip_packet_offload __read_mostly = { |
1664 | .type = cpu_to_be16(ETH_P_IP), |
1665 | .callbacks = { |
1666 | .gso_send_check = inet_gso_send_check, |
1667 | .gso_segment = inet_gso_segment, |
1668 | .gro_receive = inet_gro_receive, |
1669 | .gro_complete = inet_gro_complete, |
1670 | }, |
1671 | }; |
1672 | |
1673 | static int __init ipv4_offload_init(void) |
1674 | { |
1675 | /* |
1676 | * Add offloads |
1677 | */ |
1678 | if (inet_add_offload(&udp_offload, IPPROTO_UDP) < 0) |
1679 | pr_crit("%s: Cannot add UDP protocol offload\n", __func__); |
1680 | if (inet_add_offload(&tcp_offload, IPPROTO_TCP) < 0) |
1681 | 		pr_crit("%s: Cannot add TCP protocol offload\n", __func__); |
1682 | |
1683 | dev_add_offload(&ip_packet_offload); |
1684 | return 0; |
1685 | } |
1686 | |
1687 | fs_initcall(ipv4_offload_init); |
1688 | |
1689 | static struct packet_type ip_packet_type __read_mostly = { |
1690 | .type = cpu_to_be16(ETH_P_IP), |
1691 | .func = ip_rcv, |
1692 | }; |
1693 | |
1694 | static int __init inet_init(void) |
1695 | { |
1696 | struct inet_protosw *q; |
1697 | struct list_head *r; |
1698 | int rc = -EINVAL; |
1699 | |
1700 | BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb)); |
1701 | |
1702 | sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL); |
1703 | if (!sysctl_local_reserved_ports) |
1704 | goto out; |
1705 | |
1706 | rc = proto_register(&tcp_prot, 1); |
1707 | if (rc) |
1708 | goto out_free_reserved_ports; |
1709 | |
1710 | rc = proto_register(&udp_prot, 1); |
1711 | if (rc) |
1712 | goto out_unregister_tcp_proto; |
1713 | |
1714 | rc = proto_register(&raw_prot, 1); |
1715 | if (rc) |
1716 | goto out_unregister_udp_proto; |
1717 | |
1718 | rc = proto_register(&ping_prot, 1); |
1719 | if (rc) |
1720 | goto out_unregister_raw_proto; |
1721 | |
1722 | /* |
1723 | * Tell SOCKET that we are alive... |
1724 | */ |
1725 | |
1726 | (void)sock_register(&inet_family_ops); |
1727 | |
1728 | #ifdef CONFIG_SYSCTL |
1729 | ip_static_sysctl_init(); |
1730 | #endif |
1731 | |
1732 | tcp_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem; |
1733 | |
1734 | /* |
1735 | * Add all the base protocols. |
1736 | */ |
1737 | |
1738 | if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0) |
1739 | pr_crit("%s: Cannot add ICMP protocol\n", __func__); |
1740 | if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0) |
1741 | pr_crit("%s: Cannot add UDP protocol\n", __func__); |
1742 | if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0) |
1743 | pr_crit("%s: Cannot add TCP protocol\n", __func__); |
1744 | #ifdef CONFIG_IP_MULTICAST |
1745 | if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0) |
1746 | pr_crit("%s: Cannot add IGMP protocol\n", __func__); |
1747 | #endif |
1748 | |
1749 | /* Register the socket-side information for inet_create. */ |
1750 | for (r = &inetsw[0]; r < &inetsw[SOCK_MAX]; ++r) |
1751 | INIT_LIST_HEAD(r); |
1752 | |
1753 | for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q) |
1754 | inet_register_protosw(q); |
1755 | |
1756 | /* |
1757 | * Set the ARP module up |
1758 | */ |
1759 | |
1760 | arp_init(); |
1761 | |
1762 | /* |
1763 | * Set the IP module up |
1764 | */ |
1765 | |
1766 | ip_init(); |
1767 | |
1768 | tcp_v4_init(); |
1769 | |
1770 | /* Setup TCP slab cache for open requests. */ |
1771 | tcp_init(); |
1772 | |
1773 | /* Setup UDP memory threshold */ |
1774 | udp_init(); |
1775 | |
1776 | /* Add UDP-Lite (RFC 3828) */ |
1777 | udplite4_register(); |
1778 | |
1779 | ping_init(); |
1780 | |
1781 | /* |
1782 | * Set the ICMP layer up |
1783 | */ |
1784 | |
1785 | if (icmp_init() < 0) |
1786 | panic("Failed to create the ICMP control socket.\n"); |
1787 | |
1788 | /* |
1789 | * Initialise the multicast router |
1790 | */ |
1791 | #if defined(CONFIG_IP_MROUTE) |
1792 | if (ip_mr_init()) |
1793 | pr_crit("%s: Cannot init ipv4 mroute\n", __func__); |
1794 | #endif |
1795 | /* |
1796 | * Initialise per-cpu ipv4 mibs |
1797 | */ |
1798 | |
1799 | if (init_ipv4_mibs()) |
1800 | pr_crit("%s: Cannot init ipv4 mibs\n", __func__); |
1801 | |
1802 | ipv4_proc_init(); |
1803 | |
1804 | ipfrag_init(); |
1805 | |
1806 | dev_add_pack(&ip_packet_type); |
1807 | |
1808 | rc = 0; |
1809 | out: |
1810 | return rc; |
1811 | out_unregister_raw_proto: |
1812 | proto_unregister(&raw_prot); |
1813 | out_unregister_udp_proto: |
1814 | proto_unregister(&udp_prot); |
1815 | out_unregister_tcp_proto: |
1816 | proto_unregister(&tcp_prot); |
1817 | out_free_reserved_ports: |
1818 | kfree(sysctl_local_reserved_ports); |
1819 | goto out; |
1820 | } |
1821 | |
1822 | fs_initcall(inet_init); |
1823 | |
1824 | /* ------------------------------------------------------------------------ */ |
1825 | |
1826 | #ifdef CONFIG_PROC_FS |
1827 | static int __init ipv4_proc_init(void) |
1828 | { |
1829 | int rc = 0; |
1830 | |
1831 | if (raw_proc_init()) |
1832 | goto out_raw; |
1833 | if (tcp4_proc_init()) |
1834 | goto out_tcp; |
1835 | if (udp4_proc_init()) |
1836 | goto out_udp; |
1837 | if (ping_proc_init()) |
1838 | goto out_ping; |
1839 | if (ip_misc_proc_init()) |
1840 | goto out_misc; |
1841 | out: |
1842 | return rc; |
1843 | out_misc: |
1844 | ping_proc_exit(); |
1845 | out_ping: |
1846 | udp4_proc_exit(); |
1847 | out_udp: |
1848 | tcp4_proc_exit(); |
1849 | out_tcp: |
1850 | raw_proc_exit(); |
1851 | out_raw: |
1852 | rc = -ENOMEM; |
1853 | goto out; |
1854 | } |
1855 | |
1856 | #else /* CONFIG_PROC_FS */ |
1857 | static int __init ipv4_proc_init(void) |
1858 | { |
1859 | return 0; |
1860 | } |
1861 | #endif /* CONFIG_PROC_FS */ |
1862 | |
1863 | MODULE_ALIAS_NETPROTO(PF_INET); |
1864 | |
1865 |