/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken:
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code, obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything, not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive, otherwise odd bits of prattle
 *					still escape.
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFCs. For other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works, see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up with retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}
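
/*
 * Illustrative behaviour of the two converters above (example values,
 * not kernel defaults): with timeout = 1s and rto_max = 8s the
 * cumulative wait after N retransmits is 1, 3, 7, 15, 23, ... seconds,
 * so secs_to_retrans(20, 1, 8) returns 5 and retrans_to_secs(5, 1, 8)
 * returns 23.  Each function is thus (roughly) the other's inverse,
 * rounding up to the retransmit boundary that covers the requested time.
 */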

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	sock_poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making it impossible to
	 * poll() for write() in state CLOSE_WAIT. One solution is
	 * evident --- to set POLLHUP if and only if shutdown has been
	 * made in both directions. Actually, it is interesting to look
	 * at how Solaris and DUX solve this dilemma. I would prefer it
	 * if POLLHUP were maskable; then we could set it on SND_SHUTDOWN.
	 * BTW the examples given in Stevens' books assume exactly this
	 * behaviour, which explains why POLLHUP is incompatible with
	 * POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		/* Potential race condition. If the read of tp below is
		 * reordered above the sk->sk_state check, we can be
		 * illegally awakened in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}

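/*
 * tcp_ioctl() answers three queries about queue state:
 *
 *	SIOCINQ (FIONREAD)	bytes of data ready to read, excluding
 *				an urgent byte and a queued FIN
 *	SIOCATMARK		nonzero if the next read starts at the
 *				urgent mark
 *	SIOCOUTQ		bytes written but not yet acked
 *				(write_seq - snd_una)
 *
 * Illustrative userspace usage (not part of this file):
 *
 *	int pending;
 *	if (ioctl(fd, SIOCINQ, &pending) == 0)
 *		printf("%d bytes readable\n", pending);
 */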
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			struct sk_buff *skb;

			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			skb = skb_peek_tail(&sk->sk_receive_queue);
			if (answ && skb)
				answ -= tcp_hdr(skb)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

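/*
 * Force a push once more than half of the largest window the peer has
 * ever advertised (max_window) has accumulated beyond the last pushed
 * byte, so the receiver is not left sitting on a long run of unpushed
 * data.
 */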
static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPCB_FLAG_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, tcp_write_queue_tail(sk));

		tcp_mark_urg(tp, flags);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
			      tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when the user tries to read
				 * from a never-connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
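
/*
 * Userspace typically reaches tcp_splice_read() via splice(2) with the
 * socket as the input fd, e.g. (illustrative, not part of this file):
 *
 *	splice(sock_fd, NULL, pipe_fd, NULL, 65536, SPLICE_F_MOVE);
 *
 * which moves up to 64KB of received data into a pipe without a copy
 * through userspace.
 */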

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

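/*
 * Decide how large a chunk to build per transmit.  Without GSO this is
 * simply the MSS; with GSO we aim for the largest MSS multiple that fits
 * in sk_gso_max_size minus protocol headers, bounded to half the window.
 * Illustratively, with a 1448-byte MSS and a 64KB GSO limit the goal
 * lands at roughly 45 * 1448 = 65160 bytes.  The cached
 * xmit_size_goal_segs keeps the goal MSS-aligned without a divide on
 * the common path.
 */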
static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);

		/* We try hard to avoid divides here */
		old_size_goal = tp->xmit_size_goal_segs * mss_now;

		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
		} else {
			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
		}
	}

	return max(xmit_size_goal, mss_now);
}

static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}
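
/*
 * tcp_sendpage() is the zero-copy transmit path, typically reached from
 * userspace via sendfile(2) with the socket as the output fd, e.g.
 * (illustrative, not part of this file):
 *
 *	off_t off = 0;
 *	sendfile(sock_fd, file_fd, &off, count);
 *
 * When the route lacks scatter-gather or checksum offload we fall back
 * to sock_no_sendpage(), which copies instead.
 */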

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

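/*
 * How many bytes of linear skb head to allocate for a new segment.
 * Non-SG sockets get the full cached MSS in the head.  SG sockets with
 * GSO get 0 (all payload goes into page fragments); SG without GSO get
 * the MSS, clamped down to the maximum head size (pgbreak) when the MSS
 * would straddle the head/fragment boundary.
 */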
static inline int select_size(struct sock *sk, int sg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sg) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

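/*
 * Copy data from user space into fresh or partially filled skbs on the
 * write queue, growing each skb toward size_goal, then kick off
 * transmission while honouring MSG_OOB/MSG_MORE and the Nagle state.
 * This is the backend of sendmsg()/send()/write() on a TCP socket.
 */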
int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size)
{
	struct sock *sk = sock->sk;
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int sg, err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	sg = sk->sk_route_caps & NETIF_F_SG;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy = 0;
			int max = size_goal;

			skb = tcp_write_queue_tail(sk);
			if (tcp_send_head(sk)) {
				if (skb->ip_summed == CHECKSUM_NONE)
					max = mss_now;
				copy = max - skb->len;
			}

			if (copy <= 0) {
new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk,
							  select_size(sk, sg),
							  sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
				max = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS || !sg) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < max || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_send_mss(sk, &size_goal, flags);
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes, this is right! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
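
/*
 * Given the semantics above, a userspace read of the urgent byte might
 * look like this (illustrative, not part of this file):
 *
 *	char oob;
 *	ssize_t n = recv(fd, &oob, 1, MSG_OOB);
 *
 * It returns the single out-of-band byte, fails with EINVAL if that byte
 * was already consumed (or SO_OOBINLINE is set), and never blocks: with
 * no urgent data pending it fails with EAGAIN even on a blocking socket.
 */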

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied the read buffer, we send an ACK
		     * when the connection is not bidirectional, the user
		     * drained the receive buffer and there was a small
		     * segment in the queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is the new
			 * window to advertise; we can advertise it now if
			 * it is not less than the current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

#ifdef CONFIG_NET_DMA
static void tcp_service_net_dma(struct sock *sk, bool wait)
{
	dma_cookie_t done, used;
	dma_cookie_t last_issued;
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->ucopy.dma_chan)
		return;

	last_issued = tp->ucopy.dma_cookie;
	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

	do {
		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
					      last_issued, &done,
					      &used) == DMA_SUCCESS) {
			/* Safe to free early-copied skbs now */
			__skb_queue_purge(&sk->sk_async_wait_queue);
			break;
		} else {
			struct sk_buff *skb;
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}
	} while (wait);
}
#endif

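/*
 * Find the skb in the receive queue covering sequence number @seq and
 * store the offset of @seq within it in *@off, accounting for a SYN
 * consuming one sequence number.  An skb carrying a FIN is returned
 * even when @seq points past its data, so callers can notice
 * end-of-stream.  Returns NULL if @seq has not been queued yet.
 */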
static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/*
			 * If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq-1, &offset);
			if (!skb || (offset+1 != skb->len))
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
		tp->copied_seq = seq;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}
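
/*
 * A minimal recv_actor for tcp_read_sock() has this shape (a sketch
 * only; my_actor is a hypothetical example, not defined anywhere):
 *
 *	static int my_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			    unsigned int offset, size_t len)
 *	{
 *		size_t used = min_t(size_t, len, desc->count);
 *
 *		// consume skb data at [offset, offset + used) here
 *		desc->count -= used;
 *		return used;
 *	}
 *
 * Returning a negative value aborts the read and is reported back to
 * the caller; the walk also stops once desc->count reaches zero.
 */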
1383 | |
1384 | /* |
1385 | * This routine copies from a sock struct into the user buffer. |
1386 | * |
1387 | * Technical note: in 2.3 we work on _locked_ socket, so that |
1388 | * tricks with *seq access order and skb->users are not required. |
1389 | * Probably, code can be easily improved even more. |
1390 | */ |
1391 | |
1392 | int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, |
1393 | size_t len, int nonblock, int flags, int *addr_len) |
1394 | { |
1395 | struct tcp_sock *tp = tcp_sk(sk); |
1396 | int copied = 0; |
1397 | u32 peek_seq; |
1398 | u32 *seq; |
1399 | unsigned long used; |
1400 | int err; |
1401 | int target; /* Read at least this many bytes */ |
1402 | long timeo; |
1403 | struct task_struct *user_recv = NULL; |
1404 | int copied_early = 0; |
1405 | struct sk_buff *skb; |
1406 | u32 urg_hole = 0; |
1407 | |
1408 | lock_sock(sk); |
1409 | |
1410 | TCP_CHECK_TIMER(sk); |
1411 | |
1412 | err = -ENOTCONN; |
1413 | if (sk->sk_state == TCP_LISTEN) |
1414 | goto out; |
1415 | |
1416 | timeo = sock_rcvtimeo(sk, nonblock); |
1417 | |
1418 | /* Urgent data needs to be handled specially. */ |
1419 | if (flags & MSG_OOB) |
1420 | goto recv_urg; |
1421 | |
1422 | seq = &tp->copied_seq; |
1423 | if (flags & MSG_PEEK) { |
1424 | peek_seq = tp->copied_seq; |
1425 | seq = &peek_seq; |
1426 | } |
1427 | |
1428 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); |
1429 | |
1430 | #ifdef CONFIG_NET_DMA |
1431 | tp->ucopy.dma_chan = NULL; |
1432 | preempt_disable(); |
1433 | skb = skb_peek_tail(&sk->sk_receive_queue); |
1434 | { |
1435 | int available = 0; |
1436 | |
1437 | if (skb) |
1438 | available = TCP_SKB_CB(skb)->seq + skb->len - (*seq); |
1439 | if ((available < target) && |
1440 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && |
1441 | !sysctl_tcp_low_latency && |
1442 | dma_find_channel(DMA_MEMCPY)) { |
1443 | preempt_enable_no_resched(); |
1444 | tp->ucopy.pinned_list = |
1445 | dma_pin_iovec_pages(msg->msg_iov, len); |
1446 | } else { |
1447 | preempt_enable_no_resched(); |
1448 | } |
1449 | } |
1450 | #endif |
1451 | |
1452 | do { |
1453 | u32 offset; |
1454 | |
1455 | /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ |
1456 | if (tp->urg_data && tp->urg_seq == *seq) { |
1457 | if (copied) |
1458 | break; |
1459 | if (signal_pending(current)) { |
1460 | copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; |
1461 | break; |
1462 | } |
1463 | } |
1464 | |
1465 | /* Next get a buffer. */ |
1466 | |
1467 | skb_queue_walk(&sk->sk_receive_queue, skb) { |
1468 | /* Now that we have two receive queues this |
1469 | * shouldn't happen. |
1470 | */ |
1471 | if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), |
1472 | KERN_INFO "recvmsg bug: copied %X " |
1473 | "seq %X rcvnxt %X fl %X\n", *seq, |
1474 | TCP_SKB_CB(skb)->seq, tp->rcv_nxt, |
1475 | flags)) |
1476 | break; |
1477 | |
1478 | offset = *seq - TCP_SKB_CB(skb)->seq; |
1479 | if (tcp_hdr(skb)->syn) |
1480 | offset--; |
1481 | if (offset < skb->len) |
1482 | goto found_ok_skb; |
1483 | if (tcp_hdr(skb)->fin) |
1484 | goto found_fin_ok; |
1485 | WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: " |
1486 | "copied %X seq %X rcvnxt %X fl %X\n", |
1487 | *seq, TCP_SKB_CB(skb)->seq, |
1488 | tp->rcv_nxt, flags); |
1489 | } |
1490 | |
1491 | /* Well, if we have backlog, try to process it now yet. */ |
1492 | |
1493 | if (copied >= target && !sk->sk_backlog.tail) |
1494 | break; |
1495 | |
1496 | if (copied) { |
1497 | if (sk->sk_err || |
1498 | sk->sk_state == TCP_CLOSE || |
1499 | (sk->sk_shutdown & RCV_SHUTDOWN) || |
1500 | !timeo || |
1501 | signal_pending(current)) |
1502 | break; |
1503 | } else { |
1504 | if (sock_flag(sk, SOCK_DONE)) |
1505 | break; |
1506 | |
1507 | if (sk->sk_err) { |
1508 | copied = sock_error(sk); |
1509 | break; |
1510 | } |
1511 | |
1512 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
1513 | break; |
1514 | |
1515 | if (sk->sk_state == TCP_CLOSE) { |
1516 | if (!sock_flag(sk, SOCK_DONE)) { |
1517 | /* This occurs when user tries to read |
1518 | * from never connected socket. |
1519 | */ |
1520 | copied = -ENOTCONN; |
1521 | break; |
1522 | } |
1523 | break; |
1524 | } |
1525 | |
1526 | if (!timeo) { |
1527 | copied = -EAGAIN; |
1528 | break; |
1529 | } |
1530 | |
1531 | if (signal_pending(current)) { |
1532 | copied = sock_intr_errno(timeo); |
1533 | break; |
1534 | } |
1535 | } |
1536 | |
1537 | tcp_cleanup_rbuf(sk, copied); |
1538 | |
1539 | if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { |
1540 | /* Install new reader */ |
1541 | if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { |
1542 | user_recv = current; |
1543 | tp->ucopy.task = user_recv; |
1544 | tp->ucopy.iov = msg->msg_iov; |
1545 | } |
1546 | |
1547 | tp->ucopy.len = len; |
1548 | |
1549 | WARN_ON(tp->copied_seq != tp->rcv_nxt && |
1550 | !(flags & (MSG_PEEK | MSG_TRUNC))); |
1551 | |
1552 | /* Ugly... If prequeue is not empty, we have to |
1553 | * process it before releasing socket, otherwise |
1554 | * order will be broken at second iteration. |
1555 | * More elegant solution is required!!! |
1556 | * |
1557 | * Look: we have the following (pseudo)queues: |
1558 | * |
1559 | * 1. packets in flight |
1560 | * 2. backlog |
1561 | * 3. prequeue |
1562 | * 4. receive_queue |
1563 | * |
1564 | * Each queue can be processed only if the next ones |
1565 | * are empty. At this point we have empty receive_queue. |
1566 | * But prequeue _can_ be not empty after 2nd iteration, |
1567 | * when we jumped to start of loop because backlog |
1568 | * processing added something to receive_queue. |
1569 | * We cannot release_sock(), because backlog contains |
1570 | * packets arrived _after_ prequeued ones. |
1571 | * |
1572 | * Shortly, algorithm is clear --- to process all |
1573 | * the queues in order. We could make it more directly, |
1574 | * requeueing packets from backlog to prequeue, if |
1575 | * is not empty. It is more elegant, but eats cycles, |
1576 | * unfortunately. |
1577 | */ |
1578 | if (!skb_queue_empty(&tp->ucopy.prequeue)) |
1579 | goto do_prequeue; |
1580 | |
1581 | /* __ Set realtime policy in scheduler __ */ |
1582 | } |
1583 | |
1584 | #ifdef CONFIG_NET_DMA |
1585 | if (tp->ucopy.dma_chan) |
1586 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); |
1587 | #endif |
1588 | if (copied >= target) { |
1589 | /* Do not sleep, just process backlog. */ |
1590 | release_sock(sk); |
1591 | lock_sock(sk); |
1592 | } else |
1593 | sk_wait_data(sk, &timeo); |
1594 | |
1595 | #ifdef CONFIG_NET_DMA |
1596 | tcp_service_net_dma(sk, false); /* Don't block */ |
1597 | tp->ucopy.wakeup = 0; |
1598 | #endif |
1599 | |
1600 | if (user_recv) { |
1601 | int chunk; |
1602 | |
1603 | /* __ Restore normal policy in scheduler __ */ |
1604 | |
1605 | if ((chunk = len - tp->ucopy.len) != 0) { |
1606 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); |
1607 | len -= chunk; |
1608 | copied += chunk; |
1609 | } |
1610 | |
1611 | if (tp->rcv_nxt == tp->copied_seq && |
1612 | !skb_queue_empty(&tp->ucopy.prequeue)) { |
1613 | do_prequeue: |
1614 | tcp_prequeue_process(sk); |
1615 | |
1616 | if ((chunk = len - tp->ucopy.len) != 0) { |
1617 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); |
1618 | len -= chunk; |
1619 | copied += chunk; |
1620 | } |
1621 | } |
1622 | } |
1623 | if ((flags & MSG_PEEK) && |
1624 | (peek_seq - copied - urg_hole != tp->copied_seq)) { |
1625 | if (net_ratelimit()) |
1626 | printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n", |
1627 | current->comm, task_pid_nr(current)); |
1628 | peek_seq = tp->copied_seq; |
1629 | } |
1630 | continue; |
1631 | |
1632 | found_ok_skb: |
1633 | /* Ok so how much can we use? */ |
1634 | used = skb->len - offset; |
1635 | if (len < used) |
1636 | used = len; |
1637 | |
1638 | /* Do we have urgent data here? */ |
1639 | if (tp->urg_data) { |
1640 | u32 urg_offset = tp->urg_seq - *seq; |
1641 | if (urg_offset < used) { |
1642 | if (!urg_offset) { |
1643 | if (!sock_flag(sk, SOCK_URGINLINE)) { |
1644 | ++*seq; |
1645 | urg_hole++; |
1646 | offset++; |
1647 | used--; |
1648 | if (!used) |
1649 | goto skip_copy; |
1650 | } |
1651 | } else |
1652 | used = urg_offset; |
1653 | } |
1654 | } |
1655 | |
1656 | if (!(flags & MSG_TRUNC)) { |
1657 | #ifdef CONFIG_NET_DMA |
1658 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) |
1659 | tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); |
1660 | |
1661 | if (tp->ucopy.dma_chan) { |
1662 | tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( |
1663 | tp->ucopy.dma_chan, skb, offset, |
1664 | msg->msg_iov, used, |
1665 | tp->ucopy.pinned_list); |
1666 | |
1667 | if (tp->ucopy.dma_cookie < 0) { |
1668 | |
1669 | printk(KERN_ALERT "dma_cookie < 0\n"); |
1670 | |
1671 | /* Exception. Bailout! */ |
1672 | if (!copied) |
1673 | copied = -EFAULT; |
1674 | break; |
1675 | } |
1676 | |
1677 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); |
1678 | |
1679 | if ((offset + used) == skb->len) |
1680 | copied_early = 1; |
1681 | |
1682 | } else |
1683 | #endif |
1684 | { |
1685 | err = skb_copy_datagram_iovec(skb, offset, |
1686 | msg->msg_iov, used); |
1687 | if (err) { |
1688 | /* Exception. Bailout! */ |
1689 | if (!copied) |
1690 | copied = -EFAULT; |
1691 | break; |
1692 | } |
1693 | } |
1694 | } |
1695 | |
1696 | *seq += used; |
1697 | copied += used; |
1698 | len -= used; |
1699 | |
1700 | tcp_rcv_space_adjust(sk); |
1701 | |
1702 | skip_copy: |
1703 | if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { |
1704 | tp->urg_data = 0; |
1705 | tcp_fast_path_check(sk); |
1706 | } |
1707 | if (used + offset < skb->len) |
1708 | continue; |
1709 | |
1710 | if (tcp_hdr(skb)->fin) |
1711 | goto found_fin_ok; |
1712 | if (!(flags & MSG_PEEK)) { |
1713 | sk_eat_skb(sk, skb, copied_early); |
1714 | copied_early = 0; |
1715 | } |
1716 | continue; |
1717 | |
1718 | found_fin_ok: |
1719 | /* Process the FIN. */ |
1720 | ++*seq; |
1721 | if (!(flags & MSG_PEEK)) { |
1722 | sk_eat_skb(sk, skb, copied_early); |
1723 | copied_early = 0; |
1724 | } |
1725 | break; |
1726 | } while (len > 0); |
1727 | |
1728 | if (user_recv) { |
1729 | if (!skb_queue_empty(&tp->ucopy.prequeue)) { |
1730 | int chunk; |
1731 | |
1732 | tp->ucopy.len = copied > 0 ? len : 0; |
1733 | |
1734 | tcp_prequeue_process(sk); |
1735 | |
1736 | if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { |
1737 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); |
1738 | len -= chunk; |
1739 | copied += chunk; |
1740 | } |
1741 | } |
1742 | |
1743 | tp->ucopy.task = NULL; |
1744 | tp->ucopy.len = 0; |
1745 | } |
1746 | |
1747 | #ifdef CONFIG_NET_DMA |
1748 | tcp_service_net_dma(sk, true); /* Wait for queue to drain */ |
1749 | tp->ucopy.dma_chan = NULL; |
1750 | |
1751 | if (tp->ucopy.pinned_list) { |
1752 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); |
1753 | tp->ucopy.pinned_list = NULL; |
1754 | } |
1755 | #endif |
1756 | |
1757 | /* According to UNIX98, msg_name/msg_namelen are ignored |
1758 | * on a connected socket. I was just happy when I found this 8) --ANK |
1759 | */ |
1760 | |
1761 | /* Clean up data we have read: This will do ACK frames. */ |
1762 | tcp_cleanup_rbuf(sk, copied); |
1763 | |
1764 | TCP_CHECK_TIMER(sk); |
1765 | release_sock(sk); |
1766 | return copied; |
1767 | |
1768 | out: |
1769 | TCP_CHECK_TIMER(sk); |
1770 | release_sock(sk); |
1771 | return err; |
1772 | |
1773 | recv_urg: |
1774 | err = tcp_recv_urg(sk, msg, len, flags); |
1775 | goto out; |
1776 | } |
1777 | |
1778 | void tcp_set_state(struct sock *sk, int state) |
1779 | { |
1780 | int oldstate = sk->sk_state; |
1781 | |
1782 | switch (state) { |
1783 | case TCP_ESTABLISHED: |
1784 | if (oldstate != TCP_ESTABLISHED) |
1785 | TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
1786 | break; |
1787 | |
1788 | case TCP_CLOSE: |
1789 | if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) |
1790 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); |
1791 | |
1792 | sk->sk_prot->unhash(sk); |
1793 | if (inet_csk(sk)->icsk_bind_hash && |
1794 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) |
1795 | inet_put_port(sk); |
1796 | /* fall through */ |
1797 | default: |
1798 | if (oldstate == TCP_ESTABLISHED) |
1799 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
1800 | } |
1801 | |
1802 | /* Change state AFTER socket is unhashed to avoid closed |
1803 | * socket sitting in hash tables. |
1804 | */ |
1805 | sk->sk_state = state; |
1806 | |
1807 | #ifdef STATE_TRACE |
1808 | SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); |
1809 | #endif |
1810 | } |
1811 | EXPORT_SYMBOL_GPL(tcp_set_state); |
1812 | |
1813 | /* |
1814 | * State processing on a close. This implements the state shift for |
1815 | * sending our FIN frame. Note that we only send a FIN for some |
1816 | * states. A shutdown() may have already sent the FIN, or we may be |
1817 | * closed. |
1818 | */ |
1819 | |
1820 | static const unsigned char new_state[16] = { |
1821 | /* current state: new state: action: */ |
1822 | /* (Invalid) */ TCP_CLOSE, |
1823 | /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, |
1824 | /* TCP_SYN_SENT */ TCP_CLOSE, |
1825 | /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, |
1826 | /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, |
1827 | /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, |
1828 | /* TCP_TIME_WAIT */ TCP_CLOSE, |
1829 | /* TCP_CLOSE */ TCP_CLOSE, |
1830 | /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, |
1831 | /* TCP_LAST_ACK */ TCP_LAST_ACK, |
1832 | /* TCP_LISTEN */ TCP_CLOSE, |
1833 | /* TCP_CLOSING */ TCP_CLOSING, |
1834 | }; |
1835 | |
1836 | static int tcp_close_state(struct sock *sk) |
1837 | { |
1838 | int next = (int)new_state[sk->sk_state]; |
1839 | int ns = next & TCP_STATE_MASK; |
1840 | |
1841 | tcp_set_state(sk, ns); |
1842 | |
1843 | return next & TCP_ACTION_FIN; |
1844 | } |
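/* Example (illustrative sketch, not part of the original source): each
 * new_state[] entry packs the successor state together with an optional
 * TCP_ACTION_FIN flag, so a single lookup both advances the state machine
 * and says whether a FIN is owed. Closing an ESTABLISHED socket yields
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN: tcp_close_state() moves the socket to
 * FIN_WAIT1 and returns non-zero, and the caller sends the FIN. Closing
 * a SYN_SENT socket yields plain TCP_CLOSE, so no FIN is sent.
 */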
1845 | |
1846 | /* |
1847 | * Shut down the sending side of a connection. Much like close except |
1848 | * that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD). |
1849 | */ |
1850 | |
1851 | void tcp_shutdown(struct sock *sk, int how) |
1852 | { |
1853 | /* We need to grab some memory, and put together a FIN, |
1854 | * and then put it into the queue to be sent. |
1855 | * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92. |
1856 | */ |
1857 | if (!(how & SEND_SHUTDOWN)) |
1858 | return; |
1859 | |
1860 | /* If we've already sent a FIN, or it's a closed state, skip this. */ |
1861 | if ((1 << sk->sk_state) & |
1862 | (TCPF_ESTABLISHED | TCPF_SYN_SENT | |
1863 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { |
1864 | /* Clear out any half completed packets. FIN if needed. */ |
1865 | if (tcp_close_state(sk)) |
1866 | tcp_send_fin(sk); |
1867 | } |
1868 | } |
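/* Userspace view (an illustrative sketch; consume() is a placeholder):
 * shutdown(fd, SHUT_WR) reaches this function as how == SEND_SHUTDOWN
 * and, from ESTABLISHED, queues a FIN while the receive side stays open:
 *
 *	write(fd, req, req_len);		    send the request
 *	shutdown(fd, SHUT_WR);			    FIN: no more data from us
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);		    peer may still send
 */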
1869 | |
1870 | void tcp_close(struct sock *sk, long timeout) |
1871 | { |
1872 | struct sk_buff *skb; |
1873 | int data_was_unread = 0; |
1874 | int state; |
1875 | |
1876 | lock_sock(sk); |
1877 | sk->sk_shutdown = SHUTDOWN_MASK; |
1878 | |
1879 | if (sk->sk_state == TCP_LISTEN) { |
1880 | tcp_set_state(sk, TCP_CLOSE); |
1881 | |
1882 | /* Special case. */ |
1883 | inet_csk_listen_stop(sk); |
1884 | |
1885 | goto adjudge_to_death; |
1886 | } |
1887 | |
1888 | /* We need to flush the recv. buffs. We do this only on the |
1889 | * descriptor close, not protocol-sourced closes, because the |
1890 | * reader process may not have drained the data yet! |
1891 | */ |
1892 | while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { |
1893 | u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - |
1894 | tcp_hdr(skb)->fin; |
1895 | data_was_unread += len; |
1896 | __kfree_skb(skb); |
1897 | } |
1898 | |
1899 | sk_mem_reclaim(sk); |
1900 | |
1901 | /* As outlined in RFC 2525, section 2.17, we send a RST here because |
1902 | * data was lost. To witness the awful effects of the old behavior of |
1903 | * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk |
1904 | * GET in an FTP client, suspend the process, wait for the client to |
1905 | * advertise a zero window, then kill -9 the FTP client, wheee... |
1906 | * Note: timeout is always zero in such a case. |
1907 | */ |
1908 | if (data_was_unread) { |
1909 | /* Unread data was tossed, zap the connection. */ |
1910 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); |
1911 | tcp_set_state(sk, TCP_CLOSE); |
1912 | tcp_send_active_reset(sk, sk->sk_allocation); |
1913 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { |
1914 | /* Check zero linger _after_ checking for unread data. */ |
1915 | sk->sk_prot->disconnect(sk, 0); |
1916 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); |
1917 | } else if (tcp_close_state(sk)) { |
1918 | /* We FIN if the application ate all the data before |
1919 | * zapping the connection. |
1920 | */ |
1921 | |
1922 | /* RED-PEN. Formally speaking, we have broken the TCP state |
1923 | * machine. State transitions: |
1924 | * |
1925 | * TCP_ESTABLISHED -> TCP_FIN_WAIT1 |
1926 | * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) |
1927 | * TCP_CLOSE_WAIT -> TCP_LAST_ACK |
1928 | * |
1929 | * are legal only when FIN has been sent (i.e. in window), |
1930 | * rather than queued out of window. Purists blame. |
1931 | * |
1932 | * F.e. "RFC state" is ESTABLISHED, |
1933 | * if Linux state is FIN-WAIT-1, but FIN is still not sent. |
1934 | * |
1935 | * The visible deviations are that sometimes |
1936 | * we enter the time-wait state when it is not really required |
1937 | * (harmless), and do not send active resets when they are |
1938 | * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when |
1939 | * they look like CLOSING or LAST_ACK to Linux). |
1940 | * Probably, I missed some more holelets. |
1941 | * --ANK |
1942 | */ |
1943 | tcp_send_fin(sk); |
1944 | } |
1945 | |
1946 | sk_stream_wait_close(sk, timeout); |
1947 | |
1948 | adjudge_to_death: |
1949 | state = sk->sk_state; |
1950 | sock_hold(sk); |
1951 | sock_orphan(sk); |
1952 | |
1953 | /* It is the last release_sock in its life. It will remove backlog. */ |
1954 | release_sock(sk); |
1955 | |
1956 | |
1957 | /* Now socket is owned by kernel and we acquire BH lock |
1958 | * to finish close. No need to check for user refs. |
1959 | */ |
1960 | local_bh_disable(); |
1961 | bh_lock_sock(sk); |
1962 | WARN_ON(sock_owned_by_user(sk)); |
1963 | |
1964 | percpu_counter_inc(sk->sk_prot->orphan_count); |
1965 | |
1966 | /* Have we already been destroyed by a softirq or backlog? */ |
1967 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) |
1968 | goto out; |
1969 | |
1970 | /* This is a (useful) BSD violation of the RFC. There is a |
1971 | * problem with TCP as specified in that the other end could |
1972 | * keep a socket open forever with no application left at this end. |
1973 | * We use a 3 minute timeout (about the same as BSD) then kill |
1974 | * our end. If they send after that then tough - BUT: long enough |
1975 | * that we won't make the old 4*rto = almost no time - whoops |
1976 | * reset mistake. |
1977 | * |
1978 | * Nope, it was not a mistake. It is really desired behaviour, |
1979 | * e.g. on http servers, where such sockets are useless but |
1980 | * consume significant resources. Let's do it with a special |
1981 | * linger2 option. --ANK |
1982 | */ |
1983 | |
1984 | if (sk->sk_state == TCP_FIN_WAIT2) { |
1985 | struct tcp_sock *tp = tcp_sk(sk); |
1986 | if (tp->linger2 < 0) { |
1987 | tcp_set_state(sk, TCP_CLOSE); |
1988 | tcp_send_active_reset(sk, GFP_ATOMIC); |
1989 | NET_INC_STATS_BH(sock_net(sk), |
1990 | LINUX_MIB_TCPABORTONLINGER); |
1991 | } else { |
1992 | const int tmo = tcp_fin_time(sk); |
1993 | |
1994 | if (tmo > TCP_TIMEWAIT_LEN) { |
1995 | inet_csk_reset_keepalive_timer(sk, |
1996 | tmo - TCP_TIMEWAIT_LEN); |
1997 | } else { |
1998 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); |
1999 | goto out; |
2000 | } |
2001 | } |
2002 | } |
2003 | if (sk->sk_state != TCP_CLOSE) { |
2004 | int orphan_count = percpu_counter_read_positive( |
2005 | sk->sk_prot->orphan_count); |
2006 | |
2007 | sk_mem_reclaim(sk); |
2008 | if (tcp_too_many_orphans(sk, orphan_count)) { |
2009 | if (net_ratelimit()) |
2010 | printk(KERN_INFO "TCP: too many orphaned " |
2011 | "sockets\n"); |
2012 | tcp_set_state(sk, TCP_CLOSE); |
2013 | tcp_send_active_reset(sk, GFP_ATOMIC); |
2014 | NET_INC_STATS_BH(sock_net(sk), |
2015 | LINUX_MIB_TCPABORTONMEMORY); |
2016 | } |
2017 | } |
2018 | |
2019 | if (sk->sk_state == TCP_CLOSE) |
2020 | inet_csk_destroy_sock(sk); |
2021 | /* Otherwise, socket is reprieved until protocol close. */ |
2022 | |
2023 | out: |
2024 | bh_unlock_sock(sk); |
2025 | local_bh_enable(); |
2026 | sock_put(sk); |
2027 | } |
2028 | |
2029 | /* These states need RST on ABORT according to RFC793 */ |
2030 | |
2031 | static inline int tcp_need_reset(int state) |
2032 | { |
2033 | return (1 << state) & |
2034 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | |
2035 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV); |
2036 | } |
2037 | |
2038 | int tcp_disconnect(struct sock *sk, int flags) |
2039 | { |
2040 | struct inet_sock *inet = inet_sk(sk); |
2041 | struct inet_connection_sock *icsk = inet_csk(sk); |
2042 | struct tcp_sock *tp = tcp_sk(sk); |
2043 | int err = 0; |
2044 | int old_state = sk->sk_state; |
2045 | |
2046 | if (old_state != TCP_CLOSE) |
2047 | tcp_set_state(sk, TCP_CLOSE); |
2048 | |
2049 | /* ABORT function of RFC793 */ |
2050 | if (old_state == TCP_LISTEN) { |
2051 | inet_csk_listen_stop(sk); |
2052 | } else if (tcp_need_reset(old_state) || |
2053 | (tp->snd_nxt != tp->write_seq && |
2054 | (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { |
2055 | /* The last check adjusts for the discrepancy between Linux |
2056 | * and RFC states. |
2057 | */ |
2058 | tcp_send_active_reset(sk, gfp_any()); |
2059 | sk->sk_err = ECONNRESET; |
2060 | } else if (old_state == TCP_SYN_SENT) |
2061 | sk->sk_err = ECONNRESET; |
2062 | |
2063 | tcp_clear_xmit_timers(sk); |
2064 | __skb_queue_purge(&sk->sk_receive_queue); |
2065 | tcp_write_queue_purge(sk); |
2066 | __skb_queue_purge(&tp->out_of_order_queue); |
2067 | #ifdef CONFIG_NET_DMA |
2068 | __skb_queue_purge(&sk->sk_async_wait_queue); |
2069 | #endif |
2070 | |
2071 | inet->inet_dport = 0; |
2072 | |
2073 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) |
2074 | inet_reset_saddr(sk); |
2075 | |
2076 | sk->sk_shutdown = 0; |
2077 | sock_reset_flag(sk, SOCK_DONE); |
2078 | tp->srtt = 0; |
2079 | if ((tp->write_seq += tp->max_window + 2) == 0) |
2080 | tp->write_seq = 1; |
2081 | icsk->icsk_backoff = 0; |
2082 | tp->snd_cwnd = 2; |
2083 | icsk->icsk_probes_out = 0; |
2084 | tp->packets_out = 0; |
2085 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
2086 | tp->snd_cwnd_cnt = 0; |
2087 | tp->bytes_acked = 0; |
2088 | tp->window_clamp = 0; |
2089 | tcp_set_ca_state(sk, TCP_CA_Open); |
2090 | tcp_clear_retrans(tp); |
2091 | inet_csk_delack_init(sk); |
2092 | tcp_init_send_head(sk); |
2093 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); |
2094 | __sk_dst_reset(sk); |
2095 | |
2096 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
2097 | |
2098 | sk->sk_error_report(sk); |
2099 | return err; |
2100 | } |
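/* Userspace note (illustrative): this path is reached via connect() with
 * an AF_UNSPEC address, which aborts any association and returns the
 * socket to an unconnected state:
 *
 *	struct sockaddr sa;
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_family = AF_UNSPEC;
 *	connect(fd, &sa, sizeof(sa));
 */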
2101 | |
2102 | /* |
2103 | * Socket option code for TCP. |
2104 | */ |
2105 | static int do_tcp_setsockopt(struct sock *sk, int level, |
2106 | int optname, char __user *optval, unsigned int optlen) |
2107 | { |
2108 | struct tcp_sock *tp = tcp_sk(sk); |
2109 | struct inet_connection_sock *icsk = inet_csk(sk); |
2110 | int val; |
2111 | int err = 0; |
2112 | |
2113 | /* These are data/string values, all the others are ints */ |
2114 | switch (optname) { |
2115 | case TCP_CONGESTION: { |
2116 | char name[TCP_CA_NAME_MAX]; |
2117 | |
2118 | if (optlen < 1) |
2119 | return -EINVAL; |
2120 | |
2121 | val = strncpy_from_user(name, optval, |
2122 | min_t(long, TCP_CA_NAME_MAX-1, optlen)); |
2123 | if (val < 0) |
2124 | return -EFAULT; |
2125 | name[val] = 0; |
2126 | |
2127 | lock_sock(sk); |
2128 | err = tcp_set_congestion_control(sk, name); |
2129 | release_sock(sk); |
2130 | return err; |
2131 | } |
2132 | case TCP_COOKIE_TRANSACTIONS: { |
2133 | struct tcp_cookie_transactions ctd; |
2134 | struct tcp_cookie_values *cvp = NULL; |
2135 | |
2136 | if (sizeof(ctd) > optlen) |
2137 | return -EINVAL; |
2138 | if (copy_from_user(&ctd, optval, sizeof(ctd))) |
2139 | return -EFAULT; |
2140 | |
2141 | if (ctd.tcpct_used > sizeof(ctd.tcpct_value) || |
2142 | ctd.tcpct_s_data_desired > TCP_MSS_DESIRED) |
2143 | return -EINVAL; |
2144 | |
2145 | if (ctd.tcpct_cookie_desired == 0) { |
2146 | /* default to global value */ |
2147 | } else if ((0x1 & ctd.tcpct_cookie_desired) || |
2148 | ctd.tcpct_cookie_desired > TCP_COOKIE_MAX || |
2149 | ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) { |
2150 | return -EINVAL; |
2151 | } |
2152 | |
2153 | if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) { |
2154 | /* Supersedes all other values */ |
2155 | lock_sock(sk); |
2156 | if (tp->cookie_values != NULL) { |
2157 | kref_put(&tp->cookie_values->kref, |
2158 | tcp_cookie_values_release); |
2159 | tp->cookie_values = NULL; |
2160 | } |
2161 | tp->rx_opt.cookie_in_always = 0; /* false */ |
2162 | tp->rx_opt.cookie_out_never = 1; /* true */ |
2163 | release_sock(sk); |
2164 | return err; |
2165 | } |
2166 | |
2167 | /* Allocate ancillary memory before locking. |
2168 | */ |
2169 | if (ctd.tcpct_used > 0 || |
2170 | (tp->cookie_values == NULL && |
2171 | (sysctl_tcp_cookie_size > 0 || |
2172 | ctd.tcpct_cookie_desired > 0 || |
2173 | ctd.tcpct_s_data_desired > 0))) { |
2174 | cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used, |
2175 | GFP_KERNEL); |
2176 | if (cvp == NULL) |
2177 | return -ENOMEM; |
2178 | } |
2179 | lock_sock(sk); |
2180 | tp->rx_opt.cookie_in_always = |
2181 | (TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags); |
2182 | tp->rx_opt.cookie_out_never = 0; /* false */ |
2183 | |
2184 | if (tp->cookie_values != NULL) { |
2185 | if (cvp != NULL) { |
2186 | /* Changed values are recorded by a changed |
2187 | * pointer, ensuring the cookie will differ, |
2188 | * without separately hashing each value later. |
2189 | */ |
2190 | kref_put(&tp->cookie_values->kref, |
2191 | tcp_cookie_values_release); |
2192 | kref_init(&cvp->kref); |
2193 | tp->cookie_values = cvp; |
2194 | } else { |
2195 | cvp = tp->cookie_values; |
2196 | } |
2197 | } |
2198 | if (cvp != NULL) { |
2199 | cvp->cookie_desired = ctd.tcpct_cookie_desired; |
2200 | |
2201 | if (ctd.tcpct_used > 0) { |
2202 | memcpy(cvp->s_data_payload, ctd.tcpct_value, |
2203 | ctd.tcpct_used); |
2204 | cvp->s_data_desired = ctd.tcpct_used; |
2205 | cvp->s_data_constant = 1; /* true */ |
2206 | } else { |
2207 | /* No constant payload data. */ |
2208 | cvp->s_data_desired = ctd.tcpct_s_data_desired; |
2209 | cvp->s_data_constant = 0; /* false */ |
2210 | } |
2211 | } |
2212 | release_sock(sk); |
2213 | return err; |
2214 | } |
2215 | default: |
2216 | /* not a data/string value; handled below */ |
2217 | break; |
2218 | } |
2219 | |
2220 | if (optlen < sizeof(int)) |
2221 | return -EINVAL; |
2222 | |
2223 | if (get_user(val, (int __user *)optval)) |
2224 | return -EFAULT; |
2225 | |
2226 | lock_sock(sk); |
2227 | |
2228 | switch (optname) { |
2229 | case TCP_MAXSEG: |
2230 | /* Values greater than interface MTU won't take effect. However, |
2231 | * at the point when this call is done we typically don't yet |
2232 | * know which interface is going to be used. */ |
2233 | if (val < 8 || val > MAX_TCP_WINDOW) { |
2234 | err = -EINVAL; |
2235 | break; |
2236 | } |
2237 | tp->rx_opt.user_mss = val; |
2238 | break; |
2239 | |
2240 | case TCP_NODELAY: |
2241 | if (val) { |
2242 | /* TCP_NODELAY is weaker than TCP_CORK, so that |
2243 | * this option on corked socket is remembered, but |
2244 | * it is not activated until cork is cleared. |
2245 | * |
2246 | * However, when TCP_NODELAY is set we make |
2247 | * an explicit push, which overrides even TCP_CORK |
2248 | * for currently queued segments. |
2249 | */ |
2250 | tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; |
2251 | tcp_push_pending_frames(sk); |
2252 | } else { |
2253 | tp->nonagle &= ~TCP_NAGLE_OFF; |
2254 | } |
2255 | break; |
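/* Example (userspace, illustrative): disabling Nagle for a latency-bound
 * writer, so each small write() is pushed immediately:
 *
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
 */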
2256 | |
2257 | case TCP_THIN_LINEAR_TIMEOUTS: |
2258 | if (val < 0 || val > 1) |
2259 | err = -EINVAL; |
2260 | else |
2261 | tp->thin_lto = val; |
2262 | break; |
2263 | |
2264 | case TCP_THIN_DUPACK: |
2265 | if (val < 0 || val > 1) |
2266 | err = -EINVAL; |
2267 | else |
2268 | tp->thin_dupack = val; |
2269 | break; |
2270 | |
2271 | case TCP_CORK: |
2272 | /* When set, non-full frames are always queued. |
2273 | * Later the user clears this option and we transmit |
2274 | * any pending partial frames in the queue. This is |
2275 | * meant to be used alongside sendfile() to get properly |
2276 | * filled frames when the user (for example) must write |
2277 | * out headers with a write() call first and then use |
2278 | * sendfile to send out the data parts. |
2279 | * |
2280 | * TCP_CORK can be set together with TCP_NODELAY and it is |
2281 | * stronger than TCP_NODELAY. |
2282 | */ |
2283 | if (val) { |
2284 | tp->nonagle |= TCP_NAGLE_CORK; |
2285 | } else { |
2286 | tp->nonagle &= ~TCP_NAGLE_CORK; |
2287 | if (tp->nonagle&TCP_NAGLE_OFF) |
2288 | tp->nonagle |= TCP_NAGLE_PUSH; |
2289 | tcp_push_pending_frames(sk); |
2290 | } |
2291 | break; |
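/* Example (userspace, illustrative): the header-plus-sendfile() pattern
 * described above; hdr, hdr_len, filefd and filesz are placeholders:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, hdr, hdr_len);		    queued, frame kept open
 *	sendfile(fd, filefd, NULL, filesz);	    body fills full frames
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *						    uncork: flush partial frame
 */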
2292 | |
2293 | case TCP_KEEPIDLE: |
2294 | if (val < 1 || val > MAX_TCP_KEEPIDLE) |
2295 | err = -EINVAL; |
2296 | else { |
2297 | tp->keepalive_time = val * HZ; |
2298 | if (sock_flag(sk, SOCK_KEEPOPEN) && |
2299 | !((1 << sk->sk_state) & |
2300 | (TCPF_CLOSE | TCPF_LISTEN))) { |
2301 | __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp; |
2302 | if (tp->keepalive_time > elapsed) |
2303 | elapsed = tp->keepalive_time - elapsed; |
2304 | else |
2305 | elapsed = 0; |
2306 | inet_csk_reset_keepalive_timer(sk, elapsed); |
2307 | } |
2308 | } |
2309 | break; |
2310 | case TCP_KEEPINTVL: |
2311 | if (val < 1 || val > MAX_TCP_KEEPINTVL) |
2312 | err = -EINVAL; |
2313 | else |
2314 | tp->keepalive_intvl = val * HZ; |
2315 | break; |
2316 | case TCP_KEEPCNT: |
2317 | if (val < 1 || val > MAX_TCP_KEEPCNT) |
2318 | err = -EINVAL; |
2319 | else |
2320 | tp->keepalive_probes = val; |
2321 | break; |
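/* Example (userspace, illustrative): with SO_KEEPALIVE enabled, the three
 * options above bound dead-peer detection at roughly idle + intvl * cnt
 * seconds, here 60 + 10 * 5 = 110:
 *
 *	int yes = 1, idle = 60, intvl = 10, cnt = 5;
 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
 *	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
 */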
2322 | case TCP_SYNCNT: |
2323 | if (val < 1 || val > MAX_TCP_SYNCNT) |
2324 | err = -EINVAL; |
2325 | else |
2326 | icsk->icsk_syn_retries = val; |
2327 | break; |
2328 | |
2329 | case TCP_LINGER2: |
2330 | if (val < 0) |
2331 | tp->linger2 = -1; |
2332 | else if (val > sysctl_tcp_fin_timeout / HZ) |
2333 | tp->linger2 = 0; |
2334 | else |
2335 | tp->linger2 = val * HZ; |
2336 | break; |
2337 | |
2338 | case TCP_DEFER_ACCEPT: |
2339 | /* Translate value in seconds to number of retransmits */ |
2340 | icsk->icsk_accept_queue.rskq_defer_accept = |
2341 | secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, |
2342 | TCP_RTO_MAX / HZ); |
2343 | break; |
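/* Example (userspace, illustrative): on a listening socket, wake the
 * accepting process only once data has arrived (or about 5 s elapsed):
 *
 *	int secs = 5;
 *	setsockopt(lfd, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
 */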
2344 | |
2345 | case TCP_WINDOW_CLAMP: |
2346 | if (!val) { |
2347 | if (sk->sk_state != TCP_CLOSE) { |
2348 | err = -EINVAL; |
2349 | break; |
2350 | } |
2351 | tp->window_clamp = 0; |
2352 | } else |
2353 | tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? |
2354 | SOCK_MIN_RCVBUF / 2 : val; |
2355 | break; |
2356 | |
2357 | case TCP_QUICKACK: |
2358 | if (!val) { |
2359 | icsk->icsk_ack.pingpong = 1; |
2360 | } else { |
2361 | icsk->icsk_ack.pingpong = 0; |
2362 | if ((1 << sk->sk_state) & |
2363 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && |
2364 | inet_csk_ack_scheduled(sk)) { |
2365 | icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; |
2366 | tcp_cleanup_rbuf(sk, 1); |
2367 | if (!(val & 1)) |
2368 | icsk->icsk_ack.pingpong = 1; |
2369 | } |
2370 | } |
2371 | break; |
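/* Example (userspace, illustrative): TCP_QUICKACK is not sticky; it only
 * affects acks scheduled around the call, so callers typically re-arm it
 * at request/response boundaries:
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_QUICKACK, &one, sizeof(one));
 */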
2372 | |
2373 | #ifdef CONFIG_TCP_MD5SIG |
2374 | case TCP_MD5SIG: |
2375 | /* Read the IP->Key mappings from userspace */ |
2376 | err = tp->af_specific->md5_parse(sk, optval, optlen); |
2377 | break; |
2378 | #endif |
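/* Example (userspace, illustrative): installing an RFC 2385 key for one
 * peer; peer_sin and secret are placeholders:
 *
 *	struct tcp_md5sig md5;
 *	memset(&md5, 0, sizeof(md5));
 *	memcpy(&md5.tcpm_addr, &peer_sin, sizeof(peer_sin));
 *	md5.tcpm_keylen = strlen(secret);
 *	memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */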
2379 | |
2380 | default: |
2381 | err = -ENOPROTOOPT; |
2382 | break; |
2383 | } |
2384 | |
2385 | release_sock(sk); |
2386 | return err; |
2387 | } |
2388 | |
2389 | int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, |
2390 | unsigned int optlen) |
2391 | { |
2392 | struct inet_connection_sock *icsk = inet_csk(sk); |
2393 | |
2394 | if (level != SOL_TCP) |
2395 | return icsk->icsk_af_ops->setsockopt(sk, level, optname, |
2396 | optval, optlen); |
2397 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); |
2398 | } |
2399 | |
2400 | #ifdef CONFIG_COMPAT |
2401 | int compat_tcp_setsockopt(struct sock *sk, int level, int optname, |
2402 | char __user *optval, unsigned int optlen) |
2403 | { |
2404 | if (level != SOL_TCP) |
2405 | return inet_csk_compat_setsockopt(sk, level, optname, |
2406 | optval, optlen); |
2407 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); |
2408 | } |
2409 | |
2410 | EXPORT_SYMBOL(compat_tcp_setsockopt); |
2411 | #endif |
2412 | |
2413 | /* Return information about state of tcp endpoint in API format. */ |
2414 | void tcp_get_info(struct sock *sk, struct tcp_info *info) |
2415 | { |
2416 | struct tcp_sock *tp = tcp_sk(sk); |
2417 | const struct inet_connection_sock *icsk = inet_csk(sk); |
2418 | u32 now = tcp_time_stamp; |
2419 | |
2420 | memset(info, 0, sizeof(*info)); |
2421 | |
2422 | info->tcpi_state = sk->sk_state; |
2423 | info->tcpi_ca_state = icsk->icsk_ca_state; |
2424 | info->tcpi_retransmits = icsk->icsk_retransmits; |
2425 | info->tcpi_probes = icsk->icsk_probes_out; |
2426 | info->tcpi_backoff = icsk->icsk_backoff; |
2427 | |
2428 | if (tp->rx_opt.tstamp_ok) |
2429 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; |
2430 | if (tcp_is_sack(tp)) |
2431 | info->tcpi_options |= TCPI_OPT_SACK; |
2432 | if (tp->rx_opt.wscale_ok) { |
2433 | info->tcpi_options |= TCPI_OPT_WSCALE; |
2434 | info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; |
2435 | info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; |
2436 | } |
2437 | |
2438 | if (tp->ecn_flags&TCP_ECN_OK) |
2439 | info->tcpi_options |= TCPI_OPT_ECN; |
2440 | |
2441 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); |
2442 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); |
2443 | info->tcpi_snd_mss = tp->mss_cache; |
2444 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; |
2445 | |
2446 | if (sk->sk_state == TCP_LISTEN) { |
2447 | info->tcpi_unacked = sk->sk_ack_backlog; |
2448 | info->tcpi_sacked = sk->sk_max_ack_backlog; |
2449 | } else { |
2450 | info->tcpi_unacked = tp->packets_out; |
2451 | info->tcpi_sacked = tp->sacked_out; |
2452 | } |
2453 | info->tcpi_lost = tp->lost_out; |
2454 | info->tcpi_retrans = tp->retrans_out; |
2455 | info->tcpi_fackets = tp->fackets_out; |
2456 | |
2457 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); |
2458 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); |
2459 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); |
2460 | |
2461 | info->tcpi_pmtu = icsk->icsk_pmtu_cookie; |
2462 | info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; |
2463 | info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3; |
2464 | info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2; |
2465 | info->tcpi_snd_ssthresh = tp->snd_ssthresh; |
2466 | info->tcpi_snd_cwnd = tp->snd_cwnd; |
2467 | info->tcpi_advmss = tp->advmss; |
2468 | info->tcpi_reordering = tp->reordering; |
2469 | |
2470 | info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; |
2471 | info->tcpi_rcv_space = tp->rcvq_space.space; |
2472 | |
2473 | info->tcpi_total_retrans = tp->total_retrans; |
2474 | } |
2475 | |
2476 | EXPORT_SYMBOL_GPL(tcp_get_info); |
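/* Example (userspace, illustrative): the same structure is exposed through
 * getsockopt(TCP_INFO); tcpi_rtt and tcpi_rttvar are in microseconds:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %uus cwnd %u retrans %u\n",
 *		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
 */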
2477 | |
2478 | static int do_tcp_getsockopt(struct sock *sk, int level, |
2479 | int optname, char __user *optval, int __user *optlen) |
2480 | { |
2481 | struct inet_connection_sock *icsk = inet_csk(sk); |
2482 | struct tcp_sock *tp = tcp_sk(sk); |
2483 | int val, len; |
2484 | |
2485 | if (get_user(len, optlen)) |
2486 | return -EFAULT; |
2487 | |
2488 | len = min_t(unsigned int, len, sizeof(int)); |
2489 | |
2490 | if (len < 0) |
2491 | return -EINVAL; |
2492 | |
2493 | switch (optname) { |
2494 | case TCP_MAXSEG: |
2495 | val = tp->mss_cache; |
2496 | if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) |
2497 | val = tp->rx_opt.user_mss; |
2498 | break; |
2499 | case TCP_NODELAY: |
2500 | val = !!(tp->nonagle&TCP_NAGLE_OFF); |
2501 | break; |
2502 | case TCP_CORK: |
2503 | val = !!(tp->nonagle&TCP_NAGLE_CORK); |
2504 | break; |
2505 | case TCP_KEEPIDLE: |
2506 | val = keepalive_time_when(tp) / HZ; |
2507 | break; |
2508 | case TCP_KEEPINTVL: |
2509 | val = keepalive_intvl_when(tp) / HZ; |
2510 | break; |
2511 | case TCP_KEEPCNT: |
2512 | val = keepalive_probes(tp); |
2513 | break; |
2514 | case TCP_SYNCNT: |
2515 | val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
2516 | break; |
2517 | case TCP_LINGER2: |
2518 | val = tp->linger2; |
2519 | if (val >= 0) |
2520 | val = (val ? : sysctl_tcp_fin_timeout) / HZ; |
2521 | break; |
2522 | case TCP_DEFER_ACCEPT: |
2523 | val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, |
2524 | TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); |
2525 | break; |
2526 | case TCP_WINDOW_CLAMP: |
2527 | val = tp->window_clamp; |
2528 | break; |
2529 | case TCP_INFO: { |
2530 | struct tcp_info info; |
2531 | |
2532 | if (get_user(len, optlen)) |
2533 | return -EFAULT; |
2534 | |
2535 | tcp_get_info(sk, &info); |
2536 | |
2537 | len = min_t(unsigned int, len, sizeof(info)); |
2538 | if (put_user(len, optlen)) |
2539 | return -EFAULT; |
2540 | if (copy_to_user(optval, &info, len)) |
2541 | return -EFAULT; |
2542 | return 0; |
2543 | } |
2544 | case TCP_QUICKACK: |
2545 | val = !icsk->icsk_ack.pingpong; |
2546 | break; |
2547 | |
2548 | case TCP_CONGESTION: |
2549 | if (get_user(len, optlen)) |
2550 | return -EFAULT; |
2551 | len = min_t(unsigned int, len, TCP_CA_NAME_MAX); |
2552 | if (put_user(len, optlen)) |
2553 | return -EFAULT; |
2554 | if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) |
2555 | return -EFAULT; |
2556 | return 0; |
2557 | |
2558 | case TCP_COOKIE_TRANSACTIONS: { |
2559 | struct tcp_cookie_transactions ctd; |
2560 | struct tcp_cookie_values *cvp = tp->cookie_values; |
2561 | |
2562 | if (get_user(len, optlen)) |
2563 | return -EFAULT; |
2564 | if (len < sizeof(ctd)) |
2565 | return -EINVAL; |
2566 | |
2567 | memset(&ctd, 0, sizeof(ctd)); |
2568 | ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ? |
2569 | TCP_COOKIE_IN_ALWAYS : 0) |
2570 | | (tp->rx_opt.cookie_out_never ? |
2571 | TCP_COOKIE_OUT_NEVER : 0); |
2572 | |
2573 | if (cvp != NULL) { |
2574 | ctd.tcpct_flags |= (cvp->s_data_in ? |
2575 | TCP_S_DATA_IN : 0) |
2576 | | (cvp->s_data_out ? |
2577 | TCP_S_DATA_OUT : 0); |
2578 | |
2579 | ctd.tcpct_cookie_desired = cvp->cookie_desired; |
2580 | ctd.tcpct_s_data_desired = cvp->s_data_desired; |
2581 | |
2582 | memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0], |
2583 | cvp->cookie_pair_size); |
2584 | ctd.tcpct_used = cvp->cookie_pair_size; |
2585 | } |
2586 | |
2587 | if (put_user(sizeof(ctd), optlen)) |
2588 | return -EFAULT; |
2589 | if (copy_to_user(optval, &ctd, sizeof(ctd))) |
2590 | return -EFAULT; |
2591 | return 0; |
2592 | } |
2593 | default: |
2594 | return -ENOPROTOOPT; |
2595 | } |
2596 | |
2597 | if (put_user(len, optlen)) |
2598 | return -EFAULT; |
2599 | if (copy_to_user(optval, &val, len)) |
2600 | return -EFAULT; |
2601 | return 0; |
2602 | } |
2603 | |
2604 | int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, |
2605 | int __user *optlen) |
2606 | { |
2607 | struct inet_connection_sock *icsk = inet_csk(sk); |
2608 | |
2609 | if (level != SOL_TCP) |
2610 | return icsk->icsk_af_ops->getsockopt(sk, level, optname, |
2611 | optval, optlen); |
2612 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); |
2613 | } |
2614 | |
2615 | #ifdef CONFIG_COMPAT |
2616 | int compat_tcp_getsockopt(struct sock *sk, int level, int optname, |
2617 | char __user *optval, int __user *optlen) |
2618 | { |
2619 | if (level != SOL_TCP) |
2620 | return inet_csk_compat_getsockopt(sk, level, optname, |
2621 | optval, optlen); |
2622 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); |
2623 | } |
2624 | |
2625 | EXPORT_SYMBOL(compat_tcp_getsockopt); |
2626 | #endif |
2627 | |
2628 | struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) |
2629 | { |
2630 | struct sk_buff *segs = ERR_PTR(-EINVAL); |
2631 | struct tcphdr *th; |
2632 | unsigned thlen; |
2633 | unsigned int seq; |
2634 | __be32 delta; |
2635 | unsigned int oldlen; |
2636 | unsigned int mss; |
2637 | |
2638 | if (!pskb_may_pull(skb, sizeof(*th))) |
2639 | goto out; |
2640 | |
2641 | th = tcp_hdr(skb); |
2642 | thlen = th->doff * 4; |
2643 | if (thlen < sizeof(*th)) |
2644 | goto out; |
2645 | |
2646 | if (!pskb_may_pull(skb, thlen)) |
2647 | goto out; |
2648 | |
2649 | oldlen = (u16)~skb->len; |
2650 | __skb_pull(skb, thlen); |
2651 | |
2652 | mss = skb_shinfo(skb)->gso_size; |
2653 | if (unlikely(skb->len <= mss)) |
2654 | goto out; |
2655 | |
2656 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { |
2657 | /* Packet is from an untrusted source, reset gso_segs. */ |
2658 | int type = skb_shinfo(skb)->gso_type; |
2659 | |
2660 | if (unlikely(type & |
2661 | ~(SKB_GSO_TCPV4 | |
2662 | SKB_GSO_DODGY | |
2663 | SKB_GSO_TCP_ECN | |
2664 | SKB_GSO_TCPV6 | |
2665 | 0) || |
2666 | !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) |
2667 | goto out; |
2668 | |
2669 | skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); |
2670 | |
2671 | segs = NULL; |
2672 | goto out; |
2673 | } |
2674 | |
2675 | segs = skb_segment(skb, features); |
2676 | if (IS_ERR(segs)) |
2677 | goto out; |
2678 | |
2679 | delta = htonl(oldlen + (thlen + mss)); |
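/* Explanatory note (RFC 1624-style incremental checksum update): oldlen
 * holds the 16-bit one's complement of the original skb->len, so adding
 * oldlen + (thlen + mss) to th->check in effect swaps the old length for
 * each full segment's new length in the pseudo-header sum, without
 * summing the payload again.
 */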
2680 | |
2681 | skb = segs; |
2682 | th = tcp_hdr(skb); |
2683 | seq = ntohl(th->seq); |
2684 | |
2685 | do { |
2686 | th->fin = th->psh = 0; |
2687 | |
2688 | th->check = ~csum_fold((__force __wsum)((__force u32)th->check + |
2689 | (__force u32)delta)); |
2690 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
2691 | th->check = |
2692 | csum_fold(csum_partial(skb_transport_header(skb), |
2693 | thlen, skb->csum)); |
2694 | |
2695 | seq += mss; |
2696 | skb = skb->next; |
2697 | th = tcp_hdr(skb); |
2698 | |
2699 | th->seq = htonl(seq); |
2700 | th->cwr = 0; |
2701 | } while (skb->next); |
2702 | |
2703 | delta = htonl(oldlen + (skb->tail - skb->transport_header) + |
2704 | skb->data_len); |
2705 | th->check = ~csum_fold((__force __wsum)((__force u32)th->check + |
2706 | (__force u32)delta)); |
2707 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
2708 | th->check = csum_fold(csum_partial(skb_transport_header(skb), |
2709 | thlen, skb->csum)); |
2710 | |
2711 | out: |
2712 | return segs; |
2713 | } |
2714 | EXPORT_SYMBOL(tcp_tso_segment); |
2715 | |
2716 | struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) |
2717 | { |
2718 | struct sk_buff **pp = NULL; |
2719 | struct sk_buff *p; |
2720 | struct tcphdr *th; |
2721 | struct tcphdr *th2; |
2722 | unsigned int len; |
2723 | unsigned int thlen; |
2724 | unsigned int flags; |
2725 | unsigned int mss = 1; |
2726 | unsigned int hlen; |
2727 | unsigned int off; |
2728 | int flush = 1; |
2729 | int i; |
2730 | |
2731 | off = skb_gro_offset(skb); |
2732 | hlen = off + sizeof(*th); |
2733 | th = skb_gro_header_fast(skb, off); |
2734 | if (skb_gro_header_hard(skb, hlen)) { |
2735 | th = skb_gro_header_slow(skb, hlen, off); |
2736 | if (unlikely(!th)) |
2737 | goto out; |
2738 | } |
2739 | |
2740 | thlen = th->doff * 4; |
2741 | if (thlen < sizeof(*th)) |
2742 | goto out; |
2743 | |
2744 | hlen = off + thlen; |
2745 | if (skb_gro_header_hard(skb, hlen)) { |
2746 | th = skb_gro_header_slow(skb, hlen, off); |
2747 | if (unlikely(!th)) |
2748 | goto out; |
2749 | } |
2750 | |
2751 | skb_gro_pull(skb, thlen); |
2752 | |
2753 | len = skb_gro_len(skb); |
2754 | flags = tcp_flag_word(th); |
2755 | |
2756 | for (; (p = *head); head = &p->next) { |
2757 | if (!NAPI_GRO_CB(p)->same_flow) |
2758 | continue; |
2759 | |
2760 | th2 = tcp_hdr(p); |
2761 | |
2762 | if (*(u32 *)&th->source ^ *(u32 *)&th2->source) { |
2763 | NAPI_GRO_CB(p)->same_flow = 0; |
2764 | continue; |
2765 | } |
2766 | |
2767 | goto found; |
2768 | } |
2769 | |
2770 | goto out_check_final; |
2771 | |
2772 | found: |
2773 | flush = NAPI_GRO_CB(p)->flush; |
2774 | flush |= flags & TCP_FLAG_CWR; |
2775 | flush |= (flags ^ tcp_flag_word(th2)) & |
2776 | ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); |
2777 | flush |= th->ack_seq ^ th2->ack_seq; |
2778 | for (i = sizeof(*th); i < thlen; i += 4) |
2779 | flush |= *(u32 *)((u8 *)th + i) ^ |
2780 | *(u32 *)((u8 *)th2 + i); |
2781 | |
2782 | mss = skb_shinfo(p)->gso_size; |
2783 | |
2784 | flush |= (len - 1) >= mss; |
2785 | flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); |
2786 | |
2787 | if (flush || skb_gro_receive(head, skb)) { |
2788 | mss = 1; |
2789 | goto out_check_final; |
2790 | } |
2791 | |
2792 | p = *head; |
2793 | th2 = tcp_hdr(p); |
2794 | tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); |
2795 | |
2796 | out_check_final: |
2797 | flush = len < mss; |
2798 | flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | |
2799 | TCP_FLAG_SYN | TCP_FLAG_FIN); |
2800 | |
2801 | if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) |
2802 | pp = head; |
2803 | |
2804 | out: |
2805 | NAPI_GRO_CB(skb)->flush |= flush; |
2806 | |
2807 | return pp; |
2808 | } |
2809 | EXPORT_SYMBOL(tcp_gro_receive); |
2810 | |
2811 | int tcp_gro_complete(struct sk_buff *skb) |
2812 | { |
2813 | struct tcphdr *th = tcp_hdr(skb); |
2814 | |
2815 | skb->csum_start = skb_transport_header(skb) - skb->head; |
2816 | skb->csum_offset = offsetof(struct tcphdr, check); |
2817 | skb->ip_summed = CHECKSUM_PARTIAL; |
2818 | |
2819 | skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; |
2820 | |
2821 | if (th->cwr) |
2822 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; |
2823 | |
2824 | return 0; |
2825 | } |
2826 | EXPORT_SYMBOL(tcp_gro_complete); |
2827 | |
2828 | #ifdef CONFIG_TCP_MD5SIG |
2829 | static unsigned long tcp_md5sig_users; |
2830 | static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool; |
2831 | static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); |
2832 | |
2833 | static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool) |
2834 | { |
2835 | int cpu; |
2836 | for_each_possible_cpu(cpu) { |
2837 | struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu); |
2838 | if (p) { |
2839 | if (p->md5_desc.tfm) |
2840 | crypto_free_hash(p->md5_desc.tfm); |
2841 | kfree(p); |
2842 | p = NULL; |
2843 | } |
2844 | } |
2845 | free_percpu(pool); |
2846 | } |
2847 | |
2848 | void tcp_free_md5sig_pool(void) |
2849 | { |
2850 | struct tcp_md5sig_pool * __percpu *pool = NULL; |
2851 | |
2852 | spin_lock_bh(&tcp_md5sig_pool_lock); |
2853 | if (--tcp_md5sig_users == 0) { |
2854 | pool = tcp_md5sig_pool; |
2855 | tcp_md5sig_pool = NULL; |
2856 | } |
2857 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
2858 | if (pool) |
2859 | __tcp_free_md5sig_pool(pool); |
2860 | } |
2861 | |
2862 | EXPORT_SYMBOL(tcp_free_md5sig_pool); |
2863 | |
2864 | static struct tcp_md5sig_pool * __percpu * |
2865 | __tcp_alloc_md5sig_pool(struct sock *sk) |
2866 | { |
2867 | int cpu; |
2868 | struct tcp_md5sig_pool * __percpu *pool; |
2869 | |
2870 | pool = alloc_percpu(struct tcp_md5sig_pool *); |
2871 | if (!pool) |
2872 | return NULL; |
2873 | |
2874 | for_each_possible_cpu(cpu) { |
2875 | struct tcp_md5sig_pool *p; |
2876 | struct crypto_hash *hash; |
2877 | |
2878 | p = kzalloc(sizeof(*p), sk->sk_allocation); |
2879 | if (!p) |
2880 | goto out_free; |
2881 | *per_cpu_ptr(pool, cpu) = p; |
2882 | |
2883 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); |
2884 | if (!hash || IS_ERR(hash)) |
2885 | goto out_free; |
2886 | |
2887 | p->md5_desc.tfm = hash; |
2888 | } |
2889 | return pool; |
2890 | out_free: |
2891 | __tcp_free_md5sig_pool(pool); |
2892 | return NULL; |
2893 | } |
2894 | |
2895 | struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk) |
2896 | { |
2897 | struct tcp_md5sig_pool * __percpu *pool; |
2898 | int alloc = 0; |
2899 | |
2900 | retry: |
2901 | spin_lock_bh(&tcp_md5sig_pool_lock); |
2902 | pool = tcp_md5sig_pool; |
2903 | if (tcp_md5sig_users++ == 0) { |
2904 | alloc = 1; |
2905 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
2906 | } else if (!pool) { |
2907 | tcp_md5sig_users--; |
2908 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
2909 | cpu_relax(); |
2910 | goto retry; |
2911 | } else |
2912 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
2913 | |
2914 | if (alloc) { |
2915 | /* we cannot hold spinlock here because this may sleep. */ |
2916 | struct tcp_md5sig_pool * __percpu *p; |
2917 | |
2918 | p = __tcp_alloc_md5sig_pool(sk); |
2919 | spin_lock_bh(&tcp_md5sig_pool_lock); |
2920 | if (!p) { |
2921 | tcp_md5sig_users--; |
2922 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
2923 | return NULL; |
2924 | } |
2925 | pool = tcp_md5sig_pool; |
2926 | if (pool) { |
2927 | /* oops, it has already been assigned. */ |
2928 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
2929 | __tcp_free_md5sig_pool(p); |
2930 | } else { |
2931 | tcp_md5sig_pool = pool = p; |
2932 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
2933 | } |
2934 | } |
2935 | return pool; |
2936 | } |
2937 | |
2938 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); |
2939 | |
2940 | struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) |
2941 | { |
2942 | struct tcp_md5sig_pool * __percpu *p; |
2943 | spin_lock_bh(&tcp_md5sig_pool_lock); |
2944 | p = tcp_md5sig_pool; |
2945 | if (p) |
2946 | tcp_md5sig_users++; |
2947 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
2948 | return (p ? *per_cpu_ptr(p, cpu) : NULL); |
2949 | } |
2950 | |
2951 | EXPORT_SYMBOL(__tcp_get_md5sig_pool); |
2952 | |
2953 | void __tcp_put_md5sig_pool(void) |
2954 | { |
2955 | tcp_free_md5sig_pool(); |
2956 | } |
2957 | |
2958 | EXPORT_SYMBOL(__tcp_put_md5sig_pool); |
2959 | |
2960 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, |
2961 | struct tcphdr *th) |
2962 | { |
2963 | struct scatterlist sg; |
2964 | int err; |
2965 | |
2966 | __sum16 old_checksum = th->check; |
2967 | th->check = 0; |
2968 | /* options aren't included in the hash */ |
2969 | sg_init_one(&sg, th, sizeof(struct tcphdr)); |
2970 | err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr)); |
2971 | th->check = old_checksum; |
2972 | return err; |
2973 | } |
2974 | |
2975 | EXPORT_SYMBOL(tcp_md5_hash_header); |
2976 | |
2977 | int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, |
2978 | struct sk_buff *skb, unsigned header_len) |
2979 | { |
2980 | struct scatterlist sg; |
2981 | const struct tcphdr *tp = tcp_hdr(skb); |
2982 | struct hash_desc *desc = &hp->md5_desc; |
2983 | unsigned i; |
2984 | const unsigned head_data_len = skb_headlen(skb) > header_len ? |
2985 | skb_headlen(skb) - header_len : 0; |
2986 | const struct skb_shared_info *shi = skb_shinfo(skb); |
2987 | |
2988 | sg_init_table(&sg, 1); |
2989 | |
2990 | sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); |
2991 | if (crypto_hash_update(desc, &sg, head_data_len)) |
2992 | return 1; |
2993 | |
2994 | for (i = 0; i < shi->nr_frags; ++i) { |
2995 | const struct skb_frag_struct *f = &shi->frags[i]; |
2996 | sg_set_page(&sg, f->page, f->size, f->page_offset); |
2997 | if (crypto_hash_update(desc, &sg, f->size)) |
2998 | return 1; |
2999 | } |
3000 | |
3001 | return 0; |
3002 | } |
3003 | |
3004 | EXPORT_SYMBOL(tcp_md5_hash_skb_data); |
3005 | |
3006 | int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key) |
3007 | { |
3008 | struct scatterlist sg; |
3009 | |
3010 | sg_init_one(&sg, key->key, key->keylen); |
3011 | return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); |
3012 | } |
3013 | |
3014 | EXPORT_SYMBOL(tcp_md5_hash_key); |
3015 | |
3016 | #endif |
3017 | |
3018 | /** |
3019 | * Each Responder maintains up to two secret values concurrently for |
3020 | * efficient secret rollover. Each secret value has 4 states: |
3021 | * |
3022 | * Generating. (tcp_secret_generating != tcp_secret_primary) |
3023 | * Generates new Responder-Cookies, but not yet used for primary |
3024 | * verification. This is a short-term state, typically lasting only |
3025 | * one round trip time (RTT). |
3026 | * |
3027 | * Primary. (tcp_secret_generating == tcp_secret_primary) |
3028 | * Used both for generation and primary verification. |
3029 | * |
3030 | * Retiring. (tcp_secret_retiring != tcp_secret_secondary) |
3031 | * Used for verification, until the first failure that can be |
3032 | * verified by the newer Generating secret. At that time, this |
3033 | * cookie's state is changed to Secondary, and the Generating |
3034 | * cookie's state is changed to Primary. This is a short-term state, |
3035 | * typically lasting only one round trip time (RTT). |
3036 | * |
3037 | * Secondary. (tcp_secret_retiring == tcp_secret_secondary) |
3038 | * Used for secondary verification, after primary verification |
3039 | * failures. This state lasts no more than twice the Maximum Segment |
3040 | * Lifetime (2MSL). Then, the secret is discarded. |
3041 | */ |
3042 | struct tcp_cookie_secret { |
3043 | /* The secret is divided into two parts. The digest part is the |
3044 | * equivalent of previously hashing a secret and saving the state, |
3045 | * and serves as an initialization vector (IV). The message part |
3046 | * serves as the trailing secret. |
3047 | */ |
3048 | u32 secrets[COOKIE_WORKSPACE_WORDS]; |
3049 | unsigned long expires; |
3050 | }; |
3051 | |
3052 | #define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL) |
3053 | #define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2) |
3054 | #define TCP_SECRET_LIFE (HZ * 600) |
3055 | |
3056 | static struct tcp_cookie_secret tcp_secret_one; |
3057 | static struct tcp_cookie_secret tcp_secret_two; |
3058 | |
3059 | /* Essentially a circular list, without dynamic allocation. */ |
3060 | static struct tcp_cookie_secret *tcp_secret_generating; |
3061 | static struct tcp_cookie_secret *tcp_secret_primary; |
3062 | static struct tcp_cookie_secret *tcp_secret_retiring; |
3063 | static struct tcp_cookie_secret *tcp_secret_secondary; |
3064 | |
3065 | static DEFINE_SPINLOCK(tcp_secret_locker); |
3066 | |
3067 | /* Select a pseudo-random word in the cookie workspace. |
3068 | */ |
3069 | static inline u32 tcp_cookie_work(const u32 *ws, const int n) |
3070 | { |
3071 | return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])]; |
3072 | } |
3073 | |
3074 | /* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed. |
3075 | * Called in softirq context. |
3076 | * Returns: 0 for success. |
3077 | */ |
3078 | int tcp_cookie_generator(u32 *bakery) |
3079 | { |
3080 | unsigned long jiffy = jiffies; |
3081 | |
3082 | if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) { |
3083 | spin_lock_bh(&tcp_secret_locker); |
3084 | if (!time_after_eq(jiffy, tcp_secret_generating->expires)) { |
3085 | /* refreshed by another */ |
3086 | memcpy(bakery, |
3087 | &tcp_secret_generating->secrets[0], |
3088 | COOKIE_WORKSPACE_WORDS * sizeof(u32)); |
3089 | } else { |
3090 | /* still needs refreshing */ |
3091 | get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS * sizeof(u32)); /* bytes */ |
3092 | |
3093 | /* The first time, paranoia assumes that the |
3094 | * randomization function isn't as strong. But, |
3095 | * this secret initialization is delayed until |
3096 | * the last possible moment (packet arrival). |
3097 | * Although that time is observable, it is |
3098 | * unpredictably variable. Mash in the most |
3099 | * volatile clock bits available, and expire the |
3100 | * secret extra quickly. |
3101 | */ |
3102 | if (unlikely(tcp_secret_primary->expires == |
3103 | tcp_secret_secondary->expires)) { |
3104 | struct timespec tv; |
3105 | |
3106 | getnstimeofday(&tv); |
3107 | bakery[COOKIE_DIGEST_WORDS+0] ^= |
3108 | (u32)tv.tv_nsec; |
3109 | |
3110 | tcp_secret_secondary->expires = jiffy |
3111 | + TCP_SECRET_1MSL |
3112 | + (0x0f & tcp_cookie_work(bakery, 0)); |
3113 | } else { |
3114 | tcp_secret_secondary->expires = jiffy |
3115 | + TCP_SECRET_LIFE |
3116 | + (0xff & tcp_cookie_work(bakery, 1)); |
3117 | tcp_secret_primary->expires = jiffy |
3118 | + TCP_SECRET_2MSL |
3119 | + (0x1f & tcp_cookie_work(bakery, 2)); |
3120 | } |
3121 | memcpy(&tcp_secret_secondary->secrets[0], |
3122 | bakery, COOKIE_WORKSPACE_WORDS * sizeof(u32)); |
3123 | |
3124 | rcu_assign_pointer(tcp_secret_generating, |
3125 | tcp_secret_secondary); |
3126 | rcu_assign_pointer(tcp_secret_retiring, |
3127 | tcp_secret_primary); |
3128 | /* |
3129 | * Neither call_rcu() nor synchronize_rcu() needed. |
3130 | * Retiring data is not freed. It is replaced after |
3131 | * further (locked) pointer updates, and a quiet time |
3132 | * (minimum 1MSL, maximum LIFE - 2MSL). |
3133 | */ |
3134 | } |
3135 | spin_unlock_bh(&tcp_secret_locker); |
3136 | } else { |
3137 | rcu_read_lock_bh(); |
3138 | memcpy(bakery, |
3139 | &rcu_dereference(tcp_secret_generating)->secrets[0], |
3140 | COOKIE_WORKSPACE_WORDS * sizeof(u32)); |
3141 | rcu_read_unlock_bh(); |
3142 | } |
3143 | return 0; |
3144 | } |
3145 | EXPORT_SYMBOL(tcp_cookie_generator); |
3146 | |
3147 | void tcp_done(struct sock *sk) |
3148 | { |
3149 | if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) |
3150 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); |
3151 | |
3152 | tcp_set_state(sk, TCP_CLOSE); |
3153 | tcp_clear_xmit_timers(sk); |
3154 | |
3155 | sk->sk_shutdown = SHUTDOWN_MASK; |
3156 | |
3157 | if (!sock_flag(sk, SOCK_DEAD)) |
3158 | sk->sk_state_change(sk); |
3159 | else |
3160 | inet_csk_destroy_sock(sk); |
3161 | } |
3162 | EXPORT_SYMBOL_GPL(tcp_done); |
3163 | |
3164 | extern struct tcp_congestion_ops tcp_reno; |
3165 | |
3166 | static __initdata unsigned long thash_entries; |
3167 | static int __init set_thash_entries(char *str) |
3168 | { |
3169 | if (!str) |
3170 | return 0; |
3171 | thash_entries = simple_strtoul(str, &str, 0); |
3172 | return 1; |
3173 | } |
3174 | __setup("thash_entries=", set_thash_entries); |
3175 | |
3176 | void __init tcp_init(void) |
3177 | { |
3178 | struct sk_buff *skb = NULL; |
3179 | unsigned long nr_pages, limit; |
3180 | int order, i, max_share; |
3181 | unsigned long jiffy = jiffies; |
3182 | |
3183 | BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); |
3184 | |
3185 | percpu_counter_init(&tcp_sockets_allocated, 0); |
3186 | percpu_counter_init(&tcp_orphan_count, 0); |
3187 | tcp_hashinfo.bind_bucket_cachep = |
3188 | kmem_cache_create("tcp_bind_bucket", |
3189 | sizeof(struct inet_bind_bucket), 0, |
3190 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
3191 | |
3192 | /* Size and allocate the main established and bind bucket |
3193 | * hash tables. |
3194 | * |
3195 | * The methodology is similar to that of the buffer cache. |
3196 | */ |
3197 | tcp_hashinfo.ehash = |
3198 | alloc_large_system_hash("TCP established", |
3199 | sizeof(struct inet_ehash_bucket), |
3200 | thash_entries, |
3201 | (totalram_pages >= 128 * 1024) ? |
3202 | 13 : 15, |
3203 | 0, |
3204 | NULL, |
3205 | &tcp_hashinfo.ehash_mask, |
3206 | thash_entries ? 0 : 512 * 1024); |
3207 | for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { |
3208 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); |
3209 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); |
3210 | } |
3211 | if (inet_ehash_locks_alloc(&tcp_hashinfo)) |
3212 | panic("TCP: failed to alloc ehash_locks"); |
3213 | tcp_hashinfo.bhash = |
3214 | alloc_large_system_hash("TCP bind", |
3215 | sizeof(struct inet_bind_hashbucket), |
3216 | tcp_hashinfo.ehash_mask + 1, |
3217 | (totalram_pages >= 128 * 1024) ? |
3218 | 13 : 15, |
3219 | 0, |
3220 | &tcp_hashinfo.bhash_size, |
3221 | NULL, |
3222 | 64 * 1024); |
3223 | tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size; |
3224 | for (i = 0; i < tcp_hashinfo.bhash_size; i++) { |
3225 | spin_lock_init(&tcp_hashinfo.bhash[i].lock); |
3226 | INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); |
3227 | } |
3228 | |
3229 | /* Try to be a bit smarter and adjust defaults depending |
3230 | * on available memory. |
3231 | */ |
3232 | for (order = 0; ((1 << order) << PAGE_SHIFT) < |
3233 | (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket)); |
3234 | order++) |
3235 | ; |
3236 | if (order >= 4) { |
3237 | tcp_death_row.sysctl_max_tw_buckets = 180000; |
3238 | sysctl_tcp_max_orphans = 4096 << (order - 4); |
3239 | sysctl_max_syn_backlog = 1024; |
3240 | } else if (order < 3) { |
3241 | tcp_death_row.sysctl_max_tw_buckets >>= (3 - order); |
3242 | sysctl_tcp_max_orphans >>= (3 - order); |
3243 | sysctl_max_syn_backlog = 128; |
3244 | } |
3245 | |
3246 | /* Set the pressure threshold to be a fraction of global memory that |
3247 | * is up to 1/2 at 256 MB, decreasing toward zero with the amount of |
3248 | * memory, with a floor of 128 pages. |
3249 | */ |
3250 | nr_pages = totalram_pages - totalhigh_pages; |
3251 | limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT); |
3252 | limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11); |
3253 | limit = max(limit, 128UL); |
3254 | sysctl_tcp_mem[0] = limit / 4 * 3; |
3255 | sysctl_tcp_mem[1] = limit; |
3256 | sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2; |
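/* Worked example (illustrative, assuming 4 KB pages, PAGE_SHIFT == 12):
 * with 256 MB of low memory, nr_pages = 65536, so
 *	limit = min(65536, 1 << 16) >> 8 = 256, then
 *	limit = (256 * (65536 >> 8)) >> 1 = 32768 pages = 128 MB,
 * i.e. half of memory, giving tcp_mem = { 24576, 32768, 49152 } pages.
 */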
3257 | |
3258 | /* Set per-socket limits to no more than 1/128 the pressure threshold */ |
3259 | limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7); |
3260 | max_share = min(4UL*1024*1024, limit); |
3261 | |
3262 | sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; |
3263 | sysctl_tcp_wmem[1] = 16*1024; |
3264 | sysctl_tcp_wmem[2] = max(64*1024, max_share); |
3265 | |
3266 | sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; |
3267 | sysctl_tcp_rmem[1] = 87380; |
3268 | sysctl_tcp_rmem[2] = max(87380, max_share); |
3269 | |
3270 | printk(KERN_INFO "TCP: Hash tables configured " |
3271 | "(established %u bind %u)\n", |
3272 | tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); |
3273 | |
3274 | tcp_register_congestion_control(&tcp_reno); |
3275 | |
3276 | memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets)); |
3277 | memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets)); |
3278 | tcp_secret_one.expires = jiffy; /* past due */ |
3279 | tcp_secret_two.expires = jiffy; /* past due */ |
3280 | tcp_secret_generating = &tcp_secret_one; |
3281 | tcp_secret_primary = &tcp_secret_one; |
3282 | tcp_secret_retiring = &tcp_secret_two; |
3283 | tcp_secret_secondary = &tcp_secret_two; |
3284 | } |
3285 | |
3286 | EXPORT_SYMBOL(tcp_close); |
3287 | EXPORT_SYMBOL(tcp_disconnect); |
3288 | EXPORT_SYMBOL(tcp_getsockopt); |
3289 | EXPORT_SYMBOL(tcp_ioctl); |
3290 | EXPORT_SYMBOL(tcp_poll); |
3291 | EXPORT_SYMBOL(tcp_read_sock); |
3292 | EXPORT_SYMBOL(tcp_recvmsg); |
3293 | EXPORT_SYMBOL(tcp_sendmsg); |
3294 | EXPORT_SYMBOL(tcp_splice_read); |
3295 | EXPORT_SYMBOL(tcp_sendpage); |
3296 | EXPORT_SYMBOL(tcp_setsockopt); |
3297 | EXPORT_SYMBOL(tcp_shutdown); |
3298 |