/*
 * net/dccp/output.c
 *
 * An implementation of the DCCP protocol
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"

static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}

/*
 * All SKB's seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					     dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);
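		/*
		 * Sequence numbers are 48 bits wide: assuming the ADD48() and
		 * UINT48_MAX helpers from dccp.h (which mask the sum to 48
		 * bits), the result wraps modulo 2^48, e.g.
		 * ADD48(UINT48_MAX, 1) == 0.
		 */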

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type = dcb->dccpd_type;
		dh->dccph_sport = inet->inet_sport;
		dh->dccph_dport = inet->inet_dport;
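		/*
		 * Data Offset is expressed in 32-bit words (RFC 4340, 5.1),
		 * hence the division by 4 below; dccpd_opt_len is expected to
		 * have been padded to a multiple of 4 by dccp_insert_options().
		 */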
		dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x = 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, 0, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(skb, 0);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

/**
 * dccp_determine_ccmps - Find out about CCID-specific packet-size limits
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 option overhead */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets, as
	 * per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (eg. Ack Vector which can take up to 255 bytes), it is better
	 * to schedule a separate Ack. Thus we leave headroom for the following:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 */
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);
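	/*
	 * For instance, with NDP counts inactive and no Ack Vector in use,
	 * the reservation above is roundup(1 + 6 + 10 + 0 + 6 + 0, 4) = 24
	 * bytes of option headroom per packet.
	 */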

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	if (sk_has_sleeper(sk))
		wake_up_interruptible(sk->sk_sleep);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk: socket to wait for
 * @skb: current skb to pass on for waiting
 * @delay: sleep timeout in milliseconds (> 0)
 * This function is called by default when the socket is closed, and
 * when a non-zero linger time is set on the socket. For consistency
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	unsigned long jiffdelay;
	int rc;

	do {
		dccp_pr_debug("delayed send by %d msec\n", delay);
		jiffdelay = msecs_to_jiffies(delay);

		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		sk->sk_write_pending++;
		release_sock(sk);
		schedule_timeout(jiffdelay);
		lock_sock(sk);
		sk->sk_write_pending--;

		if (sk->sk_err)
			goto do_error;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
	} while ((delay = rc) > 0);
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_interrupted:
	rc = -EINTR;
	goto out;
}

void dccp_write_xmit(struct sock *sk, int block)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_peek(&sk->sk_write_queue))) {
		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

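		/*
		 * ccid_hc_tx_send_packet() (a static inline in ccid.h) is
		 * expected to return 0 when the packet may be sent now, a
		 * positive delay in milliseconds when sending must wait, or a
		 * negative error code when the packet should be discarded;
		 * the three branches below handle these cases in turn.
		 */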
		if (err > 0) {
			if (!block) {
				sk_reset_timer(sk, &dp->dccps_xmit_timer,
					       msecs_to_jiffies(err)+jiffies);
				break;
			} else
				err = dccp_wait_for_ccid(sk, skb, err);
			if (err && err != -EINTR)
				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
		}

		skb_dequeue(&sk->sk_write_queue);
		if (err == 0) {
			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
			const int len = skb->len;

			if (sk->sk_state == DCCP_PARTOPEN) {
				const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
				/*
				 * See 8.1.5 - Handshake Completion.
				 *
				 * For robustness we resend Confirm options until the client has
				 * entered OPEN. During the initial feature negotiation, the MPS
				 * is smaller than usual, reduced by the Change/Confirm options.
				 */
				if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
					DCCP_WARN("Payload too large (%d) for featneg.\n", len);
					dccp_send_ack(sk);
					dccp_feat_list_purge(&dp->dccps_featneg);
				}

				inet_csk_schedule_ack(sk);
				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
							  inet_csk(sk)->icsk_rto,
							  DCCP_RTO_MAX);
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			} else if (dccp_ack_pending(sk))
				dcb->dccpd_type = DCCP_PKT_DATAACK;
			else
				dcb->dccpd_type = DCCP_PKT_DATA;

			err = dccp_transmit_skb(sk, skb);
			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
			if (err)
				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
					 err);
		} else {
			dccp_pr_debug("packet discarded due to err=%d\n", err);
			kfree_skb(skb);
		}
	}
}

/**
 * dccp_retransmit_skb - Retransmit Request, Close, or CloseReq packets
 * There are only four retransmittable packet types in DCCP:
 * - Request in client-REQUEST state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close in node-CLOSING state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;

	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_iss;

	/* Resolve feature dependencies resulting from choice of CCID */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport = inet_rsk(req)->loc_port;
	dh->dccph_dport = inet_rsk(req)->rmt_port;
	dh->dccph_doff = (dccp_header_size +
			  DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type = DCCP_PKT_RESPONSE;
	dh->dccph_x = 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @ctl */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type = DCCP_PKT_RESET;
	dh->dccph_sport = rxdh->dccph_dport;
	dh->dccph_dport = rxdh->dccph_sport;
	dh->dccph_doff = dccp_hdr_reset_len / 4;
	dh->dccph_x = 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;

	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:	/* fall through */
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0.
	 */
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	icsk->icsk_retransmits = 0;
}

int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	dccp_connect_init(sk);

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif

void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		dccp_write_xmit(sk, 1);
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
		 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
		 * state can be left. The initial timeout is 2 RTTs.
		 * Since RTT measurement is done by the CCIDs, there is no easy
		 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
		 * is too low (200ms); we use a high value to avoid unnecessary
		 * retransmissions when the link RTT is > 0.2 seconds.
		 * FIXME: Let main module sample RTTs and use that instead.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	} else
		dccp_transmit_skb(sk, skb);
}