/*
 * Pluggable TCP congestion control support and NewReno
 * congestion control.
 * Based on ideas from I/O scheduler support and Web100.
 *
 * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <net/tcp.h>

int sysctl_tcp_max_ssthresh = 0;

static DEFINE_SPINLOCK(tcp_cong_list_lock);
static LIST_HEAD(tcp_cong_list);

/* Simple linear search, don't expect many entries! */
static struct tcp_congestion_ops *tcp_ca_find(const char *name)
{
	struct tcp_congestion_ops *e;

	list_for_each_entry_rcu(e, &tcp_cong_list, list) {
		if (strcmp(e->name, name) == 0)
			return e;
	}

	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement ssthresh and cong_avoid ops */
	if (!ca->ssthresh || !ca->cong_avoid) {
		printk(KERN_ERR "TCP %s does not implement required ops\n",
		       ca->name);
		return -EINVAL;
	}

	spin_lock(&tcp_cong_list_lock);
	if (tcp_ca_find(ca->name)) {
		printk(KERN_NOTICE "TCP %s already registered\n", ca->name);
		ret = -EEXIST;
	} else {
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		printk(KERN_INFO "TCP %s registered\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(tcp_register_congestion_control);
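
/* Illustrative sketch (not part of this file): a congestion control
 * module would typically register itself from its init function.  The
 * tcp_foo_* names below are hypothetical; only .ssthresh and
 * .cong_avoid are mandatory, as checked above.
 *
 *	static struct tcp_congestion_ops tcp_foo = {
 *		.name		= "foo",
 *		.owner		= THIS_MODULE,
 *		.ssthresh	= tcp_foo_ssthresh,
 *		.cong_avoid	= tcp_foo_cong_avoid,
 *	};
 *
 *	static int __init tcp_foo_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_foo);
 *	}
 *	module_init(tcp_foo_init);
 *
 *	static void __exit tcp_foo_exit(void)
 *	{
 *		tcp_unregister_congestion_control(&tcp_foo);
 *	}
 *	module_exit(tcp_foo_exit);
 */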

/*
 * Remove congestion control algorithm, called from
 * the module's remove function.  Module ref counts are used
 * to ensure that this can't be done till all sockets using
 * that method are closed.
 */
void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
{
	spin_lock(&tcp_cong_list_lock);
	list_del_rcu(&ca->list);
	spin_unlock(&tcp_cong_list_lock);
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

/* Assign choice of congestion control. */
void tcp_init_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;

	/* if no choice made yet assign the current value set as default */
	if (icsk->icsk_ca_ops == &tcp_init_congestion_ops) {
		rcu_read_lock();
		list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
			if (try_module_get(ca->owner)) {
				icsk->icsk_ca_ops = ca;
				break;
			}

			/* fallback to next available */
		}
		rcu_read_unlock();
	}

	if (icsk->icsk_ca_ops->init)
		icsk->icsk_ca_ops->init(sk);
}

/* Manage refcounts on socket close. */
void tcp_cleanup_congestion_control(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->release)
		icsk->icsk_ca_ops->release(sk);
	module_put(icsk->icsk_ca_ops->owner);
}

/* Used by sysctl to change default congestion control */
int tcp_set_default_congestion_control(const char *name)
{
	struct tcp_congestion_ops *ca;
	int ret = -ENOENT;

	spin_lock(&tcp_cong_list_lock);
	ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
	if (!ca && capable(CAP_NET_ADMIN)) {
		spin_unlock(&tcp_cong_list_lock);

		request_module("tcp_%s", name);
		spin_lock(&tcp_cong_list_lock);
		ca = tcp_ca_find(name);
	}
#endif

	if (ca) {
		ca->flags |= TCP_CONG_NON_RESTRICTED;	/* default is always allowed */
		list_move(&ca->list, &tcp_cong_list);
		ret = 0;
	}
	spin_unlock(&tcp_cong_list_lock);

	return ret;
}
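
/* Illustrative sketch (not part of this file): the default is normally
 * changed from userspace through the tcp_congestion_control sysctl,
 * which ends up calling tcp_set_default_congestion_control(), e.g.
 *
 *	# sysctl -w net.ipv4.tcp_congestion_control=reno
 *	# echo reno > /proc/sys/net/ipv4/tcp_congestion_control
 *
 * Because the chosen algorithm is moved to the head of tcp_cong_list,
 * tcp_init_congestion_control() above picks it up for new sockets.
 */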

/* Set default value from kernel configuration at bootup */
static int __init tcp_congestion_default(void)
{
	return tcp_set_default_congestion_control(CONFIG_DEFAULT_TCP_CONG);
}
late_initcall(tcp_congestion_default);


/* Build string with list of available congestion control values */
void tcp_get_available_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

	}
	rcu_read_unlock();
}

/* Get current default congestion control */
void tcp_get_default_congestion_control(char *name)
{
	struct tcp_congestion_ops *ca;
	/* We will always have reno... */
	BUG_ON(list_empty(&tcp_cong_list));

	rcu_read_lock();
	ca = list_entry(tcp_cong_list.next, struct tcp_congestion_ops, list);
	strncpy(name, ca->name, TCP_CA_NAME_MAX);
	rcu_read_unlock();
}

/* Build list of non-restricted congestion control values */
void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
{
	struct tcp_congestion_ops *ca;
	size_t offs = 0;

	*buf = '\0';
	rcu_read_lock();
	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
			continue;
		offs += snprintf(buf + offs, maxlen - offs,
				 "%s%s",
				 offs == 0 ? "" : " ", ca->name);

	}
	rcu_read_unlock();
}

/* Change list of non-restricted congestion control */
int tcp_set_allowed_congestion_control(char *val)
{
	struct tcp_congestion_ops *ca;
	char *saved_clone, *clone, *name;
	int ret = 0;

	saved_clone = clone = kstrdup(val, GFP_USER);
	if (!clone)
		return -ENOMEM;

	spin_lock(&tcp_cong_list_lock);
	/* pass 1 check for bad entries */
	while ((name = strsep(&clone, " ")) && *name) {
		ca = tcp_ca_find(name);
		if (!ca) {
			ret = -ENOENT;
			goto out;
		}
	}

	/* pass 2 clear old values */
	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
		ca->flags &= ~TCP_CONG_NON_RESTRICTED;

	/* pass 3 mark as allowed */
	while ((name = strsep(&val, " ")) && *name) {
		ca = tcp_ca_find(name);
		WARN_ON(!ca);
		if (ca)
			ca->flags |= TCP_CONG_NON_RESTRICTED;
	}
out:
	spin_unlock(&tcp_cong_list_lock);
	kfree(saved_clone);	/* strsep() advanced clone; free the original */

	return ret;
}
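
/* Illustrative sketch (not part of this file): the allowed list is
 * normally written via sysctl; unprivileged processes may then select
 * only the listed algorithms with setsockopt(TCP_CONGESTION), e.g.
 *
 *	# sysctl -w net.ipv4.tcp_allowed_congestion_control="reno cubic"
 */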


/* Change congestion control for socket */
int tcp_set_congestion_control(struct sock *sk, const char *name)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_congestion_ops *ca;
	int err = 0;

	rcu_read_lock();
	ca = tcp_ca_find(name);

	/* no change if asking for the value already set */
	if (ca == icsk->icsk_ca_ops)
		goto out;

#ifdef CONFIG_MODULES
	/* not found, attempt to autoload module */
	if (!ca && capable(CAP_NET_ADMIN)) {
		rcu_read_unlock();
		request_module("tcp_%s", name);
		rcu_read_lock();
		ca = tcp_ca_find(name);
	}
#endif
	if (!ca)
		err = -ENOENT;

	else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || capable(CAP_NET_ADMIN)))
		err = -EPERM;

	else if (!try_module_get(ca->owner))
		err = -EBUSY;

	else {
		tcp_cleanup_congestion_control(sk);
		icsk->icsk_ca_ops = ca;

		if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
			icsk->icsk_ca_ops->init(sk);
	}
out:
	rcu_read_unlock();
	return err;
}
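
/* Illustrative sketch (not part of this file): applications reach this
 * function through the TCP_CONGESTION socket option, e.g. from
 * userspace:
 *
 *	#include <string.h>
 *	#include <netinet/tcp.h>
 *
 *	const char ca[] = "reno";
 *	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
 *		       ca, strlen(ca)) < 0)
 *		perror("setsockopt");
 *
 * Non-restricted algorithms can be selected by any process; others
 * additionally require CAP_NET_ADMIN, as enforced above.
 */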

/* RFC2861 Check whether we are limited by application or congestion window
 * This is the inverse of cwnd check in tcp_tso_should_defer
 */
int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 left;

	if (in_flight >= tp->snd_cwnd)
		return 1;

	left = tp->snd_cwnd - in_flight;
	if (sk_can_gso(sk) &&
	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
	    left * tp->mss_cache < sk->sk_gso_max_size)
		return 1;
	return left <= tcp_max_burst(tp);
}
EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
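
/* Worked example (illustrative, assuming the historical defaults
 * sysctl_tcp_tso_win_divisor == 3 and tcp_max_burst() == 3): with
 * snd_cwnd == 10 and in_flight == 8, left == 2 and 2 * 3 < 10, so a
 * GSO sender is still considered cwnd-limited (it merely deferred to
 * build a larger segment).  With in_flight == 2, left == 8 fails both
 * tests, so the sender is application-limited and cwnd should not grow.
 */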

/*
 * Slow start is used when congestion window is less than slow start
 * threshold.  This version implements the basic RFC2581 behaviour
 * and optionally supports:
 *	RFC3742 Limited Slow Start	  - growth limited to max_ssthresh
 *	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
 */
void tcp_slow_start(struct tcp_sock *tp)
{
	int cnt;	/* increase in packets */

	/* RFC3465: ABC Slow start
	 * Increase only after a full MSS of bytes is acked
	 *
	 * TCP sender SHOULD increase cwnd by the number of
	 * previously unacknowledged bytes ACKed by each incoming
	 * acknowledgment, provided the increase is not more than L
	 */
	if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache)
		return;

	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
	else
		cnt = tp->snd_cwnd;			/* exponential increase */

	/* RFC3465: ABC
	 * We MAY increase by 2 if discovered delayed ack
	 */
	if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache)
		cnt <<= 1;
	tp->bytes_acked = 0;

	tp->snd_cwnd_cnt += cnt;
	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
		tp->snd_cwnd_cnt -= tp->snd_cwnd;
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
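
/* Worked example (illustrative): without ABC or limited slow start,
 * cnt == snd_cwnd, so each ACK pushes snd_cwnd_cnt past snd_cwnd and
 * the window grows by one segment per ACK; with one ACK per segment
 * that doubles cwnd every RTT (4 -> 8 -> 16 ...).  With limited slow
 * start and, say, sysctl_tcp_max_ssthresh == 100 and snd_cwnd == 200,
 * cnt == 50 per ACK, capping growth at roughly max_ssthresh/2 == 50
 * segments per RTT however large cwnd gets (RFC3742).
 */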

/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
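
/* Worked example (illustrative): with w == snd_cwnd == 10, ten calls
 * (one per ACK) are needed before snd_cwnd_cnt reaches w and cwnd
 * becomes 11, i.e. additive increase of about one segment per RTT.
 * A caller can pass a different w to scale the growth rate.
 */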

/*
 * TCP Reno congestion control
 * This is a special case used for fallback as well.
 */
/* This is Jacobson's slow start and congestion avoidance.
 * SIGCOMM '88, p. 328.
 */
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	/* In "safe" area, increase. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);

	/* In dangerous area, increase slowly. */
	else if (sysctl_tcp_abc) {
		/* RFC3465: Appropriate Byte Counting
		 * increase once for each full cwnd acked
		 */
		if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
			tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
				tp->snd_cwnd++;
		}
	} else {
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
	}
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);

/* Slow start threshold is half the congestion window (min 2) */
u32 tcp_reno_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return max(tp->snd_cwnd >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);

/* Lower bound on congestion window with halving. */
u32 tcp_reno_min_cwnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	return tp->snd_ssthresh/2;
}
EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);

struct tcp_congestion_ops tcp_reno = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "reno",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};

/* Initial congestion control used (until SYN).
 * It is really Reno under another name so that we can tell the
 * difference during tcp_set_default_congestion_control().
 */
struct tcp_congestion_ops tcp_init_congestion_ops = {
	.name		= "",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);