/*
 * INETPEER - A storage for permanent information about peers
 *
 * This source is covered by the GNU GPL, the same as all kernel sources.
 *
 * Authors: Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <net/ip.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-lived
 *  information about the peer which doesn't depend on routes.
 *  At the moment this information consists only of the ID field for the next
 *  outgoing IP packet.  This field is incremented with each packet as encoded
 *  by the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing these notes, the IP packet identifier is generated
 *  unpredictably by this code only for packets subject (actually or
 *  potentially) to defragmentation.  I.e. DF packets smaller than the PMTU
 *  use a constant ID and do not use this code (see ip_select_ident() in
 *  include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address in
 *  the AVL tree.  The reference is grabbed only when it's needed, i.e. only
 *  when we try to output an IP packet which needs an unpredictable ID (see
 *  __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when the reference counter goes to 0.
 *  When that happens, the node may be removed once a sufficient amount of
 *  time has passed since its last use.  The less-recently-used entry can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A
 *  huge number of long-living nodes in a single hash slot would significantly
 *  delay lookups performed with disabled BHs.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only under
 *      unused_peers.lock.
 *  4.  The total counter of each inet_peer_base is modified under the
 *      pool lock.
 *  5.  struct inet_peer fields modification:
 *	avl_left, avl_right, avl_parent, avl_height: pool lock
 *	unused: unused node list lock
 *	refcnt: atomically against modifications on other CPUs;
 *		usually under some other lock to prevent node disappearing
 *	dtime: unused node list lock
 *	daddr: unchangeable
 *	ip_id_count: atomic value (no lock needed)
 */

static struct kmem_cache *peer_cachep __read_mostly;

#define node_height(x) x->avl_height

#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
#define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node)
static const struct inet_peer peer_fake_node = {
	.avl_left	= peer_avl_empty_rcu,
	.avl_right	= peer_avl_empty_rcu,
	.avl_height	= 0
};

struct inet_peer_base {
	struct inet_peer __rcu *root;
	seqlock_t	lock;
	int		total;
};

static struct inet_peer_base v4_peers = {
	.root	= peer_avl_empty_rcu,
	.lock	= __SEQLOCK_UNLOCKED(v4_peers.lock),
	.total	= 0,
};

static struct inet_peer_base v6_peers = {
	.root	= peer_avl_empty_rcu,
	.lock	= __SEQLOCK_UNLOCKED(v6_peers.lock),
	.total	= 0,
};

#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;		/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;

static struct {
	struct list_head	list;
	spinlock_t		lock;
} unused_peers = {
	.list	= LIST_HEAD_INIT(unused_peers.list),
	.lock	= __SPIN_LOCK_UNLOCKED(unused_peers.lock),
};

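/* Periodic garbage-collection timer; rearmed by peer_check_expire() with
 * an interval that shrinks as the pool fills up.
 */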
static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);


/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
			NULL);

	/* All timers started at system startup tend to synchronize.
	 * Perturb it a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	if (!list_empty(&p->unused)) {
		spin_lock_bh(&unused_peers.lock);
		list_del_init(&p->unused);
		spin_unlock_bh(&unused_peers.lock);
	}
}

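/* Compare two peer addresses as a sequence of 32-bit words: one word for
 * IPv4, four for IPv6.  Returns -1, 0 or 1, giving the total order used
 * by the AVL tree.
 */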
static int addr_compare(const struct inetpeer_addr *a,
			const struct inetpeer_addr *b)
{
	int i, n = (a->family == AF_INET ? 1 : 4);

	for (i = 0; i < n; i++) {
		if (a->addr.a6[i] == b->addr.a6[i])
			continue;
		if (a->addr.a6[i] < b->addr.a6[i])
			return -1;
		return 1;
	}

	return 0;
}

#define rcu_deref_locked(X, BASE)				\
	rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock))

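/* lookup() records the path it follows: stack[] collects the address of
 * each __rcu pointer slot visited, starting with &base->root, and stackptr
 * ends up one past the last saved slot.  peer_avl_rebalance() later replays
 * this path to restore the AVL invariants.
 */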
/*
 * Called with local BH disabled and the pool lock held.
 */
#define lookup(_daddr, _stack, _base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
								\
	stackptr = _stack;					\
	*stackptr++ = &_base->root;				\
	for (u = rcu_deref_locked(_base->root, _base);		\
	     u != peer_avl_empty; ) {				\
		int cmp = addr_compare(_daddr, &u->daddr);	\
		if (cmp == 0)					\
			break;					\
		if (cmp == -1)					\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, _base);		\
	}							\
	u;							\
})

/*
 * Called with rcu_read_lock().
 * Because we hold no lock against a writer, it's quite possible we fall
 * into an endless loop.
 * But every pointer we follow is guaranteed to be valid thanks to RCU.
 * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
 */
static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
				    struct inet_peer_base *base)
{
	struct inet_peer *u = rcu_dereference(base->root);
	int count = 0;

	while (u != peer_avl_empty) {
		int cmp = addr_compare(daddr, &u->daddr);
		if (cmp == 0) {
			/* Before taking a reference, check if this entry was
			 * deleted: unlink_from_pool() sets refcnt=-1 to make
			 * a distinction between an unused entry (refcnt=0)
			 * and a freed one.
			 */
			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
				u = NULL;
			return u;
		}
		if (cmp == -1)
			u = rcu_dereference(u->avl_left);
		else
			u = rcu_dereference(u->avl_right);
		if (unlikely(++count == PEER_MAXDEPTH))
			break;
	}
	return NULL;
}

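/* lookup_rightempty() descends into the left child of @start and then keeps
 * going right: it returns the rightmost node of that subtree (the in-order
 * predecessor of @start), recording the path in stack[] so the tree can be
 * rebalanced after the node is moved.
 */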
/* Called with local BH disabled and the pool lock held. */
#define lookup_rightempty(start, base)				\
({								\
	struct inet_peer *u;					\
	struct inet_peer __rcu **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = rcu_deref_locked(*v, base);			\
	     u->avl_right != peer_avl_empty_rcu; ) {		\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = rcu_deref_locked(*v, base);			\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool lock held.
 * Variable names are the proof of operation correctness.
 * Look into mm/map_avl.c for a more detailed description of the ideas.
 */
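/* Walk back up the recorded path; at each ancestor recompute the height
 * and, if the two subtrees differ in height by two, restore balance with
 * a single or double rotation (the four branches below).
 */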
static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
			       struct inet_peer __rcu ***stackend,
			       struct inet_peer_base *base)
{
	struct inet_peer __rcu **nodep;
	struct inet_peer *node, *l, *r;
	int lh, rh;

	while (stackend > stack) {
		nodep = *--stackend;
		node = rcu_deref_locked(*nodep, base);
		l = rcu_deref_locked(node->avl_left, base);
		r = rcu_deref_locked(node->avl_right, base);
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
			ll = rcu_deref_locked(l->avl_left, base);
			lr = rcu_deref_locked(l->avl_right, base);
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH+1 */
				RCU_INIT_POINTER(l->avl_right, node);	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, l);
			} else { /* ll: RH, lr: RH+1 */
				lrl = rcu_deref_locked(lr->avl_left, base);	/* lrl: RH or RH-1 */
				lrr = rcu_deref_locked(lr->avl_right, base);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				RCU_INIT_POINTER(l->avl_left, ll);	/* ll: RH */
				RCU_INIT_POINTER(l->avl_right, lrl);	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_left, l);	/* l: RH+1 */
				RCU_INIT_POINTER(lr->avl_right, node);	/* node: RH+1 */
				lr->avl_height = rh + 2;
				RCU_INIT_POINTER(*nodep, lr);
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
			rr = rcu_deref_locked(r->avl_right, base);
			rl = rcu_deref_locked(r->avl_left, base);
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH+1 */
				RCU_INIT_POINTER(r->avl_left, node);	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				RCU_INIT_POINTER(*nodep, r);
			} else { /* rr: LH, rl: LH+1 */
				rlr = rcu_deref_locked(rl->avl_right, base);	/* rlr: LH or LH-1 */
				rll = rcu_deref_locked(rl->avl_left, base);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				RCU_INIT_POINTER(r->avl_right, rr);	/* rr: LH */
				RCU_INIT_POINTER(r->avl_left, rlr);	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_right, r);	/* r: LH+1 */
				RCU_INIT_POINTER(rl->avl_left, node);	/* node: LH+1 */
				rl->avl_height = lh + 2;
				RCU_INIT_POINTER(*nodep, rl);
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool lock held. */
#define link_to_pool(n, base)					\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty_rcu;			\
	n->avl_right = peer_avl_empty_rcu;			\
	/* lockless readers can catch us now */			\
	rcu_assign_pointer(**--stackptr, n);			\
	peer_avl_rebalance(stack, stackptr, base);		\
} while (0)

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
			     struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
	int do_free;

	do_free = 0;

	write_seqlock_bh(&base->lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent the node from suddenly disappearing.
	 * If we can atomically (because of lockless readers) take this last
	 * reference, it's safe to remove the node and free it later.
	 * We use refcnt=-1 to alert lockless readers that this entry is
	 * deleted.
	 */
	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
		struct inet_peer __rcu ***stackptr, ***delp;
		if (lookup(&p->daddr, stack, base) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty_rcu) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p, base);
			BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
			**--stackptr = t->avl_left;
			/* t is removed, t->daddr > x->daddr for any
			 * x in p->avl_left subtree.
			 * Put t in the old place of p. */
			RCU_INIT_POINTER(*delp[0], t);
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			BUG_ON(delp[1] != &p->avl_left);
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr, base);
		base->total--;
		do_free = 1;
	}
	write_sequnlock_bh(&base->lock);

	if (do_free)
		call_rcu(&p->rcu, inetpeer_free_rcu);
	else
		/* The node is used again.  Decrease the reference counter
		 * back.  The loop "cleanup -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon.
		 */
		inet_putpeer(p);
}

static struct inet_peer_base *family_to_base(int family)
{
	return (family == AF_INET ? &v4_peers : &v6_peers);
}

static struct inet_peer_base *peer_to_base(struct inet_peer *p)
{
	return family_to_base(p->daddr.family);
}

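/* Reclaim at most one entry from the unused list: the oldest entry is
 * removed only if it has been idle for at least @ttl jiffies.  Returns 0
 * if an entry was reclaimed, -1 otherwise.
 */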
/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
{
	struct inet_peer *p = NULL;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&unused_peers.lock);
	if (!list_empty(&unused_peers.list)) {
		__u32 delta;

		p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
		delta = (__u32)jiffies - p->dtime;

		if (delta < ttl) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&unused_peers.lock);
			return -1;
		}

		list_del_init(&p->unused);

		/* Grab an extra reference to prevent the node from
		 * disappearing before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&unused_peers.lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in the route cache. */
		return -1;

	unlink_from_pool(p, peer_to_base(p), stack);
	return 0;
}

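/* Find, and optionally create, the entry for @daddr.
 * Fast path: a lockless RCU lookup.  If that fails and either @create is
 * set or a concurrent writer invalidated the lookup, retry under the base
 * seqlock, inserting a new node when @create is set.
 */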
/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
{
	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
	struct inet_peer_base *base = family_to_base(daddr->family);
	struct inet_peer *p;
	unsigned int sequence;
	int invalidated;

	/* Look up the address quickly, locklessly.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	sequence = read_seqbegin(&base->lock);
	p = lookup_rcu(daddr, base);
	invalidated = read_seqretry(&base->lock, sequence);
	rcu_read_unlock();

	if (p) {
		/* The existing node has been found.
		 * Remove the entry from the unused list if it was there.
		 */
		unlink_from_unused(p);
		return p;
	}

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, taking the lock this time.
	 * At least, the nodes should be hot in our cache.
	 */
	write_seqlock_bh(&base->lock);
	p = lookup(daddr, stack, base);
	if (p != peer_avl_empty) {
		atomic_inc(&p->refcnt);
		write_sequnlock_bh(&base->lock);
		/* Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}
	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
	if (p) {
		p->daddr = *daddr;
		atomic_set(&p->refcnt, 1);
		atomic_set(&p->rid, 0);
		atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
		p->tcp_ts_stamp = 0;
		p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
		p->rate_tokens = 0;
		p->rate_last = 0;
		p->pmtu_expires = 0;
		p->pmtu_orig = 0;
		memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
		INIT_LIST_HEAD(&p->unused);

		/* Link the node. */
		link_to_pool(p, base);
		base->total++;
	}
	write_sequnlock_bh(&base->lock);

	if (base->total >= inet_peer_threshold)
		/* Remove one less-recently-used entry. */
		cleanup_once(0, stack);

	return p;
}

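/* Sum of the v4 and v6 pool sizes, read without any lock; the result may
 * be slightly stale, which is fine for the garbage-collection heuristics
 * that use it.
 */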
static int compute_total(void)
{
	return v4_peers.total + v6_peers.total;
}
EXPORT_SYMBOL_GPL(inet_getpeer);

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	unsigned long now = jiffies;
	int ttl, total;
	struct inet_peer __rcu **stack[PEER_MAXDEPTH];

	total = compute_total();
	if (total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					total / inet_peer_threshold * HZ;
	while (!cleanup_once(ttl, stack)) {
		if (jiffies != now)
			break;
	}

	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
	 * interval depending on the total number of entries (the more entries,
	 * the shorter the interval). */
	total = compute_total();
	if (total >= inet_peer_threshold)
		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
	else
		peer_periodic_timer.expires = jiffies
			+ inet_peer_gc_maxtime
			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
				total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}

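/* Drop a reference.  When the count reaches zero the node is not freed
 * immediately; it is queued on the unused list with its deletion time
 * recorded, so cleanup_once() can reclaim it later.
 */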
void inet_putpeer(struct inet_peer *p)
{
	local_bh_disable();

	if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
		list_add_tail(&p->unused, &unused_peers.list);
		p->dtime = (__u32)jiffies;
		spin_unlock(&unused_peers.lock);
	}

	local_bh_enable();
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check transmit rate limitation for a given message.
 *	The rate information is held in the inet_peer entries now.
 *	This function is generic and could be used for other purposes
 *	too. It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but these work for packet destinations while xrlim_allow
 *	works for ICMP destinations. This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
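/* Tokens are measured in jiffies: the bucket gains one token per jiffy
 * elapsed since the previous call, each allowed transmission costs
 * @timeout tokens, and the bucket is capped at XRLIM_BURST_FACTOR times
 * @timeout.
 */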
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);