/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * The filters are packed into hash tables of key nodes, each node
 * holding a set of 32bit key/mask pairs.  Nodes may reference
 * next-level hash tables, and so on.
 *
 * This scheme is the best universal classifier I managed to
 * invent; it is not super-fast, but it is not slow (provided you
 * program it correctly), and it is general enough.  Its relative
 * speed grows as the number of rules becomes larger.
 *
 * It seems to represent the best middle point between speed and
 * manageability, both by human and by machine.
 *
 * It is especially useful for link sharing combined with QoS;
 * pure RSVP doesn't need such a general approach and can use
 * much simpler (and faster) schemes, such as cls_rsvp.c.
 *
 * JHS: We should remove the CONFIG_NET_CLS_IND from here
 * eventually when the meta match extension is made available
 *
 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

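/*
 * A key node: one u32 filter element.  Each node carries a selector
 * (an array of 32bit value/mask pairs matched at computed offsets),
 * the attached extensions/actions, and optionally a link to a lower
 * hash table (ht_down) for hierarchical lookups.
 */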
struct tc_u_knode {
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	int			ifindex;
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};

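/*
 * A hash table of key nodes.  'divisor' is the number of buckets
 * minus one; the root table has divisor 0, i.e. a single bucket.
 * ht[] is over-allocated with 'divisor' extra slots (see u32_change()).
 */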
struct tc_u_hnode {
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned int		divisor;
	struct tc_u_knode	*ht[1];
};

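/*
 * State shared by all u32 tcf_proto instances attached to the same
 * qdisc (hung off tp->q->u32_node): the list of hash tables and a
 * generator for fresh hash table ids.
 */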
struct tc_u_common {
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

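/*
 * Fold a masked key into a bucket index: mask, convert to host order
 * and shift out the trailing zero bits of the mask (fshift is
 * precomputed in u32_change() as ffs(ntohl(hmask)) - 1).
 */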
static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}

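/*
 * The classifier proper.  Walks the hash table hierarchy iteratively
 * with an explicit stack (bounded by TC_U32_MAXDEPTH), so that when a
 * lower level table yields no terminal match we can pop back and
 * resume at the parent node's terminal check.  'off' is the current
 * base header offset; 'off2' holds a pending offset computed under
 * TC_U32_OFFSET/TC_U32_VAROFFSET and is folded into 'off' by
 * TC_U32_EAT.
 */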
static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = tp->root;
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}

static struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}

static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

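/*
 * Allocate an unused hash table id.  'hgenerator' cycles through
 * 1..0x7FE; the id lands in bits 20..31 of the handle with bit 31
 * forced on.  Returns 0 if no free id was found.
 */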
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

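/*
 * Called for each new u32 tcf_proto instance.  All instances on one
 * qdisc share a single tc_u_common; each instance gets its own
 * single-bucket root hash table.
 */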
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	WARN_ON(1);
	return -ENOENT;
}

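/*
 * Tear down one tcf_proto instance.  The shared tc_u_common and any
 * remaining hash tables are freed only when the last instance on the
 * qdisc drops its reference.
 */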
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = tp->root;

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = tp_c->hlist; ht; ht = ht->next) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			WARN_ON(ht->refcnt != 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode *)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode *)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}

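/*
 * Pick a node id for a new key in 'ht': one past the highest id in
 * the target bucket, but no lower than 0x800, saturating at 0xFFF.
 */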
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned int i = 0x7FF;

	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle | (i > 0xFFF ? 0xFFF : i);
}

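/* Bounds/type checking for the TCA_U32_* attributes in TCA_OPTIONS. */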
static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};

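/*
 * Apply the common key node attributes: an optional link to a lower
 * hash table (TCA_U32_LINK), the classid binding, the input device
 * match and the actions.  The extensions are validated before any
 * state is touched.
 */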
static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base, struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est)
{
	int err;
	struct tcf_exts e;

	tcf_exts_init(&e, TCA_U32_ACT, TCA_U32_POLICE);
	err = tcf_exts_validate(net, tp, tb, est, &e);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_old = n->ht_down;
		n->ht_down = ht_down;
		tcf_tree_unlock(tp);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV]);
		if (ret < 0)
			goto errout;
		n->ifindex = ret;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

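/*
 * Create or reconfigure a filter element.  Three cases, in order: an
 * existing key node is reparameterized in place; TCA_U32_DIVISOR is
 * present and a new hash table is created; otherwise a new key node
 * is allocated, parameterized and inserted into its bucket in node
 * id order.
 */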
static int u32_change(struct net *net, struct sk_buff *in_skb,
		      struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	n = (struct tc_u_knode *)*arg;
	if (n) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(net, tp, base, n->ht_up, n, tb,
				     tca[TCA_RATE]);
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
	tcf_exts_init(&n->exts, TCA_U32_ACT, TCA_U32_POLICE);

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE]);
	if (err == 0) {
		struct tc_u_knode **ins;

		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		tcf_tree_lock(tp);
		*ins = n;
		tcf_tree_unlock(tp);

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}

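/*
 * Enumerate all hash tables of this tcf_proto's priority and every
 * key node within them, honouring the walker's skip/count bookkeeping.
 */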
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned int h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

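/*
 * Dump one element to userspace: just the divisor for hash table
 * nodes (TC_U32_KEY(handle) == 0); for key nodes the selector, the
 * owning hash table id, classid, link, mark, extensions and, when
 * enabled, the per-key hit counters.
 */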
static int u32_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode *)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
		u32 divisor = ht->divisor + 1;

		if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
			goto nla_put_failure;
	} else {
		if (nla_put(skb, TCA_U32_SEL,
			    sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			    &n->sel))
			goto nla_put_failure;
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;

			if (nla_put_u32(skb, TCA_U32_HASH, htid))
				goto nla_put_failure;
		}
		if (n->res.classid &&
		    nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
			goto nla_put_failure;
		if (n->ht_down &&
		    nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
			goto nla_put_failure;

#ifdef CONFIG_CLS_U32_MARK
		if ((n->mark.val || n->mark.mask) &&
		    nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
			goto nla_put_failure;
#endif

		if (tcf_exts_dump(skb, &n->exts) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (n->ifindex) {
			struct net_device *dev;

			dev = __dev_get_by_index(net, n->ifindex);
			if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
				goto nla_put_failure;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		if (nla_put(skb, TCA_U32_PCNT,
			    sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			    n->pf))
			goto nla_put_failure;
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		= "u32",
	.classify	= u32_classify,
	.init		= u32_init,
	.destroy	= u32_destroy,
	.get		= u32_get,
	.put		= u32_put,
	.change		= u32_change,
	.delete		= u32_delete,
	.walk		= u32_walk,
	.dump		= u32_dump,
	.owner		= THIS_MODULE,
};

static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");