/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	int delta;
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_offsets;
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Allow this many total (re)entries. */
static const unsigned int xt_jumpstack_multiplier = 2;

/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

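/*
 * Register an array of @n targets; if one registration fails, any
 * targets already registered by this call are unregistered again
 * before returning the error.
 */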
int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_target(&target[n]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int
xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;

	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);

	return ret;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

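/* As with targets: register @n matches, rolling back on failure. */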
int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	while (n-- > 0)
		xt_unregister_match(&match[n]);
}
EXPORT_SYMBOL(xt_unregister_matches);

/*
 * These are a bit odd: module loading must not be done while holding
 * the mutex (the loaded module will register itself and take the same
 * mutex), and we need a single expression for try_then_request_module()
 * to retry after the load.
 */

/* Find match, grabs ref. Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(nfproto, name, revision),
					"%st_%s", xt_prefix[nfproto], name);
	return (match != NULL) ? match : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_request_find_match);

/* Find target, grabs ref. Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = try_then_request_module(xt_find_target(af, name, revision),
					 "%st_%s", xt_prefix[af], name);
	return (target != NULL) ? target : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/*
 * Returns 0 if no extension with this name exists at all (the caller
 * may then try loading a module); otherwise returns 1, with *err set
 * to the best revision found or a negative error code.
 */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);

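/*
 * Render the hook bitmask as a '/'-separated list of hook names
 * ("PREROUTING/INPUT/...") into @buf, for the error messages below.
 */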
static char *textify_hooks(char *buf, size_t size, unsigned int mask)
{
	static const char *const names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	unsigned int i;
	char *p = buf;
	bool np = false;
	int res;

	*p = '\0';
	for (i = 0; i < ARRAY_SIZE(names); ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}

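/*
 * Validate a match extension at rule-insertion time: check that the
 * userspace payload size, table name, hook mask and L4 protocol agree
 * with what the extension declared, then run the extension's own
 * checkentry hook. Returns 0 on success or a negative errno.
 */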
int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask),
		       textify_hooks(allow, sizeof(allow), par->match->hooks));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);

#ifdef CONFIG_COMPAT
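/*
 * Record, per address family, how much a rule at @offset grows when
 * translated from the 32-bit (compat) layout to the native one; the
 * accumulated deltas are summed by xt_compat_calc_jump() to fix up
 * jump offsets.
 */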
int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->offset = offset;
	tmp->delta = delta;

	if (xt[af].compat_offsets) {
		tmp->next = xt[af].compat_offsets->next;
		xt[af].compat_offsets->next = tmp;
	} else {
		xt[af].compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	struct compat_delta *tmp, *next;

	if (xt[af].compat_offsets) {
		for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		xt[af].compat_offsets = NULL;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp;
	int delta;

	for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);

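/*
 * Size difference between the native and compat representations of a
 * match: the number of bytes a rule grows by when converted from the
 * 32-bit layout to the native one.
 */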
int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

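/*
 * Convert one match from the 32-bit userspace layout at @m to the
 * native layout at *@dstptr, zero-padding up to the aligned native
 * size, then advance *@dstptr and *@size by the growth.
 */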
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */

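/*
 * Target-side counterpart of xt_check_match(): validate size, table,
 * hook mask and protocol for a target extension, then run its
 * checkentry hook. Returns 0 on success or a negative errno.
 */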
int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask),
		       textify_hooks(allow, sizeof(allow), par->target->hooks));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

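/*
 * Allocate a table_info structure plus one copy of the rule blob per
 * possible CPU: kmalloc for blobs up to a page, vmalloc beyond that,
 * NUMA-local to each CPU.
 */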
struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							     GFP_KERNEL,
							     cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							     cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}

	if (info->jumpstack != NULL) {
		if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
			for_each_possible_cpu(cpu)
				vfree(info->jumpstack[cpu]);
		} else {
			for_each_possible_cpu(cpu)
				kfree(info->jumpstack[cpu]);
		}
	}

	if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
		vfree(info->jumpstack);
	else
		kfree(info->jumpstack);

	free_percpu(info->stackptr);

	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref. Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);

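/*
 * Allocate the per-CPU jump stacks the table traversal code uses to
 * record return positions for user-defined chains; stacksize is scaled
 * by xt_jumpstack_multiplier to allow the stated number of (re)entries.
 */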
static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	i->stackptr = alloc_percpu(unsigned int);
	if (i->stackptr == NULL)
		return -ENOMEM;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vmalloc(size);
	else
		i->jumpstack = kmalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;
	memset(i->jumpstack, 0, size);

	i->stacksize *= xt_jumpstack_multiplier;
	size = sizeof(void *) * i->stacksize;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
							 cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
							 GFP_KERNEL,
							 cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}

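/*
 * Swap in @newinfo as the table's rule blob after verifying that
 * @num_counters matches the current entry count. Returns the old
 * private info (which the caller reads counters from and frees), or
 * NULL with *error set.
 */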
struct xt_table_info *
xt_replace_table(struct xt_table *table,
		 unsigned int num_counters,
		 struct xt_table_info *newinfo,
		 int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	table->private = newinfo;
	newinfo->initial_entries = private->initial_entries;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);

#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

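/*
 * Step the traversal one element forward: walk the NFPROTO_UNSPEC list
 * first, then the family-specific list, releasing one per-AF mutex and
 * taking the next at each class transition.
 */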
static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
			      bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
			       bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		return (*match->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_match_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		return (*target->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_target_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_link - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;
	int ret;

	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].owner    = table->me;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	ret = nf_register_hooks(ops, num_hooks);
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);

/**
 * xt_hook_unlink - remove hooks for a table
 * @table:	the table whose hooks are being removed
 * @ops:	nf_hook_ops array as returned by xt_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);

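/*
 * Per-netns, per-family setup: create the /proc/net/<prefix>_tables_*
 * entries (names, matches, targets) for address family @af, tearing
 * down earlier entries if a later one fails.
 */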
int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);
#endif /* CONFIG_PROC_FS */
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);

		seqlock_init(&lock->lock);
		lock->readers = 0;
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_offsets = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);