Root/
1 | /* (C) 1999-2001 Paul `Rusty' Russell |
2 | * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> |
3 | * |
4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. |
7 | */ |
8 | |
9 | #include <linux/types.h> |
10 | #include <linux/netfilter.h> |
11 | #include <linux/slab.h> |
12 | #include <linux/module.h> |
13 | #include <linux/skbuff.h> |
14 | #include <linux/proc_fs.h> |
15 | #include <linux/seq_file.h> |
16 | #include <linux/percpu.h> |
17 | #include <linux/netdevice.h> |
18 | #include <net/net_namespace.h> |
19 | #ifdef CONFIG_SYSCTL |
20 | #include <linux/sysctl.h> |
21 | #endif |
22 | |
23 | #include <net/netfilter/nf_conntrack.h> |
24 | #include <net/netfilter/nf_conntrack_core.h> |
25 | #include <net/netfilter/nf_conntrack_l3proto.h> |
26 | #include <net/netfilter/nf_conntrack_l4proto.h> |
27 | #include <net/netfilter/nf_conntrack_expect.h> |
28 | #include <net/netfilter/nf_conntrack_helper.h> |
29 | #include <net/netfilter/nf_conntrack_acct.h> |
30 | #include <net/netfilter/nf_conntrack_zones.h> |
31 | |
32 | MODULE_LICENSE("GPL"); |
33 | |
34 | #ifdef CONFIG_PROC_FS |
35 | int |
36 | print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, |
37 | const struct nf_conntrack_l3proto *l3proto, |
38 | const struct nf_conntrack_l4proto *l4proto) |
39 | { |
40 | return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple); |
41 | } |
42 | EXPORT_SYMBOL_GPL(print_tuple); |
43 | |
/* Per-open iterator state for /proc/net/nf_conntrack.
 * The seq_net_private member must stay first: seq_open_net()/seq_file_net()
 * rely on that layout to recover the struct net pointer. */
struct ct_iter_state {
	struct seq_net_private p;
	unsigned int bucket;	/* conntrack hash bucket currently being walked */
};
48 | |
49 | static struct hlist_nulls_node *ct_get_first(struct seq_file *seq) |
50 | { |
51 | struct net *net = seq_file_net(seq); |
52 | struct ct_iter_state *st = seq->private; |
53 | struct hlist_nulls_node *n; |
54 | |
55 | for (st->bucket = 0; |
56 | st->bucket < net->ct.htable_size; |
57 | st->bucket++) { |
58 | n = rcu_dereference(net->ct.hash[st->bucket].first); |
59 | if (!is_a_nulls(n)) |
60 | return n; |
61 | } |
62 | return NULL; |
63 | } |
64 | |
/* Advance to the entry following @head, crossing into subsequent hash
 * buckets as chains are exhausted.  Returns NULL at end of table.
 * Caller must hold rcu_read_lock. */
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
				      struct hlist_nulls_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;

	head = rcu_dereference(head->next);
	while (is_a_nulls(head)) {
		/* The nulls end-marker encodes the bucket it terminates.
		 * If it matches the bucket we were walking, the chain truly
		 * ended — move on to the next bucket.  Otherwise the entry
		 * was concurrently moved to another chain, so re-read the
		 * current bucket's head and restart the walk there. */
		if (likely(get_nulls_value(head) == st->bucket)) {
			if (++st->bucket >= net->ct.htable_size)
				return NULL;
		}
		head = rcu_dereference(net->ct.hash[st->bucket].first);
	}
	return head;
}
81 | |
82 | static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos) |
83 | { |
84 | struct hlist_nulls_node *head = ct_get_first(seq); |
85 | |
86 | if (head) |
87 | while (pos && (head = ct_get_next(seq, head))) |
88 | pos--; |
89 | return pos ? NULL : head; |
90 | } |
91 | |
/* seq_file start: take the RCU read lock for the whole dump pass and
 * seek to the entry at *pos.  Matching unlock is in ct_seq_stop. */
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_get_idx(seq, *pos);
}
98 | |
/* seq_file next: bump the position and step to the following hash entry. */
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_get_next(s, v);
}
104 | |
/* seq_file stop: drop the RCU read lock taken in ct_seq_start. */
static void ct_seq_stop(struct seq_file *s, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
110 | |
111 | /* return 0 on success, 1 in case of error */ |
112 | static int ct_seq_show(struct seq_file *s, void *v) |
113 | { |
114 | struct nf_conntrack_tuple_hash *hash = v; |
115 | struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash); |
116 | const struct nf_conntrack_l3proto *l3proto; |
117 | const struct nf_conntrack_l4proto *l4proto; |
118 | int ret = 0; |
119 | |
120 | NF_CT_ASSERT(ct); |
121 | if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use))) |
122 | return 0; |
123 | |
124 | /* we only want to print DIR_ORIGINAL */ |
125 | if (NF_CT_DIRECTION(hash)) |
126 | goto release; |
127 | |
128 | l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct)); |
129 | NF_CT_ASSERT(l3proto); |
130 | l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); |
131 | NF_CT_ASSERT(l4proto); |
132 | |
133 | ret = -ENOSPC; |
134 | if (seq_printf(s, "%-8s %u %-8s %u %ld ", |
135 | l3proto->name, nf_ct_l3num(ct), |
136 | l4proto->name, nf_ct_protonum(ct), |
137 | timer_pending(&ct->timeout) |
138 | ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0) |
139 | goto release; |
140 | |
141 | if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct)) |
142 | goto release; |
143 | |
144 | if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, |
145 | l3proto, l4proto)) |
146 | goto release; |
147 | |
148 | if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL)) |
149 | goto release; |
150 | |
151 | if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status))) |
152 | if (seq_printf(s, "[UNREPLIED] ")) |
153 | goto release; |
154 | |
155 | if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, |
156 | l3proto, l4proto)) |
157 | goto release; |
158 | |
159 | if (seq_print_acct(s, ct, IP_CT_DIR_REPLY)) |
160 | goto release; |
161 | |
162 | if (test_bit(IPS_ASSURED_BIT, &ct->status)) |
163 | if (seq_printf(s, "[ASSURED] ")) |
164 | goto release; |
165 | |
166 | #if defined(CONFIG_NF_CONNTRACK_MARK) |
167 | if (seq_printf(s, "mark=%u ", ct->mark)) |
168 | goto release; |
169 | #endif |
170 | |
171 | #ifdef CONFIG_NF_CONNTRACK_SECMARK |
172 | if (seq_printf(s, "secmark=%u ", ct->secmark)) |
173 | goto release; |
174 | #endif |
175 | |
176 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
177 | if (seq_printf(s, "zone=%u ", nf_ct_zone(ct))) |
178 | goto release; |
179 | #endif |
180 | |
181 | if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use))) |
182 | goto release; |
183 | |
184 | ret = 0; |
185 | release: |
186 | nf_ct_put(ct); |
187 | return 0; |
188 | } |
189 | |
/* Iterator callbacks for /proc/net/nf_conntrack. */
static const struct seq_operations ct_seq_ops = {
	.start = ct_seq_start,
	.next  = ct_seq_next,
	.stop  = ct_seq_stop,
	.show  = ct_seq_show
};
196 | |
/* open() for /proc/net/nf_conntrack: allocate per-open iterator state
 * (ct_iter_state) and bind it to the owning network namespace. */
static int ct_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_seq_ops,
			sizeof(struct ct_iter_state));
}
202 | |
/* File operations for /proc/net/nf_conntrack. */
static const struct file_operations ct_file_ops = {
	.owner   = THIS_MODULE,
	.open    = ct_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
210 | |
/* seq_file start for /proc/net/stat/nf_conntrack.
 * Position 0 is the header line (SEQ_START_TOKEN); position n maps to
 * cpu n - 1, skipping impossible CPUs.  *pos is advanced past the CPU
 * returned so the next call resumes after it. */
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(net->ct.stat, cpu);
	}

	return NULL;
}
228 | |
229 | static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) |
230 | { |
231 | struct net *net = seq_file_net(seq); |
232 | int cpu; |
233 | |
234 | for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { |
235 | if (!cpu_possible(cpu)) |
236 | continue; |
237 | *pos = cpu + 1; |
238 | return per_cpu_ptr(net->ct.stat, cpu); |
239 | } |
240 | |
241 | return NULL; |
242 | } |
243 | |
/* seq_file stop: nothing to release — the stats iterator takes no locks. */
static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
247 | |
/* Emit one line of /proc/net/stat/nf_conntrack: the column header for
 * SEQ_START_TOKEN, otherwise the hex counters of one CPU.  The first
 * column ("entries") repeats the global conntrack count on every row. */
static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_net(seq);
	unsigned int nr_conntracks = atomic_read(&net->ct.count);
	const struct ip_conntrack_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete\n");
		return 0;
	}

	/* Field order must stay in sync with the header line above. */
	seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x %08x  %08x %08x %08x \n",
		   nr_conntracks,
		   st->searched,
		   st->found,
		   st->new,
		   st->invalid,
		   st->ignore,
		   st->delete,
		   st->delete_list,
		   st->insert,
		   st->insert_failed,
		   st->drop,
		   st->early_drop,
		   st->error,

		   st->expect_new,
		   st->expect_create,
		   st->expect_delete
		);
	return 0;
}
281 | |
/* Iterator callbacks for /proc/net/stat/nf_conntrack. */
static const struct seq_operations ct_cpu_seq_ops = {
	.start	= ct_cpu_seq_start,
	.next	= ct_cpu_seq_next,
	.stop	= ct_cpu_seq_stop,
	.show	= ct_cpu_seq_show,
};
288 | |
/* open() for /proc/net/stat/nf_conntrack: only namespace-aware private
 * data is needed — no extra iterator state beyond seq_net_private. */
static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_cpu_seq_ops,
			    sizeof(struct seq_net_private));
}
294 | |
/* File operations for /proc/net/stat/nf_conntrack. */
static const struct file_operations ct_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ct_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
302 | |
/* Create this namespace's proc entries: /proc/net/nf_conntrack (mode
 * 0440 — root/group readable only) and /proc/net/stat/nf_conntrack.
 * On failure, already-created entries are removed via goto unwinding.
 * Returns 0 on success, -ENOMEM otherwise. */
static int nf_conntrack_standalone_init_proc(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops);
	if (!pde)
		goto out_nf_conntrack;

	pde = proc_create("nf_conntrack", S_IRUGO, net->proc_net_stat,
			  &ct_cpu_seq_fops);
	if (!pde)
		goto out_stat_nf_conntrack;
	return 0;

out_stat_nf_conntrack:
	proc_net_remove(net, "nf_conntrack");
out_nf_conntrack:
	return -ENOMEM;
}
322 | |
/* Remove both proc entries created by nf_conntrack_standalone_init_proc. */
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
	remove_proc_entry("nf_conntrack", net->proc_net_stat);
	proc_net_remove(net, "nf_conntrack");
}
328 | #else |
/* CONFIG_PROC_FS disabled: nothing to create. */
static int nf_conntrack_standalone_init_proc(struct net *net)
{
	return 0;
}
333 | |
/* CONFIG_PROC_FS disabled: nothing to tear down. */
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
}
337 | #endif /* CONFIG_PROC_FS */ |
338 | |
339 | /* Sysctl support */ |
340 | |
341 | #ifdef CONFIG_SYSCTL |
/* Log invalid packets of a given protocol */
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;	/* upper bound for nf_conntrack_log_invalid */

/* Handle for the init_net-only nf_ct_netfilter_table registration below. */
static struct ctl_table_header *nf_ct_netfilter_header;
347 | |
/* Per-namespace net.netfilter.* conntrack sysctls.
 * NOTE: nf_conntrack_standalone_init_sysctl() patches .data of entries
 * [1]..[4] to point into each struct net — keep the entry order stable. */
static ctl_table nf_ct_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_max",
		.data		= &nf_conntrack_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* read-only: current number of conntrack entries */
		.procname	= "nf_conntrack_count",
		.data		= &init_net.ct.count,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		/* read-only: hash table size */
		.procname	= "nf_conntrack_buckets",
		.data		= &init_net.ct.htable_size,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_checksum",
		.data		= &init_net.ct.sysctl_checksum,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* clamped to a valid IP protocol number [0, 255] */
		.procname	= "nf_conntrack_log_invalid",
		.data		= &init_net.ct.sysctl_log_invalid,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &log_invalid_proto_min,
		.extra2		= &log_invalid_proto_max,
	},
	{
		.procname	= "nf_conntrack_expect_max",
		.data		= &nf_ct_expect_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
395 | |
/* Legacy binary-sysctl number for net.nf_conntrack_max.
 * NOTE(review): appears unused in this file — confirm against the rest
 * of the tree before removing. */
#define NET_NF_CONNTRACK_MAX 2089

/* net.nf_conntrack_max compatibility entry, registered only for the
 * initial namespace (see nf_conntrack_standalone_init_sysctl). */
static ctl_table nf_ct_netfilter_table[] = {
	{
		.procname	= "nf_conntrack_max",
		.data		= &nf_conntrack_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
408 | |
/* Sysctl path "net/" under which nf_ct_netfilter_table is registered. */
static struct ctl_path nf_ct_path[] = {
	{ .procname = "net", },
	{ }
};
413 | |
414 | static int nf_conntrack_standalone_init_sysctl(struct net *net) |
415 | { |
416 | struct ctl_table *table; |
417 | |
418 | if (net_eq(net, &init_net)) { |
419 | nf_ct_netfilter_header = |
420 | register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table); |
421 | if (!nf_ct_netfilter_header) |
422 | goto out; |
423 | } |
424 | |
425 | table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table), |
426 | GFP_KERNEL); |
427 | if (!table) |
428 | goto out_kmemdup; |
429 | |
430 | table[1].data = &net->ct.count; |
431 | table[2].data = &net->ct.htable_size; |
432 | table[3].data = &net->ct.sysctl_checksum; |
433 | table[4].data = &net->ct.sysctl_log_invalid; |
434 | |
435 | net->ct.sysctl_header = register_net_sysctl_table(net, |
436 | nf_net_netfilter_sysctl_path, table); |
437 | if (!net->ct.sysctl_header) |
438 | goto out_unregister_netfilter; |
439 | |
440 | return 0; |
441 | |
442 | out_unregister_netfilter: |
443 | kfree(table); |
444 | out_kmemdup: |
445 | if (net_eq(net, &init_net)) |
446 | unregister_sysctl_table(nf_ct_netfilter_header); |
447 | out: |
448 | printk("nf_conntrack: can't register to sysctl.\n"); |
449 | return -ENOMEM; |
450 | } |
451 | |
/* Unregister this namespace's conntrack sysctls and free the kmemdup'd
 * table; the legacy net.nf_conntrack_max entry exists only in init_net. */
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	if (net_eq(net, &init_net))
		unregister_sysctl_table(nf_ct_netfilter_header);
	/* Recover the kmemdup'd table before unregistering so it can be freed. */
	table = net->ct.sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.sysctl_header);
	kfree(table);
}
462 | #else |
/* CONFIG_SYSCTL disabled: nothing to register. */
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
	return 0;
}
467 | |
/* CONFIG_SYSCTL disabled: nothing to unregister. */
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
}
471 | #endif /* CONFIG_SYSCTL */ |
472 | |
/* Per-namespace init: bring up the conntrack core, then the proc files,
 * then the sysctls.  Defaults (checksum on, invalid-logging off) are set
 * before sysctl registration so the exported entries read sane values.
 * Failures unwind in reverse order. */
static int nf_conntrack_net_init(struct net *net)
{
	int ret;

	ret = nf_conntrack_init(net);
	if (ret < 0)
		goto out_init;
	ret = nf_conntrack_standalone_init_proc(net);
	if (ret < 0)
		goto out_proc;
	net->ct.sysctl_checksum = 1;
	net->ct.sysctl_log_invalid = 0;
	ret = nf_conntrack_standalone_init_sysctl(net);
	if (ret < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	nf_conntrack_standalone_fini_proc(net);
out_proc:
	nf_conntrack_cleanup(net);
out_init:
	return ret;
}
497 | |
/* Per-namespace teardown, strictly in reverse order of nf_conntrack_net_init. */
static void nf_conntrack_net_exit(struct net *net)
{
	nf_conntrack_standalone_fini_sysctl(net);
	nf_conntrack_standalone_fini_proc(net);
	nf_conntrack_cleanup(net);
}
504 | |
/* Hooks run for every network namespace created/destroyed. */
static struct pernet_operations nf_conntrack_net_ops = {
	.init = nf_conntrack_net_init,
	.exit = nf_conntrack_net_exit,
};
509 | |
/* Module entry: register the pernet ops, which immediately run
 * nf_conntrack_net_init for all existing namespaces. */
static int __init nf_conntrack_standalone_init(void)
{
	return register_pernet_subsys(&nf_conntrack_net_ops);
}
514 | |
/* Module exit: unregister pernet ops, tearing down every namespace's state. */
static void __exit nf_conntrack_standalone_fini(void)
{
	unregister_pernet_subsys(&nf_conntrack_net_ops);
}
519 | |
520 | module_init(nf_conntrack_standalone_init); |
521 | module_exit(nf_conntrack_standalone_fini); |
522 | |
/* Some modules need us, but don't depend directly on any symbol.
   They should call this.  The empty exported function exists solely to
   create a module dependency so conntrack is loaded first. */
void need_conntrack(void)
{
}
EXPORT_SYMBOL_GPL(need_conntrack);
529 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9