Root/
1 | /* Event cache for netfilter. */ |
2 | |
3 | /* |
4 | * (C) 2005 Harald Welte <laforge@gnumonks.org> |
5 | * (C) 2005 Patrick McHardy <kaber@trash.net> |
6 | * (C) 2005-2006 Netfilter Core Team <coreteam@netfilter.org> |
7 | * (C) 2005 USAGI/WIDE Project <http://www.linux-ipv6.org> |
8 | * |
9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. |
12 | */ |
13 | |
14 | #include <linux/types.h> |
15 | #include <linux/netfilter.h> |
16 | #include <linux/skbuff.h> |
17 | #include <linux/vmalloc.h> |
18 | #include <linux/stddef.h> |
19 | #include <linux/err.h> |
20 | #include <linux/percpu.h> |
21 | #include <linux/kernel.h> |
22 | #include <linux/netdevice.h> |
23 | #include <linux/slab.h> |
24 | #include <linux/export.h> |
25 | |
26 | #include <net/netfilter/nf_conntrack.h> |
27 | #include <net/netfilter/nf_conntrack_core.h> |
28 | #include <net/netfilter/nf_conntrack_extend.h> |
29 | |
/* Serializes registration/unregistration of the conntrack and expectation
 * event notifiers below. */
static DEFINE_MUTEX(nf_ct_ecache_mutex);
31 | |
/* deliver cached events and clear cache entry - must be called with locally
 * disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
	struct net *net = nf_ct_net(ct);
	unsigned long events, missed;
	struct nf_ct_event_notifier *notify;
	struct nf_conntrack_ecache *e;
	struct nf_ct_event item;
	int ret;

	rcu_read_lock();
	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
	if (notify == NULL)
		goto out_unlock;

	/* No ecache extension on this conntrack: nothing was cached. */
	e = nf_ct_ecache_find(ct);
	if (e == NULL)
		goto out_unlock;

	/* Atomically fetch and clear the cached event bits so a concurrent
	 * caller cannot deliver the same events again. */
	events = xchg(&e->cache, 0);

	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
		goto out_unlock;

	/* We make a copy of the missed event cache without taking
	 * the lock, thus we may send missed events twice. However,
	 * this does not harm and it happens very rarely. */
	missed = e->missed;

	/* Nothing the listener subscribed to (ctmask): skip delivery. */
	if (!((events | missed) & e->ctmask))
		goto out_unlock;

	item.ct = ct;
	item.portid = 0;
	item.report = 0;

	ret = notify->fcn(events | missed, &item);

	/* Common case: delivery succeeded and there was no missed-event
	 * backlog to retire - no need to take ct->lock. */
	if (likely(ret >= 0 && !missed))
		goto out_unlock;

	spin_lock_bh(&ct->lock);
	if (ret < 0)
		/* Delivery failed: remember these events for a later retry. */
		e->missed |= events;
	else
		/* Delivery succeeded: retire the missed events we resent. */
		e->missed &= ~missed;
	spin_unlock_bh(&ct->lock);

out_unlock:
	rcu_read_unlock();
}
85 | |
86 | int nf_conntrack_register_notifier(struct net *net, |
87 | struct nf_ct_event_notifier *new) |
88 | { |
89 | int ret; |
90 | struct nf_ct_event_notifier *notify; |
91 | |
92 | mutex_lock(&nf_ct_ecache_mutex); |
93 | notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, |
94 | lockdep_is_held(&nf_ct_ecache_mutex)); |
95 | if (notify != NULL) { |
96 | ret = -EBUSY; |
97 | goto out_unlock; |
98 | } |
99 | rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new); |
100 | ret = 0; |
101 | |
102 | out_unlock: |
103 | mutex_unlock(&nf_ct_ecache_mutex); |
104 | return ret; |
105 | } |
106 | EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier); |
107 | |
108 | void nf_conntrack_unregister_notifier(struct net *net, |
109 | struct nf_ct_event_notifier *new) |
110 | { |
111 | struct nf_ct_event_notifier *notify; |
112 | |
113 | mutex_lock(&nf_ct_ecache_mutex); |
114 | notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb, |
115 | lockdep_is_held(&nf_ct_ecache_mutex)); |
116 | BUG_ON(notify != new); |
117 | RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL); |
118 | mutex_unlock(&nf_ct_ecache_mutex); |
119 | } |
120 | EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier); |
121 | |
122 | int nf_ct_expect_register_notifier(struct net *net, |
123 | struct nf_exp_event_notifier *new) |
124 | { |
125 | int ret; |
126 | struct nf_exp_event_notifier *notify; |
127 | |
128 | mutex_lock(&nf_ct_ecache_mutex); |
129 | notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, |
130 | lockdep_is_held(&nf_ct_ecache_mutex)); |
131 | if (notify != NULL) { |
132 | ret = -EBUSY; |
133 | goto out_unlock; |
134 | } |
135 | rcu_assign_pointer(net->ct.nf_expect_event_cb, new); |
136 | ret = 0; |
137 | |
138 | out_unlock: |
139 | mutex_unlock(&nf_ct_ecache_mutex); |
140 | return ret; |
141 | } |
142 | EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier); |
143 | |
144 | void nf_ct_expect_unregister_notifier(struct net *net, |
145 | struct nf_exp_event_notifier *new) |
146 | { |
147 | struct nf_exp_event_notifier *notify; |
148 | |
149 | mutex_lock(&nf_ct_ecache_mutex); |
150 | notify = rcu_dereference_protected(net->ct.nf_expect_event_cb, |
151 | lockdep_is_held(&nf_ct_ecache_mutex)); |
152 | BUG_ON(notify != new); |
153 | RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL); |
154 | mutex_unlock(&nf_ct_ecache_mutex); |
155 | } |
156 | EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier); |
157 | |
/* Module-wide defaults, copied into each network namespace by
 * nf_conntrack_ecache_pernet_init() and tunable per-netns via sysctl. */
#define NF_CT_EVENTS_DEFAULT 1
static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;
161 | |
#ifdef CONFIG_SYSCTL
/* Template sysctl table; nf_conntrack_event_init_sysctl() kmemdup()s it
 * and points .data at the per-netns fields before registering. */
static struct ctl_table event_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_events",
		.data		= &init_net.ct.sysctl_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_events_retry_timeout",
		.data		= &init_net.ct.sysctl_events_retry_timeout,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{}
};
#endif /* CONFIG_SYSCTL */
181 | |
/* Conntrack extension descriptor for the per-conntrack event cache
 * (struct nf_conntrack_ecache), registered in nf_conntrack_ecache_init(). */
static struct nf_ct_ext_type event_extend __read_mostly = {
	.len	= sizeof(struct nf_conntrack_ecache),
	.align	= __alignof__(struct nf_conntrack_ecache),
	.id	= NF_CT_EXT_ECACHE,
};
187 | |
188 | #ifdef CONFIG_SYSCTL |
189 | static int nf_conntrack_event_init_sysctl(struct net *net) |
190 | { |
191 | struct ctl_table *table; |
192 | |
193 | table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table), |
194 | GFP_KERNEL); |
195 | if (!table) |
196 | goto out; |
197 | |
198 | table[0].data = &net->ct.sysctl_events; |
199 | table[1].data = &net->ct.sysctl_events_retry_timeout; |
200 | |
201 | /* Don't export sysctls to unprivileged users */ |
202 | if (net->user_ns != &init_user_ns) |
203 | table[0].procname = NULL; |
204 | |
205 | net->ct.event_sysctl_header = |
206 | register_net_sysctl(net, "net/netfilter", table); |
207 | if (!net->ct.event_sysctl_header) { |
208 | printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n"); |
209 | goto out_register; |
210 | } |
211 | return 0; |
212 | |
213 | out_register: |
214 | kfree(table); |
215 | out: |
216 | return -ENOMEM; |
217 | } |
218 | |
219 | static void nf_conntrack_event_fini_sysctl(struct net *net) |
220 | { |
221 | struct ctl_table *table; |
222 | |
223 | table = net->ct.event_sysctl_header->ctl_table_arg; |
224 | unregister_net_sysctl_table(net->ct.event_sysctl_header); |
225 | kfree(table); |
226 | } |
227 | #else |
/* CONFIG_SYSCTL disabled: sysctl setup/teardown become no-ops. */
static int nf_conntrack_event_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_event_fini_sysctl(struct net *net)
{
}
236 | #endif /* CONFIG_SYSCTL */ |
237 | |
238 | int nf_conntrack_ecache_pernet_init(struct net *net) |
239 | { |
240 | net->ct.sysctl_events = nf_ct_events; |
241 | net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout; |
242 | return nf_conntrack_event_init_sysctl(net); |
243 | } |
244 | |
/* Per-netns teardown: undo nf_conntrack_ecache_pernet_init(). */
void nf_conntrack_ecache_pernet_fini(struct net *net)
{
	nf_conntrack_event_fini_sysctl(net);
}
249 | |
250 | int nf_conntrack_ecache_init(void) |
251 | { |
252 | int ret = nf_ct_extend_register(&event_extend); |
253 | if (ret < 0) |
254 | pr_err("nf_ct_event: Unable to register event extension.\n"); |
255 | return ret; |
256 | } |
257 | |
/* Global teardown: unregister the ecache conntrack extension type. */
void nf_conntrack_ecache_fini(void)
{
	nf_ct_extend_unregister(&event_extend);
}
262 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9