Root/
1 | #include <linux/kernel.h> |
2 | #include <linux/slab.h> |
3 | #include <linux/init.h> |
4 | #include <linux/module.h> |
5 | #include <linux/proc_fs.h> |
6 | #include <linux/skbuff.h> |
7 | #include <linux/netfilter.h> |
8 | #include <linux/seq_file.h> |
9 | #include <linux/rcupdate.h> |
10 | #include <net/protocol.h> |
11 | #include <net/netfilter/nf_queue.h> |
12 | |
13 | #include "nf_internals.h" |
14 | |
/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly;

/* Serializes all writers of queue_handler[]; readers go lockless via RCU. */
static DEFINE_MUTEX(queue_handler_mutex);
23 | |
24 | /* return EBUSY when somebody else is registered, return EEXIST if the |
25 | * same handler is registered, return 0 in case of success. */ |
26 | int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) |
27 | { |
28 | int ret; |
29 | |
30 | if (pf >= ARRAY_SIZE(queue_handler)) |
31 | return -EINVAL; |
32 | |
33 | mutex_lock(&queue_handler_mutex); |
34 | if (queue_handler[pf] == qh) |
35 | ret = -EEXIST; |
36 | else if (queue_handler[pf]) |
37 | ret = -EBUSY; |
38 | else { |
39 | rcu_assign_pointer(queue_handler[pf], qh); |
40 | ret = 0; |
41 | } |
42 | mutex_unlock(&queue_handler_mutex); |
43 | |
44 | return ret; |
45 | } |
46 | EXPORT_SYMBOL(nf_register_queue_handler); |
47 | |
48 | /* The caller must flush their queue before this */ |
49 | int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) |
50 | { |
51 | if (pf >= ARRAY_SIZE(queue_handler)) |
52 | return -EINVAL; |
53 | |
54 | mutex_lock(&queue_handler_mutex); |
55 | if (queue_handler[pf] && queue_handler[pf] != qh) { |
56 | mutex_unlock(&queue_handler_mutex); |
57 | return -EINVAL; |
58 | } |
59 | |
60 | rcu_assign_pointer(queue_handler[pf], NULL); |
61 | mutex_unlock(&queue_handler_mutex); |
62 | |
63 | synchronize_rcu(); |
64 | |
65 | return 0; |
66 | } |
67 | EXPORT_SYMBOL(nf_unregister_queue_handler); |
68 | |
69 | void nf_unregister_queue_handlers(const struct nf_queue_handler *qh) |
70 | { |
71 | u_int8_t pf; |
72 | |
73 | mutex_lock(&queue_handler_mutex); |
74 | for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) { |
75 | if (queue_handler[pf] == qh) |
76 | rcu_assign_pointer(queue_handler[pf], NULL); |
77 | } |
78 | mutex_unlock(&queue_handler_mutex); |
79 | |
80 | synchronize_rcu(); |
81 | } |
82 | EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); |
83 | |
84 | static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) |
85 | { |
86 | /* Release those devices we held, or Alexey will kill me. */ |
87 | if (entry->indev) |
88 | dev_put(entry->indev); |
89 | if (entry->outdev) |
90 | dev_put(entry->outdev); |
91 | #ifdef CONFIG_BRIDGE_NETFILTER |
92 | if (entry->skb->nf_bridge) { |
93 | struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge; |
94 | |
95 | if (nf_bridge->physindev) |
96 | dev_put(nf_bridge->physindev); |
97 | if (nf_bridge->physoutdev) |
98 | dev_put(nf_bridge->physoutdev); |
99 | } |
100 | #endif |
101 | /* Drop reference to owner of hook which queued us. */ |
102 | module_put(entry->elem->owner); |
103 | } |
104 | |
/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 *
 * Hands @skb to the queue handler registered for protocol family @pf on
 * queue @queuenum.  Returns 1 when the skb is no longer the caller's
 * problem (queued, or freed here on failure); returns 0 only when the
 * hook's owner module is unloading, in which case the caller keeps
 * ownership of @skb (see the GSO loop in nf_queue()).
 */
static int __nf_queue(struct sk_buff *skb,
		      struct list_head *elem,
		      u_int8_t pf, unsigned int hook,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct sk_buff *),
		      unsigned int queuenum)
{
	int status;
	struct nf_queue_entry *entry = NULL;	/* NULL so err_unlock may kfree() it */
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev;
	struct net_device *physoutdev;
#endif
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if noone is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler[pf]);
	if (!qh)
		goto err_unlock;

	afinfo = nf_get_afinfo(pf);
	if (!afinfo)
		goto err_unlock;

	/* Trailing route_key_size bytes are scratch for afinfo->saveroute(). */
	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry)
		goto err_unlock;

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= list_entry(elem, struct nf_hook_ops, list),
		.pf	= pf,
		.hook	= hook,
		.indev	= indev,
		.outdev	= outdev,
		.okfn	= okfn,
	};

	/* If it's going away, ignore hook. */
	if (!try_module_get(entry->elem->owner)) {
		rcu_read_unlock();
		kfree(entry);
		return 0;	/* caller still owns skb */
	}

	/* Bump dev refs so they don't vanish while packet is out */
	if (indev)
		dev_hold(indev);
	if (outdev)
		dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		physindev = skb->nf_bridge->physindev;
		if (physindev)
			dev_hold(physindev);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev)
			dev_hold(physoutdev);
	}
#endif
	/* Stash routing state so nf_reinject() can re-route on NF_ACCEPT. */
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		/* Handler rejected the packet: drop the refs taken above,
		 * then free skb and entry on the shared error path. */
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 1;

 err_unlock:
	rcu_read_unlock();
 err:
	kfree_skb(skb);
	kfree(entry);	/* kfree(NULL) is a no-op on the err_unlock paths */
	return 1;
}
192 | |
193 | int nf_queue(struct sk_buff *skb, |
194 | struct list_head *elem, |
195 | u_int8_t pf, unsigned int hook, |
196 | struct net_device *indev, |
197 | struct net_device *outdev, |
198 | int (*okfn)(struct sk_buff *), |
199 | unsigned int queuenum) |
200 | { |
201 | struct sk_buff *segs; |
202 | |
203 | if (!skb_is_gso(skb)) |
204 | return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn, |
205 | queuenum); |
206 | |
207 | switch (pf) { |
208 | case NFPROTO_IPV4: |
209 | skb->protocol = htons(ETH_P_IP); |
210 | break; |
211 | case NFPROTO_IPV6: |
212 | skb->protocol = htons(ETH_P_IPV6); |
213 | break; |
214 | } |
215 | |
216 | segs = skb_gso_segment(skb, 0); |
217 | kfree_skb(skb); |
218 | if (IS_ERR(segs)) |
219 | return 1; |
220 | |
221 | do { |
222 | struct sk_buff *nskb = segs->next; |
223 | |
224 | segs->next = NULL; |
225 | if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn, |
226 | queuenum)) |
227 | kfree_skb(segs); |
228 | segs = nskb; |
229 | } while (segs); |
230 | return 1; |
231 | } |
232 | |
/*
 * Re-enter the netfilter stack with a packet previously handed out by
 * __nf_queue(), applying the @verdict that the queue handler decided on.
 * Consumes @entry (freed here) and @skb unless the verdict queues it again.
 */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct list_head *elem = &entry->elem->list;
	const struct nf_afinfo *afinfo;

	rcu_read_lock();

	/* Drop the device/module refs taken when the packet was queued. */
	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		/* Step back one hook so nf_iterate() re-runs the same one. */
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		/* Routing may have changed while queued; re-route or drop. */
		afinfo = nf_get_afinfo(entry->pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		/* Resume hook traversal after (or at, for NF_REPEAT) elem. */
		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
				     skb, entry->hook,
				     entry->indev, entry->outdev, &elem,
				     entry->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		/* Traversal done: deliver via the saved continuation. */
		local_bh_disable();
		entry->okfn(skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		/* Queue again (queue number in the verdict's upper bits);
		 * a 0 return means the hook module vanished — keep going. */
		if (!__nf_queue(skb, elem, entry->pf, entry->hook,
				entry->indev, entry->outdev, entry->okfn,
				verdict >> NF_VERDICT_BITS))
			goto next_hook;
		break;
	case NF_STOLEN:
	default:
		/* NF_DROP and anything unrecognized land here too. */
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
	return;
}
EXPORT_SYMBOL(nf_reinject);
285 | |
286 | #ifdef CONFIG_PROC_FS |
287 | static void *seq_start(struct seq_file *seq, loff_t *pos) |
288 | { |
289 | if (*pos >= ARRAY_SIZE(queue_handler)) |
290 | return NULL; |
291 | |
292 | return pos; |
293 | } |
294 | |
295 | static void *seq_next(struct seq_file *s, void *v, loff_t *pos) |
296 | { |
297 | (*pos)++; |
298 | |
299 | if (*pos >= ARRAY_SIZE(queue_handler)) |
300 | return NULL; |
301 | |
302 | return pos; |
303 | } |
304 | |
/* Nothing to release: iteration state lives entirely in *pos. */
static void seq_stop(struct seq_file *s, void *v)
{

}
309 | |
310 | static int seq_show(struct seq_file *s, void *v) |
311 | { |
312 | int ret; |
313 | loff_t *pos = v; |
314 | const struct nf_queue_handler *qh; |
315 | |
316 | rcu_read_lock(); |
317 | qh = rcu_dereference(queue_handler[*pos]); |
318 | if (!qh) |
319 | ret = seq_printf(s, "%2lld NONE\n", *pos); |
320 | else |
321 | ret = seq_printf(s, "%2lld %s\n", *pos, qh->name); |
322 | rcu_read_unlock(); |
323 | |
324 | return ret; |
325 | } |
326 | |
/* seq_file iterator over the per-protocol queue handler table. */
static const struct seq_operations nfqueue_seq_ops = {
	.start = seq_start,
	.next = seq_next,
	.stop = seq_stop,
	.show = seq_show,
};
333 | |
/* open() for /proc/net/netfilter/nf_queue: plain seq_file, no private state. */
static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}
338 | |
/* file_operations for the read-only /proc/net/netfilter/nf_queue entry. */
static const struct file_operations nfqueue_file_ops = {
	.owner = THIS_MODULE,
	.open = nfqueue_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
346 | #endif /* PROC_FS */ |
347 | |
348 | |
/*
 * Boot-time init: create /proc/net/netfilter/nf_queue (when PROC_FS is on).
 * Returns 0 on success, -1 if the proc entry could not be created.
 */
int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("nf_queue", S_IRUGO,
			 proc_net_netfilter, &nfqueue_file_ops))
		return -1;
#endif
	return 0;
}
358 | |
359 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9