/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

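/*
 * Per-queue instance state.  One of these exists for every queue number a
 * userspace listener has bound to; instances live in a small RCU-protected
 * hash table keyed by queue number.
 */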
struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	struct rcu_head rcu;

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	unsigned int id_sequence;		/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static DEFINE_SPINLOCK(instances_lock);

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

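/*
 * Find the instance bound to @queue_num, or NULL.  Callers hold either
 * rcu_read_lock() or instances_lock.
 */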
static struct nfqnl_instance *
instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry_rcu(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

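/*
 * Allocate a new instance for @queue_num owned by the netlink peer @pid and
 * publish it in the hash table.  Returns ERR_PTR(-EEXIST) if the queue is
 * already bound, and pins the module for the lifetime of the instance.
 */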
static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst;
	unsigned int h;
	int err;

	spin_lock(&instances_lock);
	if (instance_lookup(queue_num)) {
		err = -EEXIST;
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst) {
		err = -ENOMEM;
		goto out_unlock;
	}

	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xfffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE)) {
		err = -EAGAIN;
		goto out_free;
	}

	h = instance_hashfn(queue_num);
	hlist_add_head_rcu(&inst->hlist, &instance_table[h]);

	spin_unlock(&instances_lock);

	return inst;

out_free:
	kfree(inst);
out_unlock:
	spin_unlock(&instances_lock);
	return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
			unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
	struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
						   rcu);

	nfqnl_flush(inst, NULL, 0);
	kfree(inst);
	module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
	hlist_del_rcu(&inst->hlist);
	call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfqnl_instance *inst)
{
	spin_lock(&instances_lock);
	__instance_destroy(inst);
	spin_unlock(&instances_lock);
}

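/* Append an entry to the per-queue list; the caller holds queue->lock. */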
static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
	list_add_tail(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
	struct nf_queue_entry *entry = NULL, *i;

	spin_lock_bh(&queue->lock);

	list_for_each_entry(i, &queue->queue_list, list) {
		if (i->id == id) {
			entry = i;
			break;
		}
	}

	if (entry) {
		list_del(&entry->list);
		queue->queue_total--;
	}

	spin_unlock_bh(&queue->lock);

	return entry;
}

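/*
 * Drop every queued entry for which @cmpfn returns true (or all entries when
 * @cmpfn is NULL), reinjecting each one with an NF_DROP verdict.
 */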
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nf_queue_entry *entry, *next;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
		if (!cmpfn || cmpfn(entry, data)) {
			list_del(&entry->list);
			queue->queue_total--;
			nf_reinject(entry, NF_DROP);
		}
	}
	spin_unlock_bh(&queue->lock);
}

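/*
 * Build the NFQNL_MSG_PACKET netlink message announcing @entry to userspace:
 * a packet header attribute, interface indexes, mark, hardware address and
 * timestamp, plus up to copy_range bytes of payload in NFQNL_COPY_PACKET
 * mode.  The packet id is assigned from the instance's id_sequence under
 * queue->lock.
 */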
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nf_queue_entry *entry)
{
	sk_buff_data_t old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;

	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

	outdev = entry->outdev;

	spin_lock_bh(&queue->lock);

	switch ((enum nfqnl_config_mode)queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;

	case NFQNL_COPY_PACKET:
		if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
		     entskb->ip_summed == CHECKSUM_COMPLETE) &&
		    skb_checksum_help(entskb)) {
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		if (queue->copy_range == 0
		    || queue->copy_range > entskb->len)
			data_len = entskb->len;
		else
			data_len = queue->copy_range;

		size += nla_total_size(data_len);
		break;
	}

	entry->id = queue->id_sequence++;

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entry->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id		= htonl(entry->id);
	pmsg.hw_protocol	= entskb->protocol;
	pmsg.hook		= entry->hook;

	NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	indev = entry->indev;
	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex));
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
				     htonl(indev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (entskb->nf_bridge && entskb->nf_bridge->physindev)
				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
					     htonl(entskb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex));
#else
		if (entry->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
				     htonl(outdev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (entskb->nf_bridge && entskb->nf_bridge->physoutdev)
				NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
					     htonl(entskb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (entskb->mark)
		NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));

	if (indev && entskb->dev) {
		struct nfqnl_msg_packet_hw phw;
		int len = dev_parse_header(entskb, phw.hw_addr);
		if (len) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nlattr *nla;
		int sz = nla_attr_size(data_len);

		if (skb_tailroom(skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
		nla->nla_type = NFQA_PAYLOAD;
		nla->nla_len = sz;

		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nla_put_failure:
	if (skb)
		kfree_skb(skb);
	if (net_ratelimit())
		printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}

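/*
 * Queue handler callback invoked from the nf_queue core: build the netlink
 * message, unicast it to the bound listener and park the entry on the
 * instance's queue list until a verdict arrives.  Returns 0 on success and
 * -1 on failure (no listener bound, copy mode NONE, queue full, or message
 * allocation/delivery error).
 */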
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;
	int err;

	/* rcu_read_lock()ed by nf_hook_slow() */
	queue = instance_lookup(queuenum);
	if (!queue)
		goto err_out;

	if (queue->copy_mode == NFQNL_COPY_NONE)
		goto err_out;

	nskb = nfqnl_build_packet_message(queue, entry);
	if (nskb == NULL)
		goto err_out;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packet(s).\n",
			       queue->queue_total);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
	if (err < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
err_out:
	return -1;
}

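/*
 * Replace the queued skb's payload with @data_len bytes supplied by userspace
 * alongside the verdict, growing or trimming the skb as needed.  The checksum
 * is invalidated since the packet contents may have changed.
 */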
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
{
	struct sk_buff *nskb;
	int diff;

	diff = data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
					       diff, GFP_ATOMIC);
			if (!nskb) {
				printk(KERN_WARNING "nf_queue: OOM "
				       "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			kfree_skb(e->skb);
			e->skb = nskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status = 0;

	spin_lock_bh(&queue->lock);
	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nlattr which has 16bit nla_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;

	}
	spin_unlock_bh(&queue->lock);

	return status;
}

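/*
 * nfqnl_flush() match callback: true if @entry entered or will leave through
 * the interface with index @ifindex, including bridge physical ports.
 */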
static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
	if (entry->indev)
		if (entry->indev->ifindex == ifindex)
			return 1;
	if (entry->outdev)
		if (entry->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	rcu_read_lock();

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry_rcu(inst, tmp, head, hlist)
			nfqnl_flush(inst, dev_cmp, ifindex);
	}

	rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
		int i;

		/* destroy all instances for this pid */
		spin_lock(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if ((n->net == &init_net) &&
				    (n->pid == inst->peer_pid))
					__instance_destroy(inst);
			}
		}
		spin_unlock(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
	[NFQA_VERDICT_HDR]	= { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
	[NFQA_MARK]		= { .type = NLA_U32 },
	[NFQA_PAYLOAD]		= { .type = NLA_UNSPEC },
};

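/*
 * Handle an NFQNL_MSG_VERDICT message: look up the referenced packet by id,
 * optionally replace its payload and mark, and reinject it into the stack
 * with the verdict chosen by userspace (NF_ACCEPT, NF_DROP, ...).  Only the
 * netlink peer that owns the queue may issue verdicts.
 */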
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   const struct nlmsghdr *nlh,
		   const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nf_queue_entry *entry;
	int err;

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (!queue) {
		err = -ENODEV;
		goto err_out_unlock;
	}

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_unlock;
	}

	if (!nfqa[NFQA_VERDICT_HDR]) {
		err = -EINVAL;
		goto err_out_unlock;
	}

	vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_unlock;
	}

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_unlock;
	}
	rcu_read_unlock();

	if (nfqa[NFQA_PAYLOAD]) {
		if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
				 nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK])
		entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

	nf_reinject(entry, verdict);
	return 0;

err_out_unlock:
	rcu_read_unlock();
	return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
{
	return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
	[NFQA_CFG_CMD]		= { .len = sizeof(struct nfqnl_msg_config_cmd) },
	[NFQA_CFG_PARAMS]	= { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};

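/*
 * Handle an NFQNL_MSG_CONFIG message: (un)register the queue handler for a
 * protocol family, (un)bind a queue instance for the sending peer, and apply
 * copy mode, copy range and queue length parameters.  A typical listener
 * startup sequence would be, roughly (a sketch of what e.g. libnetfilter_queue
 * sends, not verified against any particular userspace version):
 *
 *	NFQNL_CFG_CMD_PF_UNBIND + NFQNL_CFG_CMD_PF_BIND	(for e.g. AF_INET)
 *	NFQNL_CFG_CMD_BIND				(chosen queue_num)
 *	NFQA_CFG_PARAMS					(NFQNL_COPY_PACKET, range)
 */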
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  const struct nlmsghdr *nlh,
		  const struct nlattr * const nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	struct nfqnl_msg_config_cmd *cmd = NULL;
	int ret = 0;

	if (nfqa[NFQA_CFG_CMD]) {
		cmd = nla_data(nfqa[NFQA_CFG_CMD]);

		/* Commands without queue context - might sleep */
		switch (cmd->command) {
		case NFQNL_CFG_CMD_PF_BIND:
			return nf_register_queue_handler(ntohs(cmd->pf),
							 &nfqh);
		case NFQNL_CFG_CMD_PF_UNBIND:
			return nf_unregister_queue_handler(ntohs(cmd->pf),
							   &nfqh);
		}
	}

	rcu_read_lock();
	queue = instance_lookup(queue_num);
	if (queue && queue->peer_pid != NETLINK_CB(skb).pid) {
		ret = -EPERM;
		goto err_out_unlock;
	}

	if (cmd != NULL) {
		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue) {
				ret = -EBUSY;
				goto err_out_unlock;
			}
			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (IS_ERR(queue)) {
				ret = PTR_ERR(queue);
				goto err_out_unlock;
			}
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue) {
				ret = -ENODEV;
				goto err_out_unlock;
			}
			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
		case NFQNL_CFG_CMD_PF_UNBIND:
			break;
		default:
			ret = -ENOTSUPP;
			break;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		params = nla_data(nfqa[NFQA_CFG_PARAMS]);
		nfqnl_set_mode(queue, params->copy_mode,
			       ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
		__be32 *queue_maxlen;

		if (!queue) {
			ret = -ENODEV;
			goto err_out_unlock;
		}
		queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

err_out_unlock:
	rcu_read_unlock();
	return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX,
				    .policy = nfqa_verdict_policy },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX,
				    .policy = nfqa_cfg_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(instances_lock)
{
	spin_lock(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
	__releases(instances_lock)
{
	spin_unlock(&instances_lock);
}

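/*
 * One line per instance in /proc/net/netfilter/nfnetlink_queue:
 * queue number, peer pid, queued packets, copy mode, copy range,
 * queue-full drops, delivery failures, next packet id, and a constant 1.
 */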
static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  inst->id_sequence, 1);
}

static const struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &nfqnl_seq_ops,
			sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */

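/*
 * Module init: register the netlink release notifier, the nfnetlink
 * subsystem, the /proc entry and the netdevice notifier.  The queue handler
 * itself is only registered per protocol family on NFQNL_CFG_CMD_PF_BIND.
 */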
static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	if (!proc_create("nfnetlink_queue", 0440,
			 proc_net_netfilter, &nfqnl_file_ops)) {
		status = -ENOMEM;
		goto cleanup_subsys;
	}
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);

	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);