/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 * License terms: GNU General Public License (GPL) version 2
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 * and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static int caif_net_id;
static int q_high = 50; /* Percent */
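
/*
 * Illustrative note (not in the original source): q_high could be made
 * tunable at load time with the standard module_param() helper, e.g.:
 *
 *	module_param(q_high, int, 0444);
 *	MODULE_PARM_DESC(q_high, "XOFF high-water mark, in percent of tx_queue_len");
 *
 * As written, the driver keeps it as a fixed value.
 */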

struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}

static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}

static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}
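
#if 0
/*
 * Usage sketch (illustrative only, compiled out, not part of the
 * original source): the per-CPU refcount above is always taken inside
 * an RCU read-side section and released once the entry is no longer
 * needed. caif_flow_cb(), receive() and dev_flowctrl() below all
 * follow this pattern; the hypothetical helper just spells it out.
 */
static void caifd_usage_sketch(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();
	caifd = caif_get(dev);
	if (!caifd) {
		rcu_read_unlock();
		return;
	}
	caifd_hold(caifd);	/* pin the entry before leaving the RCU section */
	rcu_read_unlock();

	/* ... caifd may be used here even while it is being unlinked ... */

	caifd_put(caifd);	/* NETDEV_UNREGISTER may now free it */
}
#endif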

static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (caifd == NULL) {
		/* Must drop the RCU read lock on this error path too */
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = false;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
					 caifd->layer.id);
	caifd_put(caifd);
}

static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
		container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->tx_queue_len == 0))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		qlen = qdisc_qlen(rcu_dereference_bh(txq->qdisc));

		if (likely(qlen == 0))
			goto noxoff;

		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (likely(qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped(%d) or is full (%d > %d)\n",
		 netif_queue_stopped(caifd->netdev),
		 qlen, high);
	caifd->xoff = true;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}
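
/*
 * Worked example of the threshold arithmetic above (illustrative
 * numbers, not from the original source): with tx_queue_len = 1000 and
 * q_high = 50, flow is switched off once the qdisc backlog reaches
 * high = (1000 * 50) / 100 = 500 packets. caif_flow_cb() switches it
 * back on when the hijacked destructor of the stored skb finally runs.
 */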

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
	    !netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}
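
/*
 * Note on skb ownership in receive() (added, not in the original
 * source): on the early-exit path the skb is freed here with
 * kfree_skb(). Once handed to layer.up->receive() the CAIF stack owns
 * the packet, except for -EILSEQ, where ownership stays with the
 * caller and the packet is destroyed above.
 */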

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};

static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}

void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer,
		     int (**rcv_func)(struct sk_buff *, struct net_device *,
				      struct packet_type *,
				      struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strncpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name) - 1);
	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
	caifd->layer.transmit = transmit;
	cfcnfg_add_phy_layer(cfg,
			     dev,
			     &caifd->layer,
			     pref,
			     link_support,
			     caifdev->use_fcs,
			     head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
}
EXPORT_SYMBOL(caif_enroll_dev);
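
#if 0
/*
 * Caller sketch (illustrative only, compiled out, not part of the
 * original source): a link-layer driver with its own receive path
 * would enroll roughly like this; the function and its body are
 * hypothetical. The NETDEV_REGISTER handler below does the equivalent
 * for generic ARPHRD_CAIF devices.
 */
static void example_enroll(struct net_device *dev)
{
	struct caif_dev_common *caifdev = netdev_priv(dev);
	struct cflayer *layer;
	int (*rcv)(struct sk_buff *, struct net_device *,
		   struct packet_type *, struct net_device *);

	caifdev->link_select = CAIF_LINK_LOW_LATENCY;
	caif_enroll_dev(dev, caifdev, NULL, 0, &layer, &rcv);
	/* the driver then feeds incoming frames to rcv() itself */
}
#endif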

/* notify Caif of device events */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		caif_enroll_dev(dev, caifdev, link_support, head_room,
				&layer, NULL);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = false;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with original destructor.
		 * We trust that skb->destructor *always* is called before
		 * the skb reference is invalid. The hijacked SKB destructor
		 * takes the flow_lock so manipulating the skb->destructor here
		 * should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = false;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all reference
		 * counts for the net-device are released. If references to
		 * caifd are still held, simply ignore NETDEV_UNREGISTER and
		 * wait for the next call to NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF stack associated
		 * with the un-registering net-device is freed.
		 */

		if (caifd_refcnt_read(caifd) != 0 ||
		    cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device inuse\n");
			/* Re-enroll the device if the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace Caif devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}

static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
		       (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device inuse\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};
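
/*
 * Added note (not in the original source): because both .id and .size
 * are set, register_pernet_subsys() allocates a zeroed struct caif_net
 * for every network namespace and stores the subsystem key in
 * caif_net_id; get_cfcnfg() and caif_device_list() above hand that key
 * to net_generic() to find the per-namespace data.
 */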

/* Initialize Caif devices list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);