Root/
1 | /* drivers/net/ifb.c: |
2 | |
3 | The purpose of this driver is to provide a device that allows |
4 | for sharing of resources: |
5 | |
6 | 1) qdiscs/policies that are per device as opposed to system wide. |
7 | ifb allows for a device which can be redirected to thus providing |
8 | an impression of sharing. |
9 | |
10 | 2) Allows for queueing incoming traffic for shaping instead of |
11 | dropping. |
12 | |
13 | The original concept is based on what is known as the IMQ |
14 | driver initially written by Martin Devera, later rewritten |
15 | by Patrick McHardy and then maintained by Andre Correa. |
16 | |
17 | You need the tc action mirror or redirect to feed this device |
18 | packets. |
19 | |
20 | This program is free software; you can redistribute it and/or |
21 | modify it under the terms of the GNU General Public License |
22 | as published by the Free Software Foundation; either version |
23 | 2 of the License, or (at your option) any later version. |
24 | |
25 | Authors: Jamal Hadi Salim (2005) |
26 | |
27 | */ |
28 | |
29 | |
30 | #include <linux/module.h> |
31 | #include <linux/kernel.h> |
32 | #include <linux/netdevice.h> |
33 | #include <linux/etherdevice.h> |
34 | #include <linux/init.h> |
35 | #include <linux/interrupt.h> |
36 | #include <linux/moduleparam.h> |
37 | #include <net/pkt_sched.h> |
38 | #include <net/net_namespace.h> |
39 | |
/* Backlog limit: sender is throttled once this many skbs are queued. */
#define TX_Q_LIMIT 32
/*
 * Per-device private state.
 *
 * The two skb queues use the lock-free __skb_* helpers: rq and
 * tasklet_pending are protected by the device's single tx queue lock
 * (held by the stack around ndo_start_xmit, and taken explicitly in
 * ri_tasklet), while tq is touched only from the tasklet itself.
 */
struct ifb_private {
	struct tasklet_struct ifb_tasklet;	/* deferred forwarding work */
	int tasklet_pending;			/* nonzero while a run is scheduled */

	/* ingress side: packets queued by ifb_xmit(), awaiting the tasklet */
	struct u64_stats_sync rsync;		/* guards the 64-bit rx counters */
	struct sk_buff_head rq;
	u64 rx_packets;
	u64 rx_bytes;

	/* egress side: packets spliced from rq, being re-injected */
	struct u64_stats_sync tsync;		/* guards the 64-bit tx counters */
	struct sk_buff_head tq;
	u64 tx_packets;
	u64 tx_bytes;
};
55 | |
/* How many ifb devices to create at load time (module parameter below). */
static int numifbs = 2;

/* Forward declarations: ifb_netdev_ops below refers to these. */
static void ri_tasklet(unsigned long dev);
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);
62 | |
/*
 * Deferred forwarding engine, run in softirq context.  @dev is the ifb
 * net_device cast to unsigned long (tasklet calling convention).
 *
 * Packets queued on dp->rq by ifb_xmit() are spliced onto dp->tq under
 * the tx queue lock, then re-injected one by one: transmitted out of
 * the originating device (AT_EGRESS) or fed back into the receive path
 * (AT_INGRESS).  Whenever the lock cannot be taken, or work remains,
 * the tasklet reschedules itself via the resched label.
 */
static void ri_tasklet(unsigned long dev)
{
	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(_dev, 0);
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		/* tq drained: refill it from rq.  rq is protected by the
		 * tx queue lock, so only trylock — never spin in softirq. */
		if (__netif_tx_trylock(txq)) {
			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
			__netif_tx_unlock(txq);
		} else {
			/* reschedule */
			goto resched;
		}
	}

	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		/* Reset the verdict and set TC_NCLS so the packet is not
		 * classified (and redirected back here) a second time. */
		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);

		u64_stats_update_begin(&dp->tsync);
		dp->tx_packets++;
		dp->tx_bytes += skb->len;
		u64_stats_update_end(&dp->tsync);

		/* Look up the device the packet originally arrived on
		 * (skb_iif); it may have disappeared in the meantime. */
		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			_dev->stats.tx_dropped++;
			/* keep draining the rest of tq in a later run */
			if (skb_queue_len(&dp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			/* strip the link-layer header before handing the
			 * skb back to the receive path */
			skb_pull(skb, skb->dev->hard_header_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
		/* Did more work arrive on rq while we were forwarding?
		 * If not, clear the pending flag and restart a stopped
		 * tx queue; otherwise run again. */
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}
131 | |
/*
 * ndo_get_stats64 handler: fill @stats from the per-device counters.
 *
 * The u64 counters are updated from softirq context, so read each pair
 * under its u64_stats_sync sequence and retry if a writer raced with us
 * (this matters on 32-bit, where a u64 load is not atomic).
 */
static struct rtnl_link_stats64 *ifb_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct ifb_private *dp = netdev_priv(dev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&dp->rsync);
		stats->rx_packets = dp->rx_packets;
		stats->rx_bytes = dp->rx_bytes;
	} while (u64_stats_fetch_retry_bh(&dp->rsync, start));

	do {
		start = u64_stats_fetch_begin_bh(&dp->tsync);

		stats->tx_packets = dp->tx_packets;
		stats->tx_bytes = dp->tx_bytes;

	} while (u64_stats_fetch_retry_bh(&dp->tsync, start));

	/* the drop counters live in the generic dev stats as plain longs */
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;

	return stats;
}
157 | |
158 | |
/* Device callbacks: ifb is a pure software device, so only the basic
 * open/stop/xmit/stats hooks are needed. */
static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
};
166 | |
/* Offload feature set advertised by ifb.  NOTE(review): presumably safe
 * to claim wholesale because ifb only requeues skbs and never touches
 * the payload — confirm against the redirect targets. */
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
		      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
		      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)

/*
 * Setup callback used both by the legacy alloc_netdev() path and by
 * rtnl_link ("ip link add ... type ifb"): initialize a freshly
 * allocated device before registration.
 */
static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->destructor = free_netdev;
	dev->netdev_ops = &ifb_netdev_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	dev->features |= IFB_FEATURES;
	dev->vlan_features |= IFB_FEATURES;

	/* software device: no ARP, no multicast, keep skb->dst and allow
	 * shared skbs on xmit */
	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	eth_hw_addr_random(dev);
}
189 | |
/*
 * ndo_start_xmit handler: accept a packet redirected to the ifb device.
 *
 * Nothing is transmitted here; the skb is queued on dp->rq and the
 * tasklet is kicked to forward it later.  rq and tasklet_pending are
 * safe to touch because the stack calls us with the tx queue lock held
 * (the same lock ri_tasklet takes before touching rq).
 */
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);
	u32 from = G_TC_FROM(skb->tc_verd);

	u64_stats_update_begin(&dp->rsync);
	dp->rx_packets++;
	dp->rx_bytes += skb->len;
	u64_stats_update_end(&dp->rsync);

	/* only packets that came via tc mirred/redirect (ingress or
	 * egress) and carry an originating ifindex can be forwarded */
	if (!(from & (AT_INGRESS|AT_EGRESS)) || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	/* throttle the sender once the backlog hits tx_queue_len; the
	 * tasklet wakes the queue again after draining rq */
	if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) {
		netif_stop_queue(dev);
	}

	__skb_queue_tail(&dp->rq, skb);
	if (!dp->tasklet_pending) {
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}
218 | |
/*
 * ndo_stop handler: stop the queue and discard all pending packets.
 * tasklet_kill() waits for a running tasklet to complete first, so the
 * queues can then be purged with the lock-free helpers.
 */
static int ifb_close(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_kill(&dp->ifb_tasklet);
	netif_stop_queue(dev);
	__skb_queue_purge(&dp->rq);
	__skb_queue_purge(&dp->tq);
	return 0;
}
229 | |
/*
 * ndo_open handler: set up the forwarding tasklet and the two packet
 * queues, then allow transmissions.
 */
static int ifb_open(struct net_device *dev)
{
	struct ifb_private *dp = netdev_priv(dev);

	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
	__skb_queue_head_init(&dp->rq);
	__skb_queue_head_init(&dp->tq);
	netif_start_queue(dev);

	return 0;
}
241 | |
242 | static int ifb_validate(struct nlattr *tb[], struct nlattr *data[]) |
243 | { |
244 | if (tb[IFLA_ADDRESS]) { |
245 | if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) |
246 | return -EINVAL; |
247 | if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) |
248 | return -EADDRNOTAVAIL; |
249 | } |
250 | return 0; |
251 | } |
252 | |
/* rtnl_link glue: allows "ip link add ... type ifb" to create devices. */
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};
259 | |
/* Number of ifb devices to be set up by this module (default 2; the 0
 * perm means it is load-time only, not visible in sysfs). */
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
263 | |
264 | static int __init ifb_init_one(int index) |
265 | { |
266 | struct net_device *dev_ifb; |
267 | int err; |
268 | |
269 | dev_ifb = alloc_netdev(sizeof(struct ifb_private), |
270 | "ifb%d", ifb_setup); |
271 | |
272 | if (!dev_ifb) |
273 | return -ENOMEM; |
274 | |
275 | dev_ifb->rtnl_link_ops = &ifb_link_ops; |
276 | err = register_netdevice(dev_ifb); |
277 | if (err < 0) |
278 | goto err; |
279 | |
280 | return 0; |
281 | |
282 | err: |
283 | free_netdev(dev_ifb); |
284 | return err; |
285 | } |
286 | |
287 | static int __init ifb_init_module(void) |
288 | { |
289 | int i, err; |
290 | |
291 | rtnl_lock(); |
292 | err = __rtnl_link_register(&ifb_link_ops); |
293 | |
294 | for (i = 0; i < numifbs && !err; i++) |
295 | err = ifb_init_one(i); |
296 | if (err) |
297 | __rtnl_link_unregister(&ifb_link_ops); |
298 | rtnl_unlock(); |
299 | |
300 | return err; |
301 | } |
302 | |
/*
 * Module exit: unregister the rtnl link ops; the rtnl core disposes of
 * the devices that were registered with them.
 */
static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}
307 | |
module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
/* lets "ip link add ... type ifb" auto-load this module */
MODULE_ALIAS_RTNL_LINK("ifb");
313 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9