Root/
1 | /* |
2 | * Common code for low-level network console, dump, and debugger code |
3 | * |
4 | * Derived from netconsole, kgdb-over-ethernet, and netdump patches |
5 | */ |
6 | |
7 | #ifndef _LINUX_NETPOLL_H |
8 | #define _LINUX_NETPOLL_H |
9 | |
10 | #include <linux/netdevice.h> |
11 | #include <linux/interrupt.h> |
12 | #include <linux/rcupdate.h> |
13 | #include <linux/list.h> |
14 | |
/*
 * Address overlay holding either an IPv4 or an IPv6 address in the
 * same 128-bit storage; all[] gives raw 32-bit word access.
 */
union inet_addr {
	__u32 all[4];		/* raw access to all four 32-bit words */
	__be32 ip;		/* IPv4 address, network byte order */
	__be32 ip6[4];		/* IPv6 address as four big-endian words */
	struct in_addr in;	/* IPv4 address as a struct */
	struct in6_addr in6;	/* IPv6 address as a struct */
};
22 | |
/*
 * Per-client netpoll instance: identifies the bound device and the
 * UDP endpoint (addresses, ports, peer MAC) used for send/receive.
 */
struct netpoll {
	struct net_device *dev;		/* device this instance is bound to */
	char dev_name[IFNAMSIZ];	/* interface name, e.g. "eth0" */
	const char *name;		/* client name (e.g. "netconsole") */
	/* optional callback invoked for received UDP payloads */
	void (*rx_hook)(struct netpoll *, int, char *, int);

	union inet_addr local_ip, remote_ip;	/* endpoint addresses */
	bool ipv6;			/* true when the addresses are IPv6 */
	u16 local_port, remote_port;	/* UDP ports */
	u8 remote_mac[ETH_ALEN];	/* peer's ethernet address */

	struct list_head rx; /* rx_np list element */
	struct work_struct cleanup_work;	/* deferred (async) cleanup */
};
37 | |
/*
 * Per-device netpoll state shared by all netpoll instances attached to
 * a device; freed via RCU (see the rcu member).
 */
struct netpoll_info {
	atomic_t refcnt;	/* number of attached netpoll instances */

	unsigned long rx_flags;	/* nonzero while rx handling is active */
	spinlock_t rx_lock;	/* protects rx state (see netpoll_rx()) */
	struct semaphore dev_lock;	/* NOTE(review): presumably serializes
					 * rx enable/disable -- confirm in
					 * net/core/netpoll.c */
	struct list_head rx_np; /* netpolls that registered an rx_hook */

	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
	struct sk_buff_head txq;	/* packets queued for later transmit */

	struct delayed_work tx_work;	/* retries transmission of txq skbs */

	struct netpoll *netpoll;	/* back-pointer to owning instance */
	struct rcu_head rcu;		/* deferred free of this structure */
};
54 | |
#ifdef CONFIG_NETPOLL
/* Temporarily block / re-allow netpoll rx processing on @dev. */
extern void netpoll_rx_disable(struct net_device *dev);
extern void netpoll_rx_enable(struct net_device *dev);
#else
/* No-op stubs when netpoll is not configured. */
static inline void netpoll_rx_disable(struct net_device *dev) { return; }
static inline void netpoll_rx_enable(struct net_device *dev) { return; }
#endif
62 | |
/*
 * Core netpoll API, implemented in net/core/netpoll.c.
 * NOTE(review): the __-prefixed variants are lower-level helpers that
 * presumably expect the caller to hold the appropriate locks (RTNL) --
 * verify against the implementation.
 */
/* Format @len bytes of @msg as a UDP packet and transmit it via @np. */
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
/* Log the configured options of @np. */
void netpoll_print_options(struct netpoll *np);
/* Parse the textual option string @opt into @np; 0 on success. */
int netpoll_parse_options(struct netpoll *np, char *opt);
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
/* Bind @np to its named device and allocate per-device state. */
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
void __netpoll_cleanup(struct netpoll *np);
/* Asynchronous teardown variant (uses np->cleanup_work). */
void __netpoll_free_async(struct netpoll *np);
void netpoll_cleanup(struct netpoll *np);
/* rx fast-path helper used by netpoll_rx(); nonzero if skb consumed. */
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
			     struct net_device *dev);
/*
 * Transmit @skb on @np's bound device.  Local IRQs are disabled around
 * the low-level send so it runs atomically on this CPU (the rest of
 * netpoll, e.g. netpoll_tx_running(), relies on this IRQ-off state).
 */
static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	local_irq_save(flags);
	netpoll_send_skb_on_dev(np, skb, np->dev);
	local_irq_restore(flags);
}
83 | |
84 | |
85 | |
86 | #ifdef CONFIG_NETPOLL |
/*
 * Return true if netpoll may want to consume rx packets on @skb's
 * device.  Uses rcu_dereference_bh(), so the caller must be in an
 * RCU-bh read-side critical section (e.g. softirq context).
 */
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);

	/* active if any instance registered an rx_hook or rx_flags is set */
	return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags);
}
93 | |
/*
 * Offer a received @skb to netpoll.  Returns true when the packet was
 * consumed and the caller must not process it further.
 *
 * NOTE(review): disabling local IRQs here also appears to serve as the
 * RCU-bh read-side protection needed by netpoll_rx_on() -- confirm.
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo;
	unsigned long flags;
	bool ret = false;

	local_irq_save(flags);

	/* fast path: netpoll rx not active on this device */
	if (!netpoll_rx_on(skb))
		goto out;

	npinfo = rcu_dereference_bh(skb->dev->npinfo);
	spin_lock(&npinfo->rx_lock);
	/* check rx_flags again with the lock held */
	if (npinfo->rx_flags && __netpoll_rx(skb, npinfo))
		ret = true;
	spin_unlock(&npinfo->rx_lock);

out:
	local_irq_restore(flags);
	return ret;
}
116 | |
117 | static inline int netpoll_receive_skb(struct sk_buff *skb) |
118 | { |
119 | if (!list_empty(&skb->dev->napi_list)) |
120 | return netpoll_rx(skb); |
121 | return 0; |
122 | } |
123 | |
/*
 * If netpoll is active on @napi's device, take the NAPI poll lock and
 * record this CPU as the poll owner.  Returns an opaque cookie (the
 * napi pointer) to hand to netpoll_poll_unlock(), or NULL when no lock
 * was taken (netpoll inactive).
 */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	if (dev && dev->npinfo) {
		spin_lock(&napi->poll_lock);
		napi->poll_owner = smp_processor_id();
		return napi;
	}
	return NULL;
}
135 | |
/*
 * Release the lock taken by netpoll_poll_lock().  @have is the cookie
 * that function returned and may be NULL (no-op in that case).
 */
static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	if (napi) {
		napi->poll_owner = -1;	/* -1 == no owning CPU */
		spin_unlock(&napi->poll_lock);
	}
}
145 | |
/*
 * Heuristic check for "a netpoll transmit is in progress on this CPU":
 * netpoll_send_skb() runs with local IRQs disabled, so IRQs-off is
 * treated as evidence of an active netpoll tx.  @dev is unused.
 */
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return irqs_disabled();
}
150 | |
#else
/*
 * CONFIG_NETPOLL disabled: provide no-op inline stubs so callers do
 * not need #ifdefs at every use site.  Note netpoll_netdev_init() only
 * exists in this branch -- callers must be CONFIG_NETPOLL=n only, or a
 * real version exists elsewhere (not visible in this header).
 */
static inline bool netpoll_rx(struct sk_buff *skb)
{
	return false;
}
static inline bool netpoll_rx_on(struct sk_buff *skb)
{
	return false;
}
static inline int netpoll_receive_skb(struct sk_buff *skb)
{
	return 0;
}
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	return NULL;
}
static inline void netpoll_poll_unlock(void *have)
{
}
static inline void netpoll_netdev_init(struct net_device *dev)
{
}
static inline bool netpoll_tx_running(struct net_device *dev)
{
	return false;
}
#endif
179 | |
180 | #endif |
181 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9