/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat		emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/export.h>
#include <asm/errno.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>

struct dn_rt_hash_bucket
{
	struct dn_route __rcu *chain;
	spinlock_t lock;
};

extern struct neigh_table dn_neigh_table;


static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;

static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
int decnet_dst_gc_interval = 2;

static struct dst_ops dn_dst_ops = {
	.family =		PF_DECnet,
	.protocol =		cpu_to_be16(ETH_P_DNA_RT),
	.gc_thresh =		128,
	.gc =			dn_dst_gc,
	.check =		dn_dst_check,
	.default_advmss =	dn_dst_default_advmss,
	.mtu =			dn_dst_mtu,
	.cow_metrics =		dst_cow_metrics_generic,
	.destroy =		dn_dst_destroy,
	.negative_advice =	dn_dst_negative_advice,
	.link_failure =		dn_dst_link_failure,
	.update_pmtu =		dn_dst_update_pmtu,
	.neigh_lookup =		dn_dst_neigh_lookup,
};

static void dn_dst_destroy(struct dst_entry *dst)
{
	dst_destroy_metrics_generic(dst);
}

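/*
 * Hash the XOR of the source and destination addresses. The shifts
 * fold the high bits of the 16 bit result down so that the bits kept
 * by dn_rt_hash_mask depend on the whole value.
 */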
static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
{
	__u16 tmp = (__u16 __force)(src ^ dst);
	tmp ^= (tmp >> 3);
	tmp ^= (tmp >> 5);
	tmp ^= (tmp >> 10);
	return dn_rt_hash_mask & (unsigned)tmp;
}

static inline void dnrt_free(struct dn_route *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void dnrt_drop(struct dn_route *rt)
{
	dst_release(&rt->dst);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static void dn_dst_check_expire(unsigned long dummy)
{
	int i;
	struct dn_route *rt;
	struct dn_route __rcu **rtp;
	unsigned long now = jiffies;
	unsigned long expire = 120 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {
		rtp = &dn_rt_hash_table[i].chain;

		spin_lock(&dn_rt_hash_table[i].lock);
		while ((rt = rcu_dereference_protected(*rtp,
				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
			if (atomic_read(&rt->dst.__refcnt) ||
					(now - rt->dst.lastuse) < expire) {
				rtp = &rt->dst.dn_next;
				continue;
			}
			*rtp = rt->dst.dn_next;
			rt->dst.dn_next = NULL;
			dnrt_free(rt);
		}
		spin_unlock(&dn_rt_hash_table[i].lock);

		if ((jiffies - now) > 0)
			break;
	}

	mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}

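/*
 * Called by the dst core under memory pressure. Unlike the periodic
 * expiry timer above, this frees at most one unreferenced entry per
 * hash bucket on each invocation (note the break below).
 */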
static int dn_dst_gc(struct dst_ops *ops)
{
	struct dn_route *rt;
	struct dn_route __rcu **rtp;
	int i;
	unsigned long now = jiffies;
	unsigned long expire = 10 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {

		spin_lock_bh(&dn_rt_hash_table[i].lock);
		rtp = &dn_rt_hash_table[i].chain;

		while ((rt = rcu_dereference_protected(*rtp,
				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
			if (atomic_read(&rt->dst.__refcnt) ||
					(now - rt->dst.lastuse) < expire) {
				rtp = &rt->dst.dn_next;
				continue;
			}
			*rtp = rt->dst.dn_next;
			rt->dst.dn_next = NULL;
			dnrt_drop(rt);
			break;
		}
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}

	return 0;
}

/*
 * The decnet standards don't impose a particular minimum mtu, but they
 * do insist that the routing layer accepts a datagram at least 230
 * bytes long. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then
 * we assume the worst and use the long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size
 * we advertise to the other end).
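 *
 * For example, with the 6 byte short header the minimum works out at
 * 230 - 6 = 224 bytes, while with the 21 byte long header (also the
 * no-neighbour fallback) it is 230 - 21 = 209 bytes.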
 */
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct neighbour *n = dst_get_neighbour_noref(dst);
	u32 min_mtu = 230;
	struct dn_dev *dn;

	dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;

	if (dn && dn->use_long == 0)
		min_mtu -= 6;
	else
		min_mtu -= 21;

	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
		if (!(dst_metric_locked(dst, RTAX_MTU))) {
			dst_metric_set(dst, RTAX_MTU, mtu);
			dst_set_expires(dst, dn_rt_mtu_expires);
		}
		if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
			u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
			u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
			if (!existing_mss || existing_mss > mss)
				dst_metric_set(dst, RTAX_ADVMSS, mss);
		}
	}
}

/*
 * Called when a route has been marked obsolete (e.g. by a routing
 * cache flush).
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
	return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
	dst_release(dst);
	return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
}

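/*
 * Branch-free comparison of the fields which key a cached route: the
 * OR of the per-field XORs is zero only if every field matches.
 */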
static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
{
	return ((fl1->daddr ^ fl2->daddr) |
		(fl1->saddr ^ fl2->saddr) |
		(fl1->flowidn_mark ^ fl2->flowidn_mark) |
		(fl1->flowidn_scope ^ fl2->flowidn_scope) |
		(fl1->flowidn_oif ^ fl2->flowidn_oif) |
		(fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
}

static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
{
	struct dn_route *rth;
	struct dn_route __rcu **rthp;
	unsigned long now = jiffies;

	rthp = &dn_rt_hash_table[hash].chain;

	spin_lock_bh(&dn_rt_hash_table[hash].lock);
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
		if (compare_keys(&rth->fld, &rt->fld)) {
			/* Put it first */
			*rthp = rth->dst.dn_next;
			rcu_assign_pointer(rth->dst.dn_next,
					   dn_rt_hash_table[hash].chain);
			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(&dn_rt_hash_table[hash].lock);

			dnrt_drop(rt);
			*rp = rth;
			return 0;
		}
		rthp = &rth->dst.dn_next;
	}

	rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

	dst_use(&rt->dst, now);
	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
	*rp = rt;
	return 0;
}

static void dn_run_flush(unsigned long dummy)
{
	int i;
	struct dn_route *rt, *next;

	for (i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);

		if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
			goto nothing_to_declare;

		for(; rt; rt = next) {
			next = rcu_dereference_raw(rt->dst.dn_next);
			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
			dst_free((struct dst_entry *)rt);
		}

nothing_to_declare:
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}
}

static DEFINE_SPINLOCK(dn_rt_flush_lock);

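/*
 * Schedule a flush of the route cache. A negative delay means "use
 * the minimum delay", zero means "flush now", and a pending flush is
 * never deferred beyond the deadline recorded when it was first
 * requested.
 */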
void dn_rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_interrupt();

	if (delay < 0)
		delay = dn_rt_min_delay;

	spin_lock_bh(&dn_rt_flush_lock);

	if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
		long tmo = (long)(dn_rt_deadline - now);

		if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&dn_rt_flush_lock);
		dn_run_flush(0);
		return;
	}

	if (dn_rt_deadline == 0)
		dn_rt_deadline = now + dn_rt_max_delay;

	dn_rt_flush_timer.expires = now + delay;
	add_timer(&dn_rt_flush_timer);
	spin_unlock_bh(&dn_rt_flush_lock);
}

/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 * Clears the return-to-sender-requested flag, sets the returning flag,
 * swaps the source and destination addresses, zeroes the hop count and
 * sends the packet back out of the interface it arrived on.
 */
static int dn_return_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	__le16 *src;
	__le16 *dst;

	/* Add back headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Skip packet length and point to flags */
	ptr = skb->data + 2;
	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

	dst = (__le16 *)ptr;
	ptr += 2;
	src = (__le16 *)ptr;
	ptr += 2;
	*ptr = 0; /* Zero hop count */

	swap(*src, *dst);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, NULL, NULL);
	return NET_RX_SUCCESS;
}

/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 * As dn_return_short above, but for the long header format, where the
 * source and destination are 6 byte Ethernet-style addresses.
 */
static int dn_return_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	unsigned char *src_addr, *dst_addr;
	unsigned char tmp[ETH_ALEN];

	/* Add back all headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Ignore packet length and point to flags */
	ptr = skb->data + 2;

	/* Skip padding */
	if (*ptr & DN_RT_F_PF) {
		char padlen = (*ptr & ~DN_RT_F_PF);
		ptr += padlen;
	}

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
	ptr += 2;
	dst_addr = ptr;
	ptr += 8;
	src_addr = ptr;
	ptr += 6;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	memcpy(tmp, src_addr, ETH_ALEN);
	memcpy(src_addr, dst_addr, ETH_ALEN);
	memcpy(dst_addr, tmp, ETH_ALEN);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, dst_addr, src_addr);
	return NET_RX_SUCCESS;
}

/**
 * dn_route_rx_packet - Try and find a route for an incoming packet
 * @skb: The packet to find a route for
 *
 * Returns: result of input function if route is found, error code otherwise
 */
static int dn_route_rx_packet(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	int err;

	if ((err = dn_route_input(skb)) == 0)
		return dst_input(skb);

	cb = DN_SKB_CB(skb);
	if (decnet_debug_level & 4) {
		char *devname = skb->dev ? skb->dev->name : "???";

		printk(KERN_DEBUG
			"DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
			(int)cb->rt_flags, devname, skb->len,
			le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
			err, skb->pkt_type);
	}

	if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
		switch (cb->rt_flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_SHORT:
			return dn_return_short(skb);
		case DN_RT_PKT_LONG:
			return dn_return_long(skb);
		}
	}

	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_rx_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 20);
	skb_reset_transport_header(skb);

	/* Destination info */
	ptr += 2;
	cb->dst = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;


	/* Source info */
	ptr += 2;
	cb->src = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;
	/* Other junk */
	ptr++;
	cb->hops = *ptr++; /* Visit Count */

	return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
		       dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}



static int dn_route_rx_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 5);
	skb_reset_transport_header(skb);

	cb->dst = *(__le16 *)ptr;
	ptr += 2;
	cb->src = *(__le16 *)ptr;
	ptr += 2;
	cb->hops = *ptr & 0x3f;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
		       dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_discard(struct sk_buff *skb)
{
	/*
	 * I know we drop the packet here, but that's considered success in
	 * this case
	 */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int dn_route_ptp_hello(struct sk_buff *skb)
{
	dn_dev_hello(skb);
	dn_neigh_pointopoint_hello(skb);
	return NET_RX_SUCCESS;
}

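/*
 * On the wire each packet starts with a 16 bit little-endian length
 * followed by a flags byte (preceded by padding, whose length is
 * encoded in the first byte, when DN_RT_F_PF is set). Control packets
 * are dispatched on DN_RT_CNTL_MSK, data packets on DN_RT_PKT_MSK.
 */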
int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct dn_skb_cb *cb;
	unsigned char flags = 0;
	__u16 len = le16_to_cpu(*(__le16 *)skb->data);
	struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
	unsigned char padlen = 0;

	if (!net_eq(dev_net(dev), &init_net))
		goto dump_it;

	if (dn == NULL)
		goto dump_it;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out;

	if (!pskb_may_pull(skb, 3))
		goto dump_it;

	skb_pull(skb, 2);

	if (len > skb->len)
		goto dump_it;

	skb_trim(skb, len);

	flags = *skb->data;

	cb = DN_SKB_CB(skb);
	cb->stamp = jiffies;
	cb->iif = dev->ifindex;

	/*
	 * If we have padding, remove it.
	 */
	if (flags & DN_RT_F_PF) {
		padlen = flags & ~DN_RT_F_PF;
		if (!pskb_may_pull(skb, padlen + 1))
			goto dump_it;
		skb_pull(skb, padlen);
		flags = *skb->data;
	}

	skb_reset_network_header(skb);

	/*
	 * Weed out packets from future DECnet versions
	 */
	if (flags & DN_RT_F_VER)
		goto dump_it;

	cb->rt_flags = flags;

	if (decnet_debug_level & 1)
		printk(KERN_DEBUG
			"dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
			(int)flags, (dev) ? dev->name : "???", len, skb->len,
			padlen);

	if (flags & DN_RT_PKT_CNTL) {
		if (unlikely(skb_linearize(skb)))
			goto dump_it;

		switch (flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_INIT:
			dn_dev_init_pkt(skb);
			break;
		case DN_RT_PKT_VERI:
			dn_dev_veri_pkt(skb);
			break;
		}

		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		switch (flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_HELO:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       skb, skb->dev, NULL,
				       dn_route_ptp_hello);

		case DN_RT_PKT_L1RT:
		case DN_RT_PKT_L2RT:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
				       skb, skb->dev, NULL,
				       dn_route_discard);
		case DN_RT_PKT_ERTH:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       skb, skb->dev, NULL,
				       dn_neigh_router_hello);

		case DN_RT_PKT_EEDH:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       skb, skb->dev, NULL,
				       dn_neigh_endnode_hello);
		}
	} else {
		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		skb_pull(skb, 1); /* Pull flags */

		switch (flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_LONG:
			return dn_route_rx_long(skb);
		case DN_RT_PKT_SHORT:
			return dn_route_rx_short(skb);
		}
	}

dump_it:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}

static int dn_to_neigh_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *n = dst_get_neighbour_noref(dst);

	return n->output(n, skb);
}

static int dn_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct dn_route *rt = (struct dn_route *)dst;
	struct net_device *dev = dst->dev;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);

	int err = -EINVAL;

	if (dst_get_neighbour_noref(dst) == NULL)
		goto error;

	skb->dev = dev;

	cb->src = rt->rt_saddr;
	cb->dst = rt->rt_daddr;

	/*
	 * Always set the Intra-Ethernet bit on all outgoing packets
	 * originated on this node. The only valid flag from the upper
	 * layers is return-to-sender-requested. Set hop count to 0 too.
	 */
	cb->rt_flags &= ~DN_RT_F_RQR;
	cb->rt_flags |= DN_RT_F_IE;
	cb->hops = 0;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
		       dn_to_neigh_output);

error:
	if (net_ratelimit())
		printk(KERN_DEBUG "dn_output: This should not happen\n");

	kfree_skb(skb);

	return err;
}

static int dn_forward(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dst_entry *dst = skb_dst(skb);
	struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
	struct dn_route *rt;
	int header_len;
#ifdef CONFIG_NETFILTER
	struct net_device *dev = skb->dev;
#endif

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	/* Ensure that we have enough space for headers */
	rt = (struct dn_route *)skb_dst(skb);
	header_len = dn_db->use_long ? 21 : 6;
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
		goto drop;

	/*
	 * Hop count exceeded.
	 */
	if (++cb->hops > 30)
		goto drop;

	skb->dev = rt->dst.dev;

	/*
	 * If packet goes out same interface it came in on, then set
	 * the Intra-Ethernet bit. This has no effect for short
	 * packets, so we don't need to test for them here.
	 */
	cb->rt_flags &= ~DN_RT_F_IE;
	if (rt->rt_flags & RTCF_DOREDIRECT)
		cb->rt_flags |= DN_RT_F_IE;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
		       dn_to_neigh_output);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Used to catch bugs. This should never normally get
 * called.
 */
static int dn_rt_bug(struct sk_buff *skb)
{
	if (net_ratelimit()) {
		struct dn_skb_cb *cb = DN_SKB_CB(skb);

		printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
		       le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
	}

	kfree_skb(skb);

	return NET_RX_DROP;
}

static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
{
	return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
}

static unsigned int dn_dst_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
}

static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
	struct dn_fib_info *fi = res->fi;
	struct net_device *dev = rt->dst.dev;
	unsigned int mss_metric;
	struct neighbour *n;

	if (fi) {
		if (DN_FIB_RES_GW(*res) &&
		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = DN_FIB_RES_GW(*res);
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
	}
	rt->rt_type = res->type;

	if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		dst_set_neighbour(&rt->dst, n);
	}

	if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
		dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
	mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
	if (mss_metric) {
		unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
		if (mss_metric > mss)
			dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
	}
	return 0;
}

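/*
 * Returns the number of leading bits, counted from the most
 * significant end, on which the two addresses agree. For example,
 * two addresses differing only in their lowest three bits match to
 * 13 bits.
 */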
static inline int dn_match_addr(__le16 addr1, __le16 addr2)
{
	__u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
	int match = 16;
	while(tmp) {
		tmp >>= 1;
		match--;
	}
	return match;
}

static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
{
	__le16 saddr = 0;
	struct dn_dev *dn_db;
	struct dn_ifaddr *ifa;
	int best_match = 0;
	int ret;

	rcu_read_lock();
	dn_db = rcu_dereference(dev->dn_ptr);
	for (ifa = rcu_dereference(dn_db->ifa_list);
	     ifa != NULL;
	     ifa = rcu_dereference(ifa->ifa_next)) {
		if (ifa->ifa_scope > scope)
			continue;
		if (!daddr) {
			saddr = ifa->ifa_local;
			break;
		}
		ret = dn_match_addr(daddr, ifa->ifa_local);
		if (ret > best_match) {
			best_match = ret;
			saddr = ifa->ifa_local;
		}
		if (best_match == 0)
			saddr = ifa->ifa_local;
	}
	rcu_read_unlock();

	return saddr;
}

static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
{
	return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
}

static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
{
	__le16 mask = dnet_make_mask(res->prefixlen);
	return (daddr&~mask)|res->fi->fib_nh->nh_gw;
}

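/*
 * Resolution order for output routes: validate any requested output
 * device and source address, treat a missing destination as local,
 * then try the FIB. If that fails (always the case on endnodes) fall
 * back to the neighbour cache, then to the default router, and
 * finally assume the destination is directly connected.
 */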
static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
{
	struct flowidn fld = {
		.daddr = oldflp->daddr,
		.saddr = oldflp->saddr,
		.flowidn_scope = RT_SCOPE_UNIVERSE,
		.flowidn_mark = oldflp->flowidn_mark,
		.flowidn_iif = init_net.loopback_dev->ifindex,
		.flowidn_oif = oldflp->flowidn_oif,
	};
	struct dn_route *rt = NULL;
	struct net_device *dev_out = NULL, *dev;
	struct neighbour *neigh = NULL;
	unsigned hash;
	unsigned flags = 0;
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
	int err;
	int free_res = 0;
	__le16 gateway = 0;

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
		       " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
		       le16_to_cpu(oldflp->saddr),
		       oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
		       oldflp->flowidn_oif);

	/* If we have an output interface, verify it's a DECnet device */
	if (oldflp->flowidn_oif) {
		dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
		err = -ENODEV;
		if (dev_out && dev_out->dn_ptr == NULL) {
			dev_put(dev_out);
			dev_out = NULL;
		}
		if (dev_out == NULL)
			goto out;
	}

	/* If we have a source address, verify that it's a local address */
	if (oldflp->saddr) {
		err = -EADDRNOTAVAIL;

		if (dev_out) {
			if (dn_dev_islocal(dev_out, oldflp->saddr))
				goto source_ok;
			dev_put(dev_out);
			goto out;
		}
		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			if (!dev->dn_ptr)
				continue;
			if (!dn_dev_islocal(dev, oldflp->saddr))
				continue;
			if ((dev->flags & IFF_LOOPBACK) &&
			    oldflp->daddr &&
			    !dn_dev_islocal(dev, oldflp->daddr))
				continue;

			dev_out = dev;
			break;
		}
		rcu_read_unlock();
		if (dev_out == NULL)
			goto out;
		dev_hold(dev_out);
source_ok:
		;
	}

	/* No destination? Assume it's local */
	if (!fld.daddr) {
		fld.daddr = fld.saddr;

		err = -EADDRNOTAVAIL;
		if (dev_out)
			dev_put(dev_out);
		dev_out = init_net.loopback_dev;
		dev_hold(dev_out);
		if (!fld.daddr) {
			fld.daddr =
			fld.saddr = dnet_select_source(dev_out, 0,
						       RT_SCOPE_HOST);
			if (!fld.daddr)
				goto out;
		}
		fld.flowidn_oif = init_net.loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		goto make_route;
	}

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: initial checks complete."
		       " dst=%04x src=%04x oif=%d try_hard=%d\n",
		       le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
		       fld.flowidn_oif, try_hard);

	/*
	 * N.B. If the kernel is compiled without router support then
	 * dn_fib_lookup() will evaluate to non-zero so this if () block
	 * will always be executed.
	 */
	err = -ESRCH;
	if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
		struct dn_dev *dn_db;
		if (err != -ESRCH)
			goto out;
		/*
		 * Here the fallback is basically the standard algorithm for
		 * routing in endnodes which is described in the DECnet routing
		 * docs
		 *
		 * If we are not trying hard, look in neighbour cache.
		 * The result is tested to ensure that if a specific output
		 * device/source address was requested, then we honour that
		 * here
		 */
		if (!try_hard) {
			neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
			if (neigh) {
				if ((oldflp->flowidn_oif &&
				     (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
				    (oldflp->saddr &&
				     (!dn_dev_islocal(neigh->dev,
						      oldflp->saddr)))) {
					neigh_release(neigh);
					neigh = NULL;
				} else {
					if (dev_out)
						dev_put(dev_out);
					if (dn_dev_islocal(neigh->dev, fld.daddr)) {
						dev_out = init_net.loopback_dev;
						res.type = RTN_LOCAL;
					} else {
						dev_out = neigh->dev;
					}
					dev_hold(dev_out);
					goto select_source;
				}
			}
		}

		/* Not there? Perhaps it's a local address */
		if (dev_out == NULL)
			dev_out = dn_dev_get_default();
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;
		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
		/* Possible improvement - check all devices for local addr */
		if (dn_dev_islocal(dev_out, fld.daddr)) {
			dev_put(dev_out);
			dev_out = init_net.loopback_dev;
			dev_hold(dev_out);
			res.type = RTN_LOCAL;
			goto select_source;
		}
		/* Not local either.... try sending it to the default router */
		neigh = neigh_clone(dn_db->router);
		BUG_ON(neigh && neigh->dev != dev_out);

		/* Ok then, we assume it's directly connected and move on */
select_source:
		if (neigh)
			gateway = ((struct dn_neigh *)neigh)->addr;
		if (gateway == 0)
			gateway = fld.daddr;
		if (fld.saddr == 0) {
			fld.saddr = dnet_select_source(dev_out, gateway,
						       res.type == RTN_LOCAL ?
						       RT_SCOPE_HOST :
						       RT_SCOPE_LINK);
			if (fld.saddr == 0 && res.type != RTN_LOCAL)
				goto e_addr;
		}
		fld.flowidn_oif = dev_out->ifindex;
		goto make_route;
	}
	free_res = 1;

	if (res.type == RTN_NAT)
		goto e_inval;

	if (res.type == RTN_LOCAL) {
		if (!fld.saddr)
			fld.saddr = fld.daddr;
		if (dev_out)
			dev_put(dev_out);
		dev_out = init_net.loopback_dev;
		dev_hold(dev_out);
		fld.flowidn_oif = dev_out->ifindex;
		if (res.fi)
			dn_fib_info_put(res.fi);
		res.fi = NULL;
		goto make_route;
	}

	if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
		dn_fib_select_multipath(&fld, &res);

	/*
	 * We could add some logic to deal with default routes here and
	 * get rid of some of the special casing above.
	 */

	if (!fld.saddr)
		fld.saddr = DN_FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = DN_FIB_RES_DEV(res);
	dev_hold(dev_out);
	fld.flowidn_oif = dev_out->ifindex;
	gateway = DN_FIB_RES_GW(res);

make_route:
	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
	if (rt == NULL)
		goto e_nobufs;

	memset(&rt->fld, 0, sizeof(rt->fld));
	rt->fld.saddr        = oldflp->saddr;
	rt->fld.daddr        = oldflp->daddr;
	rt->fld.flowidn_oif  = oldflp->flowidn_oif;
	rt->fld.flowidn_iif  = 0;
	rt->fld.flowidn_mark = oldflp->flowidn_mark;

	rt->rt_saddr      = fld.saddr;
	rt->rt_daddr      = fld.daddr;
	rt->rt_gateway    = gateway ? gateway : fld.daddr;
	rt->rt_local_src  = fld.saddr;

	rt->rt_dst_map    = fld.daddr;
	rt->rt_src_map    = fld.saddr;

	dst_set_neighbour(&rt->dst, neigh);
	neigh = NULL;

	rt->dst.lastuse = jiffies;
	rt->dst.output  = dn_output;
	rt->dst.input   = dn_rt_bug;
	rt->rt_flags      = flags;
	if (flags & RTCF_LOCAL)
		rt->dst.input = dn_nsp_rx;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
	dn_insert_route(rt, hash, (struct dn_route **)pprt);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:
	return err;

e_addr:
	err = -EADDRNOTAVAIL;
	goto done;
e_inval:
	err = -EINVAL;
	goto done;
e_nobufs:
	err = -ENOBUFS;
	goto done;
e_neighbour:
	dst_free(&rt->dst);
	goto e_nobufs;
}


/*
 * N.B. The flags may be moved into the flowi at some future stage.
 */
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
{
	unsigned hash = dn_hash(flp->saddr, flp->daddr);
	struct dn_route *rt = NULL;

	if (!(flags & MSG_TRYHARD)) {
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
			rt = rcu_dereference_bh(rt->dst.dn_next)) {
			if ((flp->daddr == rt->fld.daddr) &&
			    (flp->saddr == rt->fld.saddr) &&
			    (flp->flowidn_mark == rt->fld.flowidn_mark) &&
			    dn_is_output_route(rt) &&
			    (rt->fld.flowidn_oif == flp->flowidn_oif)) {
				dst_use(&rt->dst, jiffies);
				rcu_read_unlock_bh();
				*pprt = &rt->dst;
				return 0;
			}
		}
		rcu_read_unlock_bh();
	}

	return dn_route_output_slow(pprt, flp, flags);
}

static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, flp, flags);
	if (err == 0 && flp->flowidn_proto) {
		*pprt = xfrm_lookup(&init_net, *pprt,
				    flowidn_to_flowi(flp), NULL, 0);
		if (IS_ERR(*pprt)) {
			err = PTR_ERR(*pprt);
			*pprt = NULL;
		}
	}
	return err;
}

int dn_route_output_sock(struct dst_entry **pprt, struct flowidn *fl, struct sock *sk, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
	if (err == 0 && fl->flowidn_proto) {
		if (!(flags & MSG_DONTWAIT))
			fl->flowidn_flags |= FLOWI_FLAG_CAN_SLEEP;
		*pprt = xfrm_lookup(&init_net, *pprt,
				    flowidn_to_flowi(fl), sk, 0);
		if (IS_ERR(*pprt)) {
			err = PTR_ERR(*pprt);
			*pprt = NULL;
		}
	}
	return err;
}

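/*
 * Slow path for input routes: sanity check the source address, then
 * choose between local delivery, forwarding and (unsupported) NAT
 * based on the FIB result before building and caching the dst entry.
 */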
static int dn_route_input_slow(struct sk_buff *skb)
{
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct net_device *in_dev = skb->dev;
	struct net_device *out_dev = NULL;
	struct dn_dev *dn_db;
	struct neighbour *neigh = NULL;
	unsigned hash;
	int flags = 0;
	__le16 gateway = 0;
	__le16 local_src = 0;
	struct flowidn fld = {
		.daddr = cb->dst,
		.saddr = cb->src,
		.flowidn_scope = RT_SCOPE_UNIVERSE,
		.flowidn_mark = skb->mark,
		.flowidn_iif = skb->dev->ifindex,
	};
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
	int err = -EINVAL;
	int free_res = 0;

	dev_hold(in_dev);

	if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
		goto out;

	/* Zero source addresses are not allowed */
	if (fld.saddr == 0)
		goto out;

	/*
	 * In this case we've just received a packet from a source
	 * outside ourselves pretending to come from us. We don't
	 * allow it any further to prevent routing loops, spoofing and
	 * other nasties. Loopback packets already have the dst attached
	 * so this only affects packets which have originated elsewhere.
	 */
	err = -ENOTUNIQ;
	if (dn_dev_islocal(in_dev, cb->src))
		goto out;

	err = dn_fib_lookup(&fld, &res);
	if (err) {
		if (err != -ESRCH)
			goto out;
		/*
		 * Is the destination us?
		 */
		if (!dn_dev_islocal(in_dev, cb->dst))
			goto e_inval;

		res.type = RTN_LOCAL;
	} else {
		__le16 src_map = fld.saddr;
		free_res = 1;

		out_dev = DN_FIB_RES_DEV(res);
		if (out_dev == NULL) {
			if (net_ratelimit())
				printk(KERN_CRIT "Bug in dn_route_input_slow() "
						 "No output device\n");
			goto e_inval;
		}
		dev_hold(out_dev);

		if (res.r)
			src_map = fld.saddr; /* no NAT support for now */

		gateway = DN_FIB_RES_GW(res);
		if (res.type == RTN_NAT) {
			fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
			dn_fib_res_put(&res);
			free_res = 0;
			if (dn_fib_lookup(&fld, &res))
				goto e_inval;
			free_res = 1;
			if (res.type != RTN_UNICAST)
				goto e_inval;
			flags |= RTCF_DNAT;
			gateway = fld.daddr;
		}
		fld.saddr = src_map;
	}

	switch(res.type) {
	case RTN_UNICAST:
		/*
		 * Forwarding check here, we only check for forwarding
		 * being turned off, if you want to only forward intra
		 * area, it's up to you to set the routing tables up
		 * correctly.
		 */
		if (dn_db->parms.forwarding == 0)
			goto e_inval;

		if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
			dn_fib_select_multipath(&fld, &res);

		/*
		 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
		 * flag as a hint to set the intra-ethernet bit when
		 * forwarding. If we've got NAT in operation, we don't do
		 * this optimisation.
		 */
		if (out_dev == in_dev && !(flags & RTCF_NAT))
			flags |= RTCF_DOREDIRECT;

		local_src = DN_FIB_RES_PREFSRC(res);

		/* fall through */
	case RTN_BLACKHOLE:
	case RTN_UNREACHABLE:
		break;
	case RTN_LOCAL:
		flags |= RTCF_LOCAL;
		fld.saddr = cb->dst;
		fld.daddr = cb->src;

		/* Routing tables gave us a gateway */
		if (gateway)
			goto make_route;

		/* Packet was intra-ethernet, so we know it's on-link */
		if (cb->rt_flags & DN_RT_F_IE) {
			gateway = cb->src;
			flags |= RTCF_DIRECTSRC;
			goto make_route;
		}

		/* Use the default router if there is one */
		neigh = neigh_clone(dn_db->router);
		if (neigh) {
			gateway = ((struct dn_neigh *)neigh)->addr;
			goto make_route;
		}

		/* Close eyes and pray */
		gateway = cb->src;
		flags |= RTCF_DIRECTSRC;
		goto make_route;
	default:
		goto e_inval;
	}

make_route:
	rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
	if (rt == NULL)
		goto e_nobufs;

	memset(&rt->fld, 0, sizeof(rt->fld));
	rt->rt_saddr      = fld.saddr;
	rt->rt_daddr      = fld.daddr;
	rt->rt_gateway    = fld.daddr;
	if (gateway)
		rt->rt_gateway = gateway;
	rt->rt_local_src  = local_src ? local_src : rt->rt_saddr;

	rt->rt_dst_map    = fld.daddr;
	rt->rt_src_map    = fld.saddr;

	rt->fld.saddr        = cb->src;
	rt->fld.daddr        = cb->dst;
	rt->fld.flowidn_oif  = 0;
	rt->fld.flowidn_iif  = in_dev->ifindex;
	rt->fld.flowidn_mark = fld.flowidn_mark;

	dst_set_neighbour(&rt->dst, neigh);
	rt->dst.lastuse = jiffies;
	rt->dst.output = dn_rt_bug;
	switch (res.type) {
	case RTN_UNICAST:
		rt->dst.input = dn_forward;
		break;
	case RTN_LOCAL:
		rt->dst.output = dn_output;
		rt->dst.input = dn_nsp_rx;
		rt->dst.dev = in_dev;
		flags |= RTCF_LOCAL;
		break;
	default:
	case RTN_UNREACHABLE:
	case RTN_BLACKHOLE:
		rt->dst.input = dst_discard;
	}
	rt->rt_flags = flags;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
	dn_insert_route(rt, hash, &rt);
	skb_dst_set(skb, &rt->dst);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	dev_put(in_dev);
	if (out_dev)
		dev_put(out_dev);
out:
	return err;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

e_neighbour:
	dst_free(&rt->dst);
	goto done;
}

static int dn_route_input(struct sk_buff *skb)
{
	struct dn_route *rt;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned hash = dn_hash(cb->src, cb->dst);

	if (skb_dst(skb))
		return 0;

	rcu_read_lock();
	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
	    rt = rcu_dereference(rt->dst.dn_next)) {
		if ((rt->fld.saddr == cb->src) &&
		    (rt->fld.daddr == cb->dst) &&
		    (rt->fld.flowidn_oif == 0) &&
		    (rt->fld.flowidn_mark == skb->mark) &&
		    (rt->fld.flowidn_iif == cb->iif)) {
			dst_use(&rt->dst, jiffies);
			rcu_read_unlock();
			skb_dst_set(skb, (struct dst_entry *)rt);
			return 0;
		}
	}
	rcu_read_unlock();

	return dn_route_input_slow(skb);
}

static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			   int event, int nowait, unsigned int flags)
{
	struct dn_route *rt = (struct dn_route *)skb_dst(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	long expires;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
	r = NLMSG_DATA(nlh);
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type = rt->rt_type;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
	if (rt->fld.saddr) {
		r->rtm_src_len = 16;
		RTA_PUT(skb, RTA_SRC, 2, &rt->fld.saddr);
	}
	if (rt->dst.dev)
		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
	if (rt->rt_daddr != rt->rt_gateway)
		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
		goto rtattr_failure;
	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
			       rt->dst.error) < 0)
		goto rtattr_failure;
	if (dn_is_input_route(rt))
		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fld.flowidn_iif);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

/*
 * This is called by both endnodes and routers now.
 */
static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb;
	int err;
	struct sk_buff *skb;
	struct flowidn fld;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	memset(&fld, 0, sizeof(fld));
	fld.flowidn_proto = DNPROTO_NSP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOBUFS;
	skb_reset_mac_header(skb);
	cb = DN_SKB_CB(skb);

	if (rta[RTA_SRC-1])
		memcpy(&fld.saddr, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&fld.daddr, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_IIF-1])
		memcpy(&fld.flowidn_iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

	if (fld.flowidn_iif) {
		struct net_device *dev;
		if ((dev = dev_get_by_index(&init_net, fld.flowidn_iif)) == NULL) {
			kfree_skb(skb);
			return -ENODEV;
		}
		if (!dev->dn_ptr) {
			dev_put(dev);
			kfree_skb(skb);
			return -ENODEV;
		}
		skb->protocol = htons(ETH_P_DNA_RT);
		skb->dev = dev;
		cb->src = fld.saddr;
		cb->dst = fld.daddr;
		local_bh_disable();
		err = dn_route_input(skb);
		local_bh_enable();
		memset(cb, 0, sizeof(struct dn_skb_cb));
		rt = (struct dn_route *)skb_dst(skb);
		if (!err && -rt->dst.error)
			err = rt->dst.error;
	} else {
		int oif = 0;
		if (rta[RTA_OIF - 1])
			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
		fld.flowidn_oif = oif;
		err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
	}

	if (skb->dev)
		dev_put(skb->dev);
	skb->dev = NULL;
	if (err)
		goto out_free;
	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);

	if (err == 0)
		goto out_free;
	if (err < 0) {
		err = -EMSGSIZE;
		goto out_free;
	}

	return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);

out_free:
	kfree_skb(skb);
	return err;
}

/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct dn_route *rt;
	int h, s_h;
	int idx, s_idx;

	if (!net_eq(net, &init_net))
		return 0;

	if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
		return -EINVAL;
	if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
		return 0;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for(h = 0; h <= dn_rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
			rt;
			rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
			if (idx < s_idx)
				continue;
			skb_dst_set(skb, dst_clone(&rt->dst));
			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					1, NLM_F_MULTI) <= 0) {
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
	int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
	struct dn_route *rt = NULL;
	struct dn_rt_cache_iter_state *s = seq->private;

	for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
		rcu_read_lock_bh();
		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
		if (rt)
			break;
		rcu_read_unlock_bh();
	}
	return rt;
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
	struct dn_rt_cache_iter_state *s = seq->private;

	rt = rcu_dereference_bh(rt->dst.dn_next);
	while (!rt) {
		rcu_read_unlock_bh();
		if (--s->bucket < 0)
			break;
		rcu_read_lock_bh();
		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
	}
	return rt;
}

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_first(seq);

	if (rt) {
		while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
			--*pos;
	}
	return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_next(seq, v);
	++*pos;
	return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		rcu_read_unlock_bh();
}

static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
	struct dn_route *rt = v;
	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
		   rt->dst.dev ? rt->dst.dev->name : "*",
		   dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
		   dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
		   atomic_read(&rt->dst.__refcnt),
		   rt->dst.__use,
		   (int) dst_metric(&rt->dst, RTAX_RTT));
	return 0;
}

static const struct seq_operations dn_rt_cache_seq_ops = {
	.start	= dn_rt_cache_seq_start,
	.next	= dn_rt_cache_seq_next,
	.stop	= dn_rt_cache_seq_stop,
	.show	= dn_rt_cache_seq_show,
};

static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &dn_rt_cache_seq_ops,
			sizeof(struct dn_rt_cache_iter_state));
}

static const struct file_operations dn_rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

void __init dn_route_init(void)
{
	int i, goal, order;

	dn_dst_ops.kmem_cachep =
		kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	dst_entries_init(&dn_dst_ops);
	setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
	add_timer(&dn_route_timer);

	goal = totalram_pages >> (26 - PAGE_SHIFT);

	for(order = 0; (1UL << order) < goal; order++)
		/* NOTHING */;

	/*
	 * Cap the table size, since it is very, very unlikely to need
	 * to be larger than this.
	 */
	while(order && ((((1UL << order) * PAGE_SIZE) /
				sizeof(struct dn_rt_hash_bucket)) >= 2048))
		order--;
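
	/*
	 * Allocate the table, rounding the entry count down to a power of
	 * two so that a simple mask selects a bucket. With 4KB pages and
	 * 16 byte buckets, for instance, order 0 gives 256 entries.
	 */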
	do {
		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
			sizeof(struct dn_rt_hash_bucket);
		while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
			dn_rt_hash_mask--;
		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	printk(KERN_INFO
		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
		dn_rt_hash_mask,
		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

	dn_rt_hash_mask--;
	for(i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_init(&dn_rt_hash_table[i].lock);
		dn_rt_hash_table[i].chain = NULL;
	}

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

	proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);

#ifdef CONFIG_DECNET_ROUTER
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
		      dn_fib_dump, NULL);
#else
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
		      dn_cache_dump, NULL);
#endif
}

void __exit dn_route_cleanup(void)
{
	del_timer(&dn_route_timer);
	dn_run_flush(0);

	proc_net_remove(&init_net, "decnet_cache");
	dst_entries_destroy(&dn_dst_ops);
}