/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Network Services Protocol (Output)
 *
 * Author:      Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *
 *    Steve Whitehouse:  Split into dn_nsp_in.c and dn_nsp_out.c from
 *                       original dn_nsp.c.
 *    Steve Whitehouse:  Updated to work with my new routing architecture.
 *    Steve Whitehouse:  Added changes from Eduardo Serrat's patches.
 *    Steve Whitehouse:  Now conninits have the "return" bit set.
 *    Steve Whitehouse:  Fixes to check alloc'd skbs are non NULL!
 *                       Moved output state machine into one function
 *    Steve Whitehouse:  New output state machine
 *         Paul Koning:  Connect Confirm message fix.
 *      Eduardo Serrat:  Fix to stop dn_nsp_do_disc() sending malformed packets.
 *    Steve Whitehouse:  dn_nsp_output() and friends needed a spring clean
 *    Steve Whitehouse:  Moved dn_nsp_send() in here from route.h
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat           emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/termios.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/if_packet.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_nsp.h>
#include <net/dn_dev.h>
#include <net/dn_route.h>

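/*
 * Retransmit backoff multipliers, indexed by scp->nsp_rxtshift: roughly
 * exponential up to 64 and then flat. dn_nsp_persist() below multiplies
 * its base timeout by nsp_backoff[nsp_rxtshift] and bumps the shift on
 * each use.
 */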
static int nsp_backoff[NSP_MAXRXTSHIFT + 1] = { 1, 2, 4, 8, 16, 32, 64, 64, 64, 64, 64, 64, 64 };

static void dn_nsp_send(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct dst_entry *dst;
	struct flowidn fld;

	skb_reset_transport_header(skb);
	scp->stamp = jiffies;

	dst = sk_dst_check(sk, 0);
	if (dst) {
try_again:
		skb_dst_set(skb, dst);
		dst_output(skb);
		return;
	}

	memset(&fld, 0, sizeof(fld));
	fld.flowidn_oif = sk->sk_bound_dev_if;
	fld.saddr = dn_saddr2dn(&scp->addr);
	fld.daddr = dn_saddr2dn(&scp->peer);
	dn_sk_ports_copy(&fld, scp);
	fld.flowidn_proto = DNPROTO_NSP;
	if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, 0) == 0) {
		dst = sk_dst_get(sk);
		sk->sk_route_caps = dst->dev->features;
		goto try_again;
	}

	sk->sk_err = EHOSTUNREACH;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
}


/*
 * If sk == NULL, then we assume that we are supposed to be making
 * a routing layer skb. If sk != NULL, then we are supposed to be
 * creating an skb for the NSP layer.
 *
 * The eventual aim is for each socket to have a cached header size
 * for its outgoing packets, and to set hdr from this when sk != NULL.
 */
struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri)
{
	struct sk_buff *skb;
	int hdr = 64;

	if ((skb = alloc_skb(size + hdr, pri)) == NULL)
		return NULL;

	skb->protocol = htons(ETH_P_DNA_RT);
	skb->pkt_type = PACKET_OUTGOING;

	if (sk)
		skb_set_owner_w(skb, sk);

	skb_reserve(skb, hdr);

	return skb;
}

/*
 * Calculate persist timer based upon the smoothed round
 * trip time and the variance. Backoff according to the
 * nsp_backoff[] array.
 */
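/*
 * Note on units (an observation from the estimator in dn_nsp_rtt(), not
 * from the NSP specification): nsp_srtt is kept scaled by 8 and nsp_rttvar
 * by 4, so the base value computed below works out to approximately
 * rtt + 2*mdev jiffies before the backoff multiplier and the 1s..600s
 * clamp are applied.
 */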
unsigned long dn_nsp_persist(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;

	t *= nsp_backoff[scp->nsp_rxtshift];

	if (t < HZ) t = HZ;
	if (t > (600*HZ)) t = (600*HZ);

	if (scp->nsp_rxtshift < NSP_MAXRXTSHIFT)
		scp->nsp_rxtshift++;

	/* printk(KERN_DEBUG "rxtshift %lu, t=%lu\n", scp->nsp_rxtshift, t); */

	return t;
}

/*
 * This is called each time we get an estimate for the rtt
 * on the link.
 */
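/*
 * The smoothed values are kept in fixed point (nsp_srtt scaled by 8,
 * nsp_rttvar by 4), in the style of the classic TCP estimator, which is
 * why the shift amounts in the two updates below differ.
 */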
static void dn_nsp_rtt(struct sock *sk, long rtt)
{
	struct dn_scp *scp = DN_SK(sk);
	long srtt = (long)scp->nsp_srtt;
	long rttvar = (long)scp->nsp_rttvar;
	long delta;

	/*
	 * If the jiffies clock flips over in the middle of timestamp
	 * gathering this value might turn out negative, so we make sure
	 * that it is always positive here.
	 */
	if (rtt < 0)
		rtt = -rtt;
	/*
	 * Add new rtt to smoothed average
	 */
	delta = ((rtt << 3) - srtt);
	srtt += (delta >> 3);
	if (srtt >= 1)
		scp->nsp_srtt = (unsigned long)srtt;
	else
		scp->nsp_srtt = 1;

	/*
	 * Add new rtt variance to smoothed variance
	 */
	delta >>= 1;
	rttvar += ((((delta>0)?(delta):(-delta)) - rttvar) >> 2);
	if (rttvar >= 1)
		scp->nsp_rttvar = (unsigned long)rttvar;
	else
		scp->nsp_rttvar = 1;

	/* printk(KERN_DEBUG "srtt=%lu rttvar=%lu\n", scp->nsp_srtt, scp->nsp_rttvar); */
}

/**
 * dn_nsp_clone_and_send - Send a data packet by cloning it
 * @skb: The packet to clone and transmit
 * @gfp: memory allocation flag
 *
 * Clone a queued data or other data packet and transmit it.
 *
 * Returns: The number of times the packet has been sent previously
 */
static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb,
					     gfp_t gfp)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct sk_buff *skb2;
	int ret = 0;

	if ((skb2 = skb_clone(skb, gfp)) != NULL) {
		ret = cb->xmit_count;
		cb->xmit_count++;
		cb->stamp = jiffies;
		skb2->sk = skb->sk;
		dn_nsp_send(skb2);
	}

	return ret;
}

/**
 * dn_nsp_output - Try and send something from socket queues
 * @sk: The socket whose queues are to be investigated
 *
 * Try and send the packet on the end of the data and other data queues.
 * Other data gets priority over data, and if we retransmit a packet we
 * reduce the window by dividing it in two.
 *
 */
void dn_nsp_output(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb;
	unsigned reduce_win = 0;

	/*
	 * First we check for otherdata/linkservice messages
	 */
	if ((skb = skb_peek(&scp->other_xmit_queue)) != NULL)
		reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);

	/*
	 * If we may not send any data, we don't.
	 * If we are still trying to get some other data down the
	 * channel, we don't try and send any data.
	 */
	if (reduce_win || (scp->flowrem_sw != DN_SEND))
		goto recalc_window;

	if ((skb = skb_peek(&scp->data_xmit_queue)) != NULL)
		reduce_win = dn_nsp_clone_and_send(skb, GFP_ATOMIC);

	/*
	 * If we've sent any frame more than once, we cut the
	 * send window size in half. There is always a minimum
	 * window size of one available.
	 */
recalc_window:
	if (reduce_win) {
		scp->snd_window >>= 1;
		if (scp->snd_window < NSP_MIN_WINDOW)
			scp->snd_window = NSP_MIN_WINDOW;
	}
}

int dn_nsp_xmit_timeout(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	dn_nsp_output(sk);

	if (!skb_queue_empty(&scp->data_xmit_queue) ||
	    !skb_queue_empty(&scp->other_xmit_queue))
		scp->persist = dn_nsp_persist(sk);

	return 0;
}

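/*
 * Every NSP header built here starts with the same five bytes: a one byte
 * message flag followed by the remote and then the local logical link
 * address, each as a little-endian 16 bit value. dn_mk_common_header()
 * writes those bytes and returns a pointer to whatever follows them.
 */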
static inline __le16 *dn_mk_common_header(struct dn_scp *scp, struct sk_buff *skb, unsigned char msgflag, int len)
{
	unsigned char *ptr = skb_push(skb, len);

	BUG_ON(len < 5);

	*ptr++ = msgflag;
	*((__le16 *)ptr) = scp->addrrem;
	ptr += 2;
	*((__le16 *)ptr) = scp->addrloc;
	ptr += 2;
	return (__le16 __force *)ptr;
}

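/*
 * dn_mk_ack_header() appends the two 16 bit acknowledgement fields after
 * the common header. Bits 0-11 carry the last sequence number received on
 * the corresponding subchannel, 0x8000 flags the field as containing an
 * acknowledgement and 0x2000 flags the second field as referring to the
 * "cross" subchannel. For other-data acks the two values are swapped so
 * the first field always describes the subchannel the message travels on.
 */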
static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned char msgflag, int hlen, int other)
{
	struct dn_scp *scp = DN_SK(sk);
	unsigned short acknum = scp->numdat_rcv & 0x0FFF;
	unsigned short ackcrs = scp->numoth_rcv & 0x0FFF;
	__le16 *ptr;

	BUG_ON(hlen < 9);

	scp->ackxmt_dat = acknum;
	scp->ackxmt_oth = ackcrs;
	acknum |= 0x8000;
	ackcrs |= 0x8000;

	/* If this is an "other data/ack" message, swap acknum and ackcrs */
	if (other) {
		unsigned short tmp = acknum;
		acknum = ackcrs;
		ackcrs = tmp;
	}

	/* Set "cross subchannel" bit in ackcrs */
	ackcrs |= 0x2000;

	ptr = (__le16 *)dn_mk_common_header(scp, skb, msgflag, hlen);

	*ptr++ = cpu_to_le16(acknum);
	*ptr++ = cpu_to_le16(ackcrs);

	return ptr;
}

static __le16 *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *skb, int oth)
{
	struct dn_scp *scp = DN_SK(sk);
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	__le16 *ptr = dn_mk_ack_header(sk, skb, cb->nsp_flags, 11, oth);

	if (unlikely(oth)) {
		cb->segnum = scp->numoth;
		seq_add(&scp->numoth, 1);
	} else {
		cb->segnum = scp->numdat;
		seq_add(&scp->numdat, 1);
	}
	*(ptr++) = cpu_to_le16(cb->segnum);

	return ptr;
}

void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb,
		       gfp_t gfp, int oth)
{
	struct dn_scp *scp = DN_SK(sk);
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned long t = ((scp->nsp_srtt >> 2) + scp->nsp_rttvar) >> 1;

	cb->xmit_count = 0;
	dn_nsp_mk_data_header(sk, skb, oth);

	/*
	 * Slow start: If we have been idle for more than
	 * one RTT, then reset window to min size.
	 */
	if ((jiffies - scp->stamp) > t)
		scp->snd_window = NSP_MIN_WINDOW;

	if (oth)
		skb_queue_tail(&scp->other_xmit_queue, skb);
	else
		skb_queue_tail(&scp->data_xmit_queue, skb);

	if (scp->flowrem_sw != DN_SEND)
		return;

	dn_nsp_clone_and_send(skb, gfp);
}


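/*
 * Remove every packet on the given transmit queue that is covered by the
 * acknowledgement number 'acknum'. Returns non-zero if anything was acked,
 * so the caller can wake a sender blocked on queue space. If the last
 * packet acked had already been transmitted more than once, a retransmit
 * of the remaining queue is kicked off via dn_nsp_output().
 */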
int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb2, *n, *ack = NULL;
	int wakeup = 0;
	int try_retrans = 0;
	unsigned long reftime = cb->stamp;
	unsigned long pkttime;
	unsigned short xmit_count;
	unsigned short segnum;

	skb_queue_walk_safe(q, skb2, n) {
		struct dn_skb_cb *cb2 = DN_SKB_CB(skb2);

		if (dn_before_or_equal(cb2->segnum, acknum))
			ack = skb2;

		/* printk(KERN_DEBUG "ack: %s %04x %04x\n", ack ? "ACK" : "SKIP", (int)cb2->segnum, (int)acknum); */

		if (ack == NULL)
			continue;

		/* printk(KERN_DEBUG "check_xmit_queue: %04x, %d\n", acknum, cb2->xmit_count); */

		/* Does _last_ packet acked have xmit_count > 1 */
		try_retrans = 0;
		/* Remember to wake up the sending process */
		wakeup = 1;
		/* Keep various statistics */
		pkttime = cb2->stamp;
		xmit_count = cb2->xmit_count;
		segnum = cb2->segnum;
		/* Remove and drop ack'ed packet */
		skb_unlink(ack, q);
		kfree_skb(ack);
		ack = NULL;

		/*
		 * We don't expect to see acknowledgements for packets we
		 * haven't sent yet.
		 */
		WARN_ON(xmit_count == 0);

		/*
		 * If the packet has only been sent once, we can use it
		 * to calculate the RTT and also open the window a little
		 * further.
		 */
		if (xmit_count == 1) {
			if (dn_equal(segnum, acknum))
				dn_nsp_rtt(sk, (long)(pkttime - reftime));

			if (scp->snd_window < scp->max_window)
				scp->snd_window++;
		}

		/*
		 * Packet has been sent more than once. If this is the last
		 * packet to be acknowledged then we want to send the next
		 * packet in the send queue again (assumes the remote host does
		 * go-back-N error control).
		 */
		if (xmit_count > 1)
			try_retrans = 1;
	}

	if (try_retrans)
		dn_nsp_output(sk);

	return wakeup;
}

void dn_nsp_send_data_ack(struct sock *sk)
{
	struct sk_buff *skb = NULL;

	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
		return;

	skb_reserve(skb, 9);
	dn_mk_ack_header(sk, skb, 0x04, 9, 0);
	dn_nsp_send(skb);
}

void dn_nsp_send_oth_ack(struct sock *sk)
{
	struct sk_buff *skb = NULL;

	if ((skb = dn_alloc_skb(sk, 9, GFP_ATOMIC)) == NULL)
		return;

	skb_reserve(skb, 9);
	dn_mk_ack_header(sk, skb, 0x14, 9, 1);
	dn_nsp_send(skb);
}


void dn_send_conn_ack (struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb = NULL;
	struct nsp_conn_ack_msg *msg;

	if ((skb = dn_alloc_skb(sk, 3, sk->sk_allocation)) == NULL)
		return;

	msg = (struct nsp_conn_ack_msg *)skb_put(skb, 3);
	msg->msgflg = 0x24;
	msg->dstaddr = scp->addrrem;

	dn_nsp_send(skb);
}

void dn_nsp_delayed_ack(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->ackxmt_oth != scp->numoth_rcv)
		dn_nsp_send_oth_ack(sk);

	if (scp->ackxmt_dat != scp->numdat_rcv)
		dn_nsp_send_data_ack(sk);
}

static int dn_nsp_retrans_conn_conf(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->state == DN_CC)
		dn_send_conn_conf(sk, GFP_ATOMIC);

	return 0;
}

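/*
 * Connect Confirm layout built below: the fixed nsp_conn_init_msg header
 * (message flag 0x28, link addresses, requested services, version info
 * and maximum segment size) followed by a one byte length and then the
 * optional connect data from conndata_out.
 */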
void dn_send_conn_conf(struct sock *sk, gfp_t gfp)
{
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb = NULL;
	struct nsp_conn_init_msg *msg;
	__u8 len = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);

	if ((skb = dn_alloc_skb(sk, 50 + len, gfp)) == NULL)
		return;

	msg = (struct nsp_conn_init_msg *)skb_put(skb, sizeof(*msg));
	msg->msgflg = 0x28;
	msg->dstaddr = scp->addrrem;
	msg->srcaddr = scp->addrloc;
	msg->services = scp->services_loc;
	msg->info = scp->info_loc;
	msg->segsize = cpu_to_le16(scp->segsize_loc);

	*skb_put(skb,1) = len;

	if (len > 0)
		memcpy(skb_put(skb, len), scp->conndata_out.opt_data, len);


	dn_nsp_send(skb);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_retrans_conn_conf;
}


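/*
 * Disconnect Initiate/Confirm layout built below: the message flag, then
 * the remote address, local address and reason code as three little-endian
 * shorts, and for Disconnect Initiate only, a one byte length followed by
 * the optional disconnect data.
 */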
static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
				      unsigned short reason, gfp_t gfp,
				      struct dst_entry *dst,
				      int ddl, unsigned char *dd, __le16 rem, __le16 loc)
{
	struct sk_buff *skb = NULL;
	int size = 7 + ddl + ((msgflg == NSP_DISCINIT) ? 1 : 0);
	unsigned char *msg;

	if ((dst == NULL) || (rem == 0)) {
		if (net_ratelimit())
			printk(KERN_DEBUG "DECnet: dn_nsp_do_disc: BUG! Please report this to SteveW@ACM.org rem=%u dst=%p\n", le16_to_cpu(rem), dst);
		return;
	}

	if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL)
		return;

	msg = skb_put(skb, size);
	*msg++ = msgflg;
	*(__le16 *)msg = rem;
	msg += 2;
	*(__le16 *)msg = loc;
	msg += 2;
	*(__le16 *)msg = cpu_to_le16(reason);
	msg += 2;
	if (msgflg == NSP_DISCINIT)
		*msg++ = ddl;

	if (ddl) {
		memcpy(msg, dd, ddl);
	}

	/*
	 * This doesn't go via the dn_nsp_send() function since we need
	 * to be able to send disc packets out which have no socket
	 * associations.
	 */
	skb_dst_set(skb, dst_clone(dst));
	dst_output(skb);
}


void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg,
		      unsigned short reason, gfp_t gfp)
{
	struct dn_scp *scp = DN_SK(sk);
	int ddl = 0;

	if (msgflg == NSP_DISCINIT)
		ddl = le16_to_cpu(scp->discdata_out.opt_optl);

	if (reason == 0)
		reason = le16_to_cpu(scp->discdata_out.opt_status);

	dn_nsp_do_disc(sk, msgflg, reason, gfp, sk->sk_dst_cache, ddl,
		       scp->discdata_out.opt_data, scp->addrrem, scp->addrloc);
}


void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg,
			unsigned short reason)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	int ddl = 0;
	gfp_t gfp = GFP_ATOMIC;

	dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb_dst(skb), ddl,
		       NULL, cb->src_port, cb->dst_port);
}


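/*
 * Link service messages carry just two bytes of payload after the data
 * header: the link service flags and a flow control value. They travel
 * on the other-data subchannel, hence the final argument of 1 to
 * dn_nsp_queue_xmit() below.
 */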
void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval)
{
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff *skb;
	unsigned char *ptr;
	gfp_t gfp = GFP_ATOMIC;

	if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL)
		return;

	skb_reserve(skb, DN_MAX_NSP_DATA_HEADER);
	ptr = skb_put(skb, 2);
	DN_SKB_CB(skb)->nsp_flags = 0x10;
	*ptr++ = lsflags;
	*ptr = fcval;

	dn_nsp_queue_xmit(sk, skb, gfp, 1);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_xmit_timeout;
}

static int dn_nsp_retrans_conninit(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->state == DN_CI)
		dn_nsp_send_conninit(sk, NSP_RCI);

	return 0;
}

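/*
 * Connect Initiate layout built below: the fixed nsp_conn_init_msg header
 * followed by the destination and source descriptors ("usernames"), a
 * menu version byte, counted strings for the username, password and
 * account access data, and finally a counted block of optional connect
 * data. The routing "return to sender" flag is set so a failed connect
 * comes back as a returned packet.
 */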
void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
{
	struct dn_scp *scp = DN_SK(sk);
	struct nsp_conn_init_msg *msg;
	unsigned char aux;
	unsigned char menuver;
	struct dn_skb_cb *cb;
	unsigned char type = 1;
	gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
	struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);

	if (!skb)
		return;

	cb = DN_SKB_CB(skb);
	msg = (struct nsp_conn_init_msg *)skb_put(skb,sizeof(*msg));

	msg->msgflg = msgflg;
	msg->dstaddr = 0x0000;		/* Remote Node will assign it */

	msg->srcaddr = scp->addrloc;
	msg->services = scp->services_loc;	/* Requested flow control */
	msg->info = scp->info_loc;		/* Version Number */
	msg->segsize = cpu_to_le16(scp->segsize_loc);	/* Max segment size */

	if (scp->peer.sdn_objnum)
		type = 0;

	skb_put(skb, dn_sockaddr2username(&scp->peer,
					  skb_tail_pointer(skb), type));
	skb_put(skb, dn_sockaddr2username(&scp->addr,
					  skb_tail_pointer(skb), 2));

	menuver = DN_MENUVER_ACC | DN_MENUVER_USR;
	if (scp->peer.sdn_flags & SDF_PROXY)
		menuver |= DN_MENUVER_PRX;
	if (scp->peer.sdn_flags & SDF_UICPROXY)
		menuver |= DN_MENUVER_UIC;

	*skb_put(skb, 1) = menuver;	/* Menu Version */

	aux = scp->accessdata.acc_userl;
	*skb_put(skb, 1) = aux;
	if (aux > 0)
		memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);

	aux = scp->accessdata.acc_passl;
	*skb_put(skb, 1) = aux;
	if (aux > 0)
		memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);

	aux = scp->accessdata.acc_accl;
	*skb_put(skb, 1) = aux;
	if (aux > 0)
		memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);

	aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
	*skb_put(skb, 1) = aux;
	if (aux > 0)
		memcpy(skb_put(skb, aux), scp->conndata_out.opt_data, aux);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_retrans_conninit;

	cb->rt_flags = DN_RT_F_RQR;

	dn_nsp_send(skb);
}