net/l2tp/l2tp_core.c

1/*
2 * L2TP core.
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * This file contains some code of the original L2TPv2 pppol2tp
7 * driver, which has the following copyright:
8 *
9 * Authors: Martijn van Oosterhout <kleptog@svana.org>
10 * James Chapman (jchapman@katalix.com)
11 * Contributors:
12 * Michal Ostrowski <mostrows@speakeasy.net>
13 * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14 * David S. Miller (davem@redhat.com)
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/module.h>
24#include <linux/string.h>
25#include <linux/list.h>
26#include <linux/rculist.h>
27#include <linux/uaccess.h>
28
29#include <linux/kernel.h>
30#include <linux/spinlock.h>
31#include <linux/kthread.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/errno.h>
35#include <linux/jiffies.h>
36
37#include <linux/netdevice.h>
38#include <linux/net.h>
39#include <linux/inetdevice.h>
40#include <linux/skbuff.h>
41#include <linux/init.h>
42#include <linux/in.h>
43#include <linux/ip.h>
44#include <linux/udp.h>
45#include <linux/l2tp.h>
46#include <linux/hash.h>
47#include <linux/sort.h>
48#include <linux/file.h>
49#include <linux/nsproxy.h>
50#include <net/net_namespace.h>
51#include <net/netns/generic.h>
52#include <net/dst.h>
53#include <net/ip.h>
54#include <net/udp.h>
55#include <net/inet_common.h>
56#include <net/xfrm.h>
57#include <net/protocol.h>
58#include <net/inet6_connection_sock.h>
59#include <net/inet_ecn.h>
60#include <net/ip6_route.h>
61#include <net/ip6_checksum.h>
62
63#include <asm/byteorder.h>
64#include <linux/atomic.h>
65
66#include "l2tp_core.h"
67
68#define L2TP_DRV_VERSION "V2.0"
69
70/* L2TP header constants */
71#define L2TP_HDRFLAG_T 0x8000
72#define L2TP_HDRFLAG_L 0x4000
73#define L2TP_HDRFLAG_S 0x0800
74#define L2TP_HDRFLAG_O 0x0200
75#define L2TP_HDRFLAG_P 0x0100
76
77#define L2TP_HDR_VER_MASK 0x000F
78#define L2TP_HDR_VER_2 0x0002
79#define L2TP_HDR_VER_3 0x0003
80
81/* L2TPv3 default L2-specific sublayer */
82#define L2TP_SLFLAG_S 0x40000000
83#define L2TP_SL_SEQ_MASK 0x00ffffff
84
85#define L2TP_HDR_SIZE_SEQ 10
86#define L2TP_HDR_SIZE_NOSEQ 6
87
88/* Default trace flags */
89#define L2TP_DEFAULT_DEBUG_FLAGS 0
90
91/* Private data stored for received packets in the skb.
92 */
93struct l2tp_skb_cb {
94    u32 ns;
95    u16 has_seq;
96    u16 length;
97    unsigned long expires;
98};
99
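/* The L2TP control block is placed after the IP layer's inet_skb_parm within
 * skb->cb[], so both layers can use the cb area without clobbering each
 * other's state.
 */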
100#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
101
102static atomic_t l2tp_tunnel_count;
103static atomic_t l2tp_session_count;
104static struct workqueue_struct *l2tp_wq;
105
106/* per-net private data for this module */
107static unsigned int l2tp_net_id;
108struct l2tp_net {
109    struct list_head l2tp_tunnel_list;
110    spinlock_t l2tp_tunnel_list_lock;
111    struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
112    spinlock_t l2tp_session_hlist_lock;
113};
114
115static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
116
117static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
118{
119    return sk->sk_user_data;
120}
121
122static inline struct l2tp_net *l2tp_pernet(struct net *net)
123{
124    BUG_ON(!net);
125
126    return net_generic(net, l2tp_net_id);
127}
128
129/* Tunnel reference counts. Incremented per session that is added to
130 * the tunnel.
131 */
132static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
133{
134    atomic_inc(&tunnel->ref_count);
135}
136
137static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
138{
139    if (atomic_dec_and_test(&tunnel->ref_count))
140        l2tp_tunnel_free(tunnel);
141}
142#ifdef L2TP_REFCNT_DEBUG
143#define l2tp_tunnel_inc_refcount(_t) \
144do { \
145    pr_debug("l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", \
146         __func__, __LINE__, (_t)->name, \
147         atomic_read(&_t->ref_count)); \
148    l2tp_tunnel_inc_refcount_1(_t); \
149} while (0)
150#define l2tp_tunnel_dec_refcount(_t) \
151do { \
152    pr_debug("l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", \
153         __func__, __LINE__, (_t)->name, \
154         atomic_read(&_t->ref_count)); \
155    l2tp_tunnel_dec_refcount_1(_t); \
156} while (0)
157#else
158#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
159#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
160#endif
161
162/* Session hash global list for L2TPv3.
163 * The session_id SHOULD be random according to RFC3931, but several
164 * L2TP implementations use incrementing session_ids. So we do a real
165 * hash on the session_id, rather than a simple bitmask.
166 */
167static inline struct hlist_head *
168l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
169{
170    return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
171
172}
173
174/* Lookup the tunnel socket, possibly involving the fs code if the socket is
175 * owned by userspace. A struct sock returned from this function must be
176 * released using l2tp_tunnel_sock_put once you're done with it.
177 */
178static struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)
179{
180    int err = 0;
181    struct socket *sock = NULL;
182    struct sock *sk = NULL;
183
184    if (!tunnel)
185        goto out;
186
187    if (tunnel->fd >= 0) {
188        /* Socket is owned by userspace, who might be in the process
189         * of closing it. Look the socket up using the fd to ensure
190         * consistency.
191         */
192        sock = sockfd_lookup(tunnel->fd, &err);
193        if (sock)
194            sk = sock->sk;
195    } else {
196        /* Socket is owned by kernelspace */
197        sk = tunnel->sock;
198        sock_hold(sk);
199    }
200
201out:
202    return sk;
203}
204
205/* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup() */
206static void l2tp_tunnel_sock_put(struct sock *sk)
207{
208    struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
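    /* l2tp_sock_to_tunnel() takes its own reference on sk (see l2tp_core.h),
     * so the sock_put() inside the if block balances that hold and the final
     * sock_put() drops the reference taken by l2tp_tunnel_sock_lookup().
     */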
209    if (tunnel) {
210        if (tunnel->fd >= 0) {
211            /* Socket is owned by userspace */
212            sockfd_put(sk->sk_socket);
213        }
214        sock_put(sk);
215    }
216    sock_put(sk);
217}
218
219/* Lookup a session by id in the global session list
220 */
221static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
222{
223    struct l2tp_net *pn = l2tp_pernet(net);
224    struct hlist_head *session_list =
225        l2tp_session_id_hash_2(pn, session_id);
226    struct l2tp_session *session;
227
228    rcu_read_lock_bh();
229    hlist_for_each_entry_rcu(session, session_list, global_hlist) {
230        if (session->session_id == session_id) {
231            rcu_read_unlock_bh();
232            return session;
233        }
234    }
235    rcu_read_unlock_bh();
236
237    return NULL;
238}
239
240/* Session hash list.
241 * The session_id SHOULD be random according to RFC2661, but several
242 * L2TP implementations (Cisco and Microsoft) use incrementing
243 * session_ids. So we do a real hash on the session_id, rather than a
244 * simple bitmask.
245 */
246static inline struct hlist_head *
247l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
248{
249    return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
250}
251
252/* Lookup a session by id
253 */
254struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
255{
256    struct hlist_head *session_list;
257    struct l2tp_session *session;
258
259    /* In L2TPv3, session_ids are unique over all tunnels and we
260     * sometimes need to look them up before we know the
261     * tunnel.
262     */
263    if (tunnel == NULL)
264        return l2tp_session_find_2(net, session_id);
265
266    session_list = l2tp_session_id_hash(tunnel, session_id);
267    read_lock_bh(&tunnel->hlist_lock);
268    hlist_for_each_entry(session, session_list, hlist) {
269        if (session->session_id == session_id) {
270            read_unlock_bh(&tunnel->hlist_lock);
271            return session;
272        }
273    }
274    read_unlock_bh(&tunnel->hlist_lock);
275
276    return NULL;
277}
278EXPORT_SYMBOL_GPL(l2tp_session_find);
279
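/* Return the nth session attached to the tunnel, walking all hash buckets.
 * Like l2tp_session_find_by_ifname(), this linear walk is only expected to
 * be used by management interfaces that dump sessions.
 */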
280struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
281{
282    int hash;
283    struct l2tp_session *session;
284    int count = 0;
285
286    read_lock_bh(&tunnel->hlist_lock);
287    for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
288        hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
289            if (++count > nth) {
290                read_unlock_bh(&tunnel->hlist_lock);
291                return session;
292            }
293        }
294    }
295
296    read_unlock_bh(&tunnel->hlist_lock);
297
298    return NULL;
299}
300EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
301
302/* Lookup a session by interface name.
303 * This is very inefficient but is only used by management interfaces.
304 */
305struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
306{
307    struct l2tp_net *pn = l2tp_pernet(net);
308    int hash;
309    struct l2tp_session *session;
310
311    rcu_read_lock_bh();
312    for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
313        hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
314            if (!strcmp(session->ifname, ifname)) {
315                rcu_read_unlock_bh();
316                return session;
317            }
318        }
319    }
320
321    rcu_read_unlock_bh();
322
323    return NULL;
324}
325EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
326
327/* Lookup a tunnel by id
328 */
329struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
330{
331    struct l2tp_tunnel *tunnel;
332    struct l2tp_net *pn = l2tp_pernet(net);
333
334    rcu_read_lock_bh();
335    list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
336        if (tunnel->tunnel_id == tunnel_id) {
337            rcu_read_unlock_bh();
338            return tunnel;
339        }
340    }
341    rcu_read_unlock_bh();
342
343    return NULL;
344}
345EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
346
347struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
348{
349    struct l2tp_net *pn = l2tp_pernet(net);
350    struct l2tp_tunnel *tunnel;
351    int count = 0;
352
353    rcu_read_lock_bh();
354    list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
355        if (++count > nth) {
356            rcu_read_unlock_bh();
357            return tunnel;
358        }
359    }
360
361    rcu_read_unlock_bh();
362
363    return NULL;
364}
365EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
366
367/*****************************************************************************
368 * Receive data handling
369 *****************************************************************************/
370
371/* Queue a skb in order. We come here only if the skb has an L2TP sequence
372 * number.
373 */
374static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
375{
376    struct sk_buff *skbp;
377    struct sk_buff *tmp;
378    u32 ns = L2TP_SKB_CB(skb)->ns;
379
380    spin_lock_bh(&session->reorder_q.lock);
381    skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
382        if (L2TP_SKB_CB(skbp)->ns > ns) {
383            __skb_queue_before(&session->reorder_q, skbp, skb);
384            l2tp_dbg(session, L2TP_MSG_SEQ,
385                 "%s: pkt %u, inserted before %u, reorder_q len=%d\n",
386                 session->name, ns, L2TP_SKB_CB(skbp)->ns,
387                 skb_queue_len(&session->reorder_q));
388            atomic_long_inc(&session->stats.rx_oos_packets);
389            goto out;
390        }
391    }
392
393    __skb_queue_tail(&session->reorder_q, skb);
394
395out:
396    spin_unlock_bh(&session->reorder_q.lock);
397}
398
399/* Dequeue a single skb.
400 */
401static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
402{
403    struct l2tp_tunnel *tunnel = session->tunnel;
404    int length = L2TP_SKB_CB(skb)->length;
405
406    /* We're about to requeue the skb, so return resources
407     * to its current owner (a socket receive buffer).
408     */
409    skb_orphan(skb);
410
411    atomic_long_inc(&tunnel->stats.rx_packets);
412    atomic_long_add(length, &tunnel->stats.rx_bytes);
413    atomic_long_inc(&session->stats.rx_packets);
414    atomic_long_add(length, &session->stats.rx_bytes);
415
416    if (L2TP_SKB_CB(skb)->has_seq) {
417        /* Bump our Nr */
418        session->nr++;
419        session->nr &= session->nr_max;
420
421        l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %u\n",
422             session->name, session->nr);
423    }
424
425    /* call private receive handler */
426    if (session->recv_skb != NULL)
427        (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
428    else
429        kfree_skb(skb);
430
431    if (session->deref)
432        (*session->deref)(session);
433}
434
435/* Dequeue skbs from the session's reorder_q, subject to packet order.
436 * Skbs that have been in the queue for too long are simply discarded.
437 */
438static void l2tp_recv_dequeue(struct l2tp_session *session)
439{
440    struct sk_buff *skb;
441    struct sk_buff *tmp;
442
443    /* If the pkt at the head of the queue has the nr that we
444     * expect to send up next, dequeue it and any other
445     * in-sequence packets behind it.
446     */
447start:
448    spin_lock_bh(&session->reorder_q.lock);
449    skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
450        if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
451            atomic_long_inc(&session->stats.rx_seq_discards);
452            atomic_long_inc(&session->stats.rx_errors);
453            l2tp_dbg(session, L2TP_MSG_SEQ,
454                 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
455                 session->name, L2TP_SKB_CB(skb)->ns,
456                 L2TP_SKB_CB(skb)->length, session->nr,
457                 skb_queue_len(&session->reorder_q));
458            session->reorder_skip = 1;
459            __skb_unlink(skb, &session->reorder_q);
460            kfree_skb(skb);
461            if (session->deref)
462                (*session->deref)(session);
463            continue;
464        }
465
466        if (L2TP_SKB_CB(skb)->has_seq) {
467            if (session->reorder_skip) {
468                l2tp_dbg(session, L2TP_MSG_SEQ,
469                     "%s: advancing nr to next pkt: %u -> %u",
470                     session->name, session->nr,
471                     L2TP_SKB_CB(skb)->ns);
472                session->reorder_skip = 0;
473                session->nr = L2TP_SKB_CB(skb)->ns;
474            }
475            if (L2TP_SKB_CB(skb)->ns != session->nr) {
476                l2tp_dbg(session, L2TP_MSG_SEQ,
477                     "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
478                     session->name, L2TP_SKB_CB(skb)->ns,
479                     L2TP_SKB_CB(skb)->length, session->nr,
480                     skb_queue_len(&session->reorder_q));
481                goto out;
482            }
483        }
484        __skb_unlink(skb, &session->reorder_q);
485
486        /* Process the skb. We release the queue lock while we
487         * do so to let other contexts process the queue.
488         */
489        spin_unlock_bh(&session->reorder_q.lock);
490        l2tp_recv_dequeue_skb(session, skb);
491        goto start;
492    }
493
494out:
495    spin_unlock_bh(&session->reorder_q.lock);
496}
497
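/* Verify the UDP checksum of a received frame, handling both IPv4 (and
 * v4-mapped IPv6) and native IPv6 tunnel sockets. Verification is skipped
 * if checking is disabled on the socket or the skb has already been
 * validated (e.g. by hardware checksum offload).
 */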
498static inline int l2tp_verify_udp_checksum(struct sock *sk,
499                       struct sk_buff *skb)
500{
501    struct udphdr *uh = udp_hdr(skb);
502    u16 ulen = ntohs(uh->len);
503    __wsum psum;
504
505    if (sk->sk_no_check || skb_csum_unnecessary(skb))
506        return 0;
507
508#if IS_ENABLED(CONFIG_IPV6)
509    if (sk->sk_family == PF_INET6 && !l2tp_tunnel(sk)->v4mapped) {
510        if (!uh->check) {
511            LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
512            return 1;
513        }
514        if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
515            !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
516                     &ipv6_hdr(skb)->daddr, ulen,
517                     IPPROTO_UDP, skb->csum)) {
518            skb->ip_summed = CHECKSUM_UNNECESSARY;
519            return 0;
520        }
521        skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
522                             &ipv6_hdr(skb)->daddr,
523                             skb->len, IPPROTO_UDP,
524                             0));
525    } else
526#endif
527    {
528        struct inet_sock *inet;
529        if (!uh->check)
530            return 0;
531        inet = inet_sk(sk);
532        psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
533                      ulen, IPPROTO_UDP, 0);
534
535        if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
536            !csum_fold(csum_add(psum, skb->csum)))
537            return 0;
538        skb->csum = psum;
539    }
540
541    return __skb_checksum_complete(skb);
542}
543
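/* Check whether a received sequence number lies within the rx window, i.e.
 * is no more than nr_window_size ahead of the next expected nr. The distance
 * is computed modulo (nr_max + 1) so that wrapped sequence numbers are
 * handled correctly.
 */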
544static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
545{
546    u32 nws;
547
548    if (nr >= session->nr)
549        nws = nr - session->nr;
550    else
551        nws = (session->nr_max + 1) - (session->nr - nr);
552
553    return nws < session->nr_window_size;
554}
555
556/* If packet has sequence numbers, queue it if acceptable. Returns 0 if
557 * acceptable, else non-zero.
558 */
559static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
560{
561    if (!l2tp_seq_check_rx_window(session, L2TP_SKB_CB(skb)->ns)) {
562        /* Packet sequence number is outside allowed window.
563         * Discard it.
564         */
565        l2tp_dbg(session, L2TP_MSG_SEQ,
566             "%s: pkt %u len %d discarded, outside window, nr=%u\n",
567             session->name, L2TP_SKB_CB(skb)->ns,
568             L2TP_SKB_CB(skb)->length, session->nr);
569        goto discard;
570    }
571
572    if (session->reorder_timeout != 0) {
573        /* Packet reordering enabled. Add skb to session's
574         * reorder queue, in order of ns.
575         */
576        l2tp_recv_queue_skb(session, skb);
577        goto out;
578    }
579
580    /* Packet reordering disabled. Discard out-of-sequence packets, while
581     * tracking the number of in-sequence packets after the first OOS packet
582     * is seen. After nr_oos_count_max in-sequence packets, reset the
583     * sequence number to re-enable packet reception.
584     */
585    if (L2TP_SKB_CB(skb)->ns == session->nr) {
586        skb_queue_tail(&session->reorder_q, skb);
587    } else {
588        u32 nr_oos = L2TP_SKB_CB(skb)->ns;
589        u32 nr_next = (session->nr_oos + 1) & session->nr_max;
590
591        if (nr_oos == nr_next)
592            session->nr_oos_count++;
593        else
594            session->nr_oos_count = 0;
595
596        session->nr_oos = nr_oos;
597        if (session->nr_oos_count > session->nr_oos_count_max) {
598            session->reorder_skip = 1;
599            l2tp_dbg(session, L2TP_MSG_SEQ,
600                 "%s: %d oos packets received. Resetting sequence numbers\n",
601                 session->name, session->nr_oos_count);
602        }
603        if (!session->reorder_skip) {
604            atomic_long_inc(&session->stats.rx_seq_discards);
605            l2tp_dbg(session, L2TP_MSG_SEQ,
606                 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
607                 session->name, L2TP_SKB_CB(skb)->ns,
608                 L2TP_SKB_CB(skb)->length, session->nr,
609                 skb_queue_len(&session->reorder_q));
610            goto discard;
611        }
612        skb_queue_tail(&session->reorder_q, skb);
613    }
614
615out:
616    return 0;
617
618discard:
619    return 1;
620}
621
622/* Do receive processing of L2TP data frames. We handle both L2TPv2
623 * and L2TPv3 data frames here.
624 *
625 * L2TPv2 Data Message Header
626 *
627 *  0                   1                   2                   3
628 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
629 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
630 * |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
631 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
632 * |           Tunnel ID           |           Session ID          |
633 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
634 * |             Ns (opt)          |             Nr (opt)          |
635 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
636 * |      Offset Size (opt)        |    Offset pad... (opt)
637 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
638 *
639 * Data frames are marked by T=0. All other fields are the same as
640 * those in L2TP control frames.
641 *
642 * L2TPv3 Data Message Header
643 *
644 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
645 * |                      L2TP Session Header                      |
646 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
647 * |                     L2-Specific Sublayer                      |
648 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
649 * |                        Tunnel Payload                      ...
650 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
651 *
652 * L2TPv3 Session Header Over IP
653 *
654 *  0                   1                   2                   3
655 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
656 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
657 * |                           Session ID                          |
658 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
659 * |               Cookie (optional, maximum 64 bits)...
660 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
661 *                                                                 |
662 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
663 *
664 * L2TPv3 L2-Specific Sublayer Format
665 *
666 *  0                   1                   2                   3
667 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
668 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
669 * |x|S|x|x|x|x|x|x|                 Sequence Number               |
670 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
671 *
672 * Cookie value, sublayer format and offset (pad) are negotiated with
673 * the peer when the session is set up. Unlike L2TPv2, we do not need
674 * to parse the packet header to determine if optional fields are
675 * present.
676 *
677 * Caller must already have parsed the frame and determined that it is
678 * a data (not control) frame before coming here. Fields up to the
679 * session-id have already been parsed and ptr points to the data
680 * after the session-id.
681 */
682void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
683              unsigned char *ptr, unsigned char *optr, u16 hdrflags,
684              int length, int (*payload_hook)(struct sk_buff *skb))
685{
686    struct l2tp_tunnel *tunnel = session->tunnel;
687    int offset;
688    u32 ns, nr;
689
690    /* The ref count is increased since we now hold a pointer to
691     * the session. Take care to decrement the refcnt when exiting
692     * this function from now on...
693     */
694    l2tp_session_inc_refcount(session);
695    if (session->ref)
696        (*session->ref)(session);
697
698    /* Parse and check optional cookie */
699    if (session->peer_cookie_len > 0) {
700        if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
701            l2tp_info(tunnel, L2TP_MSG_DATA,
702                  "%s: cookie mismatch (%u/%u). Discarding.\n",
703                  tunnel->name, tunnel->tunnel_id,
704                  session->session_id);
705            atomic_long_inc(&session->stats.rx_cookie_discards);
706            goto discard;
707        }
708        ptr += session->peer_cookie_len;
709    }
710
711    /* Handle the optional sequence numbers. Sequence numbers are
712     * in different places for L2TPv2 and L2TPv3.
713     *
714     * If we are the LAC, enable/disable sequence numbers under
715     * the control of the LNS. If no sequence numbers present but
716     * we were expecting them, discard frame.
717     */
718    ns = nr = 0;
719    L2TP_SKB_CB(skb)->has_seq = 0;
720    if (tunnel->version == L2TP_HDR_VER_2) {
721        if (hdrflags & L2TP_HDRFLAG_S) {
722            ns = ntohs(*(__be16 *) ptr);
723            ptr += 2;
724            nr = ntohs(*(__be16 *) ptr);
725            ptr += 2;
726
727            /* Store L2TP info in the skb */
728            L2TP_SKB_CB(skb)->ns = ns;
729            L2TP_SKB_CB(skb)->has_seq = 1;
730
731            l2tp_dbg(session, L2TP_MSG_SEQ,
732                 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
733                 session->name, ns, nr, session->nr);
734        }
735    } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
736        u32 l2h = ntohl(*(__be32 *) ptr);
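        /* In the default L2-specific sublayer, bit 0x40000000 is the S
         * (sequence) flag (L2TP_SLFLAG_S) and the low 24 bits carry the
         * sequence number (L2TP_SL_SEQ_MASK).
         */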
737
738        if (l2h & 0x40000000) {
739            ns = l2h & 0x00ffffff;
740
741            /* Store L2TP info in the skb */
742            L2TP_SKB_CB(skb)->ns = ns;
743            L2TP_SKB_CB(skb)->has_seq = 1;
744
745            l2tp_dbg(session, L2TP_MSG_SEQ,
746                 "%s: recv data ns=%u, session nr=%u\n",
747                 session->name, ns, session->nr);
748        }
749    }
750
751    /* Advance past L2-specific header, if present */
752    ptr += session->l2specific_len;
753
754    if (L2TP_SKB_CB(skb)->has_seq) {
755        /* Received a packet with sequence numbers. If we're the LAC,
756         * check if we are sending sequence numbers and if not,
757         * configure it so.
758         */
759        if ((!session->lns_mode) && (!session->send_seq)) {
760            l2tp_info(session, L2TP_MSG_SEQ,
761                  "%s: requested to enable seq numbers by LNS\n",
762                  session->name);
763            session->send_seq = -1;
764            l2tp_session_set_header_len(session, tunnel->version);
765        }
766    } else {
767        /* No sequence numbers.
768         * If user has configured mandatory sequence numbers, discard.
769         */
770        if (session->recv_seq) {
771            l2tp_warn(session, L2TP_MSG_SEQ,
772                  "%s: recv data has no seq numbers when required. Discarding.\n",
773                  session->name);
774            atomic_long_inc(&session->stats.rx_seq_discards);
775            goto discard;
776        }
777
778        /* If we're the LAC and we're sending sequence numbers, the
779         * LNS has requested that we no longer send sequence numbers.
780         * If we're the LNS and we're sending sequence numbers, the
781         * LAC is broken. Discard the frame.
782         */
783        if ((!session->lns_mode) && (session->send_seq)) {
784            l2tp_info(session, L2TP_MSG_SEQ,
785                  "%s: requested to disable seq numbers by LNS\n",
786                  session->name);
787            session->send_seq = 0;
788            l2tp_session_set_header_len(session, tunnel->version);
789        } else if (session->send_seq) {
790            l2tp_warn(session, L2TP_MSG_SEQ,
791                  "%s: recv data has no seq numbers when required. Discarding.\n",
792                  session->name);
793            atomic_long_inc(&session->stats.rx_seq_discards);
794            goto discard;
795        }
796    }
797
798    /* Session data offset is handled differently for L2TPv2 and
799     * L2TPv3. For L2TPv2, there is an optional 16-bit value in
800     * the header. For L2TPv3, the offset is negotiated using AVPs
801     * in the session setup control protocol.
802     */
803    if (tunnel->version == L2TP_HDR_VER_2) {
804        /* If offset bit set, skip it. */
805        if (hdrflags & L2TP_HDRFLAG_O) {
806            offset = ntohs(*(__be16 *)ptr);
807            ptr += 2 + offset;
808        }
809    } else
810        ptr += session->offset;
811
812    offset = ptr - optr;
813    if (!pskb_may_pull(skb, offset))
814        goto discard;
815
816    __skb_pull(skb, offset);
817
818    /* If caller wants to process the payload before we queue the
819     * packet, do so now.
820     */
821    if (payload_hook)
822        if ((*payload_hook)(skb))
823            goto discard;
824
825    /* Prepare skb for adding to the session's reorder_q. Hold
826     * packets for max reorder_timeout or 1 second if not
827     * reordering.
828     */
829    L2TP_SKB_CB(skb)->length = length;
830    L2TP_SKB_CB(skb)->expires = jiffies +
831        (session->reorder_timeout ? session->reorder_timeout : HZ);
832
833    /* Add packet to the session's receive queue. Reordering is done here, if
834     * enabled. Saved L2TP protocol info is stored in skb->cb[].
835     */
836    if (L2TP_SKB_CB(skb)->has_seq) {
837        if (l2tp_recv_data_seq(session, skb))
838            goto discard;
839    } else {
840        /* No sequence numbers. Add the skb to the tail of the
841         * reorder queue. This ensures that it will be
842         * delivered after all previous sequenced skbs.
843         */
844        skb_queue_tail(&session->reorder_q, skb);
845    }
846
847    /* Try to dequeue as many skbs from reorder_q as we can. */
848    l2tp_recv_dequeue(session);
849
850    l2tp_session_dec_refcount(session);
851
852    return;
853
854discard:
855    atomic_long_inc(&session->stats.rx_errors);
856    kfree_skb(skb);
857
858    if (session->deref)
859        (*session->deref)(session);
860
861    l2tp_session_dec_refcount(session);
862}
863EXPORT_SYMBOL(l2tp_recv_common);
864
865/* Drop skbs from the session's reorder_q
866 */
867int l2tp_session_queue_purge(struct l2tp_session *session)
868{
869    struct sk_buff *skb = NULL;
870    BUG_ON(!session);
871    BUG_ON(session->magic != L2TP_SESSION_MAGIC);
872    while ((skb = skb_dequeue(&session->reorder_q))) {
873        atomic_long_inc(&session->stats.rx_errors);
874        kfree_skb(skb);
875        if (session->deref)
876            (*session->deref)(session);
877    }
878    return 0;
879}
880EXPORT_SYMBOL_GPL(l2tp_session_queue_purge);
881
882/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
883 * here. The skb is not on a list when we get here.
884 * Returns 0 if the packet was a data packet and was successfully passed on.
885 * Returns 1 if the packet was not a good data packet and could not be
886 * forwarded. All such packets are passed up to userspace to deal with.
887 */
888static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
889                  int (*payload_hook)(struct sk_buff *skb))
890{
891    struct l2tp_session *session = NULL;
892    unsigned char *ptr, *optr;
893    u16 hdrflags;
894    u32 tunnel_id, session_id;
895    u16 version;
896    int length;
897
898    if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
899        goto discard_bad_csum;
900
901    /* UDP always verifies the packet length. */
902    __skb_pull(skb, sizeof(struct udphdr));
903
904    /* Short packet? */
905    if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
906        l2tp_info(tunnel, L2TP_MSG_DATA,
907              "%s: recv short packet (len=%d)\n",
908              tunnel->name, skb->len);
909        goto error;
910    }
911
912    /* Trace packet contents, if enabled */
913    if (tunnel->debug & L2TP_MSG_DATA) {
914        length = min(32u, skb->len);
915        if (!pskb_may_pull(skb, length))
916            goto error;
917
918        pr_debug("%s: recv\n", tunnel->name);
919        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
920    }
921
922    /* Point to L2TP header */
923    optr = ptr = skb->data;
924
925    /* Get L2TP header flags */
926    hdrflags = ntohs(*(__be16 *) ptr);
927
928    /* Check protocol version */
929    version = hdrflags & L2TP_HDR_VER_MASK;
930    if (version != tunnel->version) {
931        l2tp_info(tunnel, L2TP_MSG_DATA,
932              "%s: recv protocol version mismatch: got %d expected %d\n",
933              tunnel->name, version, tunnel->version);
934        goto error;
935    }
936
937    /* Get length of L2TP packet */
938    length = skb->len;
939
940    /* If type is control packet, it is handled by userspace. */
941    if (hdrflags & L2TP_HDRFLAG_T) {
942        l2tp_dbg(tunnel, L2TP_MSG_DATA,
943             "%s: recv control packet, len=%d\n",
944             tunnel->name, length);
945        goto error;
946    }
947
948    /* Skip flags */
949    ptr += 2;
950
951    if (tunnel->version == L2TP_HDR_VER_2) {
952        /* If length is present, skip it */
953        if (hdrflags & L2TP_HDRFLAG_L)
954            ptr += 2;
955
956        /* Extract tunnel and session ID */
957        tunnel_id = ntohs(*(__be16 *) ptr);
958        ptr += 2;
959        session_id = ntohs(*(__be16 *) ptr);
960        ptr += 2;
961    } else {
962        ptr += 2; /* skip reserved bits */
963        tunnel_id = tunnel->tunnel_id;
964        session_id = ntohl(*(__be32 *) ptr);
965        ptr += 4;
966    }
967
968    /* Find the session context */
969    session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
970    if (!session || !session->recv_skb) {
971        /* Not found? Pass to userspace to deal with */
972        l2tp_info(tunnel, L2TP_MSG_DATA,
973              "%s: no session found (%u/%u). Passing up.\n",
974              tunnel->name, tunnel_id, session_id);
975        goto error;
976    }
977
978    l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
979
980    return 0;
981
982discard_bad_csum:
983    LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
984    UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
985    atomic_long_inc(&tunnel->stats.rx_errors);
986    kfree_skb(skb);
987
988    return 0;
989
990error:
991    /* Put UDP header back */
992    __skb_push(skb, sizeof(struct udphdr));
993
994    return 1;
995}
996
997/* UDP encapsulation receive handler. See net/ipv4/udp.c.
998 * Return codes:
999 * 0 : success.
1000 * <0: error
1001 * >0: skb should be passed up to userspace as UDP.
1002 */
1003int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1004{
1005    struct l2tp_tunnel *tunnel;
1006
1007    tunnel = l2tp_sock_to_tunnel(sk);
1008    if (tunnel == NULL)
1009        goto pass_up;
1010
1011    l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
1012         tunnel->name, skb->len);
1013
1014    if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
1015        goto pass_up_put;
1016
1017    sock_put(sk);
1018    return 0;
1019
1020pass_up_put:
1021    sock_put(sk);
1022pass_up:
1023    return 1;
1024}
1025EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
1026
1027/************************************************************************
1028 * Transmit handling
1029 ***********************************************************************/
1030
1031/* Build an L2TP header for the session into the buffer provided.
1032 */
1033static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
1034{
1035    struct l2tp_tunnel *tunnel = session->tunnel;
1036    __be16 *bufp = buf;
1037    __be16 *optr = buf;
1038    u16 flags = L2TP_HDR_VER_2;
1039    u32 tunnel_id = tunnel->peer_tunnel_id;
1040    u32 session_id = session->peer_session_id;
1041
1042    if (session->send_seq)
1043        flags |= L2TP_HDRFLAG_S;
1044
1045    /* Setup L2TP header. */
1046    *bufp++ = htons(flags);
1047    *bufp++ = htons(tunnel_id);
1048    *bufp++ = htons(session_id);
1049    if (session->send_seq) {
1050        *bufp++ = htons(session->ns);
1051        *bufp++ = 0;
1052        session->ns++;
1053        session->ns &= 0xffff;
1054        l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
1055             session->name, session->ns);
1056    }
1057
1058    return bufp - optr;
1059}
1060
1061static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
1062{
1063    struct l2tp_tunnel *tunnel = session->tunnel;
1064    char *bufp = buf;
1065    char *optr = bufp;
1066
1067    /* Setup L2TP header. The header differs slightly for UDP and
1068     * IP encapsulations. For UDP, there is 4 bytes of flags.
1069     */
1070    if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
1071        u16 flags = L2TP_HDR_VER_3;
1072        *((__be16 *) bufp) = htons(flags);
1073        bufp += 2;
1074        *((__be16 *) bufp) = 0;
1075        bufp += 2;
1076    }
1077
1078    *((__be32 *) bufp) = htonl(session->peer_session_id);
1079    bufp += 4;
1080    if (session->cookie_len) {
1081        memcpy(bufp, &session->cookie[0], session->cookie_len);
1082        bufp += session->cookie_len;
1083    }
1084    if (session->l2specific_len) {
1085        if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
1086            u32 l2h = 0;
1087            if (session->send_seq) {
1088                l2h = 0x40000000 | session->ns;
1089                session->ns++;
1090                session->ns &= 0xffffff;
1091                l2tp_dbg(session, L2TP_MSG_SEQ,
1092                     "%s: updated ns to %u\n",
1093                     session->name, session->ns);
1094            }
1095
1096            *((__be32 *) bufp) = htonl(l2h);
1097        }
1098        bufp += session->l2specific_len;
1099    }
1100    if (session->offset)
1101        bufp += session->offset;
1102
1103    return bufp - optr;
1104}
1105
1106static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
1107              struct flowi *fl, size_t data_len)
1108{
1109    struct l2tp_tunnel *tunnel = session->tunnel;
1110    unsigned int len = skb->len;
1111    int error;
1112
1113    /* Debug */
1114    if (session->send_seq)
1115        l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes, ns=%u\n",
1116             session->name, data_len, session->ns - 1);
1117    else
1118        l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes\n",
1119             session->name, data_len);
1120
1121    if (session->debug & L2TP_MSG_DATA) {
1122        int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1123        unsigned char *datap = skb->data + uhlen;
1124
1125        pr_debug("%s: xmit\n", session->name);
1126        print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
1127                     datap, min_t(size_t, 32, len - uhlen));
1128    }
1129
1130    /* Queue the packet to IP for output */
1131    skb->local_df = 1;
1132#if IS_ENABLED(CONFIG_IPV6)
1133    if (tunnel->sock->sk_family == PF_INET6 && !tunnel->v4mapped)
1134        error = inet6_csk_xmit(tunnel->sock, skb, NULL);
1135    else
1136#endif
1137        error = ip_queue_xmit(tunnel->sock, skb, fl);
1138
1139    /* Update stats */
1140    if (error >= 0) {
1141        atomic_long_inc(&tunnel->stats.tx_packets);
1142        atomic_long_add(len, &tunnel->stats.tx_bytes);
1143        atomic_long_inc(&session->stats.tx_packets);
1144        atomic_long_add(len, &session->stats.tx_bytes);
1145    } else {
1146        atomic_long_inc(&tunnel->stats.tx_errors);
1147        atomic_long_inc(&session->stats.tx_errors);
1148    }
1149
1150    return 0;
1151}
1152
1153#if IS_ENABLED(CONFIG_IPV6)
1154static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
1155                int udp_len)
1156{
1157    struct ipv6_pinfo *np = inet6_sk(sk);
1158    struct udphdr *uh = udp_hdr(skb);
1159
1160    if (!skb_dst(skb) || !skb_dst(skb)->dev ||
1161        !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
1162        __wsum csum = skb_checksum(skb, 0, udp_len, 0);
1163        skb->ip_summed = CHECKSUM_UNNECESSARY;
1164        uh->check = csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr, udp_len,
1165                        IPPROTO_UDP, csum);
1166        if (uh->check == 0)
1167            uh->check = CSUM_MANGLED_0;
1168    } else {
1169        skb->ip_summed = CHECKSUM_PARTIAL;
1170        skb->csum_start = skb_transport_header(skb) - skb->head;
1171        skb->csum_offset = offsetof(struct udphdr, check);
1172        uh->check = ~csum_ipv6_magic(&np->saddr, &sk->sk_v6_daddr,
1173                         udp_len, IPPROTO_UDP, 0);
1174    }
1175}
1176#endif
1177
1178/* If caller requires the skb to have a ppp header, the header must be
1179 * inserted in the skb data before calling this function.
1180 */
1181int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
1182{
1183    int data_len = skb->len;
1184    struct l2tp_tunnel *tunnel = session->tunnel;
1185    struct sock *sk = tunnel->sock;
1186    struct flowi *fl;
1187    struct udphdr *uh;
1188    struct inet_sock *inet;
1189    __wsum csum;
1190    int headroom;
1191    int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1192    int udp_len;
1193    int ret = NET_XMIT_SUCCESS;
1194
1195    /* Check that there's enough headroom in the skb to insert IP,
1196     * UDP and L2TP headers. If not enough, expand it to
1197     * make room. Adjust truesize.
1198     */
1199    headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1200        uhlen + hdr_len;
1201    if (skb_cow_head(skb, headroom)) {
1202        kfree_skb(skb);
1203        return NET_XMIT_DROP;
1204    }
1205
1206    /* Setup L2TP header */
1207    session->build_header(session, __skb_push(skb, hdr_len));
1208
1209    /* Reset skb netfilter state */
1210    memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1211    IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1212                  IPSKB_REROUTED);
1213    nf_reset(skb);
1214
1215    bh_lock_sock(sk);
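    /* The socket is currently owned (locked) by a user-space context, so we
     * cannot safely transmit on it now; drop the frame rather than queue it.
     */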
1216    if (sock_owned_by_user(sk)) {
1217        kfree_skb(skb);
1218        ret = NET_XMIT_DROP;
1219        goto out_unlock;
1220    }
1221
1222    /* Get routing info from the tunnel socket */
1223    skb_dst_drop(skb);
1224    skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
1225
1226    inet = inet_sk(sk);
1227    fl = &inet->cork.fl;
1228    switch (tunnel->encap) {
1229    case L2TP_ENCAPTYPE_UDP:
1230        /* Setup UDP header */
1231        __skb_push(skb, sizeof(*uh));
1232        skb_reset_transport_header(skb);
1233        uh = udp_hdr(skb);
1234        uh->source = inet->inet_sport;
1235        uh->dest = inet->inet_dport;
1236        udp_len = uhlen + hdr_len + data_len;
1237        uh->len = htons(udp_len);
1238        uh->check = 0;
1239
1240        /* Calculate UDP checksum if configured to do so */
1241#if IS_ENABLED(CONFIG_IPV6)
1242        if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1243            l2tp_xmit_ipv6_csum(sk, skb, udp_len);
1244        else
1245#endif
1246        if (sk->sk_no_check == UDP_CSUM_NOXMIT)
1247            skb->ip_summed = CHECKSUM_NONE;
1248        else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1249             (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1250            skb->ip_summed = CHECKSUM_COMPLETE;
1251            csum = skb_checksum(skb, 0, udp_len, 0);
1252            uh->check = csum_tcpudp_magic(inet->inet_saddr,
1253                              inet->inet_daddr,
1254                              udp_len, IPPROTO_UDP, csum);
1255            if (uh->check == 0)
1256                uh->check = CSUM_MANGLED_0;
1257        } else {
1258            skb->ip_summed = CHECKSUM_PARTIAL;
1259            skb->csum_start = skb_transport_header(skb) - skb->head;
1260            skb->csum_offset = offsetof(struct udphdr, check);
1261            uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1262                               inet->inet_daddr,
1263                               udp_len, IPPROTO_UDP, 0);
1264        }
1265        break;
1266
1267    case L2TP_ENCAPTYPE_IP:
1268        break;
1269    }
1270
1271    l2tp_xmit_core(session, skb, fl, data_len);
1272out_unlock:
1273    bh_unlock_sock(sk);
1274
1275    return ret;
1276}
1277EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1278
1279/*****************************************************************************
1280 * Tunnel and session create/destroy.
1281 *****************************************************************************/
1282
1283/* Tunnel socket destruct hook.
1284 * The tunnel context is deleted only when all session sockets have been
1285 * closed.
1286 */
1287static void l2tp_tunnel_destruct(struct sock *sk)
1288{
1289    struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
1290    struct l2tp_net *pn;
1291
1292    if (tunnel == NULL)
1293        goto end;
1294
1295    l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
1296
1297
1298    /* Disable udp encapsulation */
1299    switch (tunnel->encap) {
1300    case L2TP_ENCAPTYPE_UDP:
1301        /* No longer an encapsulation socket. See net/ipv4/udp.c */
1302        (udp_sk(sk))->encap_type = 0;
1303        (udp_sk(sk))->encap_rcv = NULL;
1304        (udp_sk(sk))->encap_destroy = NULL;
1305        break;
1306    case L2TP_ENCAPTYPE_IP:
1307        break;
1308    }
1309
1310    /* Remove hooks into tunnel socket */
1311    sk->sk_destruct = tunnel->old_sk_destruct;
1312    sk->sk_user_data = NULL;
1313    tunnel->sock = NULL;
1314
1315    /* Remove the tunnel struct from the tunnel list */
1316    pn = l2tp_pernet(tunnel->l2tp_net);
1317    spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1318    list_del_rcu(&tunnel->list);
1319    spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1320    atomic_dec(&l2tp_tunnel_count);
1321
1322    l2tp_tunnel_closeall(tunnel);
1323    l2tp_tunnel_dec_refcount(tunnel);
1324
1325    /* Call the original destructor */
1326    if (sk->sk_destruct)
1327        (*sk->sk_destruct)(sk);
1328end:
1329    return;
1330}
1331
1332/* When the tunnel is closed, all the attached sessions need to go too.
1333 */
1334void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1335{
1336    int hash;
1337    struct hlist_node *walk;
1338    struct hlist_node *tmp;
1339    struct l2tp_session *session;
1340
1341    BUG_ON(tunnel == NULL);
1342
1343    l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
1344          tunnel->name);
1345
1346    write_lock_bh(&tunnel->hlist_lock);
1347    for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1348again:
1349        hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1350            session = hlist_entry(walk, struct l2tp_session, hlist);
1351
1352            l2tp_info(session, L2TP_MSG_CONTROL,
1353                  "%s: closing session\n", session->name);
1354
1355            hlist_del_init(&session->hlist);
1356
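            /* Invoke the owner's ref hook so the session's user (for
             * example a pppol2tp socket) cannot go away while we drop
             * hlist_lock to run the close hooks below.
             */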
1357            if (session->ref != NULL)
1358                (*session->ref)(session);
1359
1360            write_unlock_bh(&tunnel->hlist_lock);
1361
1362            __l2tp_session_unhash(session);
1363            l2tp_session_queue_purge(session);
1364
1365            if (session->session_close != NULL)
1366                (*session->session_close)(session);
1367
1368            if (session->deref != NULL)
1369                (*session->deref)(session);
1370
1371            l2tp_session_dec_refcount(session);
1372
1373            write_lock_bh(&tunnel->hlist_lock);
1374
1375            /* Now restart from the beginning of this hash
1376             * chain. We always remove a session from the
1377             * list so we are guaranteed to make forward
1378             * progress.
1379             */
1380            goto again;
1381        }
1382    }
1383    write_unlock_bh(&tunnel->hlist_lock);
1384}
1385EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1386
1387/* Tunnel socket destroy hook for UDP encapsulation */
1388static void l2tp_udp_encap_destroy(struct sock *sk)
1389{
1390    struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);
1391    if (tunnel) {
1392        l2tp_tunnel_closeall(tunnel);
1393        sock_put(sk);
1394    }
1395}
1396
1397/* Really kill the tunnel.
1398 * Come here only when all sessions have been cleared from the tunnel.
1399 */
1400static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1401{
1402    BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1403    BUG_ON(tunnel->sock != NULL);
1404    l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: free...\n", tunnel->name);
1405    kfree_rcu(tunnel, rcu);
1406}
1407
1408/* Workqueue tunnel deletion function */
1409static void l2tp_tunnel_del_work(struct work_struct *work)
1410{
1411    struct l2tp_tunnel *tunnel = NULL;
1412    struct socket *sock = NULL;
1413    struct sock *sk = NULL;
1414
1415    tunnel = container_of(work, struct l2tp_tunnel, del_work);
1416    sk = l2tp_tunnel_sock_lookup(tunnel);
1417    if (!sk)
1418        return;
1419
1420    sock = sk->sk_socket;
1421
1422    /* If the tunnel socket was created by userspace, then go through the
1423     * inet layer to shut the socket down, and let userspace close it.
1424     * Otherwise, if we created the socket directly within the kernel, use
1425     * the sk API to release it here.
1426     * In either case the tunnel resources are freed in the socket
1427     * destructor when the tunnel socket goes away.
1428     */
1429    if (tunnel->fd >= 0) {
1430        if (sock)
1431            inet_shutdown(sock, 2);
1432    } else {
1433        if (sock)
1434            kernel_sock_shutdown(sock, SHUT_RDWR);
1435        sk_release_kernel(sk);
1436    }
1437
1438    l2tp_tunnel_sock_put(sk);
1439}
1440
1441/* Create a socket for the tunnel, if one isn't set up by
1442 * userspace. This is used for static tunnels where there is no
1443 * managing L2TP daemon.
1444 *
1445 * Since we don't want these sockets to keep a namespace alive by
1446 * themselves, we drop the socket's namespace refcount after creation.
1447 * These sockets are freed when the namespace exits using the pernet
1448 * exit hook.
1449 */
1450static int l2tp_tunnel_sock_create(struct net *net,
1451                u32 tunnel_id,
1452                u32 peer_tunnel_id,
1453                struct l2tp_tunnel_cfg *cfg,
1454                struct socket **sockp)
1455{
1456    int err = -EINVAL;
1457    struct socket *sock = NULL;
1458    struct sockaddr_in udp_addr = {0};
1459    struct sockaddr_l2tpip ip_addr = {0};
1460#if IS_ENABLED(CONFIG_IPV6)
1461    struct sockaddr_in6 udp6_addr = {0};
1462    struct sockaddr_l2tpip6 ip6_addr = {0};
1463#endif
1464
1465    switch (cfg->encap) {
1466    case L2TP_ENCAPTYPE_UDP:
1467#if IS_ENABLED(CONFIG_IPV6)
1468        if (cfg->local_ip6 && cfg->peer_ip6) {
1469            err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
1470            if (err < 0)
1471                goto out;
1472
1473            sk_change_net(sock->sk, net);
1474
1475            udp6_addr.sin6_family = AF_INET6;
1476            memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
1477                   sizeof(udp6_addr.sin6_addr));
1478            udp6_addr.sin6_port = htons(cfg->local_udp_port);
1479            err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
1480                      sizeof(udp6_addr));
1481            if (err < 0)
1482                goto out;
1483
1484            udp6_addr.sin6_family = AF_INET6;
1485            memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
1486                   sizeof(udp6_addr.sin6_addr));
1487            udp6_addr.sin6_port = htons(cfg->peer_udp_port);
1488            err = kernel_connect(sock,
1489                         (struct sockaddr *) &udp6_addr,
1490                         sizeof(udp6_addr), 0);
1491            if (err < 0)
1492                goto out;
1493        } else
1494#endif
1495        {
1496            err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
1497            if (err < 0)
1498                goto out;
1499
1500            sk_change_net(sock->sk, net);
1501
1502            udp_addr.sin_family = AF_INET;
1503            udp_addr.sin_addr = cfg->local_ip;
1504            udp_addr.sin_port = htons(cfg->local_udp_port);
1505            err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
1506                      sizeof(udp_addr));
1507            if (err < 0)
1508                goto out;
1509
1510            udp_addr.sin_family = AF_INET;
1511            udp_addr.sin_addr = cfg->peer_ip;
1512            udp_addr.sin_port = htons(cfg->peer_udp_port);
1513            err = kernel_connect(sock,
1514                         (struct sockaddr *) &udp_addr,
1515                         sizeof(udp_addr), 0);
1516            if (err < 0)
1517                goto out;
1518        }
1519
1520        if (!cfg->use_udp_checksums)
1521            sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
1522
1523        break;
1524
1525    case L2TP_ENCAPTYPE_IP:
1526#if IS_ENABLED(CONFIG_IPV6)
1527        if (cfg->local_ip6 && cfg->peer_ip6) {
1528            err = sock_create_kern(AF_INET6, SOCK_DGRAM,
1529                      IPPROTO_L2TP, &sock);
1530            if (err < 0)
1531                goto out;
1532
1533            sk_change_net(sock->sk, net);
1534
1535            ip6_addr.l2tp_family = AF_INET6;
1536            memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1537                   sizeof(ip6_addr.l2tp_addr));
1538            ip6_addr.l2tp_conn_id = tunnel_id;
1539            err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
1540                      sizeof(ip6_addr));
1541            if (err < 0)
1542                goto out;
1543
1544            ip6_addr.l2tp_family = AF_INET6;
1545            memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1546                   sizeof(ip6_addr.l2tp_addr));
1547            ip6_addr.l2tp_conn_id = peer_tunnel_id;
1548            err = kernel_connect(sock,
1549                         (struct sockaddr *) &ip6_addr,
1550                         sizeof(ip6_addr), 0);
1551            if (err < 0)
1552                goto out;
1553        } else
1554#endif
1555        {
1556            err = sock_create_kern(AF_INET, SOCK_DGRAM,
1557                      IPPROTO_L2TP, &sock);
1558            if (err < 0)
1559                goto out;
1560
1561            sk_change_net(sock->sk, net);
1562
1563            ip_addr.l2tp_family = AF_INET;
1564            ip_addr.l2tp_addr = cfg->local_ip;
1565            ip_addr.l2tp_conn_id = tunnel_id;
1566            err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
1567                      sizeof(ip_addr));
1568            if (err < 0)
1569                goto out;
1570
1571            ip_addr.l2tp_family = AF_INET;
1572            ip_addr.l2tp_addr = cfg->peer_ip;
1573            ip_addr.l2tp_conn_id = peer_tunnel_id;
1574            err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
1575                         sizeof(ip_addr), 0);
1576            if (err < 0)
1577                goto out;
1578        }
1579        break;
1580
1581    default:
1582        goto out;
1583    }
1584
1585out:
1586    *sockp = sock;
1587    if ((err < 0) && sock) {
1588        kernel_sock_shutdown(sock, SHUT_RDWR);
1589        sk_release_kernel(sock->sk);
1590        *sockp = NULL;
1591    }
1592
1593    return err;
1594}
1595
1596static struct lock_class_key l2tp_socket_class;
1597
1598int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1599{
1600    struct l2tp_tunnel *tunnel = NULL;
1601    int err;
1602    struct socket *sock = NULL;
1603    struct sock *sk = NULL;
1604    struct l2tp_net *pn;
1605    enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1606
1607    /* Get the tunnel socket from the fd, which was opened by
1608     * the userspace L2TP daemon. If not specified, create a
1609     * kernel socket.
1610     */
1611    if (fd < 0) {
1612        err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
1613                cfg, &sock);
1614        if (err < 0)
1615            goto err;
1616    } else {
1617        sock = sockfd_lookup(fd, &err);
1618        if (!sock) {
1619            pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
1620                   tunnel_id, fd, err);
1621            err = -EBADF;
1622            goto err;
1623        }
1624
1625        /* Reject namespace mismatches */
1626        if (!net_eq(sock_net(sock->sk), net)) {
1627            pr_err("tunl %u: netns mismatch\n", tunnel_id);
1628            err = -EINVAL;
1629            goto err;
1630        }
1631    }
1632
1633    sk = sock->sk;
1634
1635    if (cfg != NULL)
1636        encap = cfg->encap;
1637
1638    /* Quick sanity checks */
1639    switch (encap) {
1640    case L2TP_ENCAPTYPE_UDP:
1641        err = -EPROTONOSUPPORT;
1642        if (sk->sk_protocol != IPPROTO_UDP) {
1643            pr_err("tunl %u: fd %d wrong protocol, got %d, expected %d\n",
1644                   tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1645            goto err;
1646        }
1647        break;
1648    case L2TP_ENCAPTYPE_IP:
1649        err = -EPROTONOSUPPORT;
1650        if (sk->sk_protocol != IPPROTO_L2TP) {
1651            pr_err("tunl %u: fd %d wrong protocol, got %d, expected %d\n",
1652                   tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
1653            goto err;
1654        }
1655        break;
1656    }
1657
1658    /* Check if this socket has already been prepped */
1659    tunnel = l2tp_tunnel(sk);
1660    if (tunnel != NULL) {
1661        /* This socket has already been prepped */
1662        err = -EBUSY;
1663        goto err;
1664    }
1665
1666    tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
1667    if (tunnel == NULL) {
1668        err = -ENOMEM;
1669        goto err;
1670    }
1671
1672    tunnel->version = version;
1673    tunnel->tunnel_id = tunnel_id;
1674    tunnel->peer_tunnel_id = peer_tunnel_id;
1675    tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
1676
1677    tunnel->magic = L2TP_TUNNEL_MAGIC;
1678    sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1679    rwlock_init(&tunnel->hlist_lock);
1680
1681    /* The net we belong to */
1682    tunnel->l2tp_net = net;
1683    pn = l2tp_pernet(net);
1684
1685    if (cfg != NULL)
1686        tunnel->debug = cfg->debug;
1687
1688#if IS_ENABLED(CONFIG_IPV6)
1689    if (sk->sk_family == PF_INET6) {
1690        struct ipv6_pinfo *np = inet6_sk(sk);
1691
1692        if (ipv6_addr_v4mapped(&np->saddr) &&
1693            ipv6_addr_v4mapped(&sk->sk_v6_daddr)) {
1694            struct inet_sock *inet = inet_sk(sk);
1695
1696            tunnel->v4mapped = true;
1697            inet->inet_saddr = np->saddr.s6_addr32[3];
1698            inet->inet_rcv_saddr = sk->sk_v6_rcv_saddr.s6_addr32[3];
1699            inet->inet_daddr = sk->sk_v6_daddr.s6_addr32[3];
1700        } else {
1701            tunnel->v4mapped = false;
1702        }
1703    }
1704#endif
1705
1706    /* Record the encapsulation type; for UDP encap, install the hooks below */
1707    tunnel->encap = encap;
1708    if (encap == L2TP_ENCAPTYPE_UDP) {
1709        /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1710        udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1711        udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1712        udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;
1713#if IS_ENABLED(CONFIG_IPV6)
1714        if (sk->sk_family == PF_INET6 && !tunnel->v4mapped)
1715            udpv6_encap_enable();
1716        else
1717#endif
1718        udp_encap_enable();
1719    }
1720
1721    sk->sk_user_data = tunnel;
1722
1723    /* Hook on the tunnel socket destructor so that we can cleanup
1724     * if the tunnel socket goes away.
1725     */
1726    tunnel->old_sk_destruct = sk->sk_destruct;
1727    sk->sk_destruct = &l2tp_tunnel_destruct;
1728    tunnel->sock = sk;
1729    tunnel->fd = fd;
1730    lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
1731
1732    sk->sk_allocation = GFP_ATOMIC;
1733
1734    /* Init delete workqueue struct */
1735    INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
1736
1737    /* Add tunnel to our list */
1738    INIT_LIST_HEAD(&tunnel->list);
1739    atomic_inc(&l2tp_tunnel_count);
1740
1741    /* Bump the reference count. The tunnel context is deleted
1742     * only when this drops to zero. Must be done before list insertion
1743     */
1744    l2tp_tunnel_inc_refcount(tunnel);
1745    spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1746    list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1747    spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1748
1749    err = 0;
1750err:
1751    if (tunnelp)
1752        *tunnelp = tunnel;
1753
1754    /* If tunnel's socket was created by the kernel, it doesn't
1755     * have a file.
1756     */
1757    if (sock && sock->file)
1758        sockfd_put(sock);
1759
1760    return err;
1761}
1762EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
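
/* Illustration only: a minimal sketch of an l2tp_tunnel_create() caller for a
 * managed tunnel whose UDP socket was opened by a userspace daemon and passed
 * in as a file descriptor. The IDs and config values below are made up; the
 * real in-tree caller is the L2TP netlink code, which fills the config from
 * netlink attributes.
 */
static int example_tunnel_create(struct net *net, int fd)
{
	struct l2tp_tunnel_cfg cfg = {
		.encap = L2TP_ENCAPTYPE_UDP,
		.debug = 0,
	};
	struct l2tp_tunnel *tunnel = NULL;
	int err;

	err = l2tp_tunnel_create(net, fd, 3, 42, 43, &cfg, &tunnel);
	if (err < 0)
		return err;

	/* The tunnel now sits on the per-net tunnel list and holds a
	 * reference; tear it down later with l2tp_tunnel_delete().
	 */
	return 0;
}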
1763
1764/* This function is used by the netlink TUNNEL_DELETE command. It closes all
1765 * sessions and defers the tunnel teardown itself to the l2tp workqueue. */
1766int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1767{
1768    l2tp_tunnel_closeall(tunnel);
1769    return !queue_work(l2tp_wq, &tunnel->del_work);
1770}
1771EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1772
1773/* Really kill the session.
1774 */
1775void l2tp_session_free(struct l2tp_session *session)
1776{
1777    struct l2tp_tunnel *tunnel = session->tunnel;
1778
1779    BUG_ON(atomic_read(&session->ref_count) != 0);
1780
1781    if (tunnel) {
1782        BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1783        if (session->session_id != 0)
1784            atomic_dec(&l2tp_session_count);
1785        sock_put(tunnel->sock);
1786        session->tunnel = NULL;
1787        l2tp_tunnel_dec_refcount(tunnel);
1788    }
1789
1790    kfree(session);
1791}
1792EXPORT_SYMBOL_GPL(l2tp_session_free);
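
/* Note: l2tp_session_free() is normally reached via the session refcount
 * helpers in l2tp_core.h rather than called directly. Ignoring the optional
 * L2TP_REFCNT_DEBUG wrappers, those helpers behave roughly like this:
 *
 *	static inline void l2tp_session_inc_refcount(struct l2tp_session *s)
 *	{
 *		atomic_inc(&s->ref_count);
 *	}
 *
 *	static inline void l2tp_session_dec_refcount(struct l2tp_session *s)
 *	{
 *		if (atomic_dec_and_test(&s->ref_count))
 *			l2tp_session_free(s);
 *	}
 */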
1793
1794/* Remove an l2tp session from l2tp_core's hash lists.
1795 * Provides a tidy-up interface for pseudowire code which can't just route all
1796 * shutdown via l2tp_session_delete and a pseudowire-specific session_close
1797 * callback.
1798 */
1799void __l2tp_session_unhash(struct l2tp_session *session)
1800{
1801    struct l2tp_tunnel *tunnel = session->tunnel;
1802
1803    /* Remove the session from core hashes */
1804    if (tunnel) {
1805        /* Remove from the per-tunnel hash */
1806        write_lock_bh(&tunnel->hlist_lock);
1807        hlist_del_init(&session->hlist);
1808        write_unlock_bh(&tunnel->hlist_lock);
1809
1810        /* For L2TPv3 we have a per-net hash: remove from there, too */
1811        if (tunnel->version != L2TP_HDR_VER_2) {
1812            struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1813            spin_lock_bh(&pn->l2tp_session_hlist_lock);
1814            hlist_del_init_rcu(&session->global_hlist);
1815            spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1816            synchronize_rcu();
1817        }
1818    }
1819}
1820EXPORT_SYMBOL_GPL(__l2tp_session_unhash);
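
/* Illustration only: pseudowire teardown code that cannot simply call
 * l2tp_session_delete() (for example because it runs from its own socket
 * release path and performs its close handling inline) might use this helper
 * together with l2tp_session_queue_purge(), along these lines:
 */
static void example_pw_teardown(struct l2tp_session *session)
{
	__l2tp_session_unhash(session);
	l2tp_session_queue_purge(session);

	/* ... pseudowire-specific close handling would go here ... */

	/* Drop the reference taken when the session was created, mirroring
	 * what l2tp_session_delete() does.
	 */
	l2tp_session_dec_refcount(session);
}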
1821
1822/* This function is used by the netlink SESSION_DELETE command and by
1823 * pseudowire modules.
1824 */
1825int l2tp_session_delete(struct l2tp_session *session)
1826{
1827    if (session->ref)
1828        (*session->ref)(session);
1829    __l2tp_session_unhash(session);
1830    l2tp_session_queue_purge(session);
1831    if (session->session_close != NULL)
1832        (*session->session_close)(session);
1833    if (session->deref)
1834        (*session->deref)(session);
1835    l2tp_session_dec_refcount(session);
1836    return 0;
1837}
1838EXPORT_SYMBOL_GPL(l2tp_session_delete);
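
/* The ref/deref/session_close hooks invoked above are optional callbacks
 * installed by pseudowire drivers. Purely as an illustration (the names below
 * are invented), a pseudowire might wire them up when it sets up its session:
 */
static void example_pw_session_close(struct l2tp_session *session)
{
	/* Tear down pseudowire-specific state, e.g. a net_device or socket. */
}

static void example_pw_init_session(struct l2tp_session *session)
{
	session->session_close = example_pw_session_close;
	/* ->ref/->deref, if set, should pin pseudowire-private state while
	 * l2tp_core may still reference it; they are left NULL here.
	 */
}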
1839
1840/* We come here whenever a session's send_seq, cookie_len or
1841 * l2specific_len parameters are set.
1842 */
1843void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1844{
1845    if (version == L2TP_HDR_VER_2) {
1846        session->hdr_len = 6;
1847        if (session->send_seq)
1848            session->hdr_len += 4;
1849    } else {
1850        session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
1851        if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1852            session->hdr_len += 4;
1853    }
1854
1855}
1856EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
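
/* Worked examples for the header lengths computed above, assuming the default
 * 4-byte L2-specific sublayer and no configured offset:
 *
 *	L2TPv2, no sequence numbers:	6 bytes
 *	L2TPv2, send_seq set:		6 + 4 (Ns/Nr)			= 10 bytes
 *	L2TPv3 over IP, 8-byte cookie:	4 (session ID) + 8 + 4		= 16 bytes
 *	L2TPv3 over UDP, 8-byte cookie:	4 + 8 + 4 + 4 (flags/version)	= 20 bytes
 */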
1857
1858struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1859{
1860    struct l2tp_session *session;
1861
1862    session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1863    if (session != NULL) {
1864        session->magic = L2TP_SESSION_MAGIC;
1865        session->tunnel = tunnel;
1866
1867        session->session_id = session_id;
1868        session->peer_session_id = peer_session_id;
1869        session->nr = 0;
1870        if (tunnel->version == L2TP_HDR_VER_2)
1871            session->nr_max = 0xffff;
1872        else
1873            session->nr_max = 0xffffff;
1874        session->nr_window_size = session->nr_max / 2;
1875        session->nr_oos_count_max = 4;
1876
1877        /* Use NR of first received packet */
1878        session->reorder_skip = 1;
1879
1880        sprintf(&session->name[0], "sess %u/%u",
1881            tunnel->tunnel_id, session->session_id);
1882
1883        skb_queue_head_init(&session->reorder_q);
1884
1885        INIT_HLIST_NODE(&session->hlist);
1886        INIT_HLIST_NODE(&session->global_hlist);
1887
1888        /* Inherit debug options from tunnel */
1889        session->debug = tunnel->debug;
1890
1891        if (cfg) {
1892            session->pwtype = cfg->pw_type;
1893            session->debug = cfg->debug;
1894            session->mtu = cfg->mtu;
1895            session->mru = cfg->mru;
1896            session->send_seq = cfg->send_seq;
1897            session->recv_seq = cfg->recv_seq;
1898            session->lns_mode = cfg->lns_mode;
1899            session->reorder_timeout = cfg->reorder_timeout;
1900            session->offset = cfg->offset;
1901            session->l2specific_type = cfg->l2specific_type;
1902            session->l2specific_len = cfg->l2specific_len;
1903            session->cookie_len = cfg->cookie_len;
1904            memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1905            session->peer_cookie_len = cfg->peer_cookie_len;
1906            memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1907        }
1908
1909        if (tunnel->version == L2TP_HDR_VER_2)
1910            session->build_header = l2tp_build_l2tpv2_header;
1911        else
1912            session->build_header = l2tp_build_l2tpv3_header;
1913
1914        l2tp_session_set_header_len(session, tunnel->version);
1915
1916        /* Bump the reference count. The session context is deleted
1917         * only when this drops to zero.
1918         */
1919        l2tp_session_inc_refcount(session);
1920        l2tp_tunnel_inc_refcount(tunnel);
1921
1922        /* Ensure tunnel socket isn't deleted */
1923        sock_hold(tunnel->sock);
1924
1925        /* Add session to the tunnel's hash list */
1926        write_lock_bh(&tunnel->hlist_lock);
1927        hlist_add_head(&session->hlist,
1928                   l2tp_session_id_hash(tunnel, session_id));
1929        write_unlock_bh(&tunnel->hlist_lock);
1930
1931        /* And to the global session list if L2TPv3 */
1932        if (tunnel->version != L2TP_HDR_VER_2) {
1933            struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1934
1935            spin_lock_bh(&pn->l2tp_session_hlist_lock);
1936            hlist_add_head_rcu(&session->global_hlist,
1937                       l2tp_session_id_hash_2(pn, session_id));
1938            spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1939        }
1940
1941        /* Don't count the management session (ID 0) in the session count */
1942        if (session->session_id != 0)
1943            atomic_inc(&l2tp_session_count);
1944    }
1945
1946    return session;
1947}
1948EXPORT_SYMBOL_GPL(l2tp_session_create);
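
/* Illustration only: a sketch of how a pseudowire driver might call
 * l2tp_session_create(), reserving room for its own per-session data via
 * priv_size and fetching it back with l2tp_session_priv(). The private struct
 * and the session IDs below are made up for the example.
 */
struct example_pw_priv {
	int some_state;
};

static struct l2tp_session *example_session_create(struct l2tp_tunnel *tunnel,
						   struct l2tp_session_cfg *cfg)
{
	struct l2tp_session *session;
	struct example_pw_priv *priv;

	session = l2tp_session_create(sizeof(*priv), tunnel, 1, 1, cfg);
	if (session == NULL)
		return NULL;

	priv = l2tp_session_priv(session);
	priv->some_state = 0;

	return session;
}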
1949
1950/*****************************************************************************
1951 * Init and cleanup
1952 *****************************************************************************/
1953
1954static __net_init int l2tp_init_net(struct net *net)
1955{
1956    struct l2tp_net *pn = net_generic(net, l2tp_net_id);
1957    int hash;
1958
1959    INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1960    spin_lock_init(&pn->l2tp_tunnel_list_lock);
1961
1962    for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1963        INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1964
1965    spin_lock_init(&pn->l2tp_session_hlist_lock);
1966
1967    return 0;
1968}
1969
1970static __net_exit void l2tp_exit_net(struct net *net)
1971{
1972    struct l2tp_net *pn = l2tp_pernet(net);
1973    struct l2tp_tunnel *tunnel = NULL;
1974
1975    rcu_read_lock_bh();
1976    list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
1977        (void)l2tp_tunnel_delete(tunnel);
1978    }
1979    rcu_read_unlock_bh();
1980}
1981
1982static struct pernet_operations l2tp_net_ops = {
1983    .init = l2tp_init_net,
1984    .exit = l2tp_exit_net,
1985    .id = &l2tp_net_id,
1986    .size = sizeof(struct l2tp_net),
1987};
1988
1989static int __init l2tp_init(void)
1990{
1991    int rc = 0;
1992
1993    rc = register_pernet_device(&l2tp_net_ops);
1994    if (rc)
1995        goto out;
1996
1997    l2tp_wq = alloc_workqueue("l2tp", WQ_UNBOUND, 0);
1998    if (!l2tp_wq) {
1999        pr_err("alloc_workqueue failed\n");
2000        rc = -ENOMEM;
2001        goto out;
2002    }
2003
2004    pr_info("L2TP core driver, %s\n", L2TP_DRV_VERSION);
2005
2006out:
2007    return rc;
2008}
2009
2010static void __exit l2tp_exit(void)
2011{
2012    unregister_pernet_device(&l2tp_net_ops);
2013    if (l2tp_wq) {
2014        destroy_workqueue(l2tp_wq);
2015        l2tp_wq = NULL;
2016    }
2017}
2018
2019module_init(l2tp_init);
2020module_exit(l2tp_exit);
2021
2022MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
2023MODULE_DESCRIPTION("L2TP core");
2024MODULE_LICENSE("GPL");
2025MODULE_VERSION(L2TP_DRV_VERSION);
2026
2027
