/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>

/* Count of sockets bound to HCI_CHANNEL_MONITOR; traffic is only
 * mirrored to the monitor channel while this is non-zero.
 */
static atomic_t monitor_promisc = ATOMIC_INIT(0);
35
36/* ----- HCI socket interface ----- */
37
/* Test bit @nr in the bitmap at @addr.
 *
 * The filter masks here are arrays of __u32 words, so the word index
 * is nr / 32 and the bit index is nr % 32. Returns non-zero when the
 * bit is set. The bitmap is only read, never written, hence the
 * const-qualified pointer (the original signature dropped const for
 * no reason, forcing callers to pass read-only tables as mutable).
 */
static inline int hci_test_bit(int nr, const void *addr)
{
    return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
42
/* Security filter: whitelist of packet types, events and commands a
 * raw socket may use without CAP_NET_RAW. The masks are bitmaps of
 * __u32 words indexed via hci_test_bit(); the command tables are
 * indexed by OGF, with one bitmap of OCF bits per opcode group.
 */
static struct hci_sec_filter hci_sec_filter = {
    /* Packet types */
    0x10,
    /* Events */
    { 0x1000d9fe, 0x0000b00c },
    /* Commands */
    {
        { 0x0 },
        /* OGF_LINK_CTL */
        { 0xbe000006, 0x00000001, 0x00000000, 0x00 },
        /* OGF_LINK_POLICY */
        { 0x00005200, 0x00000000, 0x00000000, 0x00 },
        /* OGF_HOST_CTL */
        { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
        /* OGF_INFO_PARAM */
        { 0x000002be, 0x00000000, 0x00000000, 0x00 },
        /* OGF_STATUS_PARAM */
        { 0x000000ea, 0x00000000, 0x00000000, 0x00 }
    }
};
64
/* Global list of all open HCI sockets, protected by its rwlock */
static struct bt_sock_list hci_sk_list = {
    .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};
68
69static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
70{
71    struct hci_filter *flt;
72    int flt_type, flt_event;
73
74    /* Apply filter */
75    flt = &hci_pi(sk)->filter;
76
77    if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
78        flt_type = 0;
79    else
80        flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
81
82    if (!test_bit(flt_type, &flt->type_mask))
83        return true;
84
85    /* Extra filter for event packets only */
86    if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
87        return false;
88
89    flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
90
91    if (!hci_test_bit(flt_event, &flt->event_mask))
92        return true;
93
94    /* Check filter only when opcode is set */
95    if (!flt->opcode)
96        return false;
97
98    if (flt_event == HCI_EV_CMD_COMPLETE &&
99        flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
100        return true;
101
102    if (flt_event == HCI_EV_CMD_STATUS &&
103        flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
104        return true;
105
106    return false;
107}
108
109/* Send frame to RAW socket */
110void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
111{
112    struct sock *sk;
113    struct sk_buff *skb_copy = NULL;
114
115    BT_DBG("hdev %p len %d", hdev, skb->len);
116
117    read_lock(&hci_sk_list.lock);
118
119    sk_for_each(sk, &hci_sk_list.head) {
120        struct sk_buff *nskb;
121
122        if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
123            continue;
124
125        /* Don't send frame to the socket it came from */
126        if (skb->sk == sk)
127            continue;
128
129        if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
130            if (is_filtered_packet(sk, skb))
131                continue;
132        } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
133            if (!bt_cb(skb)->incoming)
134                continue;
135            if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
136                bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
137                bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
138                continue;
139        } else {
140            /* Don't send frame to other channel types */
141            continue;
142        }
143
144        if (!skb_copy) {
145            /* Create a private copy with headroom */
146            skb_copy = __pskb_copy(skb, 1, GFP_ATOMIC);
147            if (!skb_copy)
148                continue;
149
150            /* Put type byte before the data */
151            memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
152        }
153
154        nskb = skb_clone(skb_copy, GFP_ATOMIC);
155        if (!nskb)
156            continue;
157
158        if (sock_queue_rcv_skb(sk, nskb))
159            kfree_skb(nskb);
160    }
161
162    read_unlock(&hci_sk_list.lock);
163
164    kfree_skb(skb_copy);
165}
166
167/* Send frame to control socket */
168void hci_send_to_control(struct sk_buff *skb, struct sock *skip_sk)
169{
170    struct sock *sk;
171
172    BT_DBG("len %d", skb->len);
173
174    read_lock(&hci_sk_list.lock);
175
176    sk_for_each(sk, &hci_sk_list.head) {
177        struct sk_buff *nskb;
178
179        /* Skip the original socket */
180        if (sk == skip_sk)
181            continue;
182
183        if (sk->sk_state != BT_BOUND)
184            continue;
185
186        if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL)
187            continue;
188
189        nskb = skb_clone(skb, GFP_ATOMIC);
190        if (!nskb)
191            continue;
192
193        if (sock_queue_rcv_skb(sk, nskb))
194            kfree_skb(nskb);
195    }
196
197    read_unlock(&hci_sk_list.lock);
198}
199
200/* Send frame to monitor socket */
201void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
202{
203    struct sock *sk;
204    struct sk_buff *skb_copy = NULL;
205    __le16 opcode;
206
207    if (!atomic_read(&monitor_promisc))
208        return;
209
210    BT_DBG("hdev %p len %d", hdev, skb->len);
211
212    switch (bt_cb(skb)->pkt_type) {
213    case HCI_COMMAND_PKT:
214        opcode = __constant_cpu_to_le16(HCI_MON_COMMAND_PKT);
215        break;
216    case HCI_EVENT_PKT:
217        opcode = __constant_cpu_to_le16(HCI_MON_EVENT_PKT);
218        break;
219    case HCI_ACLDATA_PKT:
220        if (bt_cb(skb)->incoming)
221            opcode = __constant_cpu_to_le16(HCI_MON_ACL_RX_PKT);
222        else
223            opcode = __constant_cpu_to_le16(HCI_MON_ACL_TX_PKT);
224        break;
225    case HCI_SCODATA_PKT:
226        if (bt_cb(skb)->incoming)
227            opcode = __constant_cpu_to_le16(HCI_MON_SCO_RX_PKT);
228        else
229            opcode = __constant_cpu_to_le16(HCI_MON_SCO_TX_PKT);
230        break;
231    default:
232        return;
233    }
234
235    read_lock(&hci_sk_list.lock);
236
237    sk_for_each(sk, &hci_sk_list.head) {
238        struct sk_buff *nskb;
239
240        if (sk->sk_state != BT_BOUND)
241            continue;
242
243        if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
244            continue;
245
246        if (!skb_copy) {
247            struct hci_mon_hdr *hdr;
248
249            /* Create a private copy with headroom */
250            skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
251                           GFP_ATOMIC);
252            if (!skb_copy)
253                continue;
254
255            /* Put header before the data */
256            hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
257            hdr->opcode = opcode;
258            hdr->index = cpu_to_le16(hdev->id);
259            hdr->len = cpu_to_le16(skb->len);
260        }
261
262        nskb = skb_clone(skb_copy, GFP_ATOMIC);
263        if (!nskb)
264            continue;
265
266        if (sock_queue_rcv_skb(sk, nskb))
267            kfree_skb(nskb);
268    }
269
270    read_unlock(&hci_sk_list.lock);
271
272    kfree_skb(skb_copy);
273}
274
275static void send_monitor_event(struct sk_buff *skb)
276{
277    struct sock *sk;
278
279    BT_DBG("len %d", skb->len);
280
281    read_lock(&hci_sk_list.lock);
282
283    sk_for_each(sk, &hci_sk_list.head) {
284        struct sk_buff *nskb;
285
286        if (sk->sk_state != BT_BOUND)
287            continue;
288
289        if (hci_pi(sk)->channel != HCI_CHANNEL_MONITOR)
290            continue;
291
292        nskb = skb_clone(skb, GFP_ATOMIC);
293        if (!nskb)
294            continue;
295
296        if (sock_queue_rcv_skb(sk, nskb))
297            kfree_skb(nskb);
298    }
299
300    read_unlock(&hci_sk_list.lock);
301}
302
/* Build a monitor event skb for a device lifecycle @event.
 *
 * Only HCI_DEV_REG (NEW_INDEX, carrying the device description) and
 * HCI_DEV_UNREG (DEL_INDEX, empty payload) are translated; any other
 * event yields NULL. Returns NULL on allocation failure as well.
 * The caller owns the returned skb.
 */
static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
    struct hci_mon_hdr *hdr;
    struct hci_mon_new_index *ni;
    struct sk_buff *skb;
    __le16 opcode;

    switch (event) {
    case HCI_DEV_REG:
        skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
        if (!skb)
            return NULL;

        /* Describe the new controller to the monitor */
        ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
        ni->type = hdev->dev_type;
        ni->bus = hdev->bus;
        bacpy(&ni->bdaddr, &hdev->bdaddr);
        /* Fixed 8-byte name field in the wire format; not
         * necessarily NUL-terminated.
         */
        memcpy(ni->name, hdev->name, 8);

        opcode = __constant_cpu_to_le16(HCI_MON_NEW_INDEX);
        break;

    case HCI_DEV_UNREG:
        skb = bt_skb_alloc(0, GFP_ATOMIC);
        if (!skb)
            return NULL;

        opcode = __constant_cpu_to_le16(HCI_MON_DEL_INDEX);
        break;

    default:
        return NULL;
    }

    __net_timestamp(skb);

    /* Monitor header goes in front; len covers the payload only */
    hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
    hdr->opcode = opcode;
    hdr->index = cpu_to_le16(hdev->id);
    hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

    return skb;
}
346
347static void send_monitor_replay(struct sock *sk)
348{
349    struct hci_dev *hdev;
350
351    read_lock(&hci_dev_list_lock);
352
353    list_for_each_entry(hdev, &hci_dev_list, list) {
354        struct sk_buff *skb;
355
356        skb = create_monitor_event(hdev, HCI_DEV_REG);
357        if (!skb)
358            continue;
359
360        if (sock_queue_rcv_skb(sk, skb))
361            kfree_skb(skb);
362    }
363
364    read_unlock(&hci_dev_list_lock);
365}
366
367/* Generate internal stack event */
368static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
369{
370    struct hci_event_hdr *hdr;
371    struct hci_ev_stack_internal *ev;
372    struct sk_buff *skb;
373
374    skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
375    if (!skb)
376        return;
377
378    hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
379    hdr->evt = HCI_EV_STACK_INTERNAL;
380    hdr->plen = sizeof(*ev) + dlen;
381
382    ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
383    ev->type = type;
384    memcpy(ev->data, data, dlen);
385
386    bt_cb(skb)->incoming = 1;
387    __net_timestamp(skb);
388
389    bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
390    hci_send_to_sock(hdev, skb);
391    kfree_skb(skb);
392}
393
/* Propagate a device lifecycle @event (register/unregister/up/down)
 * to userspace: mirror it to monitor sockets, raise a stack internal
 * event, and on unregister detach every socket bound to @hdev.
 */
void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
    struct hci_ev_si_device ev;

    BT_DBG("hdev %s event %d", hdev->name, event);

    /* Send event to monitor */
    if (atomic_read(&monitor_promisc)) {
        struct sk_buff *skb;

        skb = create_monitor_event(hdev, event);
        if (skb) {
            send_monitor_event(skb);
            kfree_skb(skb);
        }
    }

    /* Send event to sockets */
    ev.event = event;
    ev.dev_id = hdev->id;
    hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);

    if (event == HCI_DEV_UNREG) {
        struct sock *sk;

        /* Detach sockets from device */
        read_lock(&hci_sk_list.lock);
        sk_for_each(sk, &hci_sk_list.head) {
            /* _nested: sk locks taken under the list lock */
            bh_lock_sock_nested(sk);
            if (hci_pi(sk)->hdev == hdev) {
                /* Wake readers with EPIPE and drop the
                 * device reference taken at bind time.
                 */
                hci_pi(sk)->hdev = NULL;
                sk->sk_err = EPIPE;
                sk->sk_state = BT_OPEN;
                sk->sk_state_change(sk);

                hci_dev_put(hdev);
            }
            bh_unlock_sock(sk);
        }
        read_unlock(&hci_sk_list.lock);
    }
}
436
/* Release an HCI socket: undo channel bookkeeping, hand a user
 * channel device back to the mgmt interface, drop the device
 * reference and free queued skbs. Always returns 0.
 */
static int hci_sock_release(struct socket *sock)
{
    struct sock *sk = sock->sk;
    struct hci_dev *hdev;

    BT_DBG("sock %p sk %p", sock, sk);

    if (!sk)
        return 0;

    hdev = hci_pi(sk)->hdev;

    /* Balance the atomic_inc() done when binding to
     * HCI_CHANNEL_MONITOR.
     */
    if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
        atomic_dec(&monitor_promisc);

    bt_sock_unlink(&hci_sk_list, sk);

    if (hdev) {
        if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
            /* Give the device back to the mgmt interface
             * and shut it down; bind had removed it.
             */
            mgmt_index_added(hdev);
            clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
            hci_dev_close(hdev->id);
        }

        atomic_dec(&hdev->promisc);
        hci_dev_put(hdev);
    }

    sock_orphan(sk);

    skb_queue_purge(&sk->sk_receive_queue);
    skb_queue_purge(&sk->sk_write_queue);

    sock_put(sk);
    return 0;
}
473
474static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
475{
476    bdaddr_t bdaddr;
477    int err;
478
479    if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
480        return -EFAULT;
481
482    hci_dev_lock(hdev);
483
484    err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
485
486    hci_dev_unlock(hdev);
487
488    return err;
489}
490
491static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
492{
493    bdaddr_t bdaddr;
494    int err;
495
496    if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
497        return -EFAULT;
498
499    hci_dev_lock(hdev);
500
501    err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
502
503    hci_dev_unlock(hdev);
504
505    return err;
506}
507
/* Ioctls that require bound socket.
 *
 * Called with the socket lock held. Rejects sockets without an
 * attached device, devices claimed by a user channel, and non-BR/EDR
 * controllers before dispatching the command. Returns -ENOIOCTLCMD
 * for commands not handled here.
 */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
                unsigned long arg)
{
    struct hci_dev *hdev = hci_pi(sk)->hdev;

    if (!hdev)
        return -EBADFD;

    if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
        return -EBUSY;

    if (hdev->dev_type != HCI_BREDR)
        return -EOPNOTSUPP;

    switch (cmd) {
    case HCISETRAW:
        /* Toggling raw mode bypasses the core's command
         * handling, so it is doubly privileged: CAP_NET_ADMIN
         * and a driver that does not force raw-only operation.
         */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
            return -EPERM;

        if (arg)
            set_bit(HCI_RAW, &hdev->flags);
        else
            clear_bit(HCI_RAW, &hdev->flags);

        return 0;

    case HCIGETCONNINFO:
        return hci_get_conn_info(hdev, (void __user *) arg);

    case HCIGETAUTHINFO:
        return hci_get_auth_info(hdev, (void __user *) arg);

    case HCIBLOCKADDR:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        return hci_sock_blacklist_add(hdev, (void __user *) arg);

    case HCIUNBLOCKADDR:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        return hci_sock_blacklist_del(hdev, (void __user *) arg);
    }

    return -ENOIOCTLCMD;
}
557
/* Top-level ioctl handler for HCI sockets (raw channel only).
 *
 * Device-independent and potentially blocking commands are handled
 * after dropping the socket lock; anything unrecognized is retried
 * as a bound-socket ioctl with the lock re-taken.
 */
static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
              unsigned long arg)
{
    void __user *argp = (void __user *) arg;
    struct sock *sk = sock->sk;
    int err;

    BT_DBG("cmd %x arg %lx", cmd, arg);

    lock_sock(sk);

    /* Ioctls are a raw-channel-only interface */
    if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
        err = -EBADFD;
        goto done;
    }

    /* NOTE(review): the lock is dropped here so the commands below
     * may sleep; the channel check above is not re-validated after
     * re-locking — confirm no re-bind can race in between.
     */
    release_sock(sk);

    switch (cmd) {
    case HCIGETDEVLIST:
        return hci_get_dev_list(argp);

    case HCIGETDEVINFO:
        return hci_get_dev_info(argp);

    case HCIGETCONNLIST:
        return hci_get_conn_list(argp);

    case HCIDEVUP:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        return hci_dev_open(arg);

    case HCIDEVDOWN:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        return hci_dev_close(arg);

    case HCIDEVRESET:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        return hci_dev_reset(arg);

    case HCIDEVRESTAT:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        return hci_dev_reset_stat(arg);

    case HCISETSCAN:
    case HCISETAUTH:
    case HCISETENCRYPT:
    case HCISETPTYPE:
    case HCISETLINKPOL:
    case HCISETLINKMODE:
    case HCISETACLMTU:
    case HCISETSCOMTU:
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        return hci_dev_cmd(cmd, argp);

    case HCIINQUIRY:
        return hci_inquiry(argp);
    }

    /* Everything else needs the socket's bound device */
    lock_sock(sk);

    err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
    release_sock(sk);
    return err;
}
630
/* Bind an HCI socket to a channel and (for raw/user channels) to a
 * device. Raw may bind to HCI_DEV_NONE for an unbound promiscuous
 * socket; user channel takes exclusive control of a closed device;
 * control and monitor are device-less. Returns 0 or a negative errno.
 */
static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
             int addr_len)
{
    struct sockaddr_hci haddr;
    struct sock *sk = sock->sk;
    struct hci_dev *hdev = NULL;
    int len, err = 0;

    BT_DBG("sock %p sk %p", sock, sk);

    if (!addr)
        return -EINVAL;

    /* Copy at most sizeof(haddr); shorter addresses leave the
     * remaining fields zeroed by the memset.
     */
    memset(&haddr, 0, sizeof(haddr));
    len = min_t(unsigned int, sizeof(haddr), addr_len);
    memcpy(&haddr, addr, len);

    if (haddr.hci_family != AF_BLUETOOTH)
        return -EINVAL;

    lock_sock(sk);

    if (sk->sk_state == BT_BOUND) {
        err = -EALREADY;
        goto done;
    }

    switch (haddr.hci_channel) {
    case HCI_CHANNEL_RAW:
        if (hci_pi(sk)->hdev) {
            err = -EALREADY;
            goto done;
        }

        if (haddr.hci_dev != HCI_DEV_NONE) {
            /* Reference dropped in release/dev_event */
            hdev = hci_dev_get(haddr.hci_dev);
            if (!hdev) {
                err = -ENODEV;
                goto done;
            }

            atomic_inc(&hdev->promisc);
        }

        hci_pi(sk)->hdev = hdev;
        break;

    case HCI_CHANNEL_USER:
        if (hci_pi(sk)->hdev) {
            err = -EALREADY;
            goto done;
        }

        /* User channel needs a concrete device */
        if (haddr.hci_dev == HCI_DEV_NONE) {
            err = -EINVAL;
            goto done;
        }

        if (!capable(CAP_NET_ADMIN)) {
            err = -EPERM;
            goto done;
        }

        hdev = hci_dev_get(haddr.hci_dev);
        if (!hdev) {
            err = -ENODEV;
            goto done;
        }

        /* Only a fully closed, set-up device can be claimed */
        if (test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            test_bit(HCI_SETUP, &hdev->dev_flags)) {
            err = -EBUSY;
            hci_dev_put(hdev);
            goto done;
        }

        /* Exclusive: only one user channel per device */
        if (test_and_set_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
            err = -EUSERS;
            hci_dev_put(hdev);
            goto done;
        }

        /* Hide the device from the mgmt interface while the
         * user channel owns it; release undoes this.
         */
        mgmt_index_removed(hdev);

        err = hci_dev_open(hdev->id);
        if (err) {
            clear_bit(HCI_USER_CHANNEL, &hdev->dev_flags);
            hci_dev_put(hdev);
            goto done;
        }

        atomic_inc(&hdev->promisc);

        hci_pi(sk)->hdev = hdev;
        break;

    case HCI_CHANNEL_CONTROL:
        if (haddr.hci_dev != HCI_DEV_NONE) {
            err = -EINVAL;
            goto done;
        }

        if (!capable(CAP_NET_ADMIN)) {
            err = -EPERM;
            goto done;
        }

        break;

    case HCI_CHANNEL_MONITOR:
        if (haddr.hci_dev != HCI_DEV_NONE) {
            err = -EINVAL;
            goto done;
        }

        if (!capable(CAP_NET_RAW)) {
            err = -EPERM;
            goto done;
        }

        /* Catch the new monitor up on existing controllers */
        send_monitor_replay(sk);

        atomic_inc(&monitor_promisc);
        break;

    default:
        err = -EINVAL;
        goto done;
    }


    hci_pi(sk)->channel = haddr.hci_channel;
    sk->sk_state = BT_BOUND;

done:
    release_sock(sk);
    return err;
}
770
771static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
772                int *addr_len, int peer)
773{
774    struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
775    struct sock *sk = sock->sk;
776    struct hci_dev *hdev;
777    int err = 0;
778
779    BT_DBG("sock %p sk %p", sock, sk);
780
781    if (peer)
782        return -EOPNOTSUPP;
783
784    lock_sock(sk);
785
786    hdev = hci_pi(sk)->hdev;
787    if (!hdev) {
788        err = -EBADFD;
789        goto done;
790    }
791
792    *addr_len = sizeof(*haddr);
793    haddr->hci_family = AF_BLUETOOTH;
794    haddr->hci_dev = hdev->id;
795    haddr->hci_channel= hci_pi(sk)->channel;
796
797done:
798    release_sock(sk);
799    return err;
800}
801
/* Attach the ancillary data (direction and/or timestamp) that the
 * socket opted into via HCI_DATA_DIR / HCI_TIME_STAMP to @msg.
 */
static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
              struct sk_buff *skb)
{
    __u32 mask = hci_pi(sk)->cmsg_mask;

    if (mask & HCI_CMSG_DIR) {
        int incoming = bt_cb(skb)->incoming;
        put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
             &incoming);
    }

    if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
        struct compat_timeval ctv;
#endif
        struct timeval tv;
        void *data;
        int len;

        skb_get_timestamp(skb, &tv);

        data = &tv;
        len = sizeof(tv);
#ifdef CONFIG_COMPAT
        /* 32-bit callers on a 64-bit kernel expect the compat
         * timeval layout unless they use 64-bit time.
         */
        if (!COMPAT_USE_64BIT_TIME &&
            (msg->msg_flags & MSG_CMSG_COMPAT)) {
            ctv.tv_sec = tv.tv_sec;
            ctv.tv_usec = tv.tv_usec;
            data = &ctv;
            len = sizeof(ctv);
        }
#endif

        put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
    }
}
838
839static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
840                struct msghdr *msg, size_t len, int flags)
841{
842    int noblock = flags & MSG_DONTWAIT;
843    struct sock *sk = sock->sk;
844    struct sk_buff *skb;
845    int copied, err;
846
847    BT_DBG("sock %p, sk %p", sock, sk);
848
849    if (flags & (MSG_OOB))
850        return -EOPNOTSUPP;
851
852    if (sk->sk_state == BT_CLOSED)
853        return 0;
854
855    skb = skb_recv_datagram(sk, flags, noblock, &err);
856    if (!skb)
857        return err;
858
859    copied = skb->len;
860    if (len < copied) {
861        msg->msg_flags |= MSG_TRUNC;
862        copied = len;
863    }
864
865    skb_reset_transport_header(skb);
866    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
867
868    switch (hci_pi(sk)->channel) {
869    case HCI_CHANNEL_RAW:
870        hci_sock_cmsg(sk, msg, skb);
871        break;
872    case HCI_CHANNEL_USER:
873    case HCI_CHANNEL_CONTROL:
874    case HCI_CHANNEL_MONITOR:
875        sock_recv_timestamp(msg, sk, skb);
876        break;
877    }
878
879    skb_free_datagram(sk, skb);
880
881    return err ? : copied;
882}
883
/* Send one frame on an HCI socket.
 *
 * Control channel messages go to mgmt; monitor is read-only. For
 * raw/user channels the first payload byte is the packet type, then:
 * user channel frames and raw-mode/vendor commands go straight to
 * the raw queue, other commands go through the command work queue,
 * and non-command raw traffic requires CAP_NET_RAW. Returns @len on
 * success or a negative errno.
 */
static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                struct msghdr *msg, size_t len)
{
    struct sock *sk = sock->sk;
    struct hci_dev *hdev;
    struct sk_buff *skb;
    int err;

    BT_DBG("sock %p sk %p", sock, sk);

    if (msg->msg_flags & MSG_OOB)
        return -EOPNOTSUPP;

    if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
        return -EINVAL;

    /* Minimum: type byte plus a command/data header */
    if (len < 4 || len > HCI_MAX_FRAME_SIZE)
        return -EINVAL;

    lock_sock(sk);

    switch (hci_pi(sk)->channel) {
    case HCI_CHANNEL_RAW:
    case HCI_CHANNEL_USER:
        break;
    case HCI_CHANNEL_CONTROL:
        err = mgmt_control(sk, msg, len);
        goto done;
    case HCI_CHANNEL_MONITOR:
        /* Monitor sockets are receive-only */
        err = -EOPNOTSUPP;
        goto done;
    default:
        err = -EINVAL;
        goto done;
    }

    hdev = hci_pi(sk)->hdev;
    if (!hdev) {
        err = -EBADFD;
        goto done;
    }

    if (!test_bit(HCI_UP, &hdev->flags)) {
        err = -ENETDOWN;
        goto done;
    }

    /* On failure err is filled in by bt_skb_send_alloc() */
    skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
    if (!skb)
        goto done;

    if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
        err = -EFAULT;
        goto drop;
    }

    /* First byte is the H:4 packet type indicator */
    bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
    skb_pull(skb, 1);

    if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
        /* No permission check is needed for user channel
         * since that gets enforced when binding the socket.
         *
         * However check that the packet type is valid.
         */
        if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
            bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
            bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
            err = -EINVAL;
            goto drop;
        }

        skb_queue_tail(&hdev->raw_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);
    } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
        u16 opcode = get_unaligned_le16(skb->data);
        u16 ogf = hci_opcode_ogf(opcode);
        u16 ocf = hci_opcode_ocf(opcode);

        /* Commands outside the security filter whitelist
         * require CAP_NET_RAW.
         */
        if (((ogf > HCI_SFLT_MAX_OGF) ||
             !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
                   &hci_sec_filter.ocf_mask[ogf])) &&
            !capable(CAP_NET_RAW)) {
            err = -EPERM;
            goto drop;
        }

        /* Raw mode and vendor commands (OGF 0x3f) bypass the
         * core's command machinery.
         */
        if (test_bit(HCI_RAW, &hdev->flags) || (ogf == 0x3f)) {
            skb_queue_tail(&hdev->raw_q, skb);
            queue_work(hdev->workqueue, &hdev->tx_work);
        } else {
            /* Stand-alone HCI commands must be flaged as
             * single-command requests.
             */
            bt_cb(skb)->req.start = true;

            skb_queue_tail(&hdev->cmd_q, skb);
            queue_work(hdev->workqueue, &hdev->cmd_work);
        }
    } else {
        if (!capable(CAP_NET_RAW)) {
            err = -EPERM;
            goto drop;
        }

        skb_queue_tail(&hdev->raw_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);
    }

    err = len;

done:
    release_sock(sk);
    return err;

drop:
    kfree_skb(skb);
    goto done;
}
1003
/* Set socket options on the raw channel: direction cmsg, timestamp
 * cmsg, or the packet filter. For HCI_FILTER, the user structure is
 * pre-seeded from the current filter so a short write from userspace
 * leaves the untouched fields at their present values; unprivileged
 * callers are clamped to the security filter.
 */
static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
                   char __user *optval, unsigned int len)
{
    struct hci_ufilter uf = { .opcode = 0 };
    struct sock *sk = sock->sk;
    int err = 0, opt = 0;

    BT_DBG("sk %p, opt %d", sk, optname);

    lock_sock(sk);

    /* Options only exist on the raw channel */
    if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
        err = -EBADFD;
        goto done;
    }

    switch (optname) {
    case HCI_DATA_DIR:
        if (get_user(opt, (int __user *)optval)) {
            err = -EFAULT;
            break;
        }

        if (opt)
            hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
        else
            hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
        break;

    case HCI_TIME_STAMP:
        if (get_user(opt, (int __user *)optval)) {
            err = -EFAULT;
            break;
        }

        if (opt)
            hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
        else
            hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
        break;

    case HCI_FILTER:
        {
            struct hci_filter *f = &hci_pi(sk)->filter;

            /* Defaults for fields a short copy_from_user
             * below will not overwrite.
             */
            uf.type_mask = f->type_mask;
            uf.opcode = f->opcode;
            uf.event_mask[0] = *((u32 *) f->event_mask + 0);
            uf.event_mask[1] = *((u32 *) f->event_mask + 1);
        }

        len = min_t(unsigned int, len, sizeof(uf));
        if (copy_from_user(&uf, optval, len)) {
            err = -EFAULT;
            break;
        }

        /* Unprivileged sockets cannot widen beyond the
         * security filter.
         */
        if (!capable(CAP_NET_RAW)) {
            uf.type_mask &= hci_sec_filter.type_mask;
            uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
            uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
        }

        {
            struct hci_filter *f = &hci_pi(sk)->filter;

            f->type_mask = uf.type_mask;
            f->opcode = uf.opcode;
            *((u32 *) f->event_mask + 0) = uf.event_mask[0];
            *((u32 *) f->event_mask + 1) = uf.event_mask[1];
        }
        break;

    default:
        err = -ENOPROTOOPT;
        break;
    }

done:
    release_sock(sk);
    return err;
}
1086
1087static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
1088                   char __user *optval, int __user *optlen)
1089{
1090    struct hci_ufilter uf;
1091    struct sock *sk = sock->sk;
1092    int len, opt, err = 0;
1093
1094    BT_DBG("sk %p, opt %d", sk, optname);
1095
1096    if (get_user(len, optlen))
1097        return -EFAULT;
1098
1099    lock_sock(sk);
1100
1101    if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
1102        err = -EBADFD;
1103        goto done;
1104    }
1105
1106    switch (optname) {
1107    case HCI_DATA_DIR:
1108        if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
1109            opt = 1;
1110        else
1111            opt = 0;
1112
1113        if (put_user(opt, optval))
1114            err = -EFAULT;
1115        break;
1116
1117    case HCI_TIME_STAMP:
1118        if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
1119            opt = 1;
1120        else
1121            opt = 0;
1122
1123        if (put_user(opt, optval))
1124            err = -EFAULT;
1125        break;
1126
1127    case HCI_FILTER:
1128        {
1129            struct hci_filter *f = &hci_pi(sk)->filter;
1130
1131            memset(&uf, 0, sizeof(uf));
1132            uf.type_mask = f->type_mask;
1133            uf.opcode = f->opcode;
1134            uf.event_mask[0] = *((u32 *) f->event_mask + 0);
1135            uf.event_mask[1] = *((u32 *) f->event_mask + 1);
1136        }
1137
1138        len = min_t(unsigned int, len, sizeof(uf));
1139        if (copy_to_user(optval, &uf, len))
1140            err = -EFAULT;
1141        break;
1142
1143    default:
1144        err = -ENOPROTOOPT;
1145        break;
1146    }
1147
1148done:
1149    release_sock(sk);
1150    return err;
1151}
1152
/* Socket operations for AF_BLUETOOTH/BTPROTO_HCI sockets; the
 * connection-oriented entry points are stubbed with sock_no_*.
 */
static const struct proto_ops hci_sock_ops = {
    .family = PF_BLUETOOTH,
    .owner = THIS_MODULE,
    .release = hci_sock_release,
    .bind = hci_sock_bind,
    .getname = hci_sock_getname,
    .sendmsg = hci_sock_sendmsg,
    .recvmsg = hci_sock_recvmsg,
    .ioctl = hci_sock_ioctl,
    .poll = datagram_poll,
    .listen = sock_no_listen,
    .shutdown = sock_no_shutdown,
    .setsockopt = hci_sock_setsockopt,
    .getsockopt = hci_sock_getsockopt,
    .connect = sock_no_connect,
    .socketpair = sock_no_socketpair,
    .accept = sock_no_accept,
    .mmap = sock_no_mmap
};
1172
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for
 * the per-socket struct hci_pinfo.
 */
static struct proto hci_sk_proto = {
    .name = "HCI",
    .owner = THIS_MODULE,
    .obj_size = sizeof(struct hci_pinfo)
};
1178
1179static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
1180               int kern)
1181{
1182    struct sock *sk;
1183
1184    BT_DBG("sock %p", sock);
1185
1186    if (sock->type != SOCK_RAW)
1187        return -ESOCKTNOSUPPORT;
1188
1189    sock->ops = &hci_sock_ops;
1190
1191    sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto);
1192    if (!sk)
1193        return -ENOMEM;
1194
1195    sock_init_data(sock, sk);
1196
1197    sock_reset_flag(sk, SOCK_ZAPPED);
1198
1199    sk->sk_protocol = protocol;
1200
1201    sock->state = SS_UNCONNECTED;
1202    sk->sk_state = BT_OPEN;
1203
1204    bt_sock_link(&hci_sk_list, sk);
1205    return 0;
1206}
1207
/* Hooked into the AF_BLUETOOTH family for BTPROTO_HCI */
static const struct net_proto_family hci_sock_family_ops = {
    .family = PF_BLUETOOTH,
    .owner = THIS_MODULE,
    .create = hci_sock_create,
};
1213
1214int __init hci_sock_init(void)
1215{
1216    int err;
1217
1218    err = proto_register(&hci_sk_proto, 0);
1219    if (err < 0)
1220        return err;
1221
1222    err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
1223    if (err < 0) {
1224        BT_ERR("HCI socket registration failed");
1225        goto error;
1226    }
1227
1228    err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
1229    if (err < 0) {
1230        BT_ERR("Failed to create HCI proc file");
1231        bt_sock_unregister(BTPROTO_HCI);
1232        goto error;
1233    }
1234
1235    BT_INFO("HCI socket layer initialized");
1236
1237    return 0;
1238
1239error:
1240    proto_unregister(&hci_sk_proto);
1241    return err;
1242}
1243
/* Tear down hci_sock_init() registrations in reverse order */
void hci_sock_cleanup(void)
{
    bt_procfs_cleanup(&init_net, "hci");
    bt_sock_unregister(BTPROTO_HCI);
    proto_unregister(&hci_sk_proto);
}
1250
