Root/
1 | /* |
2 | BlueZ - Bluetooth protocol stack for Linux |
3 | Copyright (C) 2000-2001 Qualcomm Incorporated |
4 | Copyright (C) 2011 ProFUSION Embedded Systems |
5 | |
6 | Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> |
7 | |
8 | This program is free software; you can redistribute it and/or modify |
9 | it under the terms of the GNU General Public License version 2 as |
10 | published by the Free Software Foundation; |
11 | |
12 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
13 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
14 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. |
15 | IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY |
16 | CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES |
17 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
18 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
19 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
20 | |
21 | ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, |
22 | COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS |
23 | SOFTWARE IS DISCLAIMED. |
24 | */ |
25 | |
26 | /* Bluetooth HCI core. */ |
27 | |
28 | #include <linux/export.h> |
29 | #include <linux/idr.h> |
30 | #include <linux/rfkill.h> |
31 | #include <linux/debugfs.h> |
32 | #include <linux/crypto.h> |
33 | #include <asm/unaligned.h> |
34 | |
35 | #include <net/bluetooth/bluetooth.h> |
36 | #include <net/bluetooth/hci_core.h> |
37 | |
38 | #include "smp.h" |
39 | |
40 | static void hci_rx_work(struct work_struct *work); |
41 | static void hci_cmd_work(struct work_struct *work); |
42 | static void hci_tx_work(struct work_struct *work); |
43 | |
44 | /* HCI device list */ |
45 | LIST_HEAD(hci_dev_list); |
46 | DEFINE_RWLOCK(hci_dev_list_lock); |
47 | |
48 | /* HCI callback list */ |
49 | LIST_HEAD(hci_cb_list); |
50 | DEFINE_RWLOCK(hci_cb_list_lock); |
51 | |
52 | /* HCI ID Numbering */ |
53 | static DEFINE_IDA(hci_index_ida); |
54 | |
55 | /* ---- HCI notifications ---- */ |
56 | |
/* Forward a device event (register/unregister/up/down) to the HCI
 * socket layer so monitoring sockets are informed of state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61 | |
62 | /* ---- HCI debugfs entries ---- */ |
63 | |
/* debugfs read: report Device Under Test mode state as "Y\n" or "N\n". */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
75 | |
/* debugfs write: enable or disable Device Under Test mode.
 *
 * Accepts a boolean string (see strtobool()). Enabling sends
 * HCI_OP_ENABLE_DUT_MODE; disabling issues HCI_OP_RESET, since there is
 * no dedicated "disable DUT mode" command. The HCI_DUT_MODE flag is only
 * toggled after the controller reports success.
 *
 * Returns count on success or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* DUT mode requires the controller to be powered on */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete payload is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}
121 | |
/* File operations for the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
128 | |
/* debugfs "features": dump the supported LMP feature pages (and the LE
 * feature page when the controller is LE capable), one page per line.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	/* Only pages the controller actually reported (up to max_page) */
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
166 | |
/* debugfs "blacklist": list rejected BR/EDR device addresses and types. */
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
191 | |
/* debugfs "uuids": list the registered service UUIDs, one per line. */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
226 | |
/* debugfs "inquiry_cache": dump every entry of the discovery cache,
 * one remote device per line (address, scan modes, class, clock
 * offset, RSSI, SSP mode and timestamp).
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
262 | |
263 | static int link_keys_show(struct seq_file *f, void *ptr) |
264 | { |
265 | struct hci_dev *hdev = f->private; |
266 | struct list_head *p, *n; |
267 | |
268 | hci_dev_lock(hdev); |
269 | list_for_each_safe(p, n, &hdev->link_keys) { |
270 | struct link_key *key = list_entry(p, struct link_key, list); |
271 | seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type, |
272 | HCI_LINK_KEY_SIZE, key->val, key->pin_len); |
273 | } |
274 | hci_dev_unlock(hdev); |
275 | |
276 | return 0; |
277 | } |
278 | |
279 | static int link_keys_open(struct inode *inode, struct file *file) |
280 | { |
281 | return single_open(file, link_keys_show, inode->i_private); |
282 | } |
283 | |
284 | static const struct file_operations link_keys_fops = { |
285 | .open = link_keys_open, |
286 | .read = seq_read, |
287 | .llseek = seq_lseek, |
288 | .release = single_release, |
289 | }; |
290 | |
/* debugfs "dev_class": print the 3-byte Class of Device, MSB first. */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
314 | |
/* debugfs "voice_setting": read-only SCO voice setting value. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
328 | |
/* debugfs "auto_accept_delay": delay before auto-accepting an incoming
 * connection. No range validation is performed on the written value.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
353 | |
/* debugfs "ssp_debug_mode": toggle Simple Pairing debug mode on the
 * controller. Sends HCI_OP_WRITE_SSP_DEBUG_MODE and only updates the
 * cached hdev->ssp_debug_mode after the controller reports success.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* The command can only reach the controller when it is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the command complete payload is the HCI status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
402 | |
/* debugfs read: report whether Secure Connections support is forced. */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write: toggle forced Secure Connections support. Only allowed
 * while the controller is down, since the flag influences init behavior.
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};
448 | |
/* debugfs read-only: report whether Secure Connections Only mode is set. */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
466 | |
/* debugfs "idle_timeout": link idle timeout in milliseconds.
 * 0 disables the timeout; otherwise the value must be within
 * 500 ms ... 3600000 ms (one hour).
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
494 | |
/* debugfs "rpa_timeout": resolvable private address rotation period
 * in seconds, bounded to 30 s ... 24 h.
 */
static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
525 | |
/* debugfs "sniff_min_interval": must be non-zero, even, and not larger
 * than the current sniff_max_interval.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
553 | |
/* debugfs "sniff_max_interval": must be non-zero, even, and not smaller
 * than the current sniff_min_interval.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
581 | |
/* debugfs "identity": print the current identity address and type,
 * the local IRK and the current RPA.
 */
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
611 | |
/* debugfs "random_address": print the currently programmed LE random
 * address.
 */
static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
634 | |
/* debugfs "static_address": print the configured LE static address. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
657 | |
/* debugfs read: report whether use of the static address is forced. */
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write: toggle forced static address usage. Only allowed while
 * the controller is down, since the flag influences address setup.
 */
static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};
704 | |
/* debugfs "white_list": list LE white list entries (address and type). */
static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
729 | |
730 | static int identity_resolving_keys_show(struct seq_file *f, void *ptr) |
731 | { |
732 | struct hci_dev *hdev = f->private; |
733 | struct list_head *p, *n; |
734 | |
735 | hci_dev_lock(hdev); |
736 | list_for_each_safe(p, n, &hdev->identity_resolving_keys) { |
737 | struct smp_irk *irk = list_entry(p, struct smp_irk, list); |
738 | seq_printf(f, "%pMR (type %u) %*phN %pMR\n", |
739 | &irk->bdaddr, irk->addr_type, |
740 | 16, irk->val, &irk->rpa); |
741 | } |
742 | hci_dev_unlock(hdev); |
743 | |
744 | return 0; |
745 | } |
746 | |
747 | static int identity_resolving_keys_open(struct inode *inode, struct file *file) |
748 | { |
749 | return single_open(file, identity_resolving_keys_show, |
750 | inode->i_private); |
751 | } |
752 | |
753 | static const struct file_operations identity_resolving_keys_fops = { |
754 | .open = identity_resolving_keys_open, |
755 | .read = seq_read, |
756 | .llseek = seq_lseek, |
757 | .release = single_release, |
758 | }; |
759 | |
760 | static int long_term_keys_show(struct seq_file *f, void *ptr) |
761 | { |
762 | struct hci_dev *hdev = f->private; |
763 | struct list_head *p, *n; |
764 | |
765 | hci_dev_lock(hdev); |
766 | list_for_each_safe(p, n, &hdev->long_term_keys) { |
767 | struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list); |
768 | seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n", |
769 | <k->bdaddr, ltk->bdaddr_type, ltk->authenticated, |
770 | ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv), |
771 | __le64_to_cpu(ltk->rand), 16, ltk->val); |
772 | } |
773 | hci_dev_unlock(hdev); |
774 | |
775 | return 0; |
776 | } |
777 | |
778 | static int long_term_keys_open(struct inode *inode, struct file *file) |
779 | { |
780 | return single_open(file, long_term_keys_show, inode->i_private); |
781 | } |
782 | |
783 | static const struct file_operations long_term_keys_fops = { |
784 | .open = long_term_keys_open, |
785 | .read = seq_read, |
786 | .llseek = seq_lseek, |
787 | .release = single_release, |
788 | }; |
789 | |
/* debugfs "conn_min_interval": LE connection interval minimum.
 * Spec range is 0x0006-0x0c80 and it must not exceed the maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
817 | |
/* debugfs "conn_max_interval": LE connection interval maximum.
 * Spec range is 0x0006-0x0c80 and it must not be below the minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
845 | |
/* debugfs "adv_channel_map": LE advertising channel bitmap. Valid
 * values are 0x01-0x07 (any combination of channels 37, 38, 39).
 */
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
873 | |
/* debugfs read: report whether 6LoWPAN support is enabled ("Y\n"/"N\n"). */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write: toggle the 6LoWPAN enabled flag from a boolean string. */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};
916 | |
/* debugfs "le_auto_conn": list stored LE connection parameters as
 * "<bdaddr> <addr_type> <auto_connect>" lines.
 */
static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}
938 | |
939 | static ssize_t le_auto_conn_write(struct file *file, const char __user *data, |
940 | size_t count, loff_t *offset) |
941 | { |
942 | struct seq_file *sf = file->private_data; |
943 | struct hci_dev *hdev = sf->private; |
944 | u8 auto_connect = 0; |
945 | bdaddr_t addr; |
946 | u8 addr_type; |
947 | char *buf; |
948 | int err = 0; |
949 | int n; |
950 | |
951 | /* Don't allow partial write */ |
952 | if (*offset != 0) |
953 | return -EINVAL; |
954 | |
955 | if (count < 3) |
956 | return -EINVAL; |
957 | |
958 | buf = kzalloc(count, GFP_KERNEL); |
959 | if (!buf) |
960 | return -ENOMEM; |
961 | |
962 | if (copy_from_user(buf, data, count)) { |
963 | err = -EFAULT; |
964 | goto done; |
965 | } |
966 | |
967 | if (memcmp(buf, "add", 3) == 0) { |
968 | n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu", |
969 | &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], |
970 | &addr.b[1], &addr.b[0], &addr_type, |
971 | &auto_connect); |
972 | |
973 | if (n < 7) { |
974 | err = -EINVAL; |
975 | goto done; |
976 | } |
977 | |
978 | hci_dev_lock(hdev); |
979 | err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect, |
980 | hdev->le_conn_min_interval, |
981 | hdev->le_conn_max_interval); |
982 | hci_dev_unlock(hdev); |
983 | |
984 | if (err) |
985 | goto done; |
986 | } else if (memcmp(buf, "del", 3) == 0) { |
987 | n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", |
988 | &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2], |
989 | &addr.b[1], &addr.b[0], &addr_type); |
990 | |
991 | if (n < 7) { |
992 | err = -EINVAL; |
993 | goto done; |
994 | } |
995 | |
996 | hci_dev_lock(hdev); |
997 | hci_conn_params_del(hdev, &addr, addr_type); |
998 | hci_dev_unlock(hdev); |
999 | } else if (memcmp(buf, "clr", 3) == 0) { |
1000 | hci_dev_lock(hdev); |
1001 | hci_conn_params_clear(hdev); |
1002 | hci_pend_le_conns_clear(hdev); |
1003 | hci_update_background_scan(hdev); |
1004 | hci_dev_unlock(hdev); |
1005 | } else { |
1006 | err = -EINVAL; |
1007 | } |
1008 | |
1009 | done: |
1010 | kfree(buf); |
1011 | |
1012 | if (err) |
1013 | return err; |
1014 | else |
1015 | return count; |
1016 | } |
1017 | |
/* File operations for the "le_auto_conn" debugfs entry */
static const struct file_operations le_auto_conn_fops = {
	.open		= le_auto_conn_open,
	.read		= seq_read,
	.write		= le_auto_conn_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1025 | |
1026 | /* ---- HCI requests ---- */ |
1027 | |
/* Completion callback for synchronous HCI requests: record the result
 * and wake up the waiter blocked in the __hci_*_sync() path. Only acts
 * if a request is actually pending.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
1038 | |
/* Abort a pending synchronous HCI request with the given error and
 * wake up the waiter (e.g. when the device is going down).
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
1049 | |
/* Retrieve and validate the last received event for a synchronous
 * command.
 *
 * Takes ownership of hdev->recv_evt (cleared under the lock). If @event
 * is non-zero the caller waits for that specific event; otherwise the
 * event must be a Command Complete whose opcode matches @opcode.
 *
 * Returns the skb with the event header (and, for Command Complete, the
 * ev_cmd_complete header) already pulled, so skb->data points at the
 * parameter payload. On any mismatch or short event the skb is freed
 * and ERR_PTR(-ENODATA) is returned. Caller must kfree_skb() on success.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1104 | |
1105 | struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, |
1106 | const void *param, u8 event, u32 timeout) |
1107 | { |
1108 | DECLARE_WAITQUEUE(wait, current); |
1109 | struct hci_request req; |
1110 | int err = 0; |
1111 | |
1112 | BT_DBG("%s", hdev->name); |
1113 | |
1114 | hci_req_init(&req, hdev); |
1115 | |
1116 | hci_req_add_ev(&req, opcode, plen, param, event); |
1117 | |
1118 | hdev->req_status = HCI_REQ_PEND; |
1119 | |
1120 | err = hci_req_run(&req, hci_req_sync_complete); |
1121 | if (err < 0) |
1122 | return ERR_PTR(err); |
1123 | |
1124 | add_wait_queue(&hdev->req_wait_q, &wait); |
1125 | set_current_state(TASK_INTERRUPTIBLE); |
1126 | |
1127 | schedule_timeout(timeout); |
1128 | |
1129 | remove_wait_queue(&hdev->req_wait_q, &wait); |
1130 | |
1131 | if (signal_pending(current)) |
1132 | return ERR_PTR(-EINTR); |
1133 | |
1134 | switch (hdev->req_status) { |
1135 | case HCI_REQ_DONE: |
1136 | err = -bt_to_errno(hdev->req_result); |
1137 | break; |
1138 | |
1139 | case HCI_REQ_CANCELED: |
1140 | err = -hdev->req_result; |
1141 | break; |
1142 | |
1143 | default: |
1144 | err = -ETIMEDOUT; |
1145 | break; |
1146 | } |
1147 | |
1148 | hdev->req_status = hdev->req_result = 0; |
1149 | |
1150 | BT_DBG("%s end: err %d", hdev->name, err); |
1151 | |
1152 | if (err < 0) |
1153 | return ERR_PTR(err); |
1154 | |
1155 | return hci_get_cmd_complete(hdev, opcode, event); |
1156 | } |
1157 | EXPORT_SYMBOL(__hci_cmd_sync_ev); |
1158 | |
/* Send a single HCI command and wait for the Command Complete event
 * for @opcode. Convenience wrapper around __hci_cmd_sync_ev() with no
 * special completion event.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1165 | |
/* Execute request and wait for completion.
 *
 * Builds an HCI request by calling @func, runs it and sleeps (up to
 * @timeout jiffies) until hci_req_sync_complete() signals the result.
 *
 * Returns 0 on success or a negative errno: -EINTR when interrupted
 * by a signal, -ETIMEDOUT when the controller did not answer in time.
 * Callers serialize via hci_req_lock(); see hci_req_sync().
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its HCI commands onto the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	/* Translate the completion status into an errno */
	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1229 | |
1230 | static int hci_req_sync(struct hci_dev *hdev, |
1231 | void (*req)(struct hci_request *req, |
1232 | unsigned long opt), |
1233 | unsigned long opt, __u32 timeout) |
1234 | { |
1235 | int ret; |
1236 | |
1237 | if (!test_bit(HCI_UP, &hdev->flags)) |
1238 | return -ENETDOWN; |
1239 | |
1240 | /* Serialize all requests */ |
1241 | hci_req_lock(hdev); |
1242 | ret = __hci_req_sync(hdev, req, opt, timeout); |
1243 | hci_req_unlock(hdev); |
1244 | |
1245 | return ret; |
1246 | } |
1247 | |
1248 | static void hci_reset_req(struct hci_request *req, unsigned long opt) |
1249 | { |
1250 | BT_DBG("%s %ld", req->hdev->name, opt); |
1251 | |
1252 | /* Reset device */ |
1253 | set_bit(HCI_RESET, &req->hdev->flags); |
1254 | hci_req_add(req, HCI_OP_RESET, 0, NULL); |
1255 | } |
1256 | |
1257 | static void bredr_init(struct hci_request *req) |
1258 | { |
1259 | req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; |
1260 | |
1261 | /* Read Local Supported Features */ |
1262 | hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); |
1263 | |
1264 | /* Read Local Version */ |
1265 | hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); |
1266 | |
1267 | /* Read BD Address */ |
1268 | hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL); |
1269 | } |
1270 | |
1271 | static void amp_init(struct hci_request *req) |
1272 | { |
1273 | req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; |
1274 | |
1275 | /* Read Local Version */ |
1276 | hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL); |
1277 | |
1278 | /* Read Local Supported Commands */ |
1279 | hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); |
1280 | |
1281 | /* Read Local Supported Features */ |
1282 | hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL); |
1283 | |
1284 | /* Read Local AMP Info */ |
1285 | hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); |
1286 | |
1287 | /* Read Data Blk size */ |
1288 | hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL); |
1289 | |
1290 | /* Read Flow Control Mode */ |
1291 | hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL); |
1292 | |
1293 | /* Read Location Data */ |
1294 | hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL); |
1295 | } |
1296 | |
1297 | static void hci_init1_req(struct hci_request *req, unsigned long opt) |
1298 | { |
1299 | struct hci_dev *hdev = req->hdev; |
1300 | |
1301 | BT_DBG("%s %ld", hdev->name, opt); |
1302 | |
1303 | /* Reset */ |
1304 | if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) |
1305 | hci_reset_req(req, 0); |
1306 | |
1307 | switch (hdev->dev_type) { |
1308 | case HCI_BREDR: |
1309 | bredr_init(req); |
1310 | break; |
1311 | |
1312 | case HCI_AMP: |
1313 | amp_init(req); |
1314 | break; |
1315 | |
1316 | default: |
1317 | BT_ERR("Unknown device type %d", hdev->dev_type); |
1318 | break; |
1319 | } |
1320 | } |
1321 | |
1322 | static void bredr_setup(struct hci_request *req) |
1323 | { |
1324 | struct hci_dev *hdev = req->hdev; |
1325 | |
1326 | __le16 param; |
1327 | __u8 flt_type; |
1328 | |
1329 | /* Read Buffer Size (ACL mtu, max pkt, etc.) */ |
1330 | hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL); |
1331 | |
1332 | /* Read Class of Device */ |
1333 | hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL); |
1334 | |
1335 | /* Read Local Name */ |
1336 | hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL); |
1337 | |
1338 | /* Read Voice Setting */ |
1339 | hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL); |
1340 | |
1341 | /* Read Number of Supported IAC */ |
1342 | hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL); |
1343 | |
1344 | /* Read Current IAC LAP */ |
1345 | hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL); |
1346 | |
1347 | /* Clear Event Filters */ |
1348 | flt_type = HCI_FLT_CLEAR_ALL; |
1349 | hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type); |
1350 | |
1351 | /* Connection accept timeout ~20 secs */ |
1352 | param = cpu_to_le16(0x7d00); |
1353 | hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); |
1354 | |
1355 | /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2, |
1356 | * but it does not support page scan related HCI commands. |
1357 | */ |
1358 | if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) { |
1359 | hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL); |
1360 | hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL); |
1361 | } |
1362 | } |
1363 | |
/* Queue the LE specific second stage initialization commands. Called
 * from hci_init2_req() when the controller is LE capable.
 */
static void le_setup(struct hci_dev *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1390 | |
1391 | static u8 hci_get_inquiry_mode(struct hci_dev *hdev) |
1392 | { |
1393 | if (lmp_ext_inq_capable(hdev)) |
1394 | return 0x02; |
1395 | |
1396 | if (lmp_inq_rssi_capable(hdev)) |
1397 | return 0x01; |
1398 | |
1399 | if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && |
1400 | hdev->lmp_subver == 0x0757) |
1401 | return 0x01; |
1402 | |
1403 | if (hdev->manufacturer == 15) { |
1404 | if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963) |
1405 | return 0x01; |
1406 | if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963) |
1407 | return 0x01; |
1408 | if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965) |
1409 | return 0x01; |
1410 | } |
1411 | |
1412 | if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && |
1413 | hdev->lmp_subver == 0x1805) |
1414 | return 0x01; |
1415 | |
1416 | return 0x00; |
1417 | } |
1418 | |
1419 | static void hci_setup_inquiry_mode(struct hci_request *req) |
1420 | { |
1421 | u8 mode; |
1422 | |
1423 | mode = hci_get_inquiry_mode(req->hdev); |
1424 | |
1425 | hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode); |
1426 | } |
1427 | |
/* Build and queue the Set Event Mask command (and the LE Set Event
 * Mask when the controller is LE capable) based on the controller's
 * supported features.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1508 | |
/* Second stage of controller initialization: transport specific setup
 * (BR/EDR and/or LE), event mask and feature dependent commands.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data both
			 * locally and on the controller.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1572 | |
1573 | static void hci_setup_link_policy(struct hci_request *req) |
1574 | { |
1575 | struct hci_dev *hdev = req->hdev; |
1576 | struct hci_cp_write_def_link_policy cp; |
1577 | u16 link_policy = 0; |
1578 | |
1579 | if (lmp_rswitch_capable(hdev)) |
1580 | link_policy |= HCI_LP_RSWITCH; |
1581 | if (lmp_hold_capable(hdev)) |
1582 | link_policy |= HCI_LP_HOLD; |
1583 | if (lmp_sniff_capable(hdev)) |
1584 | link_policy |= HCI_LP_SNIFF; |
1585 | if (lmp_park_capable(hdev)) |
1586 | link_policy |= HCI_LP_PARK; |
1587 | |
1588 | cp.policy = cpu_to_le16(link_policy); |
1589 | hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); |
1590 | } |
1591 | |
/* Queue a Write LE Host Supported command when the desired host LE
 * setting differs from what the controller currently reports.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command if the setting actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1612 | |
/* Build and queue the Set Event Mask Page 2 command based on the
 * controller's Connectionless Slave Broadcast and ping capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1644 | |
/* Third stage of controller initialization: stored link key cleanup,
 * link policy, LE host support and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy is bit 4 of supported commands byte 5 */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1688 | |
1689 | static void hci_init4_req(struct hci_request *req, unsigned long opt) |
1690 | { |
1691 | struct hci_dev *hdev = req->hdev; |
1692 | |
1693 | /* Set event mask page 2 if the HCI command for it is supported */ |
1694 | if (hdev->commands[22] & 0x04) |
1695 | hci_set_event_mask_page_2(req); |
1696 | |
1697 | /* Check for Synchronization Train support */ |
1698 | if (lmp_sync_train_capable(hdev)) |
1699 | hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL); |
1700 | |
1701 | /* Enable Secure Connections if supported and configured */ |
1702 | if ((lmp_sc_capable(hdev) || |
1703 | test_bit(HCI_FORCE_SC, &hdev->dev_flags)) && |
1704 | test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) { |
1705 | u8 support = 0x01; |
1706 | hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, |
1707 | sizeof(support), &support); |
1708 | } |
1709 | } |
1710 | |
/* Run the full multi-stage controller initialization and, during the
 * initial setup phase only, create the capability dependent debugfs
 * entries. Returns 0 on success or a negative errno from the first
 * failing stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries available for every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing specific entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode specific entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
				    &le_auto_conn_fops);
	}

	return 0;
}
1835 | |
1836 | static void hci_scan_req(struct hci_request *req, unsigned long opt) |
1837 | { |
1838 | __u8 scan = opt; |
1839 | |
1840 | BT_DBG("%s %x", req->hdev->name, scan); |
1841 | |
1842 | /* Inquiry and Page scans */ |
1843 | hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); |
1844 | } |
1845 | |
1846 | static void hci_auth_req(struct hci_request *req, unsigned long opt) |
1847 | { |
1848 | __u8 auth = opt; |
1849 | |
1850 | BT_DBG("%s %x", req->hdev->name, auth); |
1851 | |
1852 | /* Authentication */ |
1853 | hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); |
1854 | } |
1855 | |
1856 | static void hci_encrypt_req(struct hci_request *req, unsigned long opt) |
1857 | { |
1858 | __u8 encrypt = opt; |
1859 | |
1860 | BT_DBG("%s %x", req->hdev->name, encrypt); |
1861 | |
1862 | /* Encryption */ |
1863 | hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); |
1864 | } |
1865 | |
1866 | static void hci_linkpol_req(struct hci_request *req, unsigned long opt) |
1867 | { |
1868 | __le16 policy = cpu_to_le16(opt); |
1869 | |
1870 | BT_DBG("%s %x", req->hdev->name, policy); |
1871 | |
1872 | /* Default link policy */ |
1873 | hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); |
1874 | } |
1875 | |
1876 | /* Get HCI device by index. |
1877 | * Device is held on return. */ |
1878 | struct hci_dev *hci_dev_get(int index) |
1879 | { |
1880 | struct hci_dev *hdev = NULL, *d; |
1881 | |
1882 | BT_DBG("%d", index); |
1883 | |
1884 | if (index < 0) |
1885 | return NULL; |
1886 | |
1887 | read_lock(&hci_dev_list_lock); |
1888 | list_for_each_entry(d, &hci_dev_list, list) { |
1889 | if (d->id == index) { |
1890 | hdev = hci_dev_hold(d); |
1891 | break; |
1892 | } |
1893 | } |
1894 | read_unlock(&hci_dev_list_lock); |
1895 | return hdev; |
1896 | } |
1897 | |
1898 | /* ---- Inquiry support ---- */ |
1899 | |
1900 | bool hci_discovery_active(struct hci_dev *hdev) |
1901 | { |
1902 | struct discovery_state *discov = &hdev->discovery; |
1903 | |
1904 | switch (discov->state) { |
1905 | case DISCOVERY_FINDING: |
1906 | case DISCOVERY_RESOLVING: |
1907 | return true; |
1908 | |
1909 | default: |
1910 | return false; |
1911 | } |
1912 | } |
1913 | |
1914 | void hci_discovery_set_state(struct hci_dev *hdev, int state) |
1915 | { |
1916 | BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); |
1917 | |
1918 | if (hdev->discovery.state == state) |
1919 | return; |
1920 | |
1921 | switch (state) { |
1922 | case DISCOVERY_STOPPED: |
1923 | hci_update_background_scan(hdev); |
1924 | |
1925 | if (hdev->discovery.state != DISCOVERY_STARTING) |
1926 | mgmt_discovering(hdev, 0); |
1927 | break; |
1928 | case DISCOVERY_STARTING: |
1929 | break; |
1930 | case DISCOVERY_FINDING: |
1931 | mgmt_discovering(hdev, 1); |
1932 | break; |
1933 | case DISCOVERY_RESOLVING: |
1934 | break; |
1935 | case DISCOVERY_STOPPING: |
1936 | break; |
1937 | } |
1938 | |
1939 | hdev->discovery.state = state; |
1940 | } |
1941 | |
1942 | void hci_inquiry_cache_flush(struct hci_dev *hdev) |
1943 | { |
1944 | struct discovery_state *cache = &hdev->discovery; |
1945 | struct inquiry_entry *p, *n; |
1946 | |
1947 | list_for_each_entry_safe(p, n, &cache->all, all) { |
1948 | list_del(&p->all); |
1949 | kfree(p); |
1950 | } |
1951 | |
1952 | INIT_LIST_HEAD(&cache->unknown); |
1953 | INIT_LIST_HEAD(&cache->resolve); |
1954 | } |
1955 | |
1956 | struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, |
1957 | bdaddr_t *bdaddr) |
1958 | { |
1959 | struct discovery_state *cache = &hdev->discovery; |
1960 | struct inquiry_entry *e; |
1961 | |
1962 | BT_DBG("cache %p, %pMR", cache, bdaddr); |
1963 | |
1964 | list_for_each_entry(e, &cache->all, all) { |
1965 | if (!bacmp(&e->data.bdaddr, bdaddr)) |
1966 | return e; |
1967 | } |
1968 | |
1969 | return NULL; |
1970 | } |
1971 | |
1972 | struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, |
1973 | bdaddr_t *bdaddr) |
1974 | { |
1975 | struct discovery_state *cache = &hdev->discovery; |
1976 | struct inquiry_entry *e; |
1977 | |
1978 | BT_DBG("cache %p, %pMR", cache, bdaddr); |
1979 | |
1980 | list_for_each_entry(e, &cache->unknown, list) { |
1981 | if (!bacmp(&e->data.bdaddr, bdaddr)) |
1982 | return e; |
1983 | } |
1984 | |
1985 | return NULL; |
1986 | } |
1987 | |
1988 | struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, |
1989 | bdaddr_t *bdaddr, |
1990 | int state) |
1991 | { |
1992 | struct discovery_state *cache = &hdev->discovery; |
1993 | struct inquiry_entry *e; |
1994 | |
1995 | BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); |
1996 | |
1997 | list_for_each_entry(e, &cache->resolve, list) { |
1998 | if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) |
1999 | return e; |
2000 | if (!bacmp(&e->data.bdaddr, bdaddr)) |
2001 | return e; |
2002 | } |
2003 | |
2004 | return NULL; |
2005 | } |
2006 | |
/* Re-insert @ie into the resolve list, keeping the list ordered by
 * ascending |RSSI| while leaving entries whose name resolution is
 * already pending in front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first so the entry is not compared against itself and
	 * can be re-inserted at the correct position below.
	 */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
2025 | |
/* Add or refresh an inquiry cache entry from a new inquiry result.
 *
 * When @ssp is non-NULL it is set to the device's SSP mode. Returns
 * true when the remote name is (now) known and false when the name is
 * still unknown or the entry could not be allocated.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* A changed RSSI may reorder the pending name resolve
		 * list, which is sorted by signal strength.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* The name became known: drop the entry from the unknown or
	 * resolve list it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
2083 | |
2084 | static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) |
2085 | { |
2086 | struct discovery_state *cache = &hdev->discovery; |
2087 | struct inquiry_info *info = (struct inquiry_info *) buf; |
2088 | struct inquiry_entry *e; |
2089 | int copied = 0; |
2090 | |
2091 | list_for_each_entry(e, &cache->all, all) { |
2092 | struct inquiry_data *data = &e->data; |
2093 | |
2094 | if (copied >= num) |
2095 | break; |
2096 | |
2097 | bacpy(&info->bdaddr, &data->bdaddr); |
2098 | info->pscan_rep_mode = data->pscan_rep_mode; |
2099 | info->pscan_period_mode = data->pscan_period_mode; |
2100 | info->pscan_mode = data->pscan_mode; |
2101 | memcpy(info->dev_class, data->dev_class, 3); |
2102 | info->clock_offset = data->clock_offset; |
2103 | |
2104 | info++; |
2105 | copied++; |
2106 | } |
2107 | |
2108 | BT_DBG("cache %p, copied %d", cache, copied); |
2109 | return copied; |
2110 | } |
2111 | |
/* Request builder for hci_req_sync(): queue an HCI Inquiry command using
 * the LAP, length and response limit from the user's hci_inquiry_req
 * (smuggled in via @opt).  Does nothing if an inquiry is already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
2129 | |
/* wait_on_bit() action function: sleep until woken, then report whether
 * a signal is pending (a non-zero return aborts the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
2135 | |
2136 | int hci_inquiry(void __user *arg) |
2137 | { |
2138 | __u8 __user *ptr = arg; |
2139 | struct hci_inquiry_req ir; |
2140 | struct hci_dev *hdev; |
2141 | int err = 0, do_inquiry = 0, max_rsp; |
2142 | long timeo; |
2143 | __u8 *buf; |
2144 | |
2145 | if (copy_from_user(&ir, ptr, sizeof(ir))) |
2146 | return -EFAULT; |
2147 | |
2148 | hdev = hci_dev_get(ir.dev_id); |
2149 | if (!hdev) |
2150 | return -ENODEV; |
2151 | |
2152 | if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { |
2153 | err = -EBUSY; |
2154 | goto done; |
2155 | } |
2156 | |
2157 | if (hdev->dev_type != HCI_BREDR) { |
2158 | err = -EOPNOTSUPP; |
2159 | goto done; |
2160 | } |
2161 | |
2162 | if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) { |
2163 | err = -EOPNOTSUPP; |
2164 | goto done; |
2165 | } |
2166 | |
2167 | hci_dev_lock(hdev); |
2168 | if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || |
2169 | inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { |
2170 | hci_inquiry_cache_flush(hdev); |
2171 | do_inquiry = 1; |
2172 | } |
2173 | hci_dev_unlock(hdev); |
2174 | |
2175 | timeo = ir.length * msecs_to_jiffies(2000); |
2176 | |
2177 | if (do_inquiry) { |
2178 | err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, |
2179 | timeo); |
2180 | if (err < 0) |
2181 | goto done; |
2182 | |
2183 | /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is |
2184 | * cleared). If it is interrupted by a signal, return -EINTR. |
2185 | */ |
2186 | if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry, |
2187 | TASK_INTERRUPTIBLE)) |
2188 | return -EINTR; |
2189 | } |
2190 | |
2191 | /* for unlimited number of responses we will use buffer with |
2192 | * 255 entries |
2193 | */ |
2194 | max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; |
2195 | |
2196 | /* cache_dump can't sleep. Therefore we allocate temp buffer and then |
2197 | * copy it to the user space. |
2198 | */ |
2199 | buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL); |
2200 | if (!buf) { |
2201 | err = -ENOMEM; |
2202 | goto done; |
2203 | } |
2204 | |
2205 | hci_dev_lock(hdev); |
2206 | ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); |
2207 | hci_dev_unlock(hdev); |
2208 | |
2209 | BT_DBG("num_rsp %d", ir.num_rsp); |
2210 | |
2211 | if (!copy_to_user(ptr, &ir, sizeof(ir))) { |
2212 | ptr += sizeof(ir); |
2213 | if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * |
2214 | ir.num_rsp)) |
2215 | err = -EFAULT; |
2216 | } else |
2217 | err = -EFAULT; |
2218 | |
2219 | kfree(buf); |
2220 | |
2221 | done: |
2222 | hci_dev_put(hdev); |
2223 | return err; |
2224 | } |
2225 | |
/* Bring the controller up: open the transport, run the driver's setup
 * callback (first power-on only) and the HCI init sequence, then notify
 * the stack.  Serialized against other open/close via the request lock.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the low-level transport */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* One command credit so the init sequence can start sending */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the HCI_SETUP phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user channels skip the HCI init sequence */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2333 | |
2334 | /* ---- HCI ioctl helpers ---- */ |
2335 | |
2336 | int hci_dev_open(__u16 dev) |
2337 | { |
2338 | struct hci_dev *hdev; |
2339 | int err; |
2340 | |
2341 | hdev = hci_dev_get(dev); |
2342 | if (!hdev) |
2343 | return -ENODEV; |
2344 | |
2345 | /* We need to ensure that no other power on/off work is pending |
2346 | * before proceeding to call hci_dev_do_open. This is |
2347 | * particularly important if the setup procedure has not yet |
2348 | * completed. |
2349 | */ |
2350 | if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) |
2351 | cancel_delayed_work(&hdev->power_off); |
2352 | |
2353 | /* After this call it is guaranteed that the setup procedure |
2354 | * has finished. This means that error conditions like RFKILL |
2355 | * or no valid public or static random address apply. |
2356 | */ |
2357 | flush_workqueue(hdev->req_workqueue); |
2358 | |
2359 | err = hci_dev_do_open(hdev); |
2360 | |
2361 | hci_dev_put(hdev); |
2362 | |
2363 | return err; |
2364 | } |
2365 | |
/* Power the controller down: cancel and flush all pending work, tear down
 * connections and caches, optionally reset the hardware, close the
 * transport and reset the software state.  Drops one reference on @hdev.
 * Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: only the command timer may still be armed */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2468 | |
2469 | int hci_dev_close(__u16 dev) |
2470 | { |
2471 | struct hci_dev *hdev; |
2472 | int err; |
2473 | |
2474 | hdev = hci_dev_get(dev); |
2475 | if (!hdev) |
2476 | return -ENODEV; |
2477 | |
2478 | if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { |
2479 | err = -EBUSY; |
2480 | goto done; |
2481 | } |
2482 | |
2483 | if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) |
2484 | cancel_delayed_work(&hdev->power_off); |
2485 | |
2486 | err = hci_dev_do_close(hdev); |
2487 | |
2488 | done: |
2489 | hci_dev_put(hdev); |
2490 | return err; |
2491 | } |
2492 | |
/* Handle the HCIDEVRESET ioctl: drop all queued traffic, flush the
 * inquiry cache and connection table, and issue an HCI Reset to the
 * controller (unless it is a raw device).
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* The device is owned exclusively by a user channel */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore one command credit and clear all flow-control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2537 | |
2538 | int hci_dev_reset_stat(__u16 dev) |
2539 | { |
2540 | struct hci_dev *hdev; |
2541 | int ret = 0; |
2542 | |
2543 | hdev = hci_dev_get(dev); |
2544 | if (!hdev) |
2545 | return -ENODEV; |
2546 | |
2547 | if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) { |
2548 | ret = -EBUSY; |
2549 | goto done; |
2550 | } |
2551 | |
2552 | memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); |
2553 | |
2554 | done: |
2555 | hci_dev_put(hdev); |
2556 | return ret; |
2557 | } |
2558 | |
/* Handle the legacy HCISET* ioctls that tweak a single controller
 * setting (auth, encryption, scan mode, link policy/mode, packet types
 * and MTUs).  Only supported on enabled BR/EDR controllers.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* The device is owned exclusively by a user channel */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: second halfword is the
		 * MTU, first halfword the packet count */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2649 | |
2650 | int hci_get_dev_list(void __user *arg) |
2651 | { |
2652 | struct hci_dev *hdev; |
2653 | struct hci_dev_list_req *dl; |
2654 | struct hci_dev_req *dr; |
2655 | int n = 0, size, err; |
2656 | __u16 dev_num; |
2657 | |
2658 | if (get_user(dev_num, (__u16 __user *) arg)) |
2659 | return -EFAULT; |
2660 | |
2661 | if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) |
2662 | return -EINVAL; |
2663 | |
2664 | size = sizeof(*dl) + dev_num * sizeof(*dr); |
2665 | |
2666 | dl = kzalloc(size, GFP_KERNEL); |
2667 | if (!dl) |
2668 | return -ENOMEM; |
2669 | |
2670 | dr = dl->dev_req; |
2671 | |
2672 | read_lock(&hci_dev_list_lock); |
2673 | list_for_each_entry(hdev, &hci_dev_list, list) { |
2674 | if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) |
2675 | cancel_delayed_work(&hdev->power_off); |
2676 | |
2677 | if (!test_bit(HCI_MGMT, &hdev->dev_flags)) |
2678 | set_bit(HCI_PAIRABLE, &hdev->dev_flags); |
2679 | |
2680 | (dr + n)->dev_id = hdev->id; |
2681 | (dr + n)->dev_opt = hdev->flags; |
2682 | |
2683 | if (++n >= dev_num) |
2684 | break; |
2685 | } |
2686 | read_unlock(&hci_dev_list_lock); |
2687 | |
2688 | dl->dev_num = n; |
2689 | size = sizeof(*dl) + n * sizeof(*dr); |
2690 | |
2691 | err = copy_to_user(arg, dl, size); |
2692 | kfree(dl); |
2693 | |
2694 | return err ? -EFAULT : 0; |
2695 | } |
2696 | |
/* Handle the HCIGETDEVINFO ioctl: fill a hci_dev_info structure for the
 * requested device and copy it back to user space.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying a device counts as activity: abort auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Bus type in the low nibble, device type in the next two bits */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controllers report the LE buffer info instead */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2745 | |
2746 | /* ---- Interface to HCI drivers ---- */ |
2747 | |
2748 | static int hci_rfkill_set_block(void *data, bool blocked) |
2749 | { |
2750 | struct hci_dev *hdev = data; |
2751 | |
2752 | BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); |
2753 | |
2754 | if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) |
2755 | return -EBUSY; |
2756 | |
2757 | if (blocked) { |
2758 | set_bit(HCI_RFKILLED, &hdev->dev_flags); |
2759 | if (!test_bit(HCI_SETUP, &hdev->dev_flags)) |
2760 | hci_dev_do_close(hdev); |
2761 | } else { |
2762 | clear_bit(HCI_RFKILLED, &hdev->dev_flags); |
2763 | } |
2764 | |
2765 | return 0; |
2766 | } |
2767 | |
/* rfkill operations for the per-controller rfkill switch */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2771 | |
/* Deferred power-on handler (hdev->power_on work).  Opens the device and
 * then re-checks the error conditions that are deliberately ignored while
 * HCI_SETUP is active (rfkill, missing address); if any still holds, the
 * device is powered back off.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-off still armed: schedule the delayed power-off */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2803 | |
2804 | static void hci_power_off(struct work_struct *work) |
2805 | { |
2806 | struct hci_dev *hdev = container_of(work, struct hci_dev, |
2807 | power_off.work); |
2808 | |
2809 | BT_DBG("%s", hdev->name); |
2810 | |
2811 | hci_dev_do_close(hdev); |
2812 | } |
2813 | |
2814 | static void hci_discov_off(struct work_struct *work) |
2815 | { |
2816 | struct hci_dev *hdev; |
2817 | |
2818 | hdev = container_of(work, struct hci_dev, discov_off.work); |
2819 | |
2820 | BT_DBG("%s", hdev->name); |
2821 | |
2822 | mgmt_discoverable_timeout(hdev); |
2823 | } |
2824 | |
2825 | void hci_uuids_clear(struct hci_dev *hdev) |
2826 | { |
2827 | struct bt_uuid *uuid, *tmp; |
2828 | |
2829 | list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { |
2830 | list_del(&uuid->list); |
2831 | kfree(uuid); |
2832 | } |
2833 | } |
2834 | |
2835 | void hci_link_keys_clear(struct hci_dev *hdev) |
2836 | { |
2837 | struct list_head *p, *n; |
2838 | |
2839 | list_for_each_safe(p, n, &hdev->link_keys) { |
2840 | struct link_key *key; |
2841 | |
2842 | key = list_entry(p, struct link_key, list); |
2843 | |
2844 | list_del(p); |
2845 | kfree(key); |
2846 | } |
2847 | } |
2848 | |
2849 | void hci_smp_ltks_clear(struct hci_dev *hdev) |
2850 | { |
2851 | struct smp_ltk *k, *tmp; |
2852 | |
2853 | list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { |
2854 | list_del(&k->list); |
2855 | kfree(k); |
2856 | } |
2857 | } |
2858 | |
2859 | void hci_smp_irks_clear(struct hci_dev *hdev) |
2860 | { |
2861 | struct smp_irk *k, *tmp; |
2862 | |
2863 | list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { |
2864 | list_del(&k->list); |
2865 | kfree(k); |
2866 | } |
2867 | } |
2868 | |
2869 | struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) |
2870 | { |
2871 | struct link_key *k; |
2872 | |
2873 | list_for_each_entry(k, &hdev->link_keys, list) |
2874 | if (bacmp(bdaddr, &k->bdaddr) == 0) |
2875 | return k; |
2876 | |
2877 | return NULL; |
2878 | } |
2879 | |
/* Decide whether a newly generated BR/EDR link key should be stored
 * persistently, based on the key type and (when the generating connection
 * is known) both sides' authentication requirements.
 *
 * NOTE(review): the raw constants compared against auth_type/remote_auth
 * appear to be the HCI authentication-requirement values (0x00/0x01 =
 * no bonding, 0x02/0x03 = dedicated bonding) — confirm against the
 * Bluetooth Core Specification / hci.h definitions.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2915 | |
2916 | static bool ltk_type_master(u8 type) |
2917 | { |
2918 | if (type == HCI_SMP_STK || type == HCI_SMP_LTK) |
2919 | return true; |
2920 | |
2921 | return false; |
2922 | } |
2923 | |
2924 | struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand, |
2925 | bool master) |
2926 | { |
2927 | struct smp_ltk *k; |
2928 | |
2929 | list_for_each_entry(k, &hdev->long_term_keys, list) { |
2930 | if (k->ediv != ediv || k->rand != rand) |
2931 | continue; |
2932 | |
2933 | if (ltk_type_master(k->type) != master) |
2934 | continue; |
2935 | |
2936 | return k; |
2937 | } |
2938 | |
2939 | return NULL; |
2940 | } |
2941 | |
2942 | struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2943 | u8 addr_type, bool master) |
2944 | { |
2945 | struct smp_ltk *k; |
2946 | |
2947 | list_for_each_entry(k, &hdev->long_term_keys, list) |
2948 | if (addr_type == k->bdaddr_type && |
2949 | bacmp(bdaddr, &k->bdaddr) == 0 && |
2950 | ltk_type_master(k->type) == master) |
2951 | return k; |
2952 | |
2953 | return NULL; |
2954 | } |
2955 | |
/* Look up the IRK that resolves the given Resolvable Private Address.
 * First try a cheap comparison against the last RPA cached for each IRK;
 * only then fall back to running the AES-based resolution function over
 * every stored IRK, caching the RPA on a hit for future lookups.
 *
 * Returns the matching IRK or NULL.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
2974 | |
2975 | struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, |
2976 | u8 addr_type) |
2977 | { |
2978 | struct smp_irk *irk; |
2979 | |
2980 | /* Identity Address must be public or static random */ |
2981 | if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) |
2982 | return NULL; |
2983 | |
2984 | list_for_each_entry(irk, &hdev->identity_resolving_keys, list) { |
2985 | if (addr_type == irk->addr_type && |
2986 | bacmp(bdaddr, &irk->bdaddr) == 0) |
2987 | return irk; |
2988 | } |
2989 | |
2990 | return NULL; |
2991 | } |
2992 | |
/* Store (or update) the BR/EDR link key for @bdaddr.  When @new_key is
 * non-zero the management interface is notified and, for non-persistent
 * keys, the connection is flagged so the key is flushed on disconnect.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination event keeps the previous key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
3045 | |
3046 | struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, |
3047 | u8 addr_type, u8 type, u8 authenticated, |
3048 | u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) |
3049 | { |
3050 | struct smp_ltk *key, *old_key; |
3051 | bool master = ltk_type_master(type); |
3052 | |
3053 | old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master); |
3054 | if (old_key) |
3055 | key = old_key; |
3056 | else { |
3057 | key = kzalloc(sizeof(*key), GFP_KERNEL); |
3058 | if (!key) |
3059 | return NULL; |
3060 | list_add(&key->list, &hdev->long_term_keys); |
3061 | } |
3062 | |
3063 | bacpy(&key->bdaddr, bdaddr); |
3064 | key->bdaddr_type = addr_type; |
3065 | memcpy(key->val, tk, sizeof(key->val)); |
3066 | key->authenticated = authenticated; |
3067 | key->ediv = ediv; |
3068 | key->rand = rand; |
3069 | key->enc_size = enc_size; |
3070 | key->type = type; |
3071 | |
3072 | return key; |
3073 | } |
3074 | |
3075 | struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, |
3076 | u8 addr_type, u8 val[16], bdaddr_t *rpa) |
3077 | { |
3078 | struct smp_irk *irk; |
3079 | |
3080 | irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); |
3081 | if (!irk) { |
3082 | irk = kzalloc(sizeof(*irk), GFP_KERNEL); |
3083 | if (!irk) |
3084 | return NULL; |
3085 | |
3086 | bacpy(&irk->bdaddr, bdaddr); |
3087 | irk->addr_type = addr_type; |
3088 | |
3089 | list_add(&irk->list, &hdev->identity_resolving_keys); |
3090 | } |
3091 | |
3092 | memcpy(irk->val, val, 16); |
3093 | bacpy(&irk->rpa, rpa); |
3094 | |
3095 | return irk; |
3096 | } |
3097 | |
3098 | int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) |
3099 | { |
3100 | struct link_key *key; |
3101 | |
3102 | key = hci_find_link_key(hdev, bdaddr); |
3103 | if (!key) |
3104 | return -ENOENT; |
3105 | |
3106 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); |
3107 | |
3108 | list_del(&key->list); |
3109 | kfree(key); |
3110 | |
3111 | return 0; |
3112 | } |
3113 | |
3114 | int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) |
3115 | { |
3116 | struct smp_ltk *k, *tmp; |
3117 | int removed = 0; |
3118 | |
3119 | list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { |
3120 | if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) |
3121 | continue; |
3122 | |
3123 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); |
3124 | |
3125 | list_del(&k->list); |
3126 | kfree(k); |
3127 | removed++; |
3128 | } |
3129 | |
3130 | return removed ? 0 : -ENOENT; |
3131 | } |
3132 | |
3133 | void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) |
3134 | { |
3135 | struct smp_irk *k, *tmp; |
3136 | |
3137 | list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { |
3138 | if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) |
3139 | continue; |
3140 | |
3141 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); |
3142 | |
3143 | list_del(&k->list); |
3144 | kfree(k); |
3145 | } |
3146 | } |
3147 | |
/* HCI command timer function.
 *
 * Fires when the controller did not answer the last HCI command in
 * time (armed via setup_timer() in hci_alloc_dev(), so this runs in
 * timer context).  Logs the stuck opcode when the sent command is
 * still around, then unblocks the command queue.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow one new command out and kick the cmd worker so the
	 * queue does not stay stalled behind the lost command.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3165 | |
3166 | struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, |
3167 | bdaddr_t *bdaddr) |
3168 | { |
3169 | struct oob_data *data; |
3170 | |
3171 | list_for_each_entry(data, &hdev->remote_oob_data, list) |
3172 | if (bacmp(bdaddr, &data->bdaddr) == 0) |
3173 | return data; |
3174 | |
3175 | return NULL; |
3176 | } |
3177 | |
3178 | int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr) |
3179 | { |
3180 | struct oob_data *data; |
3181 | |
3182 | data = hci_find_remote_oob_data(hdev, bdaddr); |
3183 | if (!data) |
3184 | return -ENOENT; |
3185 | |
3186 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); |
3187 | |
3188 | list_del(&data->list); |
3189 | kfree(data); |
3190 | |
3191 | return 0; |
3192 | } |
3193 | |
3194 | void hci_remote_oob_data_clear(struct hci_dev *hdev) |
3195 | { |
3196 | struct oob_data *data, *n; |
3197 | |
3198 | list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { |
3199 | list_del(&data->list); |
3200 | kfree(data); |
3201 | } |
3202 | } |
3203 | |
3204 | int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, |
3205 | u8 *hash, u8 *randomizer) |
3206 | { |
3207 | struct oob_data *data; |
3208 | |
3209 | data = hci_find_remote_oob_data(hdev, bdaddr); |
3210 | if (!data) { |
3211 | data = kmalloc(sizeof(*data), GFP_KERNEL); |
3212 | if (!data) |
3213 | return -ENOMEM; |
3214 | |
3215 | bacpy(&data->bdaddr, bdaddr); |
3216 | list_add(&data->list, &hdev->remote_oob_data); |
3217 | } |
3218 | |
3219 | memcpy(data->hash192, hash, sizeof(data->hash192)); |
3220 | memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192)); |
3221 | |
3222 | memset(data->hash256, 0, sizeof(data->hash256)); |
3223 | memset(data->randomizer256, 0, sizeof(data->randomizer256)); |
3224 | |
3225 | BT_DBG("%s for %pMR", hdev->name, bdaddr); |
3226 | |
3227 | return 0; |
3228 | } |
3229 | |
3230 | int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr, |
3231 | u8 *hash192, u8 *randomizer192, |
3232 | u8 *hash256, u8 *randomizer256) |
3233 | { |
3234 | struct oob_data *data; |
3235 | |
3236 | data = hci_find_remote_oob_data(hdev, bdaddr); |
3237 | if (!data) { |
3238 | data = kmalloc(sizeof(*data), GFP_KERNEL); |
3239 | if (!data) |
3240 | return -ENOMEM; |
3241 | |
3242 | bacpy(&data->bdaddr, bdaddr); |
3243 | list_add(&data->list, &hdev->remote_oob_data); |
3244 | } |
3245 | |
3246 | memcpy(data->hash192, hash192, sizeof(data->hash192)); |
3247 | memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192)); |
3248 | |
3249 | memcpy(data->hash256, hash256, sizeof(data->hash256)); |
3250 | memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256)); |
3251 | |
3252 | BT_DBG("%s for %pMR", hdev->name, bdaddr); |
3253 | |
3254 | return 0; |
3255 | } |
3256 | |
3257 | struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, |
3258 | bdaddr_t *bdaddr, u8 type) |
3259 | { |
3260 | struct bdaddr_list *b; |
3261 | |
3262 | list_for_each_entry(b, &hdev->blacklist, list) { |
3263 | if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) |
3264 | return b; |
3265 | } |
3266 | |
3267 | return NULL; |
3268 | } |
3269 | |
3270 | static void hci_blacklist_clear(struct hci_dev *hdev) |
3271 | { |
3272 | struct list_head *p, *n; |
3273 | |
3274 | list_for_each_safe(p, n, &hdev->blacklist) { |
3275 | struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list); |
3276 | |
3277 | list_del(p); |
3278 | kfree(b); |
3279 | } |
3280 | } |
3281 | |
3282 | int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) |
3283 | { |
3284 | struct bdaddr_list *entry; |
3285 | |
3286 | if (!bacmp(bdaddr, BDADDR_ANY)) |
3287 | return -EBADF; |
3288 | |
3289 | if (hci_blacklist_lookup(hdev, bdaddr, type)) |
3290 | return -EEXIST; |
3291 | |
3292 | entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); |
3293 | if (!entry) |
3294 | return -ENOMEM; |
3295 | |
3296 | bacpy(&entry->bdaddr, bdaddr); |
3297 | entry->bdaddr_type = type; |
3298 | |
3299 | list_add(&entry->list, &hdev->blacklist); |
3300 | |
3301 | return mgmt_device_blocked(hdev, bdaddr, type); |
3302 | } |
3303 | |
3304 | int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) |
3305 | { |
3306 | struct bdaddr_list *entry; |
3307 | |
3308 | if (!bacmp(bdaddr, BDADDR_ANY)) { |
3309 | hci_blacklist_clear(hdev); |
3310 | return 0; |
3311 | } |
3312 | |
3313 | entry = hci_blacklist_lookup(hdev, bdaddr, type); |
3314 | if (!entry) |
3315 | return -ENOENT; |
3316 | |
3317 | list_del(&entry->list); |
3318 | kfree(entry); |
3319 | |
3320 | return mgmt_device_unblocked(hdev, bdaddr, type); |
3321 | } |
3322 | |
3323 | struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev, |
3324 | bdaddr_t *bdaddr, u8 type) |
3325 | { |
3326 | struct bdaddr_list *b; |
3327 | |
3328 | list_for_each_entry(b, &hdev->le_white_list, list) { |
3329 | if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) |
3330 | return b; |
3331 | } |
3332 | |
3333 | return NULL; |
3334 | } |
3335 | |
3336 | void hci_white_list_clear(struct hci_dev *hdev) |
3337 | { |
3338 | struct list_head *p, *n; |
3339 | |
3340 | list_for_each_safe(p, n, &hdev->le_white_list) { |
3341 | struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list); |
3342 | |
3343 | list_del(p); |
3344 | kfree(b); |
3345 | } |
3346 | } |
3347 | |
3348 | int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) |
3349 | { |
3350 | struct bdaddr_list *entry; |
3351 | |
3352 | if (!bacmp(bdaddr, BDADDR_ANY)) |
3353 | return -EBADF; |
3354 | |
3355 | entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL); |
3356 | if (!entry) |
3357 | return -ENOMEM; |
3358 | |
3359 | bacpy(&entry->bdaddr, bdaddr); |
3360 | entry->bdaddr_type = type; |
3361 | |
3362 | list_add(&entry->list, &hdev->le_white_list); |
3363 | |
3364 | return 0; |
3365 | } |
3366 | |
3367 | int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) |
3368 | { |
3369 | struct bdaddr_list *entry; |
3370 | |
3371 | if (!bacmp(bdaddr, BDADDR_ANY)) |
3372 | return -EBADF; |
3373 | |
3374 | entry = hci_white_list_lookup(hdev, bdaddr, type); |
3375 | if (!entry) |
3376 | return -ENOENT; |
3377 | |
3378 | list_del(&entry->list); |
3379 | kfree(entry); |
3380 | |
3381 | return 0; |
3382 | } |
3383 | |
3384 | /* This function requires the caller holds hdev->lock */ |
3385 | struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, |
3386 | bdaddr_t *addr, u8 addr_type) |
3387 | { |
3388 | struct hci_conn_params *params; |
3389 | |
3390 | list_for_each_entry(params, &hdev->le_conn_params, list) { |
3391 | if (bacmp(¶ms->addr, addr) == 0 && |
3392 | params->addr_type == addr_type) { |
3393 | return params; |
3394 | } |
3395 | } |
3396 | |
3397 | return NULL; |
3398 | } |
3399 | |
3400 | static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) |
3401 | { |
3402 | struct hci_conn *conn; |
3403 | |
3404 | conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); |
3405 | if (!conn) |
3406 | return false; |
3407 | |
3408 | if (conn->dst_type != type) |
3409 | return false; |
3410 | |
3411 | if (conn->state != BT_CONNECTED) |
3412 | return false; |
3413 | |
3414 | return true; |
3415 | } |
3416 | |
3417 | static bool is_identity_address(bdaddr_t *addr, u8 addr_type) |
3418 | { |
3419 | if (addr_type == ADDR_LE_DEV_PUBLIC) |
3420 | return true; |
3421 | |
3422 | /* Check for Random Static address type */ |
3423 | if ((addr->b[5] & 0xc0) == 0xc0) |
3424 | return true; |
3425 | |
3426 | return false; |
3427 | } |
3428 | |
3429 | /* This function requires the caller holds hdev->lock */ |
3430 | int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type, |
3431 | u8 auto_connect, u16 conn_min_interval, |
3432 | u16 conn_max_interval) |
3433 | { |
3434 | struct hci_conn_params *params; |
3435 | |
3436 | if (!is_identity_address(addr, addr_type)) |
3437 | return -EINVAL; |
3438 | |
3439 | params = hci_conn_params_lookup(hdev, addr, addr_type); |
3440 | if (params) |
3441 | goto update; |
3442 | |
3443 | params = kzalloc(sizeof(*params), GFP_KERNEL); |
3444 | if (!params) { |
3445 | BT_ERR("Out of memory"); |
3446 | return -ENOMEM; |
3447 | } |
3448 | |
3449 | bacpy(¶ms->addr, addr); |
3450 | params->addr_type = addr_type; |
3451 | |
3452 | list_add(¶ms->list, &hdev->le_conn_params); |
3453 | |
3454 | update: |
3455 | params->conn_min_interval = conn_min_interval; |
3456 | params->conn_max_interval = conn_max_interval; |
3457 | params->auto_connect = auto_connect; |
3458 | |
3459 | switch (auto_connect) { |
3460 | case HCI_AUTO_CONN_DISABLED: |
3461 | case HCI_AUTO_CONN_LINK_LOSS: |
3462 | hci_pend_le_conn_del(hdev, addr, addr_type); |
3463 | break; |
3464 | case HCI_AUTO_CONN_ALWAYS: |
3465 | if (!is_connected(hdev, addr, addr_type)) |
3466 | hci_pend_le_conn_add(hdev, addr, addr_type); |
3467 | break; |
3468 | } |
3469 | |
3470 | BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x " |
3471 | "conn_max_interval 0x%.4x", addr, addr_type, auto_connect, |
3472 | conn_min_interval, conn_max_interval); |
3473 | |
3474 | return 0; |
3475 | } |
3476 | |
3477 | /* This function requires the caller holds hdev->lock */ |
3478 | void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) |
3479 | { |
3480 | struct hci_conn_params *params; |
3481 | |
3482 | params = hci_conn_params_lookup(hdev, addr, addr_type); |
3483 | if (!params) |
3484 | return; |
3485 | |
3486 | hci_pend_le_conn_del(hdev, addr, addr_type); |
3487 | |
3488 | list_del(¶ms->list); |
3489 | kfree(params); |
3490 | |
3491 | BT_DBG("addr %pMR (type %u)", addr, addr_type); |
3492 | } |
3493 | |
3494 | /* This function requires the caller holds hdev->lock */ |
3495 | void hci_conn_params_clear(struct hci_dev *hdev) |
3496 | { |
3497 | struct hci_conn_params *params, *tmp; |
3498 | |
3499 | list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { |
3500 | list_del(¶ms->list); |
3501 | kfree(params); |
3502 | } |
3503 | |
3504 | BT_DBG("All LE connection parameters were removed"); |
3505 | } |
3506 | |
3507 | /* This function requires the caller holds hdev->lock */ |
3508 | struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev, |
3509 | bdaddr_t *addr, u8 addr_type) |
3510 | { |
3511 | struct bdaddr_list *entry; |
3512 | |
3513 | list_for_each_entry(entry, &hdev->pend_le_conns, list) { |
3514 | if (bacmp(&entry->bdaddr, addr) == 0 && |
3515 | entry->bdaddr_type == addr_type) |
3516 | return entry; |
3517 | } |
3518 | |
3519 | return NULL; |
3520 | } |
3521 | |
3522 | /* This function requires the caller holds hdev->lock */ |
3523 | void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) |
3524 | { |
3525 | struct bdaddr_list *entry; |
3526 | |
3527 | entry = hci_pend_le_conn_lookup(hdev, addr, addr_type); |
3528 | if (entry) |
3529 | goto done; |
3530 | |
3531 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
3532 | if (!entry) { |
3533 | BT_ERR("Out of memory"); |
3534 | return; |
3535 | } |
3536 | |
3537 | bacpy(&entry->bdaddr, addr); |
3538 | entry->bdaddr_type = addr_type; |
3539 | |
3540 | list_add(&entry->list, &hdev->pend_le_conns); |
3541 | |
3542 | BT_DBG("addr %pMR (type %u)", addr, addr_type); |
3543 | |
3544 | done: |
3545 | hci_update_background_scan(hdev); |
3546 | } |
3547 | |
3548 | /* This function requires the caller holds hdev->lock */ |
3549 | void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) |
3550 | { |
3551 | struct bdaddr_list *entry; |
3552 | |
3553 | entry = hci_pend_le_conn_lookup(hdev, addr, addr_type); |
3554 | if (!entry) |
3555 | goto done; |
3556 | |
3557 | list_del(&entry->list); |
3558 | kfree(entry); |
3559 | |
3560 | BT_DBG("addr %pMR (type %u)", addr, addr_type); |
3561 | |
3562 | done: |
3563 | hci_update_background_scan(hdev); |
3564 | } |
3565 | |
3566 | /* This function requires the caller holds hdev->lock */ |
3567 | void hci_pend_le_conns_clear(struct hci_dev *hdev) |
3568 | { |
3569 | struct bdaddr_list *entry, *tmp; |
3570 | |
3571 | list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) { |
3572 | list_del(&entry->list); |
3573 | kfree(entry); |
3574 | } |
3575 | |
3576 | BT_DBG("All LE pending connections cleared"); |
3577 | } |
3578 | |
3579 | static void inquiry_complete(struct hci_dev *hdev, u8 status) |
3580 | { |
3581 | if (status) { |
3582 | BT_ERR("Failed to start inquiry: status %d", status); |
3583 | |
3584 | hci_dev_lock(hdev); |
3585 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); |
3586 | hci_dev_unlock(hdev); |
3587 | return; |
3588 | } |
3589 | } |
3590 | |
/* Completion handler for the LE-scan-disable request.
 *
 * For a pure LE discovery the session is over once scanning stopped;
 * for an interleaved discovery a BR/EDR inquiry is started next.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale inquiry cache entries before starting a
		 * new inquiry.
		 */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3633 | |
/* Delayed work that turns off an ongoing LE scan.  The follow-up
 * (stopping discovery or starting an interleaved inquiry) happens in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3651 | |
/* Queue an HCI command on @req that sets the controller's random
 * address to @rpa — unless doing so now could confuse an ongoing
 * advertising or connection attempt (see comment below).
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3674 | |
/* Choose the own address type for the request being built and, when
 * needed, queue an HCI command on @req to reprogram the controller's
 * random address.  The selected type is written to @own_addr_type.
 * Returns 0 on success or a negative errno.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* test_and_clear_bit() consumes the expiry flag here,
		 * so an expired RPA is only regenerated once.
		 */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Re-arm the RPA rotation timer */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3744 | |
3745 | /* Copy the Identity Address of the controller. |
3746 | * |
3747 | * If the controller has a public BD_ADDR, then by default use that one. |
3748 | * If this is a LE only controller without a public address, default to |
3749 | * the static random address. |
3750 | * |
3751 | * For debugging purposes it is possible to force controllers with a |
3752 | * public address to use the static random address instead. |
3753 | */ |
3754 | void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, |
3755 | u8 *bdaddr_type) |
3756 | { |
3757 | if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) || |
3758 | !bacmp(&hdev->bdaddr, BDADDR_ANY)) { |
3759 | bacpy(bdaddr, &hdev->static_addr); |
3760 | *bdaddr_type = ADDR_LE_DEV_RANDOM; |
3761 | } else { |
3762 | bacpy(bdaddr, &hdev->bdaddr); |
3763 | *bdaddr_type = ADDR_LE_DEV_PUBLIC; |
3764 | } |
3765 | } |
3766 | |
3767 | /* Alloc HCI device */ |
/* Allocate a new hci_dev and initialize it with default protocol
 * parameters, empty key/parameter lists, work items, packet queues
 * and the command timeout timer.  Returns NULL on allocation failure;
 * the caller fills in the transport callbacks and registers the
 * device with hci_register_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Conservative BR/EDR defaults until the controller's real
	 * capabilities are read during setup.
	 */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01; /* One IAC support is mandatory */
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE advertising/scan/connection parameters */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Key, parameter and pending-state lists owned by this hdev */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Timer that fires when a sent HCI command gets no response */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3833 | |
3834 | /* Free HCI device */ |
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop our reference; the actual freeing happens in the
	 * device's release callback once the last holder is gone.
	 */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3841 | |
3842 | /* Register HCI device */ |
3843 | int hci_register_dev(struct hci_dev *hdev) |
3844 | { |
3845 | int id, error; |
3846 | |
3847 | if (!hdev->open || !hdev->close) |
3848 | return -EINVAL; |
3849 | |
3850 | /* Do not allow HCI_AMP devices to register at index 0, |
3851 | * so the index can be used as the AMP controller ID. |
3852 | */ |
3853 | switch (hdev->dev_type) { |
3854 | case HCI_BREDR: |
3855 | id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); |
3856 | break; |
3857 | case HCI_AMP: |
3858 | id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); |
3859 | break; |
3860 | default: |
3861 | return -EINVAL; |
3862 | } |
3863 | |
3864 | if (id < 0) |
3865 | return id; |
3866 | |
3867 | sprintf(hdev->name, "hci%d", id); |
3868 | hdev->id = id; |
3869 | |
3870 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); |
3871 | |
3872 | hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | |
3873 | WQ_MEM_RECLAIM, 1, hdev->name); |
3874 | if (!hdev->workqueue) { |
3875 | error = -ENOMEM; |
3876 | goto err; |
3877 | } |
3878 | |
3879 | hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND | |
3880 | WQ_MEM_RECLAIM, 1, hdev->name); |
3881 | if (!hdev->req_workqueue) { |
3882 | destroy_workqueue(hdev->workqueue); |
3883 | error = -ENOMEM; |
3884 | goto err; |
3885 | } |
3886 | |
3887 | if (!IS_ERR_OR_NULL(bt_debugfs)) |
3888 | hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); |
3889 | |
3890 | dev_set_name(&hdev->dev, "%s", hdev->name); |
3891 | |
3892 | hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, |
3893 | CRYPTO_ALG_ASYNC); |
3894 | if (IS_ERR(hdev->tfm_aes)) { |
3895 | BT_ERR("Unable to create crypto context"); |
3896 | error = PTR_ERR(hdev->tfm_aes); |
3897 | hdev->tfm_aes = NULL; |
3898 | goto err_wqueue; |
3899 | } |
3900 | |
3901 | error = device_add(&hdev->dev); |
3902 | if (error < 0) |
3903 | goto err_tfm; |
3904 | |
3905 | hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, |
3906 | RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, |
3907 | hdev); |
3908 | if (hdev->rfkill) { |
3909 | if (rfkill_register(hdev->rfkill) < 0) { |
3910 | rfkill_destroy(hdev->rfkill); |
3911 | hdev->rfkill = NULL; |
3912 | } |
3913 | } |
3914 | |
3915 | if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) |
3916 | set_bit(HCI_RFKILLED, &hdev->dev_flags); |
3917 | |
3918 | set_bit(HCI_SETUP, &hdev->dev_flags); |
3919 | set_bit(HCI_AUTO_OFF, &hdev->dev_flags); |
3920 | |
3921 | if (hdev->dev_type == HCI_BREDR) { |
3922 | /* Assume BR/EDR support until proven otherwise (such as |
3923 | * through reading supported features during init. |
3924 | */ |
3925 | set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags); |
3926 | } |
3927 | |
3928 | write_lock(&hci_dev_list_lock); |
3929 | list_add(&hdev->list, &hci_dev_list); |
3930 | write_unlock(&hci_dev_list_lock); |
3931 | |
3932 | hci_notify(hdev, HCI_DEV_REG); |
3933 | hci_dev_hold(hdev); |
3934 | |
3935 | queue_work(hdev->req_workqueue, &hdev->power_on); |
3936 | |
3937 | return id; |
3938 | |
3939 | err_tfm: |
3940 | crypto_free_blkcipher(hdev->tfm_aes); |
3941 | err_wqueue: |
3942 | destroy_workqueue(hdev->workqueue); |
3943 | destroy_workqueue(hdev->req_workqueue); |
3944 | err: |
3945 | ida_simple_remove(&hci_index_ida, hdev->id); |
3946 | |
3947 | return error; |
3948 | } |
3949 | EXPORT_SYMBOL(hci_register_dev); |
3950 | |
3951 | /* Unregister HCI device */ |
/* Tear down a registered HCI device: close it, notify listeners,
 * remove sysfs/debugfs/rfkill state, drop every stored key and list,
 * and finally release the device index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so other paths stop using it */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release all remaining stored keys, lists and parameters */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4018 | |
4019 | /* Suspend HCI device */ |
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered HCI listeners of the suspend event */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4026 | |
4027 | /* Resume HCI device */ |
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies registered HCI listeners of the resume event */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4034 | |
4035 | /* Receive frame from HCI drivers */ |
4036 | int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) |
4037 | { |
4038 | if (!hdev || (!test_bit(HCI_UP, &hdev->flags) |
4039 | && !test_bit(HCI_INIT, &hdev->flags))) { |
4040 | kfree_skb(skb); |
4041 | return -ENXIO; |
4042 | } |
4043 | |
4044 | /* Incoming skb */ |
4045 | bt_cb(skb)->incoming = 1; |
4046 | |
4047 | /* Time stamp */ |
4048 | __net_timestamp(skb); |
4049 | |
4050 | skb_queue_tail(&hdev->rx_q, skb); |
4051 | queue_work(hdev->workqueue, &hdev->rx_work); |
4052 | |
4053 | return 0; |
4054 | } |
4055 | EXPORT_SYMBOL(hci_recv_frame); |
4056 | |
/* Incrementally reassemble an HCI packet of the given type from raw
 * bytes.
 *
 * Bytes from @data are appended to the per-@index reassembly skb
 * until header plus payload are complete, at which point the packet
 * is handed to hci_recv_frame().  Returns the number of input bytes
 * not yet consumed, or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the worst
		 * case of this packet type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Track reassembly progress in the skb's control block */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed-size header is complete, read the
		 * payload length from it so we know how many more
		 * bytes to expect.  A payload larger than the buffer
		 * aborts the reassembly.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4164 | |
4165 | int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count) |
4166 | { |
4167 | int rem = 0; |
4168 | |
4169 | if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) |
4170 | return -EILSEQ; |
4171 | |
4172 | while (count) { |
4173 | rem = hci_reassembly(hdev, type, data, count, type - 1); |
4174 | if (rem < 0) |
4175 | return rem; |
4176 | |
4177 | data += (count - rem); |
4178 | count = rem; |
4179 | } |
4180 | |
4181 | return rem; |
4182 | } |
4183 | EXPORT_SYMBOL(hci_recv_fragment); |
4184 | |
4185 | #define STREAM_REASSEMBLY 0 |
4186 | |
/* Feed a raw byte stream (H4 style: a one-byte packet type indicator
 * followed by the packet) into the single stream reassembly slot.
 * Returns the number of bytes not yet consumed, or a negative error
 * code from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: the first byte is the
			 * packet type indicator.
			 */
			pkt = data;
			type = pkt->type;

			/* NOTE: arithmetic on void* relies on the GCC
			 * extension treating it as one-byte sized.
			 */
			data++;
			count--;
		} else
			/* Continuing a frame: reuse the type recorded
			 * when reassembly of this packet started.
			 */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4219 | |
4220 | /* ---- Interface to upper protocols ---- */ |
4221 | |
/* Register an upper-protocol callback structure (e.g. L2CAP, SCO) on
 * the global hci_cb_list.  Always succeeds.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4233 | |
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list.  Always succeeds.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4245 | |
/* Hand one outgoing packet to the driver.  The skb is timestamped and a
 * copy is mirrored to the monitor socket (and, in promiscuous mode, to
 * raw HCI sockets) before being passed to hdev->send.  The skb is not
 * freed here even if the driver reports an error (driver contract —
 * the send callback takes ownership).
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
4267 | |
/* Initialize an HCI request builder: empty command queue, bound to
 * @hdev, with no recorded build error yet.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
4274 | |
/* Submit a built request: record @complete on the request's last
 * command, splice all queued commands onto the device command queue and
 * kick cmd_work.  Returns 0 on success, the recorded builder error, or
 * -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback is carried in the control buffer of
	 * the last command of the request.
	 */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically with respect to concurrent cmd_q users */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4306 | |
/* Allocate an skb carrying one HCI command packet: command header
 * (opcode + parameter length) followed by @plen bytes of @param.
 * Uses GFP_ATOMIC because callers may run in non-sleepable context.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
4331 | |
4332 | /* Send HCI command */ |
/* Build and queue a single stand-alone HCI command; transmission
 * happens asynchronously from cmd_work.  Returns 0 on success or
 * -ENOMEM if the packet could not be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4356 | |
4357 | /* Queue a command to an asynchronous HCI request */ |
/* Queue one command on an asynchronous HCI request.  @event, if
 * non-zero, is the event expected to complete this command instead of
 * the usual Command Complete/Status.  Allocation failures are recorded
 * in req->err and reported later by hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4387 | |
/* Queue one command on an asynchronous HCI request, completed by the
 * default Command Complete/Status event (see hci_req_add_ev()).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
4393 | |
4394 | /* Get data from the previously sent command */ |
4395 | void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) |
4396 | { |
4397 | struct hci_command_hdr *hdr; |
4398 | |
4399 | if (!hdev->sent_cmd) |
4400 | return NULL; |
4401 | |
4402 | hdr = (void *) hdev->sent_cmd->data; |
4403 | |
4404 | if (hdr->opcode != cpu_to_le16(opcode)) |
4405 | return NULL; |
4406 | |
4407 | BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); |
4408 | |
4409 | return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; |
4410 | } |
4411 | |
4412 | /* Send ACL data */ |
/* Prepend an ACL data header (handle + packet boundary/broadcast flags,
 * then payload length) to @skb.  The payload length is captured before
 * the push so dlen reflects only the data that follows the header.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4424 | |
/* Prepare an outgoing ACL packet (possibly pre-fragmented via its
 * frag_list) and append the fragments to @queue.  The first fragment
 * keeps the caller's flags; continuation fragments are re-flagged as
 * ACL_CONT.  All fragments of one packet are queued atomically.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* AMP controllers address data by channel handle, BR/EDR by
	 * connection handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, while the first fragment on an AMP
			 * controller used chan->handle above — confirm this
			 * asymmetry is intended for the AMP case.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
4482 | |
/* Queue an ACL packet on the channel's data queue and schedule the TX
 * work to actually transmit it.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4493 | |
4494 | /* Send SCO data */ |
/* Prepend a SCO header to @skb, queue it on the connection's data queue
 * and schedule the TX work.  NOTE(review): hdr.dlen is a single byte on
 * the wire, so skb->len is assumed to fit the SCO MTU — confirm callers
 * guarantee this.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
4514 | |
4515 | /* ---- HCI TX task (outgoing data) ---- */ |
4516 | |
4517 | /* HCI Connection scheduler */ |
/* Pick the next connection of @type allowed to transmit: among active
 * connections with queued data, choose the one with the fewest packets
 * currently in flight (c->sent).  *quote is set to that connection's
 * fair share of the available controller buffers (at least 1), or 0 if
 * there is nothing to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the eligible connection with the fewest
		 * packets in flight.
		 */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type has
		 * been considered.
		 */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the remaining buffers, at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4577 | |
/* TX timeout handling: the controller stopped acknowledging packets for
 * links of @type, so disconnect every connection of that type that still
 * has packets in flight to reclaim the stuck buffers.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4598 | |
/* Channel-granularity scheduler: among channels of @type connections
 * with queued data, only channels whose head skb carries the current
 * highest priority compete; ties are broken in favour of the channel
 * whose connection has the fewest packets in flight.  *quote receives
 * the winner's fair share of the matching buffer pool (at least 1).
 * Returns NULL when nothing is ready to send.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the highest priority seen so far may
			 * compete.
			 */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a higher priority: restart the
			 * fairness bookkeeping at that level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type has
		 * been considered.
		 */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the remaining buffers, at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4680 | |
/* Anti-starvation pass run after a scheduling round: channels that got
 * to transmit have their round counter reset, while channels that were
 * skipped get the priority of their head skb promoted to just below
 * HCI_PRIO_MAX so they win a future round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: reset its
			 * counter and leave its priority alone.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type has
		 * been considered.
		 */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4730 | |
4731 | static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) |
4732 | { |
4733 | /* Calculate count of blocks used by this packet */ |
4734 | return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); |
4735 | } |
4736 | |
4737 | static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) |
4738 | { |
4739 | if (!test_bit(HCI_RAW, &hdev->flags)) { |
4740 | /* ACL tx timeout must be longer than maximum |
4741 | * link supervision timeout (40.9 seconds) */ |
4742 | if (!cnt && time_after(jiffies, hdev->acl_last_tx + |
4743 | HCI_ACL_TX_TIMEOUT)) |
4744 | hci_link_tx_to(hdev, ACL_LINK); |
4745 | } |
4746 | } |
4747 | |
/* Packet-based ACL scheduling: while ACL buffers are free, let the
 * channel elected by hci_chan_sent() send up to its quote of packets of
 * uniform priority, then run the priority-aging pass if anything was
 * transmitted.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4785 | |
/* Block-based ACL scheduling (data block flow control, mainly AMP):
 * like hci_sched_acl_pkt() but budgets are measured in controller data
 * blocks rather than packets.
 *
 * NOTE(review): quote comes from hci_chan_sent() counting packets but
 * is decremented here in blocks — confirm this mixed accounting is
 * intended.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP links; everything else is ACL */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet needs more blocks than remain: give up
			 * for this round.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4839 | |
4840 | static void hci_sched_acl(struct hci_dev *hdev) |
4841 | { |
4842 | BT_DBG("%s", hdev->name); |
4843 | |
4844 | /* No ACL link over BR/EDR controller */ |
4845 | if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR) |
4846 | return; |
4847 | |
4848 | /* No AMP link over AMP controller */ |
4849 | if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP) |
4850 | return; |
4851 | |
4852 | switch (hdev->flow_ctl_mode) { |
4853 | case HCI_FLOW_CTL_MODE_PACKET_BASED: |
4854 | hci_sched_acl_pkt(hdev); |
4855 | break; |
4856 | |
4857 | case HCI_FLOW_CTL_MODE_BLOCK_BASED: |
4858 | hci_sched_acl_blk(hdev); |
4859 | break; |
4860 | } |
4861 | } |
4862 | |
4863 | /* Schedule SCO */ |
/* SCO scheduling: round-robin over SCO connections via hci_low_sent(),
 * draining up to each connection's quote while SCO buffers are free.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Keep the in-flight counter from wrapping */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4886 | |
/* eSCO scheduling: identical to hci_sched_sco() but for ESCO links
 * (which share the SCO buffer pool, sco_cnt).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			/* Keep the in-flight counter from wrapping */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4910 | |
/* LE scheduling: like the ACL packet scheduler, but drawing from the LE
 * buffer pool when the controller has one (le_pkts), otherwise sharing
 * the ACL pool.  Includes an LE-specific TX stall check.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Controllers without a dedicated LE pool share ACL buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4961 | |
/* TX work item: run the per-link-type schedulers (skipped while the
 * device is in user channel mode) and then flush any raw packets that
 * were queued directly on raw_q.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4982 | |
4983 | /* ----- HCI RX task (incoming data processing) ----- */ |
4984 | |
4985 | /* ACL data packet */ |
4986 | static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) |
4987 | { |
4988 | struct hci_acl_hdr *hdr = (void *) skb->data; |
4989 | struct hci_conn *conn; |
4990 | __u16 handle, flags; |
4991 | |
4992 | skb_pull(skb, HCI_ACL_HDR_SIZE); |
4993 | |
4994 | handle = __le16_to_cpu(hdr->handle); |
4995 | flags = hci_flags(handle); |
4996 | handle = hci_handle(handle); |
4997 | |
4998 | BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len, |
4999 | handle, flags); |
5000 | |
5001 | hdev->stat.acl_rx++; |
5002 | |
5003 | hci_dev_lock(hdev); |
5004 | conn = hci_conn_hash_lookup_handle(hdev, handle); |
5005 | hci_dev_unlock(hdev); |
5006 | |
5007 | if (conn) { |
5008 | hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF); |
5009 | |
5010 | /* Send to upper protocol */ |
5011 | l2cap_recv_acldata(conn, skb, flags); |
5012 | return; |
5013 | } else { |
5014 | BT_ERR("%s ACL packet for unknown connection handle %d", |
5015 | hdev->name, handle); |
5016 | } |
5017 | |
5018 | kfree_skb(skb); |
5019 | } |
5020 | |
5021 | /* SCO data packet */ |
5022 | static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) |
5023 | { |
5024 | struct hci_sco_hdr *hdr = (void *) skb->data; |
5025 | struct hci_conn *conn; |
5026 | __u16 handle; |
5027 | |
5028 | skb_pull(skb, HCI_SCO_HDR_SIZE); |
5029 | |
5030 | handle = __le16_to_cpu(hdr->handle); |
5031 | |
5032 | BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle); |
5033 | |
5034 | hdev->stat.sco_rx++; |
5035 | |
5036 | hci_dev_lock(hdev); |
5037 | conn = hci_conn_hash_lookup_handle(hdev, handle); |
5038 | hci_dev_unlock(hdev); |
5039 | |
5040 | if (conn) { |
5041 | /* Send to upper protocol */ |
5042 | sco_recv_scodata(conn, skb); |
5043 | return; |
5044 | } else { |
5045 | BT_ERR("%s SCO packet for unknown connection handle %d", |
5046 | hdev->name, handle); |
5047 | } |
5048 | |
5049 | kfree_skb(skb); |
5050 | } |
5051 | |
5052 | static bool hci_req_is_complete(struct hci_dev *hdev) |
5053 | { |
5054 | struct sk_buff *skb; |
5055 | |
5056 | skb = skb_peek(&hdev->cmd_q); |
5057 | if (!skb) |
5058 | return true; |
5059 | |
5060 | return bt_cb(skb)->req.start; |
5061 | } |
5062 | |
/* Re-queue a clone of the last sent command (unless it was HCI Reset)
 * and kick cmd_work.  Used to recover when a controller spontaneously
 * resets during init and the pending command would otherwise never
 * complete.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Put it at the front so it is retried before anything queued */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
5084 | |
/* Called when a Command Complete/Status event arrives for @opcode.
 * Decides whether the current request has finished and, if so, locates
 * its completion callback (either on the in-flight command or among the
 * queued commands of the same request), invokes it with @status and
 * drops the rest of the request's queued commands on failure.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Reached the start of the next request: put it back */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5150 | |
/* RX work item: drain the receive queue, mirroring each packet to the
 * monitor (and promiscuous sockets), discarding everything in raw/user
 * channel mode, filtering data packets during init, and dispatching the
 * rest by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw or user channel mode the stack does not
		 * process packets at all.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5206 | |
/* Command work item: if the controller has command credit (cmd_cnt),
 * take the next queued command, keep a clone in hdev->sent_cmd for
 * matching against the completion event, send it and (re)arm the
 * command timeout.  On clone failure the command is requeued and the
 * work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: retry this command later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
5238 | |
/* Append an LE Set Scan Enable (disable) command to @req */
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
5247 | |
/* Append the commands that start LE passive background scanning, using
 * the controller's configured scan interval/window, duplicate filtering
 * and a privacy-friendly own address type.
 */
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	/* Set require_privacy to true to avoid identification from
	 * unknown peer devices. Since this is passive scanning, no
	 * SCAN_REQ using the local identity should be sent. Mandating
	 * privacy is just an extra precaution.
	 */
	if (hci_update_random_address(req, true, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}
5277 | |
5278 | static void update_background_scan_complete(struct hci_dev *hdev, u8 status) |
5279 | { |
5280 | if (status) |
5281 | BT_DBG("HCI request failed to update background scanning: " |
5282 | "status 0x%2.2x", status); |
5283 | } |
5284 | |
/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there are no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	/* Submit the built request; the completion callback only logs
	 * failures, there is no rollback.
	 */
	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
5339 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9