/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

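/* Illustrative sketch (not part of the original file): hci_dev_get() takes a
 * reference on success, so every successful lookup must be balanced with a
 * hci_dev_put(). The example function below is hypothetical and compiled out.
 */
#if 0
static void example_dev_get_put(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return;

	BT_DBG("%s found at index %d", hdev->name, index);

	hci_dev_put(hdev);	/* balance the reference from hci_dev_get() */
}
#endif
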
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

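/* Illustrative sketch (not part of the original file): how userspace would
 * drive this path through the HCIINQUIRY ioctl on a raw HCI socket. The
 * buffer layout mirrors the kernel side: the request header is followed by
 * up to num_rsp inquiry_info records. Compiled out; assumes the userspace
 * <bluetooth/bluetooth.h>, <bluetooth/hci.h> and <sys/ioctl.h> headers.
 */
#if 0
static int example_inquiry(int dd, int dev_id)
{
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} buf = {
		.ir = {
			.dev_id  = dev_id,
			.flags   = IREQ_CACHE_FLUSH,
			.lap     = { 0x33, 0x8b, 0x9e },	/* GIAC */
			.length  = 8,		/* 8 * 1.28 seconds */
			.num_rsp = 255,
		},
	};

	if (ioctl(dd, HCIINQUIRY, (unsigned long) &buf) < 0)
		return -1;

	return buf.ir.num_rsp;	/* number of inquiry_info records filled */
}
#endif
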
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();
	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b;

	list_for_each_entry(b, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}

	rcu_read_unlock();
	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

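/* Worked examples of the rules above (illustrative only, derived from the
 * checks in hci_persistent_key()):
 *
 *   - key_type 0x00 (legacy combination key)        -> stored
 *   - key_type HCI_LK_DEBUG_COMBINATION             -> never stored
 *   - either side with auth_type 0x02/0x03
 *     (dedicated bonding)                           -> stored
 *   - both sides with auth_type/remote_auth 0x00 or
 *     0x01 (no-bonding) and key_type >= 0x03        -> not stored
 */
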
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

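/* Summary of the resulting data->present values (illustrative only, follows
 * directly from the branches above):
 *
 *   0x00: neither P-192 nor P-256 values provided
 *   0x01: only P-192 hash/randomizer present
 *   0x02: only P-256 hash/randomizer present
 *   0x03: both P-192 and P-256 values present
 */
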
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;
		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for instance %d", hdev->name, instance);

	return adv;
}

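/* Illustrative sketch (not part of the original file): registering a simple
 * connectable advertising instance with default intervals. Values are
 * hypothetical; in practice the mgmt interface drives this path. Compiled
 * out.
 */
#if 0
static int example_add_instance(struct hci_dev *hdev)
{
	struct adv_info *adv;

	hci_dev_lock(hdev);
	adv = hci_add_adv_instance(hdev, 0x01, MGMT_ADV_FLAG_CONNECTABLE,
				   0, NULL, 0, NULL,
				   0 /* no timeout */,
				   0 /* default rotation duration */,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   0 /* no mesh handle */);
	hci_dev_unlock(hdev);

	return IS_ERR(adv) ? PTR_ERR(adv) : 0;
}
#endif
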
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}

/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;
}

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv->scan_rsp_len ? true : false;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}

/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}

/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
			   monitor->handle, status);
		break;
	}

	return status;
}

/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
			   hdev->name, handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);

	if (!monitor)
		return -EINVAL;

	return hci_remove_adv_monitor(hdev, monitor);
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	int status = 0;

	while (1) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		status = hci_remove_adv_monitor(hdev, monitor);
		if (status)
			return status;

		idr_next_id++;
	}

	return status;
}

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

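/* Illustrative sketch (not part of the original file): a typical
 * add/lookup/del round trip on one of the hdev address lists, here the LE
 * accept list. Compiled out.
 */
#if 0
static void example_bdaddr_list(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	/* Returns -EEXIST if the entry is already present */
	if (hci_bdaddr_list_add(&hdev->le_accept_list, bdaddr,
				BDADDR_LE_PUBLIC) == 0) {
		WARN_ON(!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr,
						BDADDR_LE_PUBLIC));
		hci_bdaddr_list_del(&hdev->le_accept_list, bdaddr,
				    BDADDR_LE_PUBLIC);
	}
}
#endif
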
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock or rcu_read_lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	rcu_read_lock();

	list_for_each_entry_rcu(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type) {
			rcu_read_unlock();
			return param;
		}
	}

	rcu_read_unlock();

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_del_init(struct hci_conn_params *param)
{
	if (list_empty(&param->action))
		return;

	list_del_rcu(&param->action);
	synchronize_rcu();
	INIT_LIST_HEAD(&param->action);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_add(struct hci_conn_params *param,
			  struct list_head *list)
{
	list_add_rcu(&param->action, list);
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

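/* Illustrative sketch (not part of the original file): creating connection
 * parameters for a peer and marking it for autoconnect; hdev->lock must be
 * held, as documented above. Compiled out.
 */
#if 0
static void example_conn_params(struct hci_dev *hdev, bdaddr_t *addr)
{
	struct hci_conn_params *params;

	hci_dev_lock(hdev);
	params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
	if (params)
		params->auto_connect = HCI_AUTO_CONN_ALWAYS;
	hci_dev_unlock(hdev);
}
#endif
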
void hci_conn_params_free(struct hci_conn_params *params)
{
	hci_pend_le_list_del_init(params);

	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		hci_conn_params_free(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}

static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	/* Userspace has full control of this device. Do nothing. */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return NOTIFY_DONE;

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	return NOTIFY_DONE;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);
	mutex_init(&hdev->unregister_lock);

	INIT_LIST_HEAD(&hdev->mesh_pending);
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_devcd_setup(hdev);
	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * callback.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	error = hci_register_suspend_notifier(hdev);
	if (error)
		BT_WARN("register suspend notifier failed error:%d\n", error);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
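/* Illustrative sketch of the registration contract enforced above: a
 * hypothetical transport driver must provide open/close/send before
 * calling hci_register_dev(), and must free the device itself if
 * registration fails. The my_* callback names are assumptions.
 *
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 */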
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	msft_unregister(hdev);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * mgmt_pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
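/* Illustrative teardown order on driver removal: unregister first, then
 * drop the allocation reference; the memory itself is reclaimed only when
 * the last reference goes away and hci_release_dev() runs.
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */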
/* Release HCI device */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_dev_unlock(hdev);

	ida_simple_remove(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);
int hci_register_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (!hdev->suspend_notifier.notifier_call &&
	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		ret = register_pm_notifier(&hdev->suspend_notifier);
	}

	return ret;
}

int hci_unregister_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (hdev->suspend_notifier.notifier_call) {
		ret = unregister_pm_notifier(&hdev->suspend_notifier);
		if (!ret)
			hdev->suspend_notifier.notifier_call = NULL;
	}

	return ret;
}
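/* Note: unless the quirk opts out, the notifier registered above routes
 * system-wide PM transitions into hci_suspend_dev()/hci_resume_dev() via
 * hci_suspend_notifier(), so transport drivers without their own PM hooks
 * still get suspend handling for free.
 */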
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	/* Cancel potentially blocking sync operation before suspend */
	__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	bt_dev_err(hdev, "Injecting HCI hardware error event");

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
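/* The three bytes injected above form a complete HCI event packet:
 * event code HCI_EV_HARDWARE_ERROR, parameter length 0x01, and a
 * hardware code of 0x00. Routing it through hci_recv_frame() makes the
 * stack react exactly as if the controller had reported the error itself.
 */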
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_EVENT_PKT:
		break;
	case HCI_ACLDATA_PKT:
		/* Detect if ISO packet has been sent as ACL */
		if (hci_conn_num(hdev, ISO_LINK)) {
			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
			__u8 type;

			type = hci_conn_lookup_type(hdev, hci_handle(handle));
			if (type == ISO_LINK)
				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
		}
		break;
	case HCI_SCODATA_PKT:
		break;
	case HCI_ISODATA_PKT:
		break;
	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
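/* Illustrative sketch: a transport driver delivers a received buffer by
 * tagging the packet type and handing the skb over; ownership passes to
 * the core on both success and error paths.
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */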
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
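/* Illustrative usage (values are hypothetical): drivers typically call
 * these once during setup with printf-style arguments, e.g.
 *
 *	hci_set_hw_info(hdev, "rev %u", rev);
 *	hci_set_fw_info(hdev, "build %u", build);
 *
 * kvasprintf_const() may hand back the format string itself when it
 * contains no conversions, which is why these setters free the previous
 * value with kfree_const().
 */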
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
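/* Illustrative sketch: an upper-layer protocol hooks connection events by
 * registering a struct hci_cb. The connect_cfm field exists in hci_core.h;
 * the my_proto_* names are assumptions for the example.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name        = "my_proto",
 *		.connect_cfm = my_proto_connect_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 */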
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
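/* Illustrative usage: queue a parameterless command; the controller's
 * reply arrives asynchronously through the event path rather than as a
 * return value.
 *
 *	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 */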
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Get data from last received event */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
{
	struct hci_iso_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ISO_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}
void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hci_queue_iso(conn, &conn->data_q, skb);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
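/* The quota computed above is the free buffer count divided evenly across
 * the "num" connections competing on that link type, with a floor of one
 * so a ready connection is never starved outright: e.g. 8 free ACL
 * buffers shared by 3 connections yield a quote of 2 each.
 */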
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long last_tx;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	switch (type) {
	case LE_LINK:
		last_tx = hdev->le_last_tx;
		break;
	default:
		last_tx = hdev->acl_last_tx;
		break;
	}

	/* tx timeout must be longer than maximum link supervision timeout
	 * (40.9 seconds)
	 */
	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, type);
}
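/* For context: the largest link supervision timeout is 0xffff slots of
 * 0.625 ms each, roughly 40.9 seconds, which is why HCI_ACL_TX_TIMEOUT
 * (defined in hci.h) sits above it at 45 seconds.
 */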
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	__check_timeout(hdev, cnt, type);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt, LE_LINK);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
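/* The scheduling order above is deliberate: (e)SCO traffic is the most
 * latency sensitive and drains first, ISO streams come next, and bulk
 * ACL/LE data last; raw packets bypass the schedulers entirely and are
 * flushed even when the device is in HCI_USER_CHANNEL mode.
 */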
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		bt_cb(skb)->sco.pkt_status = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
				       handle);
	}

	kfree_skb(skb);
}
static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet parsing
	 * coverage information from this background thread and to associate
	 * the coverage with the syscall's thread which originally injected
	 * the packet. This helps fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;

			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);

			rcu_read_lock();
			if (test_bit(HCI_RESET, &hdev->flags) ||
			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
						   HCI_CMD_TIMEOUT);
			rcu_read_unlock();
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}