/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
static int hci_scan_req(struct hci_request *req, unsigned long opt)
        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

static int hci_auth_req(struct hci_request *req, unsigned long opt)
        BT_DBG("%s %x", req->hdev->name, auth);

        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
        BT_DBG("%s %x", req->hdev->name, encrypt);

        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
        struct hci_dev *hdev = NULL, *d;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
        read_unlock(&hci_dev_list_lock);
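
/* Usage sketch (illustrative, not part of the original file): hci_dev_get()
 * returns the device with a reference held, so callers must balance it with
 * hci_dev_put() once they are done:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("found %s", hdev->name);
 *		hci_dev_put(hdev);
 *	}
 */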
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:

void hci_discovery_set_state(struct hci_dev *hdev, int state)
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_passive_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
        case DISCOVERY_STARTING:
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
        case DISCOVERY_RESOLVING:
        case DISCOVERY_STOPPING:

void hci_inquiry_cache_flush(struct hci_dev *hdev)
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                if (!bacmp(&e->data.bdaddr, bdaddr))

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))

        list_add(&ie->list, pos);
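
/* Worked example (illustrative; assumes the elided loop body records the
 * insertion point and breaks at the first weaker entry): with the resolve
 * list ordered strongest-first as -40, -60, -80 dBm, inserting an entry with
 * RSSI -50 dBm stops the scan at the -60 entry, so the list becomes
 * -40, -50, -60, -80. Entries still in NAME_PENDING state never terminate
 * the scan, so they cannot pin the insertion point.
 */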
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

        list_add(&ie->all, &cache->all);

                ie->name_state = NAME_KNOWN;
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);

        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

        BT_DBG("cache %p, copied %d", cache, copied);

static int hci_inq_req(struct hci_request *req, unsigned long opt)
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))

        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
int hci_inquiry(void __user *arg)
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;

        if (copy_from_user(&ir, ptr, sizeof(ir)))

        hdev = hci_dev_get(ir.dev_id);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

        if (hdev->dev_type != HCI_PRIMARY) {

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {

        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);

        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
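
        /* Note: ir.length is expressed in inquiry-length units of 1.28 s
         * (Bluetooth Core Spec); budgeting 2000 ms of jiffies per unit
         * presumably leaves headroom for the controller to finish before the
         * wait below times out.
         */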
        err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                           timeo, NULL);

        /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
         * cleared). If it is interrupted by a signal, return -EINTR.
         */
        if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                        TASK_INTERRUPTIBLE)) {

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);

        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
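
/* Illustrative userspace counterpart (hypothetical snippet using BlueZ's
 * <bluetooth/hci.h> definitions and a raw HCI socket "dd"; not part of this
 * file):
 *
 *	char buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (struct hci_inquiry_req *)buf;
 *
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33; ir->lap[1] = 0x8b; ir->lap[2] = 0x9e;  (GIAC)
 *	ir->length  = 8;    (8 * 1.28 s)
 *	ir->num_rsp = 255;
 *
 *	if (ioctl(dd, HCIINQUIRY, buf) < 0)
 *		perror("HCIINQUIRY");
 */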
static int hci_dev_do_open(struct hci_dev *hdev)
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        ret = hci_dev_open_sync(hdev);

        hci_req_sync_unlock(hdev);

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
        struct hci_dev *hdev;

        hdev = hci_dev_get(dev);

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * bring up the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);
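
/* Illustrative userspace counterpart (hypothetical snippet, not part of this
 * file): the legacy ioctl that ends up in hci_dev_open() above.
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ctl >= 0 && ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP hci0");
 */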
int hci_dev_do_close(struct hci_dev *hdev)
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_dev_close_sync(hdev);

        hci_req_sync_unlock(hdev);

int hci_dev_close(__u16 dev)
        struct hci_dev *hdev;

        hdev = hci_dev_get(dev);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

        cancel_work_sync(&hdev->power_on);
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

static int hci_dev_do_reset(struct hci_dev *hdev)
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Cancel these to avoid queueing non-chained pending work */
        hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

        /* Wait for
         *
         *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
         *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
         *
         * inside RCU section to see the flag or complete scheduling.
         */
        synchronize_rcu();

        /* Explicitly cancel works in case scheduled after setting the flag. */
        cancel_delayed_work(&hdev->cmd_timer);
        cancel_delayed_work(&hdev->ncmd_timer);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

        atomic_set(&hdev->cmd_cnt, 1);

        ret = hci_reset_sync(hdev);

        hci_req_sync_unlock(hdev);
int hci_dev_reset(__u16 dev)
        struct hci_dev *hdev;

        hdev = hci_dev_get(dev);

        if (!test_bit(HCI_UP, &hdev->flags)) {

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

        err = hci_dev_do_reset(hdev);

int hci_dev_reset_stat(__u16 dev)
        struct hci_dev *hdev;

        hdev = hci_dev_get(dev);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
        bool conn_changed, discov_changed;

        BT_DBG("%s scan 0x%02x", hdev->name, scan);

        if ((scan & SCAN_PAGE))
                conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                          HCI_CONNECTABLE);
                conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                           HCI_CONNECTABLE);

        if ((scan & SCAN_INQUIRY)) {
                discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                            HCI_DISCOVERABLE);
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                             HCI_DISCOVERABLE);

        if (!hci_dev_test_flag(hdev, HCI_MGMT))

        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        hci_update_adv_data(hdev, hdev->cur_adv_instance);

                mgmt_new_settings(hdev);
int hci_dev_cmd(unsigned int cmd, void __user *arg)
        struct hci_dev *hdev;
        struct hci_dev_req dr;

        if (copy_from_user(&dr, arg, sizeof(dr)))

        hdev = hci_dev_get(dr.dev_id);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {

        if (hdev->dev_type != HCI_PRIMARY) {

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                if (!lmp_encrypt_capable(hdev)) {

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT, NULL);

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                /* Ensure that the connectable and discoverable states
                 * get correctly modified as this was a non-mgmt change.
                 */
                hci_update_passive_scan_state(hdev, dr.dev_opt);

                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                hdev->link_mode = ((__u16) dr.dev_opt) &
                                  (HCI_LM_MASTER | HCI_LM_ACCEPT);

                if (hdev->pkt_type == (__u16) dr.dev_opt)

                hdev->pkt_type = (__u16) dr.dev_opt;
                mgmt_phy_configuration_changed(hdev, NULL);

                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);

                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
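
/* Worked example (editorial; assumes the little-endian layout implied by the
 * pointer arithmetic above): for HCISETACL, userspace packs dev_opt as
 * (mtu << 16) | pkts, so dev_opt = (1021 << 16) | 8 yields acl_mtu = 1021 and
 * acl_pkts = 8; HCISETSCO unpacks sco_mtu and sco_pkts the same way.
 */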
int hci_get_dev_list(void __user *arg)
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;

        if (get_user(dev_num, (__u16 __user *) arg))

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                unsigned long flags = hdev->flags;

                /* When the auto-off is configured it means the transport
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = flags;

        read_unlock(&hci_dev_list_lock);

        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);

        return err ? -EFAULT : 0;
int hci_get_dev_info(void __user *arg)
        struct hci_dev *hdev;
        struct hci_dev_info di;

        if (copy_from_user(&di, arg, sizeof(di)))

        hdev = hci_dev_get(di.dev_id);

        /* When the auto-off is configured it means the transport
         * is running, but in that case still indicate that the
         * device is actually down.
         */
        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;

                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;

        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))

                hci_dev_set_flag(hdev, HCI_RFKILLED);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG))
                        hci_dev_do_close(hdev);

                hci_dev_clear_flag(hdev, HCI_RFKILLED);

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_UP, &hdev->flags) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
                err = hci_powered_update_sync(hdev);
                mgmt_power_on(hdev, err);

        err = hci_dev_do_open(hdev);

                mgmt_set_powered_failed(hdev, err);
                hci_dev_unlock(hdev);

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (hdev->dev_type == HCI_PRIMARY &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);

                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
                mgmt_index_added(hdev);
        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* When the controller is now configured, then it
                 * is important to clear the HCI_RAW flag.
                 */
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);

                /* Powering on the controller with HCI_CONFIG set only
                 * happens with the transition from unconfigured to
                 * configured. This will send the Index Added event.
                 */
                mgmt_index_added(hdev);
static void hci_power_off(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);

static void hci_error_reset(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

        BT_DBG("%s", hdev->name);

                hdev->hw_error(hdev, hdev->hw_error_code);

                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

        if (hci_dev_do_close(hdev))

        hci_dev_do_open(hdev);
void hci_uuids_clear(struct hci_dev *hdev)
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);

void hci_link_keys_clear(struct hci_dev *hdev)
        struct link_key *key, *tmp;

        list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);

void hci_smp_ltks_clear(struct hci_dev *hdev)
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);

void hci_smp_irks_clear(struct hci_dev *hdev)
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);

void hci_blocked_keys_clear(struct hci_dev *hdev)
        struct blocked_key *b, *tmp;

        list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
                list_del_rcu(&b->list);

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
        bool blocked = false;
        struct blocked_key *b;

        list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
                if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {

                        if (hci_is_blocked_key(hdev,
                                               HCI_BLOCKED_KEY_TYPE_LINKKEY,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "Link key blocked for %pMR",
                                                        &k->bdaddr);

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
        if (key_type < 0x03)

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)

        /* Security mode 3 case */

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)

        /* Neither local nor remote side had no-bonding as a requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)

        /* If none of the above criteria match, then don't store the key
         * persistently */
static u8 ltk_role(u8 type)
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {

                        if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "LTK blocked for %pMR",
                                                        &k->bdaddr);

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        irk_to_return = irk;

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        irk_to_return = irk;

        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;

        return irk_to_return;
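
/* Background (Bluetooth Core Spec, not from this file): a Resolvable Private
 * Address is prand || hash, where prand is the 24 most significant bits (top
 * two bits 0b01) and hash = ah(IRK, prand) is the 24 least significant bits.
 * smp_irk_matches() recomputes that hash with each stored IRK, and the
 * bacpy() above caches the resolved RPA so the next lookup succeeds in the
 * cheap exact-match pass.
 */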
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        irk_to_return = irk;

        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;

        return irk_to_return;
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
        struct link_key *key, *old_key;

        old_key = hci_find_link_key(hdev, bdaddr);
                old_key_type = old_key->type;

        old_key_type = conn ? conn->key_type : 0xff;
        key = kzalloc(sizeof(*key), GFP_KERNEL);

        list_add_rcu(&key->list, &hdev->link_keys);

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                        conn->key_type = type;

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;

                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);

        key = kzalloc(sizeof(*key), GFP_KERNEL);

        list_add_rcu(&key->list, &hdev->long_term_keys);

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->enc_size = enc_size;

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);

        return removed ? 0 : -ENOENT;
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
        struct smp_irk *irk;

        if (type == BDADDR_BREDR) {
                if (hci_find_link_key(hdev, bdaddr))

        /* Convert to HCI addr type which struct smp_ltk uses */
        if (type == BDADDR_LE_PUBLIC)
                addr_type = ADDR_LE_DEV_PUBLIC;
                addr_type = ADDR_LE_DEV_RANDOM;

        irk = hci_get_irk(hdev, bdaddr, addr_type);
                bdaddr = &irk->bdaddr;
                addr_type = irk->addr_type;

        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

                bt_dev_err(hdev, "command tx timeout");

        if (hdev->cmd_timeout)
                hdev->cmd_timeout(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            ncmd_timer.work);

        bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

        /* During HCI_INIT phase no events can be injected if the ncmd timer
         * triggers since the procedure has its own timeout handling.
         */
        if (test_bit(HCI_INIT, &hdev->flags))

        /* This is an irrecoverable state, inject hardware error event */
        hci_reset_dev(hdev);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type)
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list) {
                if (bacmp(bdaddr, &data->bdaddr) != 0)
                if (data->bdaddr_type != bdaddr_type)

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);

void hci_remote_oob_data_clear(struct hci_dev *hdev)
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
                            u8 *hash256, u8 *rand256)
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
                data = kmalloc(sizeof(*data), GFP_KERNEL);

                bacpy(&data->bdaddr, bdaddr);
                data->bdaddr_type = bdaddr_type;
                list_add(&data->list, &hdev->remote_oob_data);

        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x03;

                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
                if (hash256 && rand256)
                        data->present = 0x02;
                        data->present = 0x00;

        if (hash256 && rand256) {
                memcpy(data->hash256, hash256, sizeof(data->hash256));
                memcpy(data->rand256, rand256, sizeof(data->rand256));

                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
                if (hash192 && rand192)
                        data->present = 0x01;

        BT_DBG("%s for %pMR", hdev->name, bdaddr);
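
        /* Net effect of the branches above on data->present: 0x00 = no valid
         * OOB values, 0x01 = P-192 values only, 0x02 = P-256 values only,
         * 0x03 = both P-192 and P-256 values stored.
         */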
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
        struct adv_info *adv_instance;

        list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
                if (adv_instance->instance == instance)
                        return adv_instance;

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
        struct adv_info *cur_instance;

        cur_instance = hci_find_adv_instance(hdev, instance);

        if (cur_instance == list_last_entry(&hdev->adv_instances,
                                            struct adv_info, list))
                return list_first_entry(&hdev->adv_instances,
                                        struct adv_info, list);

        return list_next_entry(cur_instance, list);
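
/* Illustrative behavior (assuming instances appear in list order 1, 2, 3):
 * hci_get_next_instance(hdev, 1) returns instance 2, and
 * hci_get_next_instance(hdev, 3) wraps around to instance 1. This ring walk
 * is what drives rotation between multiple advertising instances.
 */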
/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
        struct adv_info *adv_instance;

        adv_instance = hci_find_adv_instance(hdev, instance);

        BT_DBG("%s removing %d", hdev->name, instance);

        if (hdev->cur_adv_instance == instance) {
                if (hdev->adv_instance_timeout) {
                        cancel_delayed_work(&hdev->adv_instance_expire);
                        hdev->adv_instance_timeout = 0;
                hdev->cur_adv_instance = 0x00;

        cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

        list_del(&adv_instance->list);
        kfree(adv_instance);

        hdev->adv_instance_cnt--;

void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
        struct adv_info *adv_instance, *n;

        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
                adv_instance->rpa_expired = rpa_expired;
/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
        struct adv_info *adv_instance, *n;

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;

        list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
                cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
                list_del(&adv_instance->list);
                kfree(adv_instance);

        hdev->adv_instance_cnt = 0;
        hdev->cur_adv_instance = 0x00;

static void adv_instance_rpa_expired(struct work_struct *work)
        struct adv_info *adv_instance = container_of(work, struct adv_info,
                                                     rpa_expired_cb.work);

        adv_instance->rpa_expired = true;
1714 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1715 u32 flags, u16 adv_data_len, u8 *adv_data,
1716 u16 scan_rsp_len, u8 *scan_rsp_data,
1717 u16 timeout, u16 duration, s8 tx_power,
1718 u32 min_interval, u32 max_interval,
1721 struct adv_info *adv;
1723 adv = hci_find_adv_instance(hdev, instance);
1725 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1726 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1727 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1729 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1730 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1731 return ERR_PTR(-EOVERFLOW);
1733 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1735 return ERR_PTR(-ENOMEM);
1737 adv->pending = true;
1738 adv->instance = instance;
1739 list_add(&adv->list, &hdev->adv_instances);
1740 hdev->adv_instance_cnt++;
1744 adv->min_interval = min_interval;
1745 adv->max_interval = max_interval;
1746 adv->tx_power = tx_power;
1747 /* Defining a mesh_handle changes the timing units to ms,
1748 * rather than seconds, and ties the instance to the requested
1751 adv->mesh = mesh_handle;
1753 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1754 scan_rsp_len, scan_rsp_data);
1756 adv->timeout = timeout;
1757 adv->remaining_time = timeout;
1760 adv->duration = hdev->def_multi_adv_rotation_duration;
1762 adv->duration = duration;
1764 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1766 BT_DBG("%s for %dMR", hdev->name, instance);
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
                                      u32 flags, u8 data_len, u8 *data,
                                      u32 min_interval, u32 max_interval)
        struct adv_info *adv;

        adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
                                   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
                                   min_interval, max_interval, 0);

        adv->periodic = true;
        adv->per_adv_data_len = data_len;

        memcpy(adv->per_adv_data, data, data_len);

/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
                              u16 adv_data_len, u8 *adv_data,
                              u16 scan_rsp_len, u8 *scan_rsp_data)
        struct adv_info *adv;

        adv = hci_find_adv_instance(hdev, instance);

        /* If advertisement doesn't exist, we can't modify its data */

        if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
                memset(adv->adv_data, 0, sizeof(adv->adv_data));
                memcpy(adv->adv_data, adv_data, adv_data_len);
                adv->adv_data_len = adv_data_len;
                adv->adv_data_changed = true;

        if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
                memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
                memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
                adv->scan_rsp_len = scan_rsp_len;
                adv->scan_rsp_changed = true;

        /* Mark as changed if there are flags which would affect it */
        if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
            adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
                adv->scan_rsp_changed = true;
/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
        struct adv_info *adv;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

        adv = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */

bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
        struct adv_info *adv;

        /* Instance 0x00 always sets the local name */
        if (instance == 0x00)

        adv = hci_find_adv_instance(hdev, instance);

        if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
            adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)

        return adv->scan_rsp_len ? true : false;
/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
        struct adv_monitor *monitor;

        idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
                hci_free_adv_monitor(hdev, monitor);

        idr_destroy(&hdev->adv_monitors_idr);

/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
        struct adv_pattern *pattern;
        struct adv_pattern *tmp;

        list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
                list_del(&pattern->list);

        if (monitor->handle)
                idr_remove(&hdev->adv_monitors_idr, monitor->handle);

        if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
                hdev->adv_monitors_cnt--;
                mgmt_adv_monitor_removed(hdev, monitor->handle);
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
        int min, max, handle;

        min = HCI_MIN_ADV_MONITOR_HANDLE;
        max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
        handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
                           GFP_KERNEL);
                hci_dev_unlock(hdev);

        monitor->handle = handle;

        if (!hdev_is_powered(hdev))

        switch (hci_get_adv_monitor_offload_ext(hdev)) {
        case HCI_ADV_MONITOR_EXT_NONE:
                bt_dev_dbg(hdev, "add monitor %d status %d",
                           monitor->handle, status);
                /* Message was not forwarded to controller - not an error */

        case HCI_ADV_MONITOR_EXT_MSFT:
                status = msft_add_monitor_pattern(hdev, monitor);
                bt_dev_dbg(hdev, "add monitor %d msft status %d",
                           monitor->handle, status);
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
                                  struct adv_monitor *monitor)
        switch (hci_get_adv_monitor_offload_ext(hdev)) {
        case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
                bt_dev_dbg(hdev, "remove monitor %d status %d",
                           monitor->handle, status);

        case HCI_ADV_MONITOR_EXT_MSFT:
                handle = monitor->handle;
                status = msft_remove_monitor(hdev, monitor);
                bt_dev_dbg(hdev, "remove monitor %d msft status %d",
                           handle, status);

                /* In case no matching handle registered, just free the monitor */
                if (status == -ENOENT)

        if (status == -ENOENT)
                bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
                            monitor->handle);
        hci_free_adv_monitor(hdev, monitor);
/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
        struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);

        return hci_remove_adv_monitor(hdev, monitor);

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
        struct adv_monitor *monitor;
        int idr_next_id = 0;

                monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);

                status = hci_remove_adv_monitor(hdev, monitor);

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
        return !idr_is_empty(&hdev->adv_monitors_idr);

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
        if (msft_monitor_supported(hdev))
                return HCI_ADV_MONITOR_EXT_MSFT;

        return HCI_ADV_MONITOR_EXT_NONE;
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
                                           bdaddr_t *bdaddr, u8 type)
        struct bdaddr_list *b;

        list_for_each_entry(b, bdaddr_list, list) {
                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
                        struct list_head *bdaddr_list, bdaddr_t *bdaddr,
                        u8 type)
        struct bdaddr_list_with_irk *b;

        list_for_each_entry(b, bdaddr_list, list) {
                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
                                  bdaddr_t *bdaddr, u8 type)
        struct bdaddr_list_with_flags *b;

        list_for_each_entry(b, bdaddr_list, list) {
                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
        struct bdaddr_list *b, *n;

        list_for_each_entry_safe(b, n, bdaddr_list, list) {

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY))

        if (hci_bdaddr_list_lookup(list, bdaddr, type))

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);

        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;

        list_add(&entry->list, list);
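
/* Usage sketch (illustrative, not from this file; assumes hdev->lock is held
 * as the callers in this file do; address bytes are little-endian, so this is
 * 11:22:33:44:55:66):
 *
 *	bdaddr_t peer = {{ 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }};
 *
 *	if (!hci_bdaddr_list_add(&hdev->accept_list, &peer, BDADDR_BREDR))
 *		WARN_ON(!hci_bdaddr_list_lookup(&hdev->accept_list, &peer,
 *						BDADDR_BREDR));
 */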
int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
                                 u8 type, u8 *peer_irk, u8 *local_irk)
        struct bdaddr_list_with_irk *entry;

        if (!bacmp(bdaddr, BDADDR_ANY))

        if (hci_bdaddr_list_lookup(list, bdaddr, type))

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);

        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;

                memcpy(entry->peer_irk, peer_irk, 16);

                memcpy(entry->local_irk, local_irk, 16);

        list_add(&entry->list, list);

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
                                   u8 type, u32 flags)
        struct bdaddr_list_with_flags *entry;

        if (!bacmp(bdaddr, BDADDR_ANY))

        if (hci_bdaddr_list_lookup(list, bdaddr, type))

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);

        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;
        entry->flags = flags;

        list_add(&entry->list, list);

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY)) {
                hci_bdaddr_list_clear(list);

        entry = hci_bdaddr_list_lookup(list, bdaddr, type);

        list_del(&entry->list);

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
                                 u8 type)
        struct bdaddr_list_with_irk *entry;

        if (!bacmp(bdaddr, BDADDR_ANY)) {
                hci_bdaddr_list_clear(list);

        entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);

        list_del(&entry->list);

int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
                                   u8 type)
        struct bdaddr_list_with_flags *entry;

        if (!bacmp(bdaddr, BDADDR_ANY)) {
                hci_bdaddr_list_clear(list);

        entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);

        list_del(&entry->list);
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
                                               bdaddr_t *addr, u8 addr_type)
        struct hci_conn_params *params;

        list_for_each_entry(params, &hdev->le_conn_params, list) {
                if (bacmp(&params->addr, addr) == 0 &&
                    params->addr_type == addr_type) {

/* This function requires the caller holds hdev->lock or rcu_read_lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
                                                  bdaddr_t *addr, u8 addr_type)
        struct hci_conn_params *param;

        list_for_each_entry_rcu(param, list, action) {
                if (bacmp(&param->addr, addr) == 0 &&
                    param->addr_type == addr_type) {

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_del_init(struct hci_conn_params *param)
        if (list_empty(&param->action))

        list_del_rcu(&param->action);
        INIT_LIST_HEAD(&param->action);

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_add(struct hci_conn_params *param,
                          struct list_head *list)
        list_add_rcu(&param->action, list);

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
                                            bdaddr_t *addr, u8 addr_type)
        struct hci_conn_params *params;

        params = hci_conn_params_lookup(hdev, addr, addr_type);

        params = kzalloc(sizeof(*params), GFP_KERNEL);
                bt_dev_err(hdev, "out of memory");

        bacpy(&params->addr, addr);
        params->addr_type = addr_type;

        list_add(&params->list, &hdev->le_conn_params);
        INIT_LIST_HEAD(&params->action);

        params->conn_min_interval = hdev->le_conn_min_interval;
        params->conn_max_interval = hdev->le_conn_max_interval;
        params->conn_latency = hdev->le_conn_latency;
        params->supervision_timeout = hdev->le_supv_timeout;
        params->auto_connect = HCI_AUTO_CONN_DISABLED;

        BT_DBG("addr %pMR (type %u)", addr, addr_type);

void hci_conn_params_free(struct hci_conn_params *params)
        hci_pend_le_list_del_init(params);

                hci_conn_drop(params->conn);
                hci_conn_put(params->conn);

        list_del(&params->list);
/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
        struct hci_conn_params *params;

        params = hci_conn_params_lookup(hdev, addr, addr_type);

        hci_conn_params_free(params);

        hci_update_passive_scan(hdev);

        BT_DBG("addr %pMR (type %u)", addr, addr_type);

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
        struct hci_conn_params *params, *tmp;

        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
                if (params->auto_connect != HCI_AUTO_CONN_DISABLED)

                /* If trying to establish a one-time connection to a disabled
                 * device, leave the params, but mark them as just once.
                 */
                if (params->explicit_connect) {
                        params->auto_connect = HCI_AUTO_CONN_EXPLICIT;

                hci_conn_params_free(params);

        BT_DBG("All LE disabled connection parameters were removed");

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
        struct hci_conn_params *params, *tmp;

        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
                hci_conn_params_free(params);

        BT_DBG("All LE connection parameters were removed");
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *bdaddr_type)
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                bacpy(bdaddr, &hdev->static_addr);
                *bdaddr_type = ADDR_LE_DEV_RANDOM;

                bacpy(bdaddr, &hdev->bdaddr);
                *bdaddr_type = ADDR_LE_DEV_PUBLIC;

static void hci_clear_wake_reason(struct hci_dev *hdev)
        hdev->wake_reason = 0;
        bacpy(&hdev->wake_addr, BDADDR_ANY);
        hdev->wake_addr_type = 0;

        hci_dev_unlock(hdev);
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
                                void *data)
        struct hci_dev *hdev =
                container_of(nb, struct hci_dev, suspend_notifier);

        /* Userspace has full control of this device. Do nothing. */
        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))

        /* To avoid a potential race with hci_unregister_dev. */

        if (action == PM_SUSPEND_PREPARE)
                ret = hci_suspend_dev(hdev);
        else if (action == PM_POST_SUSPEND)
                ret = hci_resume_dev(hdev);

                bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
                           action, ret);
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
        struct hci_dev *hdev;
        unsigned int alloc_size;

        alloc_size = sizeof(*hdev);

                /* Fixme: May need ALIGN-ment? */
                alloc_size += sizeof_priv;

        hdev = kzalloc(alloc_size, GFP_KERNEL);

        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->num_iac = 0x01;           /* One IAC support is mandatory */
        hdev->io_capability = 0x03;     /* No Input No Output */
        hdev->manufacturer = 0xffff;    /* Default to internal use */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_instance_cnt = 0;
        hdev->cur_adv_instance = 0x00;
        hdev->adv_instance_timeout = 0;

        hdev->advmon_allowlist_duration = 300;
        hdev->advmon_no_filter_duration = 500;
        hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */

        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        hdev->le_adv_channel_map = 0x07;
        hdev->le_adv_min_interval = 0x0800;
        hdev->le_adv_max_interval = 0x0800;
        hdev->le_scan_interval = 0x0060;
        hdev->le_scan_window = 0x0030;
        hdev->le_scan_int_suspend = 0x0400;
        hdev->le_scan_window_suspend = 0x0012;
        hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
        hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
        hdev->le_scan_int_adv_monitor = 0x0060;
        hdev->le_scan_window_adv_monitor = 0x0030;
        hdev->le_scan_int_connect = 0x0060;
        hdev->le_scan_window_connect = 0x0060;
        hdev->le_conn_min_interval = 0x0018;
        hdev->le_conn_max_interval = 0x0028;
        hdev->le_conn_latency = 0x0000;
        hdev->le_supv_timeout = 0x002a;
        hdev->le_def_tx_len = 0x001b;
        hdev->le_def_tx_time = 0x0148;
        hdev->le_max_tx_len = 0x001b;
        hdev->le_max_tx_time = 0x0148;
        hdev->le_max_rx_len = 0x001b;
        hdev->le_max_rx_time = 0x0148;
        hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
        hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
        hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
        hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
        hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
        hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
        hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
        hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
        hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
        hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
        hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
        hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
        hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

        /* default 1.28 sec page scan */
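        /* 0x0800 * 0.625 ms = 1280 ms interval; 0x0012 * 0.625 ms = 11.25 ms
         * window (both values are in baseband slots of 0.625 ms).
         */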
        hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
        hdev->def_page_scan_int = 0x0800;
        hdev->def_page_scan_window = 0x0012;

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        INIT_LIST_HEAD(&hdev->mesh_pending);
        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->reject_list);
        INIT_LIST_HEAD(&hdev->accept_list);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->identity_resolving_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
        INIT_LIST_HEAD(&hdev->le_accept_list);
        INIT_LIST_HEAD(&hdev->le_resolv_list);
        INIT_LIST_HEAD(&hdev->le_conn_params);
        INIT_LIST_HEAD(&hdev->pend_le_conns);
        INIT_LIST_HEAD(&hdev->pend_le_reports);
        INIT_LIST_HEAD(&hdev->conn_hash.list);
        INIT_LIST_HEAD(&hdev->adv_instances);
        INIT_LIST_HEAD(&hdev->blocked_keys);
        INIT_LIST_HEAD(&hdev->monitored_devices);

        INIT_LIST_HEAD(&hdev->local_codecs);
        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->error_reset, hci_error_reset);

        hci_cmd_sync_init(hdev);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
        INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

        hci_devcd_setup(hdev);
        hci_request_setup(hdev);

        hci_init_sysfs(hdev);
        discovery_init(hdev);

EXPORT_SYMBOL(hci_alloc_dev_priv);
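
/* Driver-side sketch (illustrative; "struct my_data" is hypothetical, and the
 * hci_get_priv() helper from hci_core.h is assumed): embedding private data
 * behind struct hci_dev via hci_alloc_dev_priv().
 *
 *	struct my_data { void *regs; };
 *
 *	struct hci_dev *hdev = hci_alloc_dev_priv(sizeof(struct my_data));
 *	struct my_data *priv = hdev ? hci_get_priv(hdev) : NULL;
 */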
2587 /* Free HCI device */
2588 void hci_free_dev(struct hci_dev *hdev)
2590 /* will free via device release */
2591 put_device(&hdev->dev);
2593 EXPORT_SYMBOL(hci_free_dev);
2595 /* Register HCI device */
2596 int hci_register_dev(struct hci_dev *hdev)
2600 if (!hdev->open || !hdev->close || !hdev->send)
2603 /* Do not allow HCI_AMP devices to register at index 0,
2604 * so the index can be used as the AMP controller ID.
2606 switch (hdev->dev_type) {
2608 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2611 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2620 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2623 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2625 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2626 if (!hdev->workqueue) {
2631 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2633 if (!hdev->req_workqueue) {
2634 destroy_workqueue(hdev->workqueue);
2639 if (!IS_ERR_OR_NULL(bt_debugfs))
2640 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2642 dev_set_name(&hdev->dev, "%s", hdev->name);
2644 error = device_add(&hdev->dev);
2648 hci_leds_init(hdev);
2650 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2651 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2654 if (rfkill_register(hdev->rfkill) < 0) {
2655 rfkill_destroy(hdev->rfkill);
2656 hdev->rfkill = NULL;
2660 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2661 hci_dev_set_flag(hdev, HCI_RFKILLED);
2663 hci_dev_set_flag(hdev, HCI_SETUP);
2664 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2666 if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
2670 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2673 write_lock(&hci_dev_list_lock);
2674 list_add(&hdev->list, &hci_dev_list);
2675 write_unlock(&hci_dev_list_lock);
	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
2680 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2681 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * support.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2689 hci_sock_dev_event(hdev, HCI_DEV_REG);
2692 error = hci_register_suspend_notifier(hdev);
	if (error)
		BT_WARN("register suspend notifier failed error:%d\n", error);
2696 queue_work(hdev->req_workqueue, &hdev->power_on);
2698 idr_init(&hdev->adv_monitors_idr);
2699 msft_register(hdev);
	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
2712 EXPORT_SYMBOL(hci_register_dev);
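/* Typical driver bring-up, as a minimal sketch (the foo_* callbacks are
 * hypothetical, not part of this file):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->send  = foo_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() rejects a device that lacks open, close or send.
 */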
2714 /* Unregister HCI device */
2715 void hci_unregister_dev(struct hci_dev *hdev)
2717 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2719 mutex_lock(&hdev->unregister_lock);
2720 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2721 mutex_unlock(&hdev->unregister_lock);
2723 write_lock(&hci_dev_list_lock);
2724 list_del(&hdev->list);
2725 write_unlock(&hci_dev_list_lock);
2727 cancel_work_sync(&hdev->power_on);
2729 hci_cmd_sync_clear(hdev);
2731 hci_unregister_suspend_notifier(hdev);
2733 msft_unregister(hdev);
2735 hci_dev_do_close(hdev);
2737 if (!test_bit(HCI_INIT, &hdev->flags) &&
2738 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2739 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}
	/* mgmt_index_removed should take care of emptying the
	 * mgmt_pending list */
2747 BUG_ON(!list_empty(&hdev->mgmt_pending));
2749 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}
2756 device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
2760 EXPORT_SYMBOL(hci_unregister_dev);
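/* Teardown note: hci_unregister_dev() only detaches the device from the
 * stack (device lists, mgmt, sysfs, rfkill); memory owned by the hci_dev
 * is not reclaimed until the last reference is dropped and the device
 * core calls back into hci_release_dev() below.
 */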
2762 /* Release HCI device */
2763 void hci_release_dev(struct hci_dev *hdev)
2765 debugfs_remove_recursive(hdev->debugfs);
2766 kfree_const(hdev->hw_info);
2767 kfree_const(hdev->fw_info);
2769 destroy_workqueue(hdev->workqueue);
2770 destroy_workqueue(hdev->req_workqueue);
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
2774 hci_bdaddr_list_clear(&hdev->accept_list);
2775 hci_uuids_clear(hdev);
2776 hci_link_keys_clear(hdev);
2777 hci_smp_ltks_clear(hdev);
2778 hci_smp_irks_clear(hdev);
2779 hci_remote_oob_data_clear(hdev);
2780 hci_adv_instances_clear(hdev);
2781 hci_adv_monitors_clear(hdev);
2782 hci_bdaddr_list_clear(&hdev->le_accept_list);
2783 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2784 hci_conn_params_clear_all(hdev);
2785 hci_discovery_filter_clear(hdev);
2786 hci_blocked_keys_clear(hdev);
2787 hci_dev_unlock(hdev);
2789 ida_simple_remove(&hci_index_ida, hdev->id);
2790 kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
2794 EXPORT_SYMBOL(hci_release_dev);
int hci_register_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;
2800 if (!hdev->suspend_notifier.notifier_call &&
2801 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2802 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		ret = register_pm_notifier(&hdev->suspend_notifier);
	}

	return ret;
}
int hci_unregister_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;
2813 if (hdev->suspend_notifier.notifier_call) {
2814 ret = unregister_pm_notifier(&hdev->suspend_notifier);
		if (!ret)
			hdev->suspend_notifier.notifier_call = NULL;
	}

	return ret;
}
2822 /* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;
2827 bt_dev_dbg(hdev, "");
	/* Suspend should only act when the device is powered */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;
	/* If powering down, don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;
2838 /* Cancel potentially blocking sync operation before suspend */
2839 __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
2841 hci_req_sync_lock(hdev);
2842 ret = hci_suspend_sync(hdev);
2843 hci_req_sync_unlock(hdev);
2845 hci_clear_wake_reason(hdev);
2846 mgmt_suspending(hdev, hdev->suspend_state);
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);

	return ret;
}
2851 EXPORT_SYMBOL(hci_suspend_dev);
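/* Suspend/resume note: both paths are serialized against other synchronous
 * HCI requests via hci_req_sync_lock(), and both bail out early when the
 * device is unpowered, being unregistered, or already powering down via
 * mgmt, so they only touch controllers that are actually active.
 */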
2853 /* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;
2858 bt_dev_dbg(hdev, "");
	/* Resume should only act when the device is powered */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;
	/* If powering down, don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;
2869 hci_req_sync_lock(hdev);
2870 ret = hci_resume_sync(hdev);
2871 hci_req_sync_unlock(hdev);
2873 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2874 hdev->wake_addr_type);
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);

	return ret;
}
2879 EXPORT_SYMBOL(hci_resume_dev);
2881 /* Reset HCI device */
2882 int hci_reset_dev(struct hci_dev *hdev)
2884 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2885 struct sk_buff *skb;
	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
2891 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2892 skb_put_data(skb, hw_err, 3);
2894 bt_dev_err(hdev, "Injecting HCI hardware error event");
2896 /* Send Hardware Error to upper stack */
2897 return hci_recv_frame(hdev, skb);
2899 EXPORT_SYMBOL(hci_reset_dev);
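/* The injected frame above is a complete HCI Hardware Error event:
 * 0x10 (HCI_EV_HARDWARE_ERROR), 0x01 (parameter length), 0x00 (hardware
 * code). Feeding it through hci_recv_frame() makes the synthetic error
 * take exactly the same path as a controller-generated event.
 */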
2901 /* Receive frame from HCI drivers */
2902 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}
	switch (hci_skb_pkt_type(skb)) {
	case HCI_EVENT_PKT:
		break;
	case HCI_ACLDATA_PKT:
		/* Detect if ISO packet has been sent as ACL */
		if (hci_conn_num(hdev, ISO_LINK)) {
			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
			__u8 type;

			type = hci_conn_lookup_type(hdev, hci_handle(handle));
			if (type == ISO_LINK)
				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
		}
		fallthrough;
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		break;
	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;
2937 __net_timestamp(skb);
2939 skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
2944 EXPORT_SYMBOL(hci_recv_frame);
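/* Driver RX path, as a minimal sketch (buf and count are hypothetical
 * driver-side variables; the packet type must match what the transport
 * actually delivered):
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, buf, count);
 *	err = hci_recv_frame(hdev, skb);
 *
 * hci_recv_frame() takes ownership of the skb and frees it on error.
 */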
2946 /* Receive diagnostic message from HCI drivers */
2947 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2949 /* Mark as diagnostic packet */
2950 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2953 __net_timestamp(skb);
2955 skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
2960 EXPORT_SYMBOL(hci_recv_diag);
void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
2971 EXPORT_SYMBOL(hci_set_hw_info);
void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
2982 EXPORT_SYMBOL(hci_set_fw_info);
2984 /* ---- Interface to upper protocols ---- */
2986 int hci_register_cb(struct hci_cb *cb)
2988 BT_DBG("%p name %s", cb, cb->name);
2990 mutex_lock(&hci_cb_list_lock);
2991 list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
2996 EXPORT_SYMBOL(hci_register_cb);
2998 int hci_unregister_cb(struct hci_cb *cb)
3000 BT_DBG("%p name %s", cb, cb->name);
3002 mutex_lock(&hci_cb_list_lock);
3003 list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
3008 EXPORT_SYMBOL(hci_unregister_cb);
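/* The hci_cb list is how upper protocols (L2CAP, SCO, ISO) get notified
 * about connection-level events such as connect/disconnect and security
 * changes; the callbacks are invoked with hci_cb_list_lock held.
 */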
static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;
	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);
3018 __net_timestamp(skb);
3020 /* Send copy to monitor */
3021 hci_send_to_monitor(hdev, skb);
3023 if (atomic_read(&hdev->promisc)) {
3024 /* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}
	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}
3036 err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}
3046 /* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
3050 struct sk_buff *skb;
3052 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3054 skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}
	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
3063 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3065 skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
3074 struct sk_buff *skb;
3076 if (hci_opcode_ogf(opcode) != 0x3f) {
3077 /* A controller receiving a command shall respond with either
3078 * a Command Status Event or a Command Complete Event.
3079 * Therefore, all standard HCI commands must be sent via the
3080 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3081 * Some vendors do not comply with this rule for vendor-specific
3082 * commands and do not return any event. We want to support
3083 * unresponded commands for such cases only.
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}
	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}
	hci_send_frame(hdev, skb);

	return 0;
}
3100 EXPORT_SYMBOL(__hci_cmd_send);
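/* A vendor command with no completion event, as a hedged example (the
 * OCF 0x0001 is made up; real values are vendor specific):
 *
 *	err = __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), &param);
 *
 * Anything outside OGF 0x3f is rejected above, since standard commands
 * are required to produce a Command Status or Command Complete event.
 */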
3102 /* Get data from the previously sent command */
3103 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3105 struct hci_command_hdr *hdr;
	if (!hdev->sent_cmd)
		return NULL;
3110 hdr = (void *) hdev->sent_cmd->data;
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;
3115 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3117 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3120 /* Get data from last received event */
3121 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;
3129 hdr = (void *)hdev->recv_event->data;
3130 offset = sizeof(*hdr);
3132 if (hdr->evt != event) {
		/* In case of an LE meta event, check whether the subevent
		 * matches.
		 */
3134 if (hdr->evt == HCI_EV_LE_META) {
3135 struct hci_ev_le_meta *ev;
3137 ev = (void *)hdev->recv_event->data + offset;
3138 offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}

		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);
3148 return hdev->recv_event->data + offset;
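/* ACL/ISO data headers below pack a 12-bit connection handle together
 * with 4 bits of packet boundary/broadcast flags into one __le16; this
 * is what hci_handle_pack(handle, flags) produces and what hci_handle()
 * and hci_flags() take apart again on the RX side.
 */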
3152 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
	struct hci_acl_hdr *hdr;
	int len = skb->len;
3157 skb_push(skb, HCI_ACL_HDR_SIZE);
3158 skb_reset_transport_header(skb);
3159 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3160 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3161 hdr->dlen = cpu_to_le16(len);
3164 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3165 struct sk_buff *skb, __u16 flags)
3167 struct hci_conn *conn = chan->conn;
3168 struct hci_dev *hdev = conn->hdev;
3169 struct sk_buff *list;
	skb->len = skb_headlen(skb);
	skb->data_len = 0;
3174 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}
	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3198 skb_shinfo(skb)->frag_list = NULL;
		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because with 6LoWPAN links this function can be called
		 * from softirq context, where taking a plain spin lock could
		 * cause deadlocks.
		 */
3205 spin_lock_bh(&queue->lock);
3207 __skb_queue_tail(queue, skb);
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3226 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3228 struct hci_dev *hdev = chan->conn->hdev;
3230 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3232 hci_queue_acl(chan, &chan->data_q, skb, flags);
3234 queue_work(hdev->workqueue, &hdev->tx_work);
3238 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3240 struct hci_dev *hdev = conn->hdev;
3241 struct hci_sco_hdr hdr;
3243 BT_DBG("%s len %d", hdev->name, skb->len);
3245 hdr.handle = cpu_to_le16(conn->handle);
3246 hdr.dlen = skb->len;
3248 skb_push(skb, HCI_SCO_HDR_SIZE);
3249 skb_reset_transport_header(skb);
3250 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3252 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3254 skb_queue_tail(&conn->data_q, skb);
3255 queue_work(hdev->workqueue, &hdev->tx_work);
3259 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
	struct hci_iso_hdr *hdr;
	int len = skb->len;
3264 skb_push(skb, HCI_ISO_HDR_SIZE);
3265 skb_reset_transport_header(skb);
3266 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3267 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3268 hdr->dlen = cpu_to_le16(len);
3271 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3272 struct sk_buff *skb)
3274 struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u8 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;
3281 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}
3316 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3318 struct hci_dev *hdev = conn->hdev;
3320 BT_DBG("%s len %d", hdev->name, skb->len);
3322 hci_queue_iso(conn, &conn->data_q, skb);
3324 queue_work(hdev->workqueue, &hdev->tx_work);
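/* Fragmentation note: hci_queue_iso() above tags the first fragment with
 * ISO_START (or ISO_SINGLE when there are no fragments) and the rest with
 * ISO_CONT, ending with ISO_END, mirroring the ACL_START/ACL_CONT scheme
 * used by hci_queue_acl().
 */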
3327 /* ---- HCI TX task (outgoing data) ---- */
3329 /* HCI Connection scheduler */
3330 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;
	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
3372 struct hci_conn_hash *h = &hdev->conn_hash;
3373 struct hci_conn *conn = NULL, *c;
3374 unsigned int num = 0, min = ~0;
3376 /* We don't have to lock device here. Connections are always
3377 * added and removed with TX task disabled. */
	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
3401 hci_quote_sent(conn, num, quote);
	BT_DBG("conn %p quote %d", conn, *quote);

	return conn;
}
3407 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;
3412 bt_dev_err(hdev, "link tx timeout");
	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
3418 if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
3431 struct hci_conn_hash *h = &hdev->conn_hash;
3432 struct hci_chan *chan = NULL;
3433 unsigned int num = 0, min = ~0, cur_prio = 0;
3434 struct hci_conn *conn;
3437 BT_DBG("%s", hdev->name);
	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
3442 struct hci_chan *tmp;
		if (conn->type != type)
			continue;
		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;
3452 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3453 struct sk_buff *skb;
			if (skb_queue_empty(&tmp->data_q))
				continue;
3458 skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}
			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);
	BT_DBG("chan %p quote %d", chan, *quote);

	return chan;
}
3491 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3493 struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;
3497 BT_DBG("%s", hdev->name);
	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
3502 struct hci_chan *chan;
		if (conn->type != type)
			continue;
		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;
3512 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3513 struct sk_buff *skb;
			if (skb_queue_empty(&chan->data_q))
				continue;
3523 skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;
3527 skb->priority = HCI_PRIO_MAX - 1;
			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
3541 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3543 /* Calculate count of blocks used by this packet */
3544 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3547 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3549 unsigned long last_tx;
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;
	switch (type) {
	case LE_LINK:
		last_tx = hdev->le_last_tx;
		break;
	default:
		last_tx = hdev->acl_last_tx;
		break;
	}
	/* tx timeout must be longer than maximum link supervision timeout
	 * (40.9 seconds)
	 */
3566 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3567 hci_link_tx_to(hdev, type);
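/* Scheduler note: hci_low_sent()/hci_chan_sent() pick the connection or
 * channel with the fewest outstanding packets, and hci_quote_sent() turns
 * the remaining controller buffer credits into a per-round quota of
 * roughly cnt / num (at least 1), which keeps busy links from starving
 * quiet ones.
 */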
3571 static void hci_sched_sco(struct hci_dev *hdev)
3573 struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;
3577 BT_DBG("%s", hdev->name);
	if (!hci_conn_num(hdev, SCO_LINK))
		return;
3582 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3583 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3584 BT_DBG("skb %p len %d", skb, skb->len);
3585 hci_send_frame(hdev, skb);
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3594 static void hci_sched_esco(struct hci_dev *hdev)
3596 struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;
3600 BT_DBG("%s", hdev->name);
	if (!hci_conn_num(hdev, ESCO_LINK))
		return;
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
3607 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3608 BT_DBG("skb %p len %d", skb, skb->len);
3609 hci_send_frame(hdev, skb);
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3618 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3620 unsigned int cnt = hdev->acl_cnt;
3621 struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
3625 __check_timeout(hdev, cnt, ACL_LINK);
3627 while (hdev->acl_cnt &&
3628 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3629 u32 priority = (skb_peek(&chan->data_q))->priority;
3630 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3631 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3632 skb->len, skb->priority);
3634 /* Stop if priority has changed */
			if (skb->priority < priority)
				break;
3638 skb = skb_dequeue(&chan->data_q);
3640 hci_conn_enter_active_mode(chan->conn,
3641 bt_cb(skb)->force_active);
3643 hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
3650 /* Send pending SCO packets right away */
3651 hci_sched_sco(hdev);
3652 hci_sched_esco(hdev);
3656 if (cnt != hdev->acl_cnt)
3657 hci_prio_recalculate(hdev, ACL_LINK);
3660 static void hci_sched_acl_blk(struct hci_dev *hdev)
3662 unsigned int cnt = hdev->block_cnt;
3663 struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;
3668 BT_DBG("%s", hdev->name);
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;
3675 __check_timeout(hdev, cnt, type);
3677 while (hdev->block_cnt > 0 &&
3678 (chan = hci_chan_sent(hdev, type, "e))) {
3679 u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;
3683 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3684 skb->len, skb->priority);
3686 /* Stop if priority has changed */
			if (skb->priority < priority)
				break;
3690 skb = skb_dequeue(&chan->data_q);
3692 blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				break;
3696 hci_conn_enter_active_mode(chan->conn,
3697 bt_cb(skb)->force_active);
3699 hci_send_frame(hdev, skb);
3700 hdev->acl_last_tx = jiffies;
			hdev->block_cnt -= blocks;
			quote -= blocks;
3705 chan->sent += blocks;
3706 chan->conn->sent += blocks;
3710 if (cnt != hdev->block_cnt)
3711 hci_prio_recalculate(hdev, type);
3714 static void hci_sched_acl(struct hci_dev *hdev)
3716 BT_DBG("%s", hdev->name);
3718 /* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;
3722 /* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;
3726 switch (hdev->flow_ctl_mode) {
3727 case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;
3731 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
3737 static void hci_sched_le(struct hci_dev *hdev)
3739 struct hci_chan *chan;
3740 struct sk_buff *skb;
3741 int quote, cnt, tmp;
3743 BT_DBG("%s", hdev->name);
	if (!hci_conn_num(hdev, LE_LINK))
		return;
3748 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3750 __check_timeout(hdev, cnt, LE_LINK);
3753 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3754 u32 priority = (skb_peek(&chan->data_q))->priority;
3755 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3756 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3757 skb->len, skb->priority);
3759 /* Stop if priority has changed */
			if (skb->priority < priority)
				break;
3763 skb = skb_dequeue(&chan->data_q);
3765 hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
3772 /* Send pending SCO packets right away */
3773 hci_sched_sco(hdev);
3774 hci_sched_esco(hdev);
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3788 static void hci_sched_iso(struct hci_dev *hdev)
3790 struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;
3794 BT_DBG("%s", hdev->name);
	if (!hci_conn_num(hdev, ISO_LINK))
		return;
3799 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3800 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3801 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, "e))) {
3802 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3803 BT_DBG("skb %p len %d", skb, skb->len);
3804 hci_send_frame(hdev, skb);
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3814 static void hci_tx_work(struct work_struct *work)
3816 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3817 struct sk_buff *skb;
3819 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3820 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3822 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3823 /* Schedule queues and send stuff to HCI driver */
3824 hci_sched_sco(hdev);
3825 hci_sched_esco(hdev);
3826 hci_sched_iso(hdev);
3827 hci_sched_acl(hdev);
3831 /* Send next queued raw (unknown type) packet */
3832 while ((skb = skb_dequeue(&hdev->raw_q)))
3833 hci_send_frame(hdev, skb);
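/* TX ordering note: hci_tx_work() drains synchronous traffic first (SCO,
 * then eSCO), then ISO, then ACL/LE, and finally flushes hdev->raw_q,
 * which carries packets of unknown type such as those queued while the
 * device was in a raw or user channel mode.
 */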
3836 /* ----- HCI RX task (incoming data processing) ----- */
3838 /* ACL data packet */
3839 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3841 struct hci_acl_hdr *hdr = (void *) skb->data;
3842 struct hci_conn *conn;
3843 __u16 handle, flags;
3845 skb_pull(skb, HCI_ACL_HDR_SIZE);
3847 handle = __le16_to_cpu(hdr->handle);
3848 flags = hci_flags(handle);
3849 handle = hci_handle(handle);
	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);
3854 hdev->stat.acl_rx++;
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
3858 hci_dev_unlock(hdev);
	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3863 /* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	}

	bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
		   handle);

	kfree_skb(skb);
}
3874 /* SCO data packet */
3875 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3877 struct hci_sco_hdr *hdr = (void *) skb->data;
3878 struct hci_conn *conn;
3879 __u16 handle, flags;
3881 skb_pull(skb, HCI_SCO_HDR_SIZE);
3883 handle = __le16_to_cpu(hdr->handle);
3884 flags = hci_flags(handle);
3885 handle = hci_handle(handle);
	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);
3890 hdev->stat.sco_rx++;
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
3894 hci_dev_unlock(hdev);
	if (conn) {
		/* Send to upper protocol */
3898 hci_skb_pkt_status(skb) = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	}

	bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
			       handle);

	kfree_skb(skb);
}
3909 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3911 struct hci_iso_hdr *hdr;
3912 struct hci_conn *conn;
3913 __u16 handle, flags;
3915 hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}
3921 handle = __le16_to_cpu(hdr->handle);
3922 flags = hci_flags(handle);
3923 handle = hci_handle(handle);
	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
3930 hci_dev_unlock(hdev);
	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}
3938 /* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}
3946 static bool hci_req_is_complete(struct hci_dev *hdev)
3948 struct sk_buff *skb;
	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;
3954 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3957 static void hci_resend_last(struct hci_dev *hdev)
3959 struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;
	if (!hdev->sent_cmd)
		return;
3966 sent = (void *) hdev->sent_cmd->data;
3967 opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;
	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;
3975 skb_queue_head(&hdev->cmd_q, skb);
3976 queue_work(hdev->workqueue, &hdev->cmd_work);
3979 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3980 hci_req_complete_t *req_complete,
3981 hci_req_complete_skb_t *req_complete_skb)
3983 struct sk_buff *skb;
3984 unsigned long flags;
3986 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
3991 if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}
4004 /* If we reach this point this event matches the last command sent */
4005 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;
4013 /* If this was the last command in a request the complete
4014 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
4017 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}
4022 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}
4027 /* Remove all pending commands belonging to this request */
4028 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4029 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4030 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}
4035 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
4039 dev_kfree_skb_irq(skb);
4041 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
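/* Completion matching note: a request is a chain of queued commands where
 * only the first skb carries HCI_REQ_START; hci_req_cmd_complete() walks
 * hdev->cmd_q up to the next start marker, so one failed command cancels
 * the remainder of its own request and nothing else.
 */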
4044 static void hci_rx_work(struct work_struct *work)
4046 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4047 struct sk_buff *skb;
4049 BT_DBG("%s", hdev->name);
	/* The kcov_remote functions are used to collect packet-parsing
	 * coverage from this background thread and to associate that
	 * coverage with the syscall's thread which originally injected
	 * the packet. This helps with fuzzing the kernel.
	 */
4056 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4057 kcov_remote_start_common(skb_get_kcov_handle(skb));
4059 /* Send copy to monitor */
4060 hci_send_to_monitor(hdev, skb);
4062 if (atomic_read(&hdev->promisc)) {
4063 /* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}
		/* If the device has been opened in HCI_USER_CHANNEL,
		 * userspace has exclusive access to the device.
		 * While the device is in HCI_INIT, we still need to
		 * pass data packets to the driver so that it can
		 * complete its setup().
		 */
4073 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}
4079 if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
4081 switch (hci_skb_pkt_type(skb)) {
4082 case HCI_ACLDATA_PKT:
4083 case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;
4097 case HCI_ACLDATA_PKT:
4098 BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;
4102 case HCI_SCODATA_PKT:
4103 BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;
4107 case HCI_ISODATA_PKT:
4108 BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4119 static void hci_cmd_work(struct work_struct *work)
4121 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4122 struct sk_buff *skb;
4124 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4125 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4127 /* Send queued commands */
4128 if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;
4133 kfree_skb(hdev->sent_cmd);
4135 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;
4138 if (hci_req_status_pend(hdev))
4139 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4140 atomic_dec(&hdev->cmd_cnt);
4142 res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);
4147 if (test_bit(HCI_RESET, &hdev->flags) ||
4148 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4149 cancel_delayed_work(&hdev->cmd_timer);
			else
				queue_delayed_work(hdev->workqueue,
						   &hdev->cmd_timer,
						   HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
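/* Flow control note: hdev->cmd_cnt mirrors the Num_HCI_Command_Packets
 * credits reported by the controller; it normally allows one outstanding
 * command, is decremented here before each send, and is replenished by
 * Command Complete/Status events, with cmd_timer and ncmd_timer catching
 * controllers that stop responding or get stuck at zero credits.
 */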