/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
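/* Illustrative usage sketch (added; not from the original source): callers
 * must balance the implicit hold taken above with hci_dev_put() once done:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		// ... operate on hdev ...
 *		hci_dev_put(hdev);
 *	}
 */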
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
bool hci_le_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->le_discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name,
	       hdev->le_discovery.state, state);

	if (hdev->le_discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_passive_scan(hdev);

		if (hdev->le_discovery.state != DISCOVERY_STARTING)
			mgmt_le_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_le_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->le_discovery.state = state;
}
static void hci_tx_timeout_error_evt(struct hci_dev *hdev)
{
	BT_ERR("%s H/W TX Timeout error", hdev->name);

	mgmt_tx_timeout_error(hdev);
}
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
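/* Illustrative note (added): the resolve list is kept ordered by signal
 * strength, strongest first. Entries with RSSI -40, -60 and -90 stay in
 * that order, so name resolution proceeds from the closest device; a
 * re-inserted entry with RSSI -50 would land between -40 and -60.
 */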
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Restrict maximum inquiry length to 60 seconds */
	if (ir.length > 60) {
		err = -EINVAL;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
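/* Illustrative userspace sketch (added; not from the original source):
 * the inquiry ioctl above is reached through a raw BTPROTO_HCI socket,
 * roughly as hcitool-style tools do:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { 0 };
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;	// inquiry length, in 1.28 s units
 *	buf.ir.num_rsp = 8;
 *	memcpy(buf.ir.lap, (uint8_t []){ 0x33, 0x8b, 0x9e }, 3); // GIAC
 *	ioctl(dd, HCIINQUIRY, &buf);
 */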
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	ret = hci_dev_open_sync(hdev);

	hci_req_sync_unlock(hdev);

	return ret;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
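/* Illustrative userspace sketch (added; not from the original source):
 * the legacy bring-up path handled above corresponds to
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(dd, HCIDEVUP, 0);		// power on hci0
 *
 * which is what hciconfig-style tools use when bluetoothd/mgmt is not
 * managing the controller.
 */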
int hci_dev_do_close(struct hci_dev *hdev)
{
	int err;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	err = hci_dev_close_sync(hdev);

	hci_req_sync_unlock(hdev);

	return err;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	cancel_work_sync(&hdev->power_on);
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Cancel these to avoid queueing non-chained pending work */
	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	/* Wait for
	 *
	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
	 *
	 * inside RCU section to see the flag or complete scheduling.
	 */
	synchronize_rcu();

	/* Explicitly cancel works in case scheduled after setting the flag. */
	cancel_delayed_work(&hdev->cmd_timer);
	cancel_delayed_work(&hdev->ncmd_timer);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;

	ret = hci_reset_sync(hdev);

	hci_req_sync_unlock(hdev);
	return ret;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			hci_update_adv_data(hdev, hdev->cur_adv_instance);

		mgmt_new_settings(hdev);
	}
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT, NULL);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_passive_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT, NULL);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		if (hdev->pkt_type == (__u16) dr.dev_opt)
			break;

		hdev->pkt_type = (__u16) dr.dev_opt;
		mgmt_phy_configuration_changed(hdev, NULL);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
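/* Illustrative userspace sketch (added; not from the original source):
 * e.g. enabling page and inquiry scan through the ioctl above:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *	ioctl(dd, HCISETSCAN, (unsigned long) &dr);
 */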
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_UP, &hdev->flags) &&
	    hci_dev_test_flag(hdev, HCI_MGMT) &&
	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		cancel_delayed_work(&hdev->power_off);
		err = hci_powered_update_sync(hdev);
		mgmt_power_on(hdev, err);
		return;
	}

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_PRIMARY &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}
void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key, *tmp;

	list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
	struct blocked_key *b, *tmp;

	list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
		list_del_rcu(&b->list);
		kfree_rcu(b, rcu);
	}
}
bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
	bool blocked = false;
	struct blocked_key *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
			blocked = true;
			break;
		}
	}
	rcu_read_unlock();

	return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev,
					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"Link key blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
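/* Illustrative worked example (added): for a non-debug SSP key such as
 * HCI_LK_AUTH_COMBINATION_P192 (0x05), a pairing where both sides asked
 * for general bonding (auth_type 0x04 or 0x05, i.e. > 0x01) is stored
 * persistently, whereas one where both sides requested no-bonding
 * (auth_type 0x00 or 0x01) falls through every check and is discarded.
 */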
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();

			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
					       k->val)) {
				bt_dev_warn_ratelimited(hdev,
							"LTK blocked for %pMR",
							&k->bdaddr);
				return NULL;
			}

			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			irk_to_return = irk;
			goto done;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
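/* Illustrative note (added): per the Core spec, a Resolvable Private
 * Address is a 24-bit prand (top two bits 0b01) plus a 24-bit
 * hash = ah(IRK, prand). smp_irk_matches() recomputes that hash with
 * each stored IRK and compares it against the RPA, so the second pass
 * above is much more expensive than the cached-RPA comparison in the
 * first pass, which is why the resolved RPA is cached via bacpy().
 */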
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk_to_return = NULL;
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			irk_to_return = irk;
			goto done;
		}
	}

done:
	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
						irk_to_return->val)) {
		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
					&irk_to_return->bdaddr);
		irk_to_return = NULL;
	}

	rcu_read_unlock();

	return irk_to_return;
}
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
	} else {
		bt_dev_err(hdev, "command tx timeout");
	}

	if (hdev->cmd_timeout)
		hdev->cmd_timeout(hdev);

	hci_tx_timeout_error_evt(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    ncmd_timer.work);

	bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

	/* During HCI_INIT phase no events can be injected if the ncmd timer
	 * triggers since the procedure has its own timeout handling.
	 */
	if (test_bit(HCI_INIT, &hdev->flags))
		return;

	/* This is an irrecoverable state, inject hardware error event */
	hci_reset_dev(hdev);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
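/* Illustrative note (added): data->present ends up as a bitmask of which
 * OOB value pairs are stored: 0x01 = P-192 only, 0x02 = P-256 only,
 * 0x03 = both, 0x00 = neither.
 */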
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
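/* Illustrative note (added): hci_get_next_instance() wraps around, so
 * repeatedly feeding it the instance it just returned walks the
 * configured advertising instances round-robin, which is what the
 * multi-advertising rotation logic builds on.
 */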
/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance) {
		if (hdev->adv_instance_timeout) {
			cancel_delayed_work(&hdev->adv_instance_expire);
			hdev->adv_instance_timeout = 0;
		}
		hdev->cur_adv_instance = 0x00;
	}

	cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}
/* This function requires the caller holds hdev->lock */
void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
{
	struct adv_info *adv_instance, *n;

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
		adv_instance->rpa_expired = rpa_expired;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
}

static void adv_instance_rpa_expired(struct work_struct *work)
{
	struct adv_info *adv_instance = container_of(work, struct adv_info,
						     rpa_expired_cb.work);

	BT_DBG("");

	adv_instance->rpa_expired = true;
}
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u16 adv_data_len, u8 *adv_data,
				      u16 scan_rsp_len, u8 *scan_rsp_data,
				      u16 timeout, u16 duration, s8 tx_power,
				      u32 min_interval, u32 max_interval,
				      u8 mesh_handle)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);
	if (adv) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
	} else {
		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
			return ERR_PTR(-EOVERFLOW);

		adv = kzalloc(sizeof(*adv), GFP_KERNEL);
		if (!adv)
			return ERR_PTR(-ENOMEM);

		adv->pending = true;
		adv->instance = instance;
		list_add(&adv->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv->flags = flags;
	adv->min_interval = min_interval;
	adv->max_interval = max_interval;
	adv->tx_power = tx_power;
	/* Defining a mesh_handle changes the timing units to ms,
	 * rather than seconds, and ties the instance to the requested
	 * mesh_tx queue.
	 */
	adv->mesh = mesh_handle;

	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
				  scan_rsp_len, scan_rsp_data);

	adv->timeout = timeout;
	adv->remaining_time = timeout;

	if (duration == 0)
		adv->duration = hdev->def_multi_adv_rotation_duration;
	else
		adv->duration = duration;

	INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);

	BT_DBG("%s for %d", hdev->name, instance);

	return adv;
}
/* This function requires the caller holds hdev->lock */
struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
				      u32 flags, u8 data_len, u8 *data,
				      u32 min_interval, u32 max_interval)
{
	struct adv_info *adv;

	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
				   min_interval, max_interval, 0);
	if (IS_ERR(adv))
		return adv;

	adv->periodic = true;
	adv->per_adv_data_len = data_len;

	if (data)
		memcpy(adv->per_adv_data, data, data_len);

	return adv;
}
/* This function requires the caller holds hdev->lock */
int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
			      u16 adv_data_len, u8 *adv_data,
			      u16 scan_rsp_len, u8 *scan_rsp_data)
{
	struct adv_info *adv;

	adv = hci_find_adv_instance(hdev, instance);

	/* If advertisement doesn't exist, we can't modify its data */
	if (!adv)
		return -ENOENT;

	if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
		memset(adv->adv_data, 0, sizeof(adv->adv_data));
		memcpy(adv->adv_data, adv_data, adv_data_len);
		adv->adv_data_len = adv_data_len;
		adv->adv_data_changed = true;
	}

	if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
		memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
		memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
		adv->scan_rsp_len = scan_rsp_len;
		adv->scan_rsp_changed = true;
	}

	/* Mark as changed if there are flags which would affect it */
	if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		adv->scan_rsp_changed = true;

	return 0;
}
/* This function requires the caller holds hdev->lock */
u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv)
		return 0;

	return adv->flags;
}
bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv;

	/* Instance 0x00 always set local name */
	if (instance == 0x00)
		return true;

	adv = hci_find_adv_instance(hdev, instance);
	if (!adv)
		return false;

	if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
	    adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
		return true;

	return adv->scan_rsp_len ? true : false;
}
/* This function requires the caller holds hdev->lock */
void hci_adv_monitors_clear(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int handle;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		hci_free_adv_monitor(hdev, monitor);

	idr_destroy(&hdev->adv_monitors_idr);
}

/* Frees the monitor structure and does some bookkeeping.
 * This function requires the caller holds hdev->lock.
 */
void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	struct adv_pattern *pattern;
	struct adv_pattern *tmp;

	if (!monitor)
		return;

	list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
		list_del(&pattern->list);
		kfree(pattern);
	}

	if (monitor->handle)
		idr_remove(&hdev->adv_monitors_idr, monitor->handle);

	if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
		hdev->adv_monitors_cnt--;
		mgmt_adv_monitor_removed(hdev, monitor->handle);
	}

	kfree(monitor);
}
/* Assigns handle to a monitor, and if offloading is supported and power is on,
 * also attempts to forward the request to the controller.
 * This function requires the caller holds hci_req_sync_lock.
 */
int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
{
	int min, max, handle;
	int status = 0;

	if (!monitor)
		return -EINVAL;

	hci_dev_lock(hdev);

	min = HCI_MIN_ADV_MONITOR_HANDLE;
	max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
	handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
			   GFP_KERNEL);

	hci_dev_unlock(hdev);

	if (handle < 0)
		return handle;

	monitor->handle = handle;

	if (!hdev_is_powered(hdev))
		return status;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE:
		bt_dev_dbg(hdev, "add monitor %d status %d",
			   monitor->handle, status);
		/* Message was not forwarded to controller - not an error */
		break;

	case HCI_ADV_MONITOR_EXT_MSFT:
		status = msft_add_monitor_pattern(hdev, monitor);
		bt_dev_dbg(hdev, "add monitor %d msft status %d",
			   handle, status);
		break;
	}

	return status;
}
/* Attempts to tell the controller and free the monitor. If somehow the
 * controller doesn't have a corresponding handle, remove anyway.
 * This function requires the caller holds hci_req_sync_lock.
 */
static int hci_remove_adv_monitor(struct hci_dev *hdev,
				  struct adv_monitor *monitor)
{
	int status = 0;
	int handle;

	switch (hci_get_adv_monitor_offload_ext(hdev)) {
	case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
		bt_dev_dbg(hdev, "remove monitor %d status %d",
			   monitor->handle, status);
		goto free_monitor;

	case HCI_ADV_MONITOR_EXT_MSFT:
		handle = monitor->handle;
		status = msft_remove_monitor(hdev, monitor);
		bt_dev_dbg(hdev, "remove monitor %d msft status %d",
			   handle, status);
		break;
	}

	/* In case no matching handle registered, just free the monitor */
	if (status == -ENOENT)
		goto free_monitor;

	return status;

free_monitor:
	if (status == -ENOENT)
		bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
			    monitor->handle);
	hci_free_adv_monitor(hdev, monitor);

	return status;
}
/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
{
	struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);

	if (!monitor)
		return -EINVAL;

	return hci_remove_adv_monitor(hdev, monitor);
}

/* This function requires the caller holds hci_req_sync_lock */
int hci_remove_all_adv_monitor(struct hci_dev *hdev)
{
	struct adv_monitor *monitor;
	int idr_next_id = 0;
	int status = 0;

	while (1) {
		monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
		if (!monitor)
			break;

		status = hci_remove_adv_monitor(hdev, monitor);
		if (status)
			return status;

		idr_next_id++;
	}

	return status;
}

/* This function requires the caller holds hdev->lock */
bool hci_is_adv_monitoring(struct hci_dev *hdev)
{
	return !idr_is_empty(&hdev->adv_monitors_idr);
}

int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
{
	if (msft_monitor_supported(hdev))
		return HCI_ADV_MONITOR_EXT_MSFT;

	return HCI_ADV_MONITOR_EXT_NONE;
}
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
				struct list_head *bdaddr_list, bdaddr_t *bdaddr,
				u8 type)
{
	struct bdaddr_list_with_irk *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

struct bdaddr_list_with_flags *
hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
				  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list_with_flags *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct bdaddr_list *b, *n;

	list_for_each_entry_safe(b, n, bdaddr_list, list) {
		list_del(&b->list);
		kfree(b);
	}
}
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type, u8 *peer_irk, u8 *local_irk)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	if (peer_irk)
		memcpy(entry->peer_irk, peer_irk, 16);

	if (local_irk)
		memcpy(entry->local_irk, local_irk, 16);

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type, u32 flags)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;
	entry->flags = flags;

	list_add(&entry->list, list);

	return 0;
}
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
				 u8 type)
{
	struct bdaddr_list_with_irk *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
				   u8 type)
{
	struct bdaddr_list_with_flags *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock or rcu_read_lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	rcu_read_lock();

	list_for_each_entry_rcu(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type) {
			rcu_read_unlock();
			return param;
		}
	}

	rcu_read_unlock();

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_del_init(struct hci_conn_params *param)
{
	if (list_empty(&param->action))
		return;

	list_del_rcu(&param->action);
	synchronize_rcu();
	INIT_LIST_HEAD(&param->action);
}
/* This function requires the caller holds hdev->lock */
void hci_pend_le_list_add(struct hci_conn_params *param,
			  struct list_head *list)
{
	list_add_rcu(&param->action, list);
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		bt_dev_err(hdev, "out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
void hci_conn_params_free(struct hci_conn_params *params)
{
	hci_pend_le_list_del_init(params);

	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_passive_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		hci_conn_params_free(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
static void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	BT_DBG("All LE connection parameters were removed");
}
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
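/* Illustrative summary (added) of the precedence implemented above:
 *
 *	forced static, or no public address,
 *	or (BR/EDR disabled and a static address set)  -> static random address
 *	otherwise                                      -> public address
 */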
static void hci_clear_wake_reason(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);

	hdev->wake_reason = 0;
	bacpy(&hdev->wake_addr, BDADDR_ANY);
	hdev->wake_addr_type = 0;

	hci_dev_unlock(hdev);
}

static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	struct hci_dev *hdev =
		container_of(nb, struct hci_dev, suspend_notifier);
	int ret = 0;

	/* Userspace has full control of this device. Do nothing. */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return NOTIFY_DONE;

	/* To avoid a potential race with hci_unregister_dev. */
	hci_dev_hold(hdev);

	if (action == PM_SUSPEND_PREPARE)
		ret = hci_suspend_dev(hdev);
	else if (action == PM_POST_SUSPEND)
		ret = hci_resume_dev(hdev);

	if (ret)
		bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
			   action, ret);

	hci_dev_put(hdev);
	return NOTIFY_DONE;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
{
	struct hci_dev *hdev;
	unsigned int alloc_size;

	alloc_size = sizeof(*hdev);
	if (sizeof_priv) {
		/* Fixme: May need ALIGN-ment? */
		alloc_size += sizeof_priv;
	}

	hdev = kzalloc(alloc_size, GFP_KERNEL);
	if (!hdev)
		return NULL;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->advmon_allowlist_duration = 300;
	hdev->advmon_no_filter_duration = 500;
	hdev->enable_advmon_interleave_scan = 0x00;	/* Default to disable */
	hdev->sniff_max_interval = 800;
#ifdef TIZEN_BT
	hdev->sniff_min_interval = 400;
#else
	hdev->sniff_min_interval = 80;
#endif

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
#ifdef TIZEN_BT
	/* automatically enable sniff mode for connection */
	hdev->idle_timeout = TIZEN_SNIFF_TIMEOUT * 1000;

	hdev->adv_filter_policy = 0x00;
	hdev->adv_type = 0x00;
#endif
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_scan_int_suspend = 0x0400;
	hdev->le_scan_window_suspend = 0x0012;
	hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
	hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
	hdev->le_scan_int_adv_monitor = 0x0060;
	hdev->le_scan_window_adv_monitor = 0x0030;
	hdev->le_scan_int_connect = 0x0060;
	hdev->le_scan_window_connect = 0x0060;
	hdev->le_conn_min_interval = 0x0018;
	hdev->le_conn_max_interval = 0x0028;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;
	hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
	hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
	hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
	hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
	hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
	hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
	hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
	hdev->max_le_tx_power = HCI_TX_POWER_INVALID;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
	hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
	hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;

	/* default 1.28 sec page scan */
	hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
	hdev->def_page_scan_int = 0x0800;
	hdev->def_page_scan_window = 0x0012;
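	/* Illustrative note (added): the interval/window values above are in
	 * 0.625 ms baseband slots, e.g. def_page_scan_int 0x0800 = 2048 slots
	 * = 1280 ms (the 1.28 s default), and le_scan_interval 0x0060 =
	 * 96 slots = 60 ms with a 0x0030 = 30 ms scan window.
	 */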
	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	ida_init(&hdev->unset_handle_ida);

	INIT_LIST_HEAD(&hdev->mesh_pending);
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->reject_list);
	INIT_LIST_HEAD(&hdev->accept_list);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_accept_list);
	INIT_LIST_HEAD(&hdev->le_resolv_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);
	INIT_LIST_HEAD(&hdev->blocked_keys);
	INIT_LIST_HEAD(&hdev->monitored_devices);

	INIT_LIST_HEAD(&hdev->local_codecs);
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	hci_cmd_sync_init(hdev);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
	INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);

	hci_devcd_setup(hdev);
	hci_request_setup(hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev_priv);
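/* Illustrative driver-side sketch (added; not from the original source):
 * a transport driver typically pairs the allocation above with
 * hci_register_dev(), roughly:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;		// or HCI_UART, HCI_SDIO, ...
 *	hdev->open  = my_open;		// hypothetical driver callbacks
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */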
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	error = dev_set_name(&hdev->dev, "hci%u", id);
	if (error)
		return error;

	hdev->name = dev_name(&hdev->dev);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
						      hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hci_leds_init(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_PRIMARY) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	/* Mark Remote Wakeup connection flag as supported if driver has wakeup
	 * capability.
	 */
	if (hdev->wakeup)
		hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	error = hci_register_suspend_notifier(hdev);
	if (error)
		BT_WARN("register suspend notifier failed error:%d\n", error);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	idr_init(&hdev->adv_monitors_idr);
	msft_register(hdev);

	return id;

err_wqueue:
	debugfs_remove_recursive(hdev->debugfs);
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	cancel_work_sync(&hdev->power_on);

	hci_cmd_sync_clear(hdev);

	hci_unregister_suspend_notifier(hdev);

	msft_unregister(hdev);

	hci_dev_do_close(hdev);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);
	/* Actual cleanup is deferred until hci_release_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Release HCI device */
void hci_release_dev(struct hci_dev *hdev)
{
	debugfs_remove_recursive(hdev->debugfs);
	kfree_const(hdev->hw_info);
	kfree_const(hdev->fw_info);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->reject_list);
	hci_bdaddr_list_clear(&hdev->accept_list);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_adv_monitors_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_blocked_keys_clear(hdev);
	hci_codec_list_clear(&hdev->local_codecs);
	hci_dev_unlock(hdev);

	ida_destroy(&hdev->unset_handle_ida);
	ida_simple_remove(&hci_index_ida, hdev->id);
	kfree_skb(hdev->sent_cmd);
	kfree_skb(hdev->recv_event);
	kfree(hdev);
}
EXPORT_SYMBOL(hci_release_dev);

int hci_register_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (!hdev->suspend_notifier.notifier_call &&
	    !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
		hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
		ret = register_pm_notifier(&hdev->suspend_notifier);
	}

	return ret;
}

int hci_unregister_suspend_notifier(struct hci_dev *hdev)
{
	int ret = 0;

	if (hdev->suspend_notifier.notifier_call) {
		ret = unregister_pm_notifier(&hdev->suspend_notifier);
		if (!ret)
			hdev->suspend_notifier.notifier_call = NULL;
	}

	return ret;
}

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Suspend should only act when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to suspend */
	if (mgmt_powering_down(hdev))
		return 0;

	/* Cancel potentially blocking sync operation before suspend */
	__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);

	hci_req_sync_lock(hdev);
	ret = hci_suspend_sync(hdev);
	hci_req_sync_unlock(hdev);

	hci_clear_wake_reason(hdev);
	mgmt_suspending(hdev, hdev->suspend_state);

	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return ret;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	int ret;

	bt_dev_dbg(hdev, "");

	/* Resume should only act when powered. */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return 0;

	/* If powering down don't attempt to resume */
	if (mgmt_powering_down(hdev))
		return 0;

	hci_req_sync_lock(hdev);
	ret = hci_resume_sync(hdev);
	hci_req_sync_unlock(hdev);

	mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
		      hdev->wake_addr_type);

	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return ret;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	skb_put_data(skb, hw_err, 3);

	bt_dev_err(hdev, "Injecting HCI hardware error event");

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
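
/* The three bytes injected above form a complete HCI event packet on the
 * wire: event code HCI_EV_HARDWARE_ERROR (0x10), parameter length 0x01
 * and hardware code 0x00. The event path is then expected to treat it
 * like a real hardware error and schedule the error_reset work, giving
 * drivers a uniform way to force a full stack-driven reset.
 */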

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	switch (hci_skb_pkt_type(skb)) {
	case HCI_EVENT_PKT:
		break;
	case HCI_ACLDATA_PKT:
		/* Detect if ISO packet has been sent as ACL */
		if (hci_conn_num(hdev, ISO_LINK)) {
			__u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
			__u8 type;

			type = hci_conn_lookup_type(hdev, hci_handle(handle));
			if (type == ISO_LINK)
				hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
		}
		break;
	case HCI_SCODATA_PKT:
		break;
	case HCI_ISODATA_PKT:
		break;
	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
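
/* A driver's RX path would typically hand a received frame off like this
 * (sketch only; data/count stand for a hypothetical transport buffer):
 *
 *	struct sk_buff *skb = bt_skb_alloc(count, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
 *	skb_put_data(skb, data, count);
 *
 *	return hci_recv_frame(hdev, skb);
 */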

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	hci_skb_pkt_type(skb) = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->hw_info);
	hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_hw_info);

void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
{
	va_list vargs;

	va_start(vargs, fmt);
	kfree_const(hdev->fw_info);
	hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
	va_end(vargs);
}
EXPORT_SYMBOL(hci_set_fw_info);
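
/* Both helpers take printf-style arguments, so a driver's setup routine
 * can record version details directly (illustrative values):
 *
 *	hci_set_hw_info(hdev, "rev %u build %u", rev, build);
 *	hci_set_fw_info(hdev, "version %s", fw_version);
 */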

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
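
/* Upper protocols register a statically allocated callback block; L2CAP,
 * for instance, does essentially this (sketch, field set trimmed):
 *
 *	static struct hci_cb l2cap_cb = {
 *		.name		= "L2CAP",
 *		.connect_cfm	= l2cap_connect_cfm,
 *		.disconn_cfm	= l2cap_disconn_cfm,
 *	};
 *
 *	hci_register_cb(&l2cap_cb);
 */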

static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		bt_dev_err(hdev, "sending frame failed (%d)", err);
		kfree_skb(skb);
		return err;
	}

	return 0;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command");
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
		   const void *param)
{
	struct sk_buff *skb;

	if (hci_opcode_ogf(opcode) != 0x3f) {
		/* A controller receiving a command shall respond with either
		 * a Command Status Event or a Command Complete Event.
		 * Therefore, all standard HCI commands must be sent via the
		 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
		 * Some vendors do not comply with this rule for vendor-specific
		 * commands and do not return any event. We want to support
		 * unresponded commands for such cases only.
		 */
		bt_dev_err(hdev, "unresponded command not supported");
		return -EINVAL;
	}

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		return -ENOMEM;
	}

	hci_send_frame(hdev, skb);

	return 0;
}
EXPORT_SYMBOL(__hci_cmd_send);
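
/* Example of a fire-and-forget vendor command (sketch only; the OCF and
 * payload are made up):
 *
 *	static const u8 param[] = { 0x01 };
 *
 *	__hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
 *		       sizeof(param), param);
 *
 * Anything outside OGF 0x3f is rejected above, since standard commands
 * must go through hci_send_cmd() or the hci_cmd_sync helpers so that
 * their completion events are accounted for.
 */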

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Get data from last received event */
void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
{
	struct hci_event_hdr *hdr;
	int offset;

	if (!hdev->recv_event)
		return NULL;

	hdr = (void *)hdev->recv_event->data;
	offset = sizeof(*hdr);

	if (hdr->evt != event) {
		/* In case of LE metaevent check the subevent match */
		if (hdr->evt == HCI_EV_LE_META) {
			struct hci_ev_le_meta *ev;

			ev = (void *)hdev->recv_event->data + offset;
			offset += sizeof(*ev);
			if (ev->subevent == event)
				goto found;
		}
		return NULL;
	}

found:
	bt_dev_dbg(hdev, "event 0x%2.2x", event);

	return hdev->recv_event->data + offset;
}

static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
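
/* hci_handle_pack() folds the 12-bit connection handle and the 4-bit
 * packet boundary/broadcast flags into the single 16-bit field of the
 * ACL header, roughly (handle & 0x0fff) | (flags << 12); hci_handle()
 * and hci_flags() on the RX side undo exactly this packing.
 */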

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
{
	struct hci_iso_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ISO_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;
	__u16 flags;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;

	list = skb_shinfo(skb)->frag_list;

	flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
	hci_add_iso_hdr(skb, conn->handle, flags);

	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		__skb_queue_tail(queue, skb);

		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
			flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
						   0x00);
			hci_add_iso_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);
	}
}

/* Send ISO data */
void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hci_queue_iso(conn, &conn->data_q, skb);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
{
	struct hci_dev *hdev;
	int cnt, q;

	if (!conn) {
		*quote = 0;
		return;
	}

	hdev = conn->hdev;

	switch (conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	case ISO_LINK:
		cnt = hdev->iso_mtu ? hdev->iso_cnt :
			hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		bt_dev_err(hdev, "unknown link type %d", conn->type);
	}

	q = cnt / num;
	*quote = q ? q : 1;
}
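
/* Worked example of the fair-share quota above: with num = 3 connections
 * of the given type competing for cnt = 8 free controller buffers, each
 * scheduling round grants a quote of 8 / 3 = 2 packets; the quote is
 * clamped to a minimum of 1 so a connection can never starve outright.
 */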

static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	hci_quote_sent(conn, num, quote);

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	bt_dev_err(hdev, "link tx timeout");

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			bt_dev_err(hdev, "killing stalled connection %pMR",
				   &c->dst);
			/* hci_disconnect might sleep, so, we have to release
			 * the RCU read lock before calling it.
			 */
			rcu_read_unlock();
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
			rcu_read_lock();
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	hci_quote_sent(chan->conn, num, quote);

	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
{
	unsigned long last_tx;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return;

	switch (type) {
	case LE_LINK:
		last_tx = hdev->le_last_tx;
		break;
	default:
		last_tx = hdev->acl_last_tx;
		break;
	}

	/* tx timeout must be longer than maximum link supervision timeout
	 * (40.9 seconds)
	 */
	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
		hci_link_tx_to(hdev, type);
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt, ACL_LINK);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	BT_DBG("%s", hdev->name);

	type = ACL_LINK;
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;

	__check_timeout(hdev, cnt, type);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	__check_timeout(hdev, cnt, LE_LINK);

	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;

			/* Send pending SCO packets right away */
			hci_sched_sco(hdev);
			hci_sched_esco(hdev);
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_sched_iso(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, *cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ISO_LINK))
		return;

	cnt = hdev->iso_pkts ? &hdev->iso_cnt :
		hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
	while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
			(*cnt)--;
		}
	}
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_iso(hdev);
		hci_sched_acl(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
			   handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		hci_skb_pkt_status(skb) = flags & 0x03;
		sco_recv_scodata(conn, skb);
		return;
	} else {
		bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
				       handle);
	}

	kfree_skb(skb);
}

static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_iso_hdr *hdr;
	struct hci_conn *conn;
	__u16 handle, flags;

	hdr = skb_pull_data(skb, sizeof(*hdr));
	if (!hdr) {
		bt_dev_err(hdev, "ISO packet too small");
		goto drop;
	}

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
		   handle, flags);

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (!conn) {
		bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
			   handle);
		goto drop;
	}

	/* Send to upper protocol */
	iso_recv(conn, skb, flags);
	return;

drop:
	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If we reach this point this event matches the last command sent */
	hci_dev_clear_flag(hdev, HCI_CMD_PENDING);

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		dev_kfree_skb_irq(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	/* The kcov_remote functions are used to collect packet parsing
	 * coverage from this background thread and associate it with the
	 * syscall's thread which originally injected the packet. This
	 * helps fuzzing the kernel.
	 */
	for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
		kcov_remote_start_common(skb_get_kcov_handle(skb));

		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* If the device has been opened in HCI_USER_CHANNEL,
		 * the userspace has exclusive access to device.
		 * When device is HCI_INIT, we still need to process
		 * the data packets to the driver in order
		 * to complete its setup().
		 */
		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    !test_bit(HCI_INIT, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
			case HCI_ISODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		case HCI_ISODATA_PKT:
			BT_DBG("%s ISO data packet", hdev->name);
			hci_isodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			int res;

			if (hci_req_status_pend(hdev))
				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
			atomic_dec(&hdev->cmd_cnt);

			res = hci_send_frame(hdev, skb);
			if (res < 0)
				__hci_cmd_sync_cancel(hdev, -res);

			rcu_read_lock();
			if (test_bit(HCI_RESET, &hdev->flags) ||
			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
						   HCI_CMD_TIMEOUT);
			rcu_read_unlock();
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}