2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/kcov.h>
33 #include <linux/property.h>
34 #include <linux/suspend.h>
35 #include <linux/wait.h>
36 #include <asm/unaligned.h>
38 #include <net/bluetooth/bluetooth.h>
39 #include <net/bluetooth/hci_core.h>
40 #include <net/bluetooth/l2cap.h>
41 #include <net/bluetooth/mgmt.h>
43 #include "hci_request.h"
44 #include "hci_debugfs.h"
49 #include "hci_codec.h"
51 static void hci_rx_work(struct work_struct *work);
52 static void hci_cmd_work(struct work_struct *work);
53 static void hci_tx_work(struct work_struct *work);
56 LIST_HEAD(hci_dev_list);
57 DEFINE_RWLOCK(hci_dev_list_lock);
59 /* HCI callback list */
60 LIST_HEAD(hci_cb_list);
61 DEFINE_MUTEX(hci_cb_list_lock);
63 /* HCI ID Numbering */
64 static DEFINE_IDA(hci_index_ida);
66 static int hci_scan_req(struct hci_request *req, unsigned long opt)
70 BT_DBG("%s %x", req->hdev->name, scan);
72 /* Inquiry and Page scans */
73 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
77 static int hci_auth_req(struct hci_request *req, unsigned long opt)
81 BT_DBG("%s %x", req->hdev->name, auth);
84 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
88 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
92 BT_DBG("%s %x", req->hdev->name, encrypt);
95 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
99 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
101 __le16 policy = cpu_to_le16(opt);
103 BT_DBG("%s %x", req->hdev->name, policy);
105 /* Default link policy */
106 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
110 /* Get HCI device by index.
111 * Device is held on return. */
112 struct hci_dev *hci_dev_get(int index)
114 struct hci_dev *hdev = NULL, *d;
121 read_lock(&hci_dev_list_lock);
122 list_for_each_entry(d, &hci_dev_list, list) {
123 if (d->id == index) {
124 hdev = hci_dev_hold(d);
128 read_unlock(&hci_dev_list_lock);
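/*
 * Illustrative sketch, not part of the original file: every successful
 * hci_dev_get() takes a reference that the caller must balance with
 * hci_dev_put(). The helper name is hypothetical.
 */
static int example_with_dev(int index)
{
	struct hci_dev *hdev;

	hdev = hci_dev_get(index);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s resolved from index %d", hdev->name, index);

	hci_dev_put(hdev);
	return 0;
}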
132 /* ---- Inquiry support ---- */
134 bool hci_discovery_active(struct hci_dev *hdev)
136 struct discovery_state *discov = &hdev->discovery;
138 switch (discov->state) {
139 case DISCOVERY_FINDING:
140 case DISCOVERY_RESOLVING:
148 void hci_discovery_set_state(struct hci_dev *hdev, int state)
150 int old_state = hdev->discovery.state;
152 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
154 if (old_state == state)
157 hdev->discovery.state = state;
160 case DISCOVERY_STOPPED:
161 hci_update_passive_scan(hdev);
163 if (old_state != DISCOVERY_STARTING)
164 mgmt_discovering(hdev, 0);
166 case DISCOVERY_STARTING:
168 case DISCOVERY_FINDING:
169 mgmt_discovering(hdev, 1);
171 case DISCOVERY_RESOLVING:
173 case DISCOVERY_STOPPING:
179 bool hci_le_discovery_active(struct hci_dev *hdev)
181 struct discovery_state *discov = &hdev->le_discovery;
183 switch (discov->state) {
184 case DISCOVERY_FINDING:
185 case DISCOVERY_RESOLVING:
193 void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
195 BT_DBG("%s state %u -> %u", hdev->name,
196 hdev->le_discovery.state, state);
198 if (hdev->le_discovery.state == state)
202 case DISCOVERY_STOPPED:
203 hci_update_passive_scan(hdev);
205 if (hdev->le_discovery.state != DISCOVERY_STARTING)
206 mgmt_le_discovering(hdev, 0);
208 case DISCOVERY_STARTING:
210 case DISCOVERY_FINDING:
211 mgmt_le_discovering(hdev, 1);
213 case DISCOVERY_RESOLVING:
215 case DISCOVERY_STOPPING:
219 hdev->le_discovery.state = state;
223 void hci_inquiry_cache_flush(struct hci_dev *hdev)
225 struct discovery_state *cache = &hdev->discovery;
226 struct inquiry_entry *p, *n;
228 list_for_each_entry_safe(p, n, &cache->all, all) {
233 INIT_LIST_HEAD(&cache->unknown);
234 INIT_LIST_HEAD(&cache->resolve);
237 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
240 struct discovery_state *cache = &hdev->discovery;
241 struct inquiry_entry *e;
243 BT_DBG("cache %p, %pMR", cache, bdaddr);
245 list_for_each_entry(e, &cache->all, all) {
246 if (!bacmp(&e->data.bdaddr, bdaddr))
253 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
256 struct discovery_state *cache = &hdev->discovery;
257 struct inquiry_entry *e;
259 BT_DBG("cache %p, %pMR", cache, bdaddr);
261 list_for_each_entry(e, &cache->unknown, list) {
262 if (!bacmp(&e->data.bdaddr, bdaddr))
269 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
273 struct discovery_state *cache = &hdev->discovery;
274 struct inquiry_entry *e;
276 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
278 list_for_each_entry(e, &cache->resolve, list) {
279 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
281 if (!bacmp(&e->data.bdaddr, bdaddr))
288 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
289 struct inquiry_entry *ie)
291 struct discovery_state *cache = &hdev->discovery;
292 struct list_head *pos = &cache->resolve;
293 struct inquiry_entry *p;
297 list_for_each_entry(p, &cache->resolve, list) {
298 if (p->name_state != NAME_PENDING &&
299 abs(p->data.rssi) >= abs(ie->data.rssi))
304 list_add(&ie->list, pos);
307 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
310 struct discovery_state *cache = &hdev->discovery;
311 struct inquiry_entry *ie;
314 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
316 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
319 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
321 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
323 if (!ie->data.ssp_mode)
324 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
326 if (ie->name_state == NAME_NEEDED &&
327 data->rssi != ie->data.rssi) {
328 ie->data.rssi = data->rssi;
329 hci_inquiry_cache_update_resolve(hdev, ie);
335 /* Entry not in the cache. Add new one. */
336 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
338 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
342 list_add(&ie->all, &cache->all);
345 ie->name_state = NAME_KNOWN;
347 ie->name_state = NAME_NOT_KNOWN;
348 list_add(&ie->list, &cache->unknown);
352 if (name_known && ie->name_state != NAME_KNOWN &&
353 ie->name_state != NAME_PENDING) {
354 ie->name_state = NAME_KNOWN;
358 memcpy(&ie->data, data, sizeof(*data));
359 ie->timestamp = jiffies;
360 cache->timestamp = jiffies;
362 if (ie->name_state == NAME_NOT_KNOWN)
363 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
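/*
 * Illustrative sketch, not part of the original file: how an event handler
 * might feed an inquiry result into the cache. The helper name and the
 * origin of *data are hypothetical; the locking mirrors real callers.
 */
static void example_on_inquiry_result(struct hci_dev *hdev,
				      struct inquiry_data *data)
{
	u32 flags;

	hci_dev_lock(hdev);
	flags = hci_inquiry_cache_update(hdev, data, false);
	hci_dev_unlock(hdev);

	BT_DBG("device found flags 0x%08x", flags);
}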
369 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
371 struct discovery_state *cache = &hdev->discovery;
372 struct inquiry_info *info = (struct inquiry_info *) buf;
373 struct inquiry_entry *e;
376 list_for_each_entry(e, &cache->all, all) {
377 struct inquiry_data *data = &e->data;
382 bacpy(&info->bdaddr, &data->bdaddr);
383 info->pscan_rep_mode = data->pscan_rep_mode;
384 info->pscan_period_mode = data->pscan_period_mode;
385 info->pscan_mode = data->pscan_mode;
386 memcpy(info->dev_class, data->dev_class, 3);
387 info->clock_offset = data->clock_offset;
393 BT_DBG("cache %p, copied %d", cache, copied);
397 static int hci_inq_req(struct hci_request *req, unsigned long opt)
399 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
400 struct hci_dev *hdev = req->hdev;
401 struct hci_cp_inquiry cp;
403 BT_DBG("%s", hdev->name);
405 if (test_bit(HCI_INQUIRY, &hdev->flags))
409 memcpy(&cp.lap, &ir->lap, 3);
410 cp.length = ir->length;
411 cp.num_rsp = ir->num_rsp;
412 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
417 int hci_inquiry(void __user *arg)
419 __u8 __user *ptr = arg;
420 struct hci_inquiry_req ir;
421 struct hci_dev *hdev;
422 int err = 0, do_inquiry = 0, max_rsp;
426 if (copy_from_user(&ir, ptr, sizeof(ir)))
429 hdev = hci_dev_get(ir.dev_id);
433 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
438 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
443 if (hdev->dev_type != HCI_PRIMARY) {
448 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
453 /* Restrict maximum inquiry length to 60 seconds */
454 if (ir.length > 60) {
460 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
461 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
462 hci_inquiry_cache_flush(hdev);
465 hci_dev_unlock(hdev);
467 timeo = ir.length * msecs_to_jiffies(2000);
470 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
475 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
476 * cleared). If it is interrupted by a signal, return -EINTR.
478 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
479 TASK_INTERRUPTIBLE)) {
485 /* for unlimited number of responses we will use buffer with 255 entries */
488 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
490 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
491 * copy it to the user space.
493 buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
500 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
501 hci_dev_unlock(hdev);
503 BT_DBG("num_rsp %d", ir.num_rsp);
505 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
507 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
508 ir.num_rsp))
509 err = -EFAULT;
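/*
 * Illustrative userspace sketch, not part of this file: exercising the
 * HCIINQUIRY ioctl implemented by hci_inquiry() above. Assumes the BlueZ
 * userspace headers; error handling trimmed.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[8];
 *	} buf = {0};
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	buf.ir.dev_id  = 0;                // hci0
 *	buf.ir.flags   = IREQ_CACHE_FLUSH; // force a fresh inquiry
 *	buf.ir.lap[0]  = 0x33;             // GIAC 0x9e8b33
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *	buf.ir.length  = 8;                // 8 * 1.28s inquiry
 *	buf.ir.num_rsp = 8;
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) == 0)
 *		// buf.ir.num_rsp now holds the number of responses copied,
 *		// followed in memory by that many struct inquiry_info.
 */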
520 static int hci_dev_do_open(struct hci_dev *hdev)
524 BT_DBG("%s %p", hdev->name, hdev);
526 hci_req_sync_lock(hdev);
528 ret = hci_dev_open_sync(hdev);
530 hci_req_sync_unlock(hdev);
534 /* ---- HCI ioctl helpers ---- */
536 int hci_dev_open(__u16 dev)
538 struct hci_dev *hdev;
541 hdev = hci_dev_get(dev);
545 /* Devices that are marked as unconfigured can only be powered
546 * up as user channel. Trying to bring them up as normal devices
547 * will result in a failure. Only user channel operation is
550 * When this function is called for a user channel, the flag
551 * HCI_USER_CHANNEL will be set first before attempting to
554 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
555 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
560 /* We need to ensure that no other power on/off work is pending
561 * before proceeding to call hci_dev_do_open. This is
562 * particularly important if the setup procedure has not yet completed. */
565 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
566 cancel_delayed_work(&hdev->power_off);
568 /* After this call it is guaranteed that the setup procedure
569 * has finished. This means that error conditions like RFKILL
570 * or no valid public or static random address apply.
572 flush_workqueue(hdev->req_workqueue);
574 /* For controllers not using the management interface and that
575 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
576 * so that pairing works for them. Once the management interface
577 * is in use this bit will be cleared again and userspace has
578 * to explicitly enable it.
580 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
581 !hci_dev_test_flag(hdev, HCI_MGMT))
582 hci_dev_set_flag(hdev, HCI_BONDABLE);
584 err = hci_dev_do_open(hdev);
591 int hci_dev_do_close(struct hci_dev *hdev)
595 BT_DBG("%s %p", hdev->name, hdev);
597 hci_req_sync_lock(hdev);
599 err = hci_dev_close_sync(hdev);
601 hci_req_sync_unlock(hdev);
606 int hci_dev_close(__u16 dev)
608 struct hci_dev *hdev;
611 hdev = hci_dev_get(dev);
615 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
620 cancel_work_sync(&hdev->power_on);
621 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
622 cancel_delayed_work(&hdev->power_off);
624 err = hci_dev_do_close(hdev);
631 static int hci_dev_do_reset(struct hci_dev *hdev)
635 BT_DBG("%s %p", hdev->name, hdev);
637 hci_req_sync_lock(hdev);
640 skb_queue_purge(&hdev->rx_q);
641 skb_queue_purge(&hdev->cmd_q);
643 /* Cancel these to avoid queueing non-chained pending work */
644 hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
645 /* Wait for
647 * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
648 *    queue_delayed_work(&hdev->{cmd,ncmd}_timer)
650 * inside RCU section to see the flag or complete scheduling.
651 */
652 synchronize_rcu();
653 /* Explicitly cancel works in case scheduled after setting the flag. */
654 cancel_delayed_work(&hdev->cmd_timer);
655 cancel_delayed_work(&hdev->ncmd_timer);
657 /* Avoid potential lockdep warnings from the *_flush() calls by
658 * ensuring the workqueue is empty up front.
660 drain_workqueue(hdev->workqueue);
663 hci_inquiry_cache_flush(hdev);
664 hci_conn_hash_flush(hdev);
665 hci_dev_unlock(hdev);
670 hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
672 atomic_set(&hdev->cmd_cnt, 1);
678 ret = hci_reset_sync(hdev);
680 hci_req_sync_unlock(hdev);
684 int hci_dev_reset(__u16 dev)
686 struct hci_dev *hdev;
689 hdev = hci_dev_get(dev);
693 if (!test_bit(HCI_UP, &hdev->flags)) {
698 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
703 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
708 err = hci_dev_do_reset(hdev);
715 int hci_dev_reset_stat(__u16 dev)
717 struct hci_dev *hdev;
720 hdev = hci_dev_get(dev);
724 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
729 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
734 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
741 static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
743 bool conn_changed, discov_changed;
745 BT_DBG("%s scan 0x%02x", hdev->name, scan);
747 if ((scan & SCAN_PAGE))
748 conn_changed = !hci_dev_test_and_set_flag(hdev,
751 conn_changed = hci_dev_test_and_clear_flag(hdev,
754 if ((scan & SCAN_INQUIRY)) {
755 discov_changed = !hci_dev_test_and_set_flag(hdev,
758 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
759 discov_changed = hci_dev_test_and_clear_flag(hdev,
763 if (!hci_dev_test_flag(hdev, HCI_MGMT))
766 if (conn_changed || discov_changed) {
767 /* In case this was disabled through mgmt */
768 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
770 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
771 hci_update_adv_data(hdev, hdev->cur_adv_instance);
773 mgmt_new_settings(hdev);
777 int hci_dev_cmd(unsigned int cmd, void __user *arg)
779 struct hci_dev *hdev;
780 struct hci_dev_req dr;
783 if (copy_from_user(&dr, arg, sizeof(dr)))
786 hdev = hci_dev_get(dr.dev_id);
790 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
795 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
800 if (hdev->dev_type != HCI_PRIMARY) {
805 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
812 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
813 HCI_INIT_TIMEOUT, NULL);
817 if (!lmp_encrypt_capable(hdev)) {
822 if (!test_bit(HCI_AUTH, &hdev->flags)) {
823 /* Auth must be enabled first */
824 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
825 HCI_INIT_TIMEOUT, NULL);
830 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
831 HCI_INIT_TIMEOUT, NULL);
835 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
836 HCI_INIT_TIMEOUT, NULL);
838 /* Ensure that the connectable and discoverable states
839 * get correctly modified as this was a non-mgmt change.
842 hci_update_passive_scan_state(hdev, dr.dev_opt);
846 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
847 HCI_INIT_TIMEOUT, NULL);
851 hdev->link_mode = ((__u16) dr.dev_opt) &
852 (HCI_LM_MASTER | HCI_LM_ACCEPT);
856 if (hdev->pkt_type == (__u16) dr.dev_opt)
859 hdev->pkt_type = (__u16) dr.dev_opt;
860 mgmt_phy_configuration_changed(hdev, NULL);
864 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
865 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
869 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
870 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
883 int hci_get_dev_list(void __user *arg)
885 struct hci_dev *hdev;
886 struct hci_dev_list_req *dl;
887 struct hci_dev_req *dr;
888 int n = 0, size, err;
891 if (get_user(dev_num, (__u16 __user *) arg))
894 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
897 size = sizeof(*dl) + dev_num * sizeof(*dr);
899 dl = kzalloc(size, GFP_KERNEL);
905 read_lock(&hci_dev_list_lock);
906 list_for_each_entry(hdev, &hci_dev_list, list) {
907 unsigned long flags = hdev->flags;
909 /* When the auto-off is configured it means the transport
910 * is running, but in that case still indicate that the
911 * device is actually down.
913 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
914 flags &= ~BIT(HCI_UP);
916 (dr + n)->dev_id = hdev->id;
917 (dr + n)->dev_opt = flags;
922 read_unlock(&hci_dev_list_lock);
925 size = sizeof(*dl) + n * sizeof(*dr);
927 err = copy_to_user(arg, dl, size);
930 return err ? -EFAULT : 0;
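/*
 * Illustrative userspace sketch, not part of this file: the matching
 * HCIGETDEVLIST caller, mirroring what hcitool does. Error handling trimmed.
 *
 *	struct hci_dev_list_req *dl;
 *	int i, dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *
 *	if (ioctl(dd, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u up=%d\n", dl->dev_req[i].dev_id,
 *			       !!(dl->dev_req[i].dev_opt & (1 << HCI_UP)));
 */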
933 int hci_get_dev_info(void __user *arg)
935 struct hci_dev *hdev;
936 struct hci_dev_info di;
940 if (copy_from_user(&di, arg, sizeof(di)))
943 hdev = hci_dev_get(di.dev_id);
947 /* When the auto-off is configured it means the transport
948 * is running, but in that case still indicate that the
949 * device is actually down.
951 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
952 flags = hdev->flags & ~BIT(HCI_UP);
956 strcpy(di.name, hdev->name);
957 di.bdaddr = hdev->bdaddr;
958 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
960 di.pkt_type = hdev->pkt_type;
961 if (lmp_bredr_capable(hdev)) {
962 di.acl_mtu = hdev->acl_mtu;
963 di.acl_pkts = hdev->acl_pkts;
964 di.sco_mtu = hdev->sco_mtu;
965 di.sco_pkts = hdev->sco_pkts;
967 di.acl_mtu = hdev->le_mtu;
968 di.acl_pkts = hdev->le_pkts;
972 di.link_policy = hdev->link_policy;
973 di.link_mode = hdev->link_mode;
975 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
976 memcpy(&di.features, &hdev->features, sizeof(di.features));
978 if (copy_to_user(arg, &di, sizeof(di)))
986 /* ---- Interface to HCI drivers ---- */
988 static int hci_rfkill_set_block(void *data, bool blocked)
990 struct hci_dev *hdev = data;
992 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
994 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
998 hci_dev_set_flag(hdev, HCI_RFKILLED);
999 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1000 !hci_dev_test_flag(hdev, HCI_CONFIG))
1001 hci_dev_do_close(hdev);
1003 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1009 static const struct rfkill_ops hci_rfkill_ops = {
1010 .set_block = hci_rfkill_set_block,
1013 static void hci_power_on(struct work_struct *work)
1015 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1018 BT_DBG("%s", hdev->name);
1020 if (test_bit(HCI_UP, &hdev->flags) &&
1021 hci_dev_test_flag(hdev, HCI_MGMT) &&
1022 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1023 cancel_delayed_work(&hdev->power_off);
1024 err = hci_powered_update_sync(hdev);
1025 mgmt_power_on(hdev, err);
1029 err = hci_dev_do_open(hdev);
1032 mgmt_set_powered_failed(hdev, err);
1033 hci_dev_unlock(hdev);
1037 /* During the HCI setup phase, a few error conditions are
1038 * ignored and they need to be checked now. If they are still
1039 * valid, it is important to turn the device back off.
1041 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
1042 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
1043 (hdev->dev_type == HCI_PRIMARY &&
1044 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1045 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
1046 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
1047 hci_dev_do_close(hdev);
1048 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
1049 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1050 HCI_AUTO_OFF_TIMEOUT);
1053 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
1054 /* For unconfigured devices, set the HCI_RAW flag
1055 * so that userspace can easily identify them.
1057 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1058 set_bit(HCI_RAW, &hdev->flags);
1060 /* For fully configured devices, this will send
1061 * the Index Added event. For unconfigured devices,
1062 * it will send Unconfigured Index Added event.
1064 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
1065 * and no event will be sent.
1067 mgmt_index_added(hdev);
1068 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
1069 /* When the controller is now configured, then it
1070 * is important to clear the HCI_RAW flag.
1072 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1073 clear_bit(HCI_RAW, &hdev->flags);
1075 /* Powering on the controller with HCI_CONFIG set only
1076 * happens with the transition from unconfigured to
1077 * configured. This will send the Index Added event.
1079 mgmt_index_added(hdev);
1083 static void hci_power_off(struct work_struct *work)
1085 struct hci_dev *hdev = container_of(work, struct hci_dev,
1088 BT_DBG("%s", hdev->name);
1090 hci_dev_do_close(hdev);
1093 static void hci_error_reset(struct work_struct *work)
1095 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
1097 BT_DBG("%s", hdev->name);
1100 hdev->hw_error(hdev, hdev->hw_error_code);
1102 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
1104 if (hci_dev_do_close(hdev))
1107 hci_dev_do_open(hdev);
1110 void hci_uuids_clear(struct hci_dev *hdev)
1112 struct bt_uuid *uuid, *tmp;
1114 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1115 list_del(&uuid->list);
1120 void hci_link_keys_clear(struct hci_dev *hdev)
1122 struct link_key *key;
1124 list_for_each_entry(key, &hdev->link_keys, list) {
1125 list_del_rcu(&key->list);
1126 kfree_rcu(key, rcu);
1130 void hci_smp_ltks_clear(struct hci_dev *hdev)
1134 list_for_each_entry(k, &hdev->long_term_keys, list) {
1135 list_del_rcu(&k->list);
1140 void hci_smp_irks_clear(struct hci_dev *hdev)
1144 list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
1145 list_del_rcu(&k->list);
1150 void hci_blocked_keys_clear(struct hci_dev *hdev)
1152 struct blocked_key *b;
1154 list_for_each_entry(b, &hdev->blocked_keys, list) {
1155 list_del_rcu(&b->list);
1160 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
1162 bool blocked = false;
1163 struct blocked_key *b;
1166 list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
1167 if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
1177 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1182 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
1183 if (bacmp(bdaddr, &k->bdaddr) == 0) {
1186 if (hci_is_blocked_key(hdev,
1187 HCI_BLOCKED_KEY_TYPE_LINKKEY,
1189 bt_dev_warn_ratelimited(hdev,
1190 "Link key blocked for %pMR",
1203 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1204 u8 key_type, u8 old_key_type)
1207 if (key_type < 0x03)
1210 /* Debug keys are insecure so don't store them persistently */
1211 if (key_type == HCI_LK_DEBUG_COMBINATION)
1214 /* Changed combination key and there's no previous one */
1215 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1218 /* Security mode 3 case */
1222 /* BR/EDR key derived using SC from an LE link */
1223 if (conn->type == LE_LINK)
1226 /* Neither local nor remote side had no-bonding as requirement,
i.e. both sides requested some form of bonding */
1227 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1230 /* Local side had dedicated bonding as requirement */
1231 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1234 /* Remote side had dedicated bonding as requirement */
1235 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1238 /* If none of the above criteria match, then don't store the key persistently */
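/*
 * Worked example (illustrative, not from the original file): an
 * unauthenticated P-192 combination key generated while both sides asked
 * for general bonding (auth_type 0x04 > 0x01 on both ends) passes the
 * "neither side had no-bonding" rule above and is stored persistently;
 * the same key type from a no-bonding SSP exchange (both sides 0x00/0x01)
 * falls through every rule and is discarded.
 */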
1243 static u8 ltk_role(u8 type)
1245 if (type == SMP_LTK)
1246 return HCI_ROLE_MASTER;
1248 return HCI_ROLE_SLAVE;
1251 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1252 u8 addr_type, u8 role)
1257 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1258 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
1261 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
1264 if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
1266 bt_dev_warn_ratelimited(hdev,
1267 "LTK blocked for %pMR",
1280 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
1282 struct smp_irk *irk_to_return = NULL;
1283 struct smp_irk *irk;
1286 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1287 if (!bacmp(&irk->rpa, rpa)) {
1288 irk_to_return = irk;
1293 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1294 if (smp_irk_matches(hdev, irk->val, rpa)) {
1295 bacpy(&irk->rpa, rpa);
1296 irk_to_return = irk;
1302 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1303 irk_to_return->val)) {
1304 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1305 &irk_to_return->bdaddr);
1306 irk_to_return = NULL;
1311 return irk_to_return;
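/*
 * Illustrative sketch, not part of the original file: resolving an incoming
 * RPA to its identity address with the helper above. The function name is
 * hypothetical; real callers hold hdev->lock across the lookup.
 */
static bool example_resolve_rpa(struct hci_dev *hdev, bdaddr_t *rpa,
				bdaddr_t *identity, u8 *identity_type)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_rpa(hdev, rpa);
	if (!irk)
		return false;

	bacpy(identity, &irk->bdaddr);
	*identity_type = irk->addr_type;
	return true;
}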
1314 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1317 struct smp_irk *irk_to_return = NULL;
1318 struct smp_irk *irk;
1320 /* Identity Address must be public or static random */
1321 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
1325 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
1326 if (addr_type == irk->addr_type &&
1327 bacmp(bdaddr, &irk->bdaddr) == 0) {
1328 irk_to_return = irk;
1335 if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
1336 irk_to_return->val)) {
1337 bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
1338 &irk_to_return->bdaddr);
1339 irk_to_return = NULL;
1344 return irk_to_return;
1347 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
1348 bdaddr_t *bdaddr, u8 *val, u8 type,
1349 u8 pin_len, bool *persistent)
1351 struct link_key *key, *old_key;
1354 old_key = hci_find_link_key(hdev, bdaddr);
1356 old_key_type = old_key->type;
1359 old_key_type = conn ? conn->key_type : 0xff;
1360 key = kzalloc(sizeof(*key), GFP_KERNEL);
1363 list_add_rcu(&key->list, &hdev->link_keys);
1366 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1368 /* Some buggy controller combinations generate a changed
1369 * combination key for legacy pairing even when there's no previous key */
1371 if (type == HCI_LK_CHANGED_COMBINATION &&
1372 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1373 type = HCI_LK_COMBINATION;
1375 conn->key_type = type;
1378 bacpy(&key->bdaddr, bdaddr);
1379 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1380 key->pin_len = pin_len;
1382 if (type == HCI_LK_CHANGED_COMBINATION)
1383 key->type = old_key_type;
1388 *persistent = hci_persistent_key(hdev, conn, type,
1394 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1395 u8 addr_type, u8 type, u8 authenticated,
1396 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
1398 struct smp_ltk *key, *old_key;
1399 u8 role = ltk_role(type);
1401 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
1405 key = kzalloc(sizeof(*key), GFP_KERNEL);
1408 list_add_rcu(&key->list, &hdev->long_term_keys);
1411 bacpy(&key->bdaddr, bdaddr);
1412 key->bdaddr_type = addr_type;
1413 memcpy(key->val, tk, sizeof(key->val));
1414 key->authenticated = authenticated;
1417 key->enc_size = enc_size;
1423 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
1424 u8 addr_type, u8 val[16], bdaddr_t *rpa)
1426 struct smp_irk *irk;
1428 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
1430 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
1434 bacpy(&irk->bdaddr, bdaddr);
1435 irk->addr_type = addr_type;
1437 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
1440 memcpy(irk->val, val, 16);
1441 bacpy(&irk->rpa, rpa);
1446 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1448 struct link_key *key;
1450 key = hci_find_link_key(hdev, bdaddr);
1454 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1456 list_del_rcu(&key->list);
1457 kfree_rcu(key, rcu);
1462 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
1464 struct smp_ltk *k, *tmp;
1467 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1468 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
1471 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1473 list_del_rcu(&k->list);
1478 return removed ? 0 : -ENOENT;
1481 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
1483 struct smp_irk *k, *tmp;
1485 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
1486 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
1489 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1491 list_del_rcu(&k->list);
1496 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1499 struct smp_irk *irk;
1502 if (type == BDADDR_BREDR) {
1503 if (hci_find_link_key(hdev, bdaddr))
1508 /* Convert to HCI addr type which struct smp_ltk uses */
1509 if (type == BDADDR_LE_PUBLIC)
1510 addr_type = ADDR_LE_DEV_PUBLIC;
1512 addr_type = ADDR_LE_DEV_RANDOM;
1514 irk = hci_get_irk(hdev, bdaddr, addr_type);
1516 bdaddr = &irk->bdaddr;
1517 addr_type = irk->addr_type;
1521 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
1522 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
1532 /* HCI command timer function */
1533 static void hci_cmd_timeout(struct work_struct *work)
1535 struct hci_dev *hdev = container_of(work, struct hci_dev,
1538 if (hdev->sent_cmd) {
1539 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1540 u16 opcode = __le16_to_cpu(sent->opcode);
1542 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
1544 bt_dev_err(hdev, "command tx timeout");
1547 if (hdev->cmd_timeout)
1548 hdev->cmd_timeout(hdev);
1550 atomic_set(&hdev->cmd_cnt, 1);
1551 queue_work(hdev->workqueue, &hdev->cmd_work);
1554 /* HCI ncmd timer function */
1555 static void hci_ncmd_timeout(struct work_struct *work)
1557 struct hci_dev *hdev = container_of(work, struct hci_dev,
1560 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
1562 /* During HCI_INIT phase no events can be injected if the ncmd timer
1563 * triggers since the procedure has its own timeout handling.
1565 if (test_bit(HCI_INIT, &hdev->flags))
1568 /* This is an irrecoverable state, inject hardware error event */
1569 hci_reset_dev(hdev);
1572 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1573 bdaddr_t *bdaddr, u8 bdaddr_type)
1575 struct oob_data *data;
1577 list_for_each_entry(data, &hdev->remote_oob_data, list) {
1578 if (bacmp(bdaddr, &data->bdaddr) != 0)
1580 if (data->bdaddr_type != bdaddr_type)
1588 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1591 struct oob_data *data;
1593 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1597 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
1599 list_del(&data->list);
1605 void hci_remote_oob_data_clear(struct hci_dev *hdev)
1607 struct oob_data *data, *n;
1609 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1610 list_del(&data->list);
1615 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1616 u8 bdaddr_type, u8 *hash192, u8 *rand192,
1617 u8 *hash256, u8 *rand256)
1619 struct oob_data *data;
1621 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1623 data = kmalloc(sizeof(*data), GFP_KERNEL);
1627 bacpy(&data->bdaddr, bdaddr);
1628 data->bdaddr_type = bdaddr_type;
1629 list_add(&data->list, &hdev->remote_oob_data);
1632 if (hash192 && rand192) {
1633 memcpy(data->hash192, hash192, sizeof(data->hash192));
1634 memcpy(data->rand192, rand192, sizeof(data->rand192));
1635 if (hash256 && rand256)
1636 data->present = 0x03;
1638 memset(data->hash192, 0, sizeof(data->hash192));
1639 memset(data->rand192, 0, sizeof(data->rand192));
1640 if (hash256 && rand256)
1641 data->present = 0x02;
1643 data->present = 0x00;
1646 if (hash256 && rand256) {
1647 memcpy(data->hash256, hash256, sizeof(data->hash256));
1648 memcpy(data->rand256, rand256, sizeof(data->rand256));
1650 memset(data->hash256, 0, sizeof(data->hash256));
1651 memset(data->rand256, 0, sizeof(data->rand256));
1652 if (hash192 && rand192)
1653 data->present = 0x01;
1656 BT_DBG("%s for %pMR", hdev->name, bdaddr);
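/*
 * Summary of the data->present encoding built above (illustrative note,
 * not from the original file):
 *   0x00 - no pairing values present
 *   0x01 - P-192 values only (hash192/rand192)
 *   0x02 - P-256 values only (hash256/rand256)
 *   0x03 - both P-192 and P-256 values
 */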
1661 /* This function requires the caller holds hdev->lock */
1662 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1664 struct adv_info *adv_instance;
1666 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1667 if (adv_instance->instance == instance)
1668 return adv_instance;
1674 /* This function requires the caller holds hdev->lock */
1675 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1677 struct adv_info *cur_instance;
1679 cur_instance = hci_find_adv_instance(hdev, instance);
1683 if (cur_instance == list_last_entry(&hdev->adv_instances,
1684 struct adv_info, list))
1685 return list_first_entry(&hdev->adv_instances,
1686 struct adv_info, list);
1688 return list_next_entry(cur_instance, list);
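/*
 * Illustrative sketch, not part of the original file: because
 * hci_get_next_instance() wraps from the last instance back to the first,
 * feeding it the current instance yields round-robin rotation between
 * advertising instances. Helper name is hypothetical; hdev->lock required.
 */
static u8 example_next_rotation_instance(struct hci_dev *hdev)
{
	struct adv_info *next;

	next = hci_get_next_instance(hdev, hdev->cur_adv_instance);

	return next ? next->instance : 0x00;
}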
1691 /* This function requires the caller holds hdev->lock */
1692 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1694 struct adv_info *adv_instance;
1696 adv_instance = hci_find_adv_instance(hdev, instance);
1700 BT_DBG("%s removing instance %d", hdev->name, instance);
1702 if (hdev->cur_adv_instance == instance) {
1703 if (hdev->adv_instance_timeout) {
1704 cancel_delayed_work(&hdev->adv_instance_expire);
1705 hdev->adv_instance_timeout = 0;
1707 hdev->cur_adv_instance = 0x00;
1710 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1712 list_del(&adv_instance->list);
1713 kfree(adv_instance);
1715 hdev->adv_instance_cnt--;
1720 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1722 struct adv_info *adv_instance, *n;
1724 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1725 adv_instance->rpa_expired = rpa_expired;
1728 /* This function requires the caller holds hdev->lock */
1729 void hci_adv_instances_clear(struct hci_dev *hdev)
1731 struct adv_info *adv_instance, *n;
1733 if (hdev->adv_instance_timeout) {
1734 cancel_delayed_work(&hdev->adv_instance_expire);
1735 hdev->adv_instance_timeout = 0;
1738 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1739 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1740 list_del(&adv_instance->list);
1741 kfree(adv_instance);
1744 hdev->adv_instance_cnt = 0;
1745 hdev->cur_adv_instance = 0x00;
1748 static void adv_instance_rpa_expired(struct work_struct *work)
1750 struct adv_info *adv_instance = container_of(work, struct adv_info,
1751 rpa_expired_cb.work);
1755 adv_instance->rpa_expired = true;
1758 /* This function requires the caller holds hdev->lock */
1759 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1760 u32 flags, u16 adv_data_len, u8 *adv_data,
1761 u16 scan_rsp_len, u8 *scan_rsp_data,
1762 u16 timeout, u16 duration, s8 tx_power,
1763 u32 min_interval, u32 max_interval,
1766 struct adv_info *adv;
1768 adv = hci_find_adv_instance(hdev, instance);
1770 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1771 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1772 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1774 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1775 instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1776 return ERR_PTR(-EOVERFLOW);
1778 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1780 return ERR_PTR(-ENOMEM);
1782 adv->pending = true;
1783 adv->instance = instance;
1784 list_add(&adv->list, &hdev->adv_instances);
1785 hdev->adv_instance_cnt++;
1789 adv->min_interval = min_interval;
1790 adv->max_interval = max_interval;
1791 adv->tx_power = tx_power;
1792 /* Defining a mesh_handle changes the timing units to ms,
1793 * rather than seconds, and ties the instance to the requested mesh_tx queue. */
1796 adv->mesh = mesh_handle;
1798 hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1799 scan_rsp_len, scan_rsp_data);
1801 adv->timeout = timeout;
1802 adv->remaining_time = timeout;
1805 adv->duration = hdev->def_multi_adv_rotation_duration;
1807 adv->duration = duration;
1809 INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1811 BT_DBG("%s for instance %d", hdev->name, instance);
1816 /* This function requires the caller holds hdev->lock */
1817 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1818 u32 flags, u8 data_len, u8 *data,
1819 u32 min_interval, u32 max_interval)
1821 struct adv_info *adv;
1823 adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1824 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1825 min_interval, max_interval, 0);
1829 adv->periodic = true;
1830 adv->per_adv_data_len = data_len;
1833 memcpy(adv->per_adv_data, data, data_len);
1838 /* This function requires the caller holds hdev->lock */
1839 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1840 u16 adv_data_len, u8 *adv_data,
1841 u16 scan_rsp_len, u8 *scan_rsp_data)
1843 struct adv_info *adv;
1845 adv = hci_find_adv_instance(hdev, instance);
1847 /* If advertisement doesn't exist, we can't modify its data */
1851 if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1852 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1853 memcpy(adv->adv_data, adv_data, adv_data_len);
1854 adv->adv_data_len = adv_data_len;
1855 adv->adv_data_changed = true;
1858 if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1859 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1860 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1861 adv->scan_rsp_len = scan_rsp_len;
1862 adv->scan_rsp_changed = true;
1865 /* Mark as changed if there are flags which would affect it */
1866 if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1867 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1868 adv->scan_rsp_changed = true;
1873 /* This function requires the caller holds hdev->lock */
1874 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1877 struct adv_info *adv;
1879 if (instance == 0x00) {
1880 /* Instance 0 always manages the "Tx Power" and "Flags" fields */
1883 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1885 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1886 * corresponds to the "connectable" instance flag.
1888 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1889 flags |= MGMT_ADV_FLAG_CONNECTABLE;
1891 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1892 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1893 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1894 flags |= MGMT_ADV_FLAG_DISCOV;
1899 adv = hci_find_adv_instance(hdev, instance);
1901 /* Return 0 when we got an invalid instance identifier. */
1908 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1910 struct adv_info *adv;
1912 /* Instance 0x00 always sets the local name */
1913 if (instance == 0x00)
1916 adv = hci_find_adv_instance(hdev, instance);
1920 if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1921 adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1924 return adv->scan_rsp_len ? true : false;
1927 /* This function requires the caller holds hdev->lock */
1928 void hci_adv_monitors_clear(struct hci_dev *hdev)
1930 struct adv_monitor *monitor;
1933 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1934 hci_free_adv_monitor(hdev, monitor);
1936 idr_destroy(&hdev->adv_monitors_idr);
1939 /* Frees the monitor structure and does some bookkeeping.
1940 * This function requires the caller holds hdev->lock.
1942 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1944 struct adv_pattern *pattern;
1945 struct adv_pattern *tmp;
1950 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1951 list_del(&pattern->list);
1955 if (monitor->handle)
1956 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1958 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1959 hdev->adv_monitors_cnt--;
1960 mgmt_adv_monitor_removed(hdev, monitor->handle);
1966 /* Assigns a handle to a monitor, and if offloading is supported and power is on,
1967 * also attempts to forward the request to the controller.
1968 * This function requires the caller holds hci_req_sync_lock.
1970 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1972 int min, max, handle;
1980 min = HCI_MIN_ADV_MONITOR_HANDLE;
1981 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1982 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1985 hci_dev_unlock(hdev);
1990 monitor->handle = handle;
1992 if (!hdev_is_powered(hdev))
1995 switch (hci_get_adv_monitor_offload_ext(hdev)) {
1996 case HCI_ADV_MONITOR_EXT_NONE:
1997 bt_dev_dbg(hdev, "%s add monitor %d status %d", hdev->name,
1998 monitor->handle, status);
1999 /* Message was not forwarded to controller - not an error */
2002 case HCI_ADV_MONITOR_EXT_MSFT:
2003 status = msft_add_monitor_pattern(hdev, monitor);
2004 bt_dev_dbg(hdev, "%s add monitor %d msft status %d", hdev->name,
2005 monitor->handle, status);
2012 /* Attempts to tell the controller and free the monitor. If somehow the
2013 * controller doesn't have a corresponding handle, remove anyway.
2014 * This function requires the caller holds hci_req_sync_lock.
2016 static int hci_remove_adv_monitor(struct hci_dev *hdev,
2017 struct adv_monitor *monitor)
2021 switch (hci_get_adv_monitor_offload_ext(hdev)) {
2022 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
2023 bt_dev_dbg(hdev, "%s remove monitor %d status %d", hdev->name,
2024 monitor->handle, status);
2027 case HCI_ADV_MONITOR_EXT_MSFT:
2028 status = msft_remove_monitor(hdev, monitor);
2029 bt_dev_dbg(hdev, "%s remove monitor %d msft status %d",
2030 hdev->name, monitor->handle, status);
2034 /* In case no matching handle registered, just free the monitor */
2035 if (status == -ENOENT)
2036 goto free_monitor;
2038 return status;
2040 free_monitor:
2041 if (status == -ENOENT)
2042 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2043 monitor->handle);
2044 hci_free_adv_monitor(hdev, monitor);
2049 /* This function requires the caller holds hci_req_sync_lock */
2050 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2052 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2057 return hci_remove_adv_monitor(hdev, monitor);
2060 /* This function requires the caller holds hci_req_sync_lock */
2061 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2063 struct adv_monitor *monitor;
2064 int idr_next_id = 0;
2068 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2072 status = hci_remove_adv_monitor(hdev, monitor);
2082 /* This function requires the caller holds hdev->lock */
2083 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2085 return !idr_is_empty(&hdev->adv_monitors_idr);
2088 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2090 if (msft_monitor_supported(hdev))
2091 return HCI_ADV_MONITOR_EXT_MSFT;
2093 return HCI_ADV_MONITOR_EXT_NONE;
2096 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2097 bdaddr_t *bdaddr, u8 type)
2099 struct bdaddr_list *b;
2101 list_for_each_entry(b, bdaddr_list, list) {
2102 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2109 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2110 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2113 struct bdaddr_list_with_irk *b;
2115 list_for_each_entry(b, bdaddr_list, list) {
2116 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2123 struct bdaddr_list_with_flags *
2124 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2125 bdaddr_t *bdaddr, u8 type)
2127 struct bdaddr_list_with_flags *b;
2129 list_for_each_entry(b, bdaddr_list, list) {
2130 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2137 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2139 struct bdaddr_list *b, *n;
2141 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2147 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2149 struct bdaddr_list *entry;
2151 if (!bacmp(bdaddr, BDADDR_ANY))
2154 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2157 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2161 bacpy(&entry->bdaddr, bdaddr);
2162 entry->bdaddr_type = type;
2164 list_add(&entry->list, list);
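/*
 * Illustrative sketch, not part of the original file: an add/lookup/del
 * round trip on one of the bdaddr_t lists initialized in hci_alloc_dev_priv()
 * (here hdev->reject_list). The helper name is hypothetical.
 */
static int example_reject_peer(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int err;

	err = hci_bdaddr_list_add(&hdev->reject_list, bdaddr, BDADDR_BREDR);
	if (err) /* -EBADF for BDADDR_ANY, -EEXIST for duplicates */
		return err;

	if (!hci_bdaddr_list_lookup(&hdev->reject_list, bdaddr, BDADDR_BREDR))
		return -ENOENT;

	return hci_bdaddr_list_del(&hdev->reject_list, bdaddr, BDADDR_BREDR);
}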
2169 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2170 u8 type, u8 *peer_irk, u8 *local_irk)
2172 struct bdaddr_list_with_irk *entry;
2174 if (!bacmp(bdaddr, BDADDR_ANY))
2177 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2180 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2184 bacpy(&entry->bdaddr, bdaddr);
2185 entry->bdaddr_type = type;
2188 memcpy(entry->peer_irk, peer_irk, 16);
2191 memcpy(entry->local_irk, local_irk, 16);
2193 list_add(&entry->list, list);
2198 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2201 struct bdaddr_list_with_flags *entry;
2203 if (!bacmp(bdaddr, BDADDR_ANY))
2206 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2209 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2213 bacpy(&entry->bdaddr, bdaddr);
2214 entry->bdaddr_type = type;
2215 entry->flags = flags;
2217 list_add(&entry->list, list);
2222 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2224 struct bdaddr_list *entry;
2226 if (!bacmp(bdaddr, BDADDR_ANY)) {
2227 hci_bdaddr_list_clear(list);
2231 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2235 list_del(&entry->list);
2241 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2244 struct bdaddr_list_with_irk *entry;
2246 if (!bacmp(bdaddr, BDADDR_ANY)) {
2247 hci_bdaddr_list_clear(list);
2251 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2255 list_del(&entry->list);
2261 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2264 struct bdaddr_list_with_flags *entry;
2266 if (!bacmp(bdaddr, BDADDR_ANY)) {
2267 hci_bdaddr_list_clear(list);
2271 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2275 list_del(&entry->list);
2281 /* This function requires the caller holds hdev->lock */
2282 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2283 bdaddr_t *addr, u8 addr_type)
2285 struct hci_conn_params *params;
2287 list_for_each_entry(params, &hdev->le_conn_params, list) {
2288 if (bacmp(¶ms->addr, addr) == 0 &&
2289 params->addr_type == addr_type) {
2297 /* This function requires the caller holds hdev->lock */
2298 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2299 bdaddr_t *addr, u8 addr_type)
2301 struct hci_conn_params *param;
2303 list_for_each_entry(param, list, action) {
2304 if (bacmp(¶m->addr, addr) == 0 &&
2305 param->addr_type == addr_type)
2312 /* This function requires the caller holds hdev->lock */
2313 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2314 bdaddr_t *addr, u8 addr_type)
2316 struct hci_conn_params *params;
2318 params = hci_conn_params_lookup(hdev, addr, addr_type);
2322 params = kzalloc(sizeof(*params), GFP_KERNEL);
2324 bt_dev_err(hdev, "out of memory");
2328 bacpy(¶ms->addr, addr);
2329 params->addr_type = addr_type;
2331 list_add(¶ms->list, &hdev->le_conn_params);
2332 INIT_LIST_HEAD(¶ms->action);
2334 params->conn_min_interval = hdev->le_conn_min_interval;
2335 params->conn_max_interval = hdev->le_conn_max_interval;
2336 params->conn_latency = hdev->le_conn_latency;
2337 params->supervision_timeout = hdev->le_supv_timeout;
2338 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2340 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2345 static void hci_conn_params_free(struct hci_conn_params *params)
2348 hci_conn_drop(params->conn);
2349 hci_conn_put(params->conn);
2352 list_del(¶ms->action);
2353 list_del(¶ms->list);
2357 /* This function requires the caller holds hdev->lock */
2358 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2360 struct hci_conn_params *params;
2362 params = hci_conn_params_lookup(hdev, addr, addr_type);
2366 hci_conn_params_free(params);
2368 hci_update_passive_scan(hdev);
2370 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2373 /* This function requires the caller holds hdev->lock */
2374 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2376 struct hci_conn_params *params, *tmp;
2378 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2379 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2382 /* If trying to establish one time connection to disabled
2383 * device, leave the params, but mark them as just once.
2385 if (params->explicit_connect) {
2386 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2390 list_del(¶ms->list);
2394 BT_DBG("All LE disabled connection parameters were removed");
2397 /* This function requires the caller holds hdev->lock */
2398 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2400 struct hci_conn_params *params, *tmp;
2402 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2403 hci_conn_params_free(params);
2405 BT_DBG("All LE connection parameters were removed");
2408 /* Copy the Identity Address of the controller.
2410 * If the controller has a public BD_ADDR, then by default use that one.
2411 * If this is a LE only controller without a public address, default to
2412 * the static random address.
2414 * For debugging purposes it is possible to force controllers with a
2415 * public address to use the static random address instead.
2417 * In case BR/EDR has been disabled on a dual-mode controller and
2418 * userspace has configured a static address, then that address
2419 * becomes the identity address instead of the public BR/EDR address.
2421 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2424 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2425 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2426 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2427 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2428 bacpy(bdaddr, &hdev->static_addr);
2429 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2431 bacpy(bdaddr, &hdev->bdaddr);
2432 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
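/*
 * Illustrative sketch, not part of the original file: logging the identity
 * address selected by the rules documented above. Helper name hypothetical.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);

	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr, bdaddr_type);
}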
2436 static void hci_clear_wake_reason(struct hci_dev *hdev)
2440 hdev->wake_reason = 0;
2441 bacpy(&hdev->wake_addr, BDADDR_ANY);
2442 hdev->wake_addr_type = 0;
2444 hci_dev_unlock(hdev);
2447 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2450 struct hci_dev *hdev =
2451 container_of(nb, struct hci_dev, suspend_notifier);
2454 /* Userspace has full control of this device. Do nothing. */
2455 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2458 if (action == PM_SUSPEND_PREPARE)
2459 ret = hci_suspend_dev(hdev);
2460 else if (action == PM_POST_SUSPEND)
2461 ret = hci_resume_dev(hdev);
2464 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2470 /* Alloc HCI device */
2471 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2473 struct hci_dev *hdev;
2474 unsigned int alloc_size;
2476 alloc_size = sizeof(*hdev);
2478 /* Fixme: May need ALIGN-ment? */
2479 alloc_size += sizeof_priv;
2482 hdev = kzalloc(alloc_size, GFP_KERNEL);
2486 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2487 hdev->esco_type = (ESCO_HV1);
2488 hdev->link_mode = (HCI_LM_ACCEPT);
2489 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2490 hdev->io_capability = 0x03; /* No Input No Output */
2491 hdev->manufacturer = 0xffff; /* Default to internal use */
2492 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2493 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2494 hdev->adv_instance_cnt = 0;
2495 hdev->cur_adv_instance = 0x00;
2496 hdev->adv_instance_timeout = 0;
2498 hdev->advmon_allowlist_duration = 300;
2499 hdev->advmon_no_filter_duration = 500;
2500 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
2502 hdev->sniff_max_interval = 800;
2503 hdev->sniff_min_interval = 80;
2505 hdev->le_adv_channel_map = 0x07;
2506 hdev->le_adv_min_interval = 0x0800;
2507 hdev->le_adv_max_interval = 0x0800;
2509 hdev->adv_filter_policy = 0x00;
2510 hdev->adv_type = 0x00;
2512 hdev->le_scan_interval = 0x0060;
2513 hdev->le_scan_window = 0x0030;
2514 hdev->le_scan_int_suspend = 0x0400;
2515 hdev->le_scan_window_suspend = 0x0012;
2516 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2517 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2518 hdev->le_scan_int_adv_monitor = 0x0060;
2519 hdev->le_scan_window_adv_monitor = 0x0030;
2520 hdev->le_scan_int_connect = 0x0060;
2521 hdev->le_scan_window_connect = 0x0060;
2522 hdev->le_conn_min_interval = 0x0018;
2523 hdev->le_conn_max_interval = 0x0028;
2524 hdev->le_conn_latency = 0x0000;
2525 hdev->le_supv_timeout = 0x002a;
2526 hdev->le_def_tx_len = 0x001b;
2527 hdev->le_def_tx_time = 0x0148;
2528 hdev->le_max_tx_len = 0x001b;
2529 hdev->le_max_tx_time = 0x0148;
2530 hdev->le_max_rx_len = 0x001b;
2531 hdev->le_max_rx_time = 0x0148;
2532 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2533 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2534 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2535 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2536 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2537 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2538 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2539 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2540 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2542 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2543 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2544 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2545 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2546 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2547 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2549 /* default 1.28 sec page scan */
2550 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2551 hdev->def_page_scan_int = 0x0800;
2552 hdev->def_page_scan_window = 0x0012;
2554 mutex_init(&hdev->lock);
2555 mutex_init(&hdev->req_lock);
2557 INIT_LIST_HEAD(&hdev->mesh_pending);
2558 INIT_LIST_HEAD(&hdev->mgmt_pending);
2559 INIT_LIST_HEAD(&hdev->reject_list);
2560 INIT_LIST_HEAD(&hdev->accept_list);
2561 INIT_LIST_HEAD(&hdev->uuids);
2562 INIT_LIST_HEAD(&hdev->link_keys);
2563 INIT_LIST_HEAD(&hdev->long_term_keys);
2564 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2565 INIT_LIST_HEAD(&hdev->remote_oob_data);
2566 INIT_LIST_HEAD(&hdev->le_accept_list);
2567 INIT_LIST_HEAD(&hdev->le_resolv_list);
2568 INIT_LIST_HEAD(&hdev->le_conn_params);
2569 INIT_LIST_HEAD(&hdev->pend_le_conns);
2570 INIT_LIST_HEAD(&hdev->pend_le_reports);
2571 INIT_LIST_HEAD(&hdev->conn_hash.list);
2572 INIT_LIST_HEAD(&hdev->adv_instances);
2573 INIT_LIST_HEAD(&hdev->blocked_keys);
2574 INIT_LIST_HEAD(&hdev->monitored_devices);
2576 INIT_LIST_HEAD(&hdev->local_codecs);
2577 INIT_WORK(&hdev->rx_work, hci_rx_work);
2578 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2579 INIT_WORK(&hdev->tx_work, hci_tx_work);
2580 INIT_WORK(&hdev->power_on, hci_power_on);
2581 INIT_WORK(&hdev->error_reset, hci_error_reset);
2583 hci_cmd_sync_init(hdev);
2585 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2587 skb_queue_head_init(&hdev->rx_q);
2588 skb_queue_head_init(&hdev->cmd_q);
2589 skb_queue_head_init(&hdev->raw_q);
2591 init_waitqueue_head(&hdev->req_wait_q);
2593 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2594 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2596 hci_request_setup(hdev);
2598 hci_init_sysfs(hdev);
2599 discovery_init(hdev);
2603 EXPORT_SYMBOL(hci_alloc_dev_priv);
2605 /* Free HCI device */
2606 void hci_free_dev(struct hci_dev *hdev)
2608 /* will free via device release */
2609 put_device(&hdev->dev);
2611 EXPORT_SYMBOL(hci_free_dev);
2613 /* Register HCI device */
2614 int hci_register_dev(struct hci_dev *hdev)
2618 if (!hdev->open || !hdev->close || !hdev->send)
2621 /* Do not allow HCI_AMP devices to register at index 0,
2622 * so the index can be used as the AMP controller ID.
2624 switch (hdev->dev_type) {
2626 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
2629 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
2638 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
2641 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2643 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2644 if (!hdev->workqueue) {
2649 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2651 if (!hdev->req_workqueue) {
2652 destroy_workqueue(hdev->workqueue);
2657 if (!IS_ERR_OR_NULL(bt_debugfs))
2658 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2660 dev_set_name(&hdev->dev, "%s", hdev->name);
2662 error = device_add(&hdev->dev);
2666 hci_leds_init(hdev);
2668 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2669 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2672 if (rfkill_register(hdev->rfkill) < 0) {
2673 rfkill_destroy(hdev->rfkill);
2674 hdev->rfkill = NULL;
2678 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2679 hci_dev_set_flag(hdev, HCI_RFKILLED);
2681 hci_dev_set_flag(hdev, HCI_SETUP);
2682 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2684 if (hdev->dev_type == HCI_PRIMARY) {
2685 /* Assume BR/EDR support until proven otherwise (such as
2686 * through reading supported features during init). */
2688 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2691 write_lock(&hci_dev_list_lock);
2692 list_add(&hdev->list, &hci_dev_list);
2693 write_unlock(&hci_dev_list_lock);
2695 /* Devices that are marked for raw-only usage are unconfigured
2696 * and should not be included in normal operation. */
2698 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2699 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2701 /* Mark Remote Wakeup connection flag as supported if driver has wakeup support. */
2705 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2707 hci_sock_dev_event(hdev, HCI_DEV_REG);
2710 error = hci_register_suspend_notifier(hdev);
2712 BT_WARN("register suspend notifier failed error:%d\n", error);
2714 queue_work(hdev->req_workqueue, &hdev->power_on);
2716 idr_init(&hdev->adv_monitors_idr);
2717 msft_register(hdev);
2722 debugfs_remove_recursive(hdev->debugfs);
2723 destroy_workqueue(hdev->workqueue);
2724 destroy_workqueue(hdev->req_workqueue);
2726 ida_simple_remove(&hci_index_ida, hdev->id);
2730 EXPORT_SYMBOL(hci_register_dev);
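/* Sketch of the registration contract checked at the top of
 * hci_register_dev(): open, close and send must all be set before the
 * call, which returns the new hciN index on success. The my_*
 * callbacks are hypothetical driver functions.
 */
static int my_open(struct hci_dev *hdev) { return 0; }
static int my_close(struct hci_dev *hdev) { return 0; }

static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);		/* a real driver hands this to hardware */
	return 0;
}

static int my_setup_and_register(struct hci_dev *hdev)
{
	hdev->bus = HCI_VIRTUAL;
	hdev->open = my_open;
	hdev->close = my_close;
	hdev->send = my_send;

	return hci_register_dev(hdev);
}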
2732 /* Unregister HCI device */
2733 void hci_unregister_dev(struct hci_dev *hdev)
2735 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2737 mutex_lock(&hdev->unregister_lock);
2738 hci_dev_set_flag(hdev, HCI_UNREGISTER);
2739 mutex_unlock(&hdev->unregister_lock);
2741 write_lock(&hci_dev_list_lock);
2742 list_del(&hdev->list);
2743 write_unlock(&hci_dev_list_lock);
2745 cancel_work_sync(&hdev->power_on);
2747 hci_cmd_sync_clear(hdev);
2749 hci_unregister_suspend_notifier(hdev);
2751 msft_unregister(hdev);
2753 hci_dev_do_close(hdev);
2755 if (!test_bit(HCI_INIT, &hdev->flags) &&
2756 !hci_dev_test_flag(hdev, HCI_SETUP) &&
2757 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2759 mgmt_index_removed(hdev);
2760 hci_dev_unlock(hdev);
2763 /* mgmt_index_removed should take care of emptying the mgmt_pending list. */
2765 BUG_ON(!list_empty(&hdev->mgmt_pending));
2767 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2770 rfkill_unregister(hdev->rfkill);
2771 rfkill_destroy(hdev->rfkill);
2774 device_del(&hdev->dev);
2775 /* Actual cleanup is deferred until hci_release_dev(). */
2778 EXPORT_SYMBOL(hci_unregister_dev);
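/* Teardown sketch: a driver's remove path unregisters first, then
 * drops its reference with hci_free_dev(); the memory itself is only
 * freed in hci_release_dev() when the last reference goes away.
 * my_remove() is a hypothetical driver hook.
 */
static void my_remove(struct hci_dev *hdev)
{
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);	/* put_device(), not an immediate kfree */
}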
2780 /* Release HCI device */
2781 void hci_release_dev(struct hci_dev *hdev)
2783 debugfs_remove_recursive(hdev->debugfs);
2784 kfree_const(hdev->hw_info);
2785 kfree_const(hdev->fw_info);
2787 destroy_workqueue(hdev->workqueue);
2788 destroy_workqueue(hdev->req_workqueue);
2791 hci_bdaddr_list_clear(&hdev->reject_list);
2792 hci_bdaddr_list_clear(&hdev->accept_list);
2793 hci_uuids_clear(hdev);
2794 hci_link_keys_clear(hdev);
2795 hci_smp_ltks_clear(hdev);
2796 hci_smp_irks_clear(hdev);
2797 hci_remote_oob_data_clear(hdev);
2798 hci_adv_instances_clear(hdev);
2799 hci_adv_monitors_clear(hdev);
2800 hci_bdaddr_list_clear(&hdev->le_accept_list);
2801 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2802 hci_conn_params_clear_all(hdev);
2803 hci_discovery_filter_clear(hdev);
2804 hci_blocked_keys_clear(hdev);
2805 hci_dev_unlock(hdev);
2807 ida_simple_remove(&hci_index_ida, hdev->id);
2808 kfree_skb(hdev->sent_cmd);
2809 kfree_skb(hdev->recv_event);
2812 EXPORT_SYMBOL(hci_release_dev);
2814 int hci_register_suspend_notifier(struct hci_dev *hdev)
2818 if (!hdev->suspend_notifier.notifier_call &&
2819 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2820 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2821 ret = register_pm_notifier(&hdev->suspend_notifier);
2827 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2831 if (hdev->suspend_notifier.notifier_call) {
2832 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2834 hdev->suspend_notifier.notifier_call = NULL;
2840 /* Suspend HCI device */
2841 int hci_suspend_dev(struct hci_dev *hdev)
2845 bt_dev_dbg(hdev, "");
2847 /* Suspend should only take effect when the controller is powered. */
2848 if (!hdev_is_powered(hdev) ||
2849 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2852 /* If powering down, don't attempt to suspend */
2853 if (mgmt_powering_down(hdev))
2856 hci_req_sync_lock(hdev);
2857 ret = hci_suspend_sync(hdev);
2858 hci_req_sync_unlock(hdev);
2860 hci_clear_wake_reason(hdev);
2861 mgmt_suspending(hdev, hdev->suspend_state);
2863 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2866 EXPORT_SYMBOL(hci_suspend_dev);
2868 /* Resume HCI device */
2869 int hci_resume_dev(struct hci_dev *hdev)
2873 bt_dev_dbg(hdev, "");
2875 /* Resume should only take effect when the controller is powered. */
2876 if (!hdev_is_powered(hdev) ||
2877 hci_dev_test_flag(hdev, HCI_UNREGISTER))
2880 /* If powering down, don't attempt to resume */
2881 if (mgmt_powering_down(hdev))
2884 hci_req_sync_lock(hdev);
2885 ret = hci_resume_sync(hdev);
2886 hci_req_sync_unlock(hdev);
2888 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2889 hdev->wake_addr_type);
2891 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2894 EXPORT_SYMBOL(hci_resume_dev);
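/* Sketch of a driver that drives PM itself instead of relying on the
 * suspend notifier (for instance when it sets
 * HCI_QUIRK_NO_SUSPEND_NOTIFIER); my_pm_suspend()/my_pm_resume() are
 * hypothetical PM callbacks.
 */
static int my_pm_suspend(struct hci_dev *hdev)
{
	return hci_suspend_dev(hdev);	/* no-op unless powered up */
}

static int my_pm_resume(struct hci_dev *hdev)
{
	return hci_resume_dev(hdev);
}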
2896 /* Reset HCI device */
2897 int hci_reset_dev(struct hci_dev *hdev)
2899 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2900 struct sk_buff *skb;
2902 skb = bt_skb_alloc(3, GFP_ATOMIC);
2906 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2907 skb_put_data(skb, hw_err, 3);
2909 bt_dev_err(hdev, "Injecting HCI hardware error event");
2911 /* Send Hardware Error to upper stack */
2912 return hci_recv_frame(hdev, skb);
2914 EXPORT_SYMBOL(hci_reset_dev);
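/* Sketch: a driver that detects a dead controller can call
 * hci_reset_dev() to inject the synthetic Hardware Error event above
 * and let the event handler reset the stack state. my_hw_timeout() is
 * a hypothetical driver hook.
 */
static void my_hw_timeout(struct hci_dev *hdev)
{
	int err = hci_reset_dev(hdev);

	if (err)
		bt_dev_err(hdev, "failed to inject hw error (%d)", err);
}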
2916 /* Receive frame from HCI drivers */
2917 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2919 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2920 && !test_bit(HCI_INIT, &hdev->flags))) {
2925 switch (hci_skb_pkt_type(skb)) {
2928 case HCI_ACLDATA_PKT:
2929 /* Detect if ISO packet has been sent as ACL */
2930 if (hci_conn_num(hdev, ISO_LINK)) {
2931 __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2934 type = hci_conn_lookup_type(hdev, hci_handle(handle));
2935 if (type == ISO_LINK)
2936 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2939 case HCI_SCODATA_PKT:
2941 case HCI_ISODATA_PKT:
2949 bt_cb(skb)->incoming = 1;
2952 __net_timestamp(skb);
2954 skb_queue_tail(&hdev->rx_q, skb);
2955 queue_work(hdev->workqueue, &hdev->rx_work);
2959 EXPORT_SYMBOL(hci_recv_frame);
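/* Sketch of a driver RX path feeding hci_recv_frame(): wrap the raw
 * bytes from the transport in an skb, tag the packet type that was
 * parsed from the transport framing, and hand it over. my_rx() and its
 * arguments are hypothetical.
 */
static int my_rx(struct hci_dev *hdev, const void *data, size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;	/* from transport framing */
	skb_put_data(skb, data, len);

	return hci_recv_frame(hdev, skb);	/* frees skb on error paths */
}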
2961 /* Receive diagnostic message from HCI drivers */
2962 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
2964 /* Mark as diagnostic packet */
2965 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
2968 __net_timestamp(skb);
2970 skb_queue_tail(&hdev->rx_q, skb);
2971 queue_work(hdev->workqueue, &hdev->rx_work);
2975 EXPORT_SYMBOL(hci_recv_diag);
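/* Sketch: vendor diagnostic traffic uses the same pattern, but
 * hci_recv_diag() tags the packet type itself. my_rx_diag() is
 * hypothetical.
 */
static int my_rx_diag(struct hci_dev *hdev, const void *data, size_t len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, data, len);
	return hci_recv_diag(hdev, skb);
}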
2977 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
2981 va_start(vargs, fmt);
2982 kfree_const(hdev->hw_info);
2983 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2986 EXPORT_SYMBOL(hci_set_hw_info);
2988 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
2992 va_start(vargs, fmt);
2993 kfree_const(hdev->fw_info);
2994 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
2997 EXPORT_SYMBOL(hci_set_fw_info);
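/* Sketch: drivers usually record hardware/firmware versions once
 * during setup so they can later be reported to userspace; the values
 * here are purely illustrative.
 */
static void my_record_info(struct hci_dev *hdev)
{
	unsigned int hw_rev = 2, fw_build = 1234;	/* illustrative */

	hci_set_hw_info(hdev, "rev %u", hw_rev);
	hci_set_fw_info(hdev, "build %u", fw_build);
}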
2999 /* ---- Interface to upper protocols ---- */
3001 int hci_register_cb(struct hci_cb *cb)
3003 BT_DBG("%p name %s", cb, cb->name);
3005 mutex_lock(&hci_cb_list_lock);
3006 list_add_tail(&cb->list, &hci_cb_list);
3007 mutex_unlock(&hci_cb_list_lock);
3011 EXPORT_SYMBOL(hci_register_cb);
3013 int hci_unregister_cb(struct hci_cb *cb)
3015 BT_DBG("%p name %s", cb, cb->name);
3017 mutex_lock(&hci_cb_list_lock);
3018 list_del(&cb->list);
3019 mutex_unlock(&hci_cb_list_lock);
3023 EXPORT_SYMBOL(hci_unregister_cb);
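/* Sketch of an upper-protocol callback block using the hci_cb
 * interface above; the my_proto names are hypothetical, and only the
 * hooks a protocol cares about need to be filled in.
 */
static void my_connect_cfm(struct hci_conn *conn, __u8 status) { }
static void my_disconn_cfm(struct hci_conn *conn, __u8 reason) { }

static struct hci_cb my_proto_cb = {
	.name		= "my_proto",
	.connect_cfm	= my_connect_cfm,
	.disconn_cfm	= my_disconn_cfm,
};

/* Paired as hci_register_cb(&my_proto_cb) on module init and
 * hci_unregister_cb(&my_proto_cb) on module exit.
 */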
3025 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3029 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3033 __net_timestamp(skb);
3035 /* Send copy to monitor */
3036 hci_send_to_monitor(hdev, skb);
3038 if (atomic_read(&hdev->promisc)) {
3039 /* Send copy to the sockets */
3040 hci_send_to_sock(hdev, skb);
3043 /* Get rid of the skb owner before handing it to the driver. */
3046 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3051 err = hdev->send(hdev, skb);
3053 bt_dev_err(hdev, "sending frame failed (%d)", err);
3061 /* Send HCI command */
3062 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3065 struct sk_buff *skb;
3067 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3069 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3071 bt_dev_err(hdev, "no memory for command");
3075 /* Stand-alone HCI commands must be flagged as
3076 * single-command requests. */
3078 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3080 skb_queue_tail(&hdev->cmd_q, skb);
3081 queue_work(hdev->workqueue, &hdev->cmd_work);
3086 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3089 struct sk_buff *skb;
3091 if (hci_opcode_ogf(opcode) != 0x3f) {
3092 /* A controller receiving a command shall respond with either
3093 * a Command Status Event or a Command Complete Event.
3094 * Therefore, all standard HCI commands must be sent via the
3095 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3096 * Some vendors do not comply with this rule for vendor-specific
3097 * commands and do not return any event. We want to support
3098 * unresponded commands for such cases only. */
3100 bt_dev_err(hdev, "unresponded command not supported");
3104 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3106 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3111 hci_send_frame(hdev, skb);
3115 EXPORT_SYMBOL(__hci_cmd_send);
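/* Sketch: __hci_cmd_send() is only for vendor commands (OGF 0x3f) that
 * produce no Command Complete/Status event; the OCF and payload below
 * are made up for illustration.
 */
static int my_send_vendor_cmd(struct hci_dev *hdev)
{
	u8 param[] = { 0x01 };

	return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
			      sizeof(param), param);
}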
3117 /* Get data from the previously sent command */
3118 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3120 struct hci_command_hdr *hdr;
3122 if (!hdev->sent_cmd)
3125 hdr = (void *) hdev->sent_cmd->data;
3127 if (hdr->opcode != cpu_to_le16(opcode))
3130 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3132 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3135 /* Get data from last received event */
3136 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3138 struct hci_event_hdr *hdr;
3141 if (!hdev->recv_event)
3144 hdr = (void *)hdev->recv_event->data;
3145 offset = sizeof(*hdr);
3147 if (hdr->evt != event) {
3148 /* In case of an LE meta event, check whether the subevent matches */
3149 if (hdr->evt == HCI_EV_LE_META) {
3150 struct hci_ev_le_meta *ev;
3152 ev = (void *)hdev->recv_event->data + offset;
3153 offset += sizeof(*ev);
3154 if (ev->subevent == event)
3161 bt_dev_dbg(hdev, "event 0x%2.2x", event);
3163 return hdev->recv_event->data + offset;
3167 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3169 struct hci_acl_hdr *hdr;
3172 skb_push(skb, HCI_ACL_HDR_SIZE);
3173 skb_reset_transport_header(skb);
3174 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3175 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3176 hdr->dlen = cpu_to_le16(len);
3179 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3180 struct sk_buff *skb, __u16 flags)
3182 struct hci_conn *conn = chan->conn;
3183 struct hci_dev *hdev = conn->hdev;
3184 struct sk_buff *list;
3186 skb->len = skb_headlen(skb);
3189 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3191 switch (hdev->dev_type) {
3193 hci_add_acl_hdr(skb, conn->handle, flags);
3196 hci_add_acl_hdr(skb, chan->handle, flags);
3199 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3203 list = skb_shinfo(skb)->frag_list;
3205 /* Non-fragmented */
3206 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3208 skb_queue_tail(queue, skb);
3211 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3213 skb_shinfo(skb)->frag_list = NULL;
3215 /* Queue all fragments atomically. We need to use spin_lock_bh
3216 * here because of 6LoWPAN links, as there this function is
3217 * called from softirq and using normal spin lock could cause deadlocks. */
3220 spin_lock_bh(&queue->lock);
3222 __skb_queue_tail(queue, skb);
3224 flags &= ~ACL_START;
3227 skb = list; list = list->next;
3229 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3230 hci_add_acl_hdr(skb, conn->handle, flags);
3232 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3234 __skb_queue_tail(queue, skb);
3237 spin_unlock_bh(&queue->lock);
3241 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3243 struct hci_dev *hdev = chan->conn->hdev;
3245 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3247 hci_queue_acl(chan, &chan->data_q, skb, flags);
3249 queue_work(hdev->workqueue, &hdev->tx_work);
3253 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3255 struct hci_dev *hdev = conn->hdev;
3256 struct hci_sco_hdr hdr;
3258 BT_DBG("%s len %d", hdev->name, skb->len);
3260 hdr.handle = cpu_to_le16(conn->handle);
3261 hdr.dlen = skb->len;
3263 skb_push(skb, HCI_SCO_HDR_SIZE);
3264 skb_reset_transport_header(skb);
3265 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3267 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3269 skb_queue_tail(&conn->data_q, skb);
3270 queue_work(hdev->workqueue, &hdev->tx_work);
3274 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3276 struct hci_iso_hdr *hdr;
3279 skb_push(skb, HCI_ISO_HDR_SIZE);
3280 skb_reset_transport_header(skb);
3281 hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3282 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3283 hdr->dlen = cpu_to_le16(len);
3286 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3287 struct sk_buff *skb)
3289 struct hci_dev *hdev = conn->hdev;
3290 struct sk_buff *list;
3293 skb->len = skb_headlen(skb);
3296 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3298 list = skb_shinfo(skb)->frag_list;
3300 flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3301 hci_add_iso_hdr(skb, conn->handle, flags);
3304 /* Non-fragmented */
3305 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3307 skb_queue_tail(queue, skb);
3310 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3312 skb_shinfo(skb)->frag_list = NULL;
3314 __skb_queue_tail(queue, skb);
3317 skb = list; list = list->next;
3319 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3320 flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3322 hci_add_iso_hdr(skb, conn->handle, flags);
3324 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3326 __skb_queue_tail(queue, skb);
3331 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3333 struct hci_dev *hdev = conn->hdev;
3335 BT_DBG("%s len %d", hdev->name, skb->len);
3337 hci_queue_iso(conn, &conn->data_q, skb);
3339 queue_work(hdev->workqueue, &hdev->tx_work);
3342 /* ---- HCI TX task (outgoing data) ---- */
3344 /* HCI Connection scheduler */
3345 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3347 struct hci_dev *hdev;
3357 switch (conn->type) {
3359 cnt = hdev->acl_cnt;
3362 cnt = hdev->block_cnt;
3366 cnt = hdev->sco_cnt;
3369 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3372 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3373 hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3377 bt_dev_err(hdev, "unknown link type %d", conn->type);
3384 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3387 struct hci_conn_hash *h = &hdev->conn_hash;
3388 struct hci_conn *conn = NULL, *c;
3389 unsigned int num = 0, min = ~0;
3391 /* We don't have to lock the device here. Connections are always
3392 * added and removed with TX task disabled. */
3396 list_for_each_entry_rcu(c, &h->list, list) {
3397 if (c->type != type || skb_queue_empty(&c->data_q))
3400 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3405 if (c->sent < min) {
3410 if (hci_conn_num(hdev, type) == num)
3416 hci_quote_sent(conn, num, quote);
3418 BT_DBG("conn %p quote %d", conn, *quote);
3422 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3424 struct hci_conn_hash *h = &hdev->conn_hash;
3427 bt_dev_err(hdev, "link tx timeout");
3431 /* Kill stalled connections */
3432 list_for_each_entry_rcu(c, &h->list, list) {
3433 if (c->type == type && c->sent) {
3434 bt_dev_err(hdev, "killing stalled connection %pMR",
3436 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3443 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3446 struct hci_conn_hash *h = &hdev->conn_hash;
3447 struct hci_chan *chan = NULL;
3448 unsigned int num = 0, min = ~0, cur_prio = 0;
3449 struct hci_conn *conn;
3452 BT_DBG("%s", hdev->name);
3456 list_for_each_entry_rcu(conn, &h->list, list) {
3457 struct hci_chan *tmp;
3459 if (conn->type != type)
3462 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3467 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3468 struct sk_buff *skb;
3470 if (skb_queue_empty(&tmp->data_q))
3473 skb = skb_peek(&tmp->data_q);
3474 if (skb->priority < cur_prio)
3477 if (skb->priority > cur_prio) {
3480 cur_prio = skb->priority;
3485 if (conn->sent < min) {
3491 if (hci_conn_num(hdev, type) == conn_num)
3500 hci_quote_sent(chan->conn, num, quote);
3502 BT_DBG("chan %p quote %d", chan, *quote);
3506 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3508 struct hci_conn_hash *h = &hdev->conn_hash;
3509 struct hci_conn *conn;
3512 BT_DBG("%s", hdev->name);
3516 list_for_each_entry_rcu(conn, &h->list, list) {
3517 struct hci_chan *chan;
3519 if (conn->type != type)
3522 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3527 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3528 struct sk_buff *skb;
3535 if (skb_queue_empty(&chan->data_q))
3538 skb = skb_peek(&chan->data_q);
3539 if (skb->priority >= HCI_PRIO_MAX - 1)
3542 skb->priority = HCI_PRIO_MAX - 1;
3544 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3548 if (hci_conn_num(hdev, type) == num)
3556 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3558 /* Calculate count of blocks used by this packet */
3559 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3562 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3564 unsigned long last_tx;
3566 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3571 last_tx = hdev->le_last_tx;
3574 last_tx = hdev->acl_last_tx;
3578 /* tx timeout must be longer than maximum link supervision timeout (40.9 seconds) */
3581 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3582 hci_link_tx_to(hdev, type);
3586 static void hci_sched_sco(struct hci_dev *hdev)
3588 struct hci_conn *conn;
3589 struct sk_buff *skb;
3592 BT_DBG("%s", hdev->name);
3594 if (!hci_conn_num(hdev, SCO_LINK))
3597 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3598 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3599 BT_DBG("skb %p len %d", skb, skb->len);
3600 hci_send_frame(hdev, skb);
3603 if (conn->sent == ~0)
3609 static void hci_sched_esco(struct hci_dev *hdev)
3611 struct hci_conn *conn;
3612 struct sk_buff *skb;
3615 BT_DBG("%s", hdev->name);
3617 if (!hci_conn_num(hdev, ESCO_LINK))
3620 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3622 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3623 BT_DBG("skb %p len %d", skb, skb->len);
3624 hci_send_frame(hdev, skb);
3627 if (conn->sent == ~0)
3633 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3635 unsigned int cnt = hdev->acl_cnt;
3636 struct hci_chan *chan;
3637 struct sk_buff *skb;
3640 __check_timeout(hdev, cnt, ACL_LINK);
3642 while (hdev->acl_cnt &&
3643 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3644 u32 priority = (skb_peek(&chan->data_q))->priority;
3645 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3646 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3647 skb->len, skb->priority);
3649 /* Stop if priority has changed */
3650 if (skb->priority < priority)
3653 skb = skb_dequeue(&chan->data_q);
3655 hci_conn_enter_active_mode(chan->conn,
3656 bt_cb(skb)->force_active);
3658 hci_send_frame(hdev, skb);
3659 hdev->acl_last_tx = jiffies;
3665 /* Send pending SCO packets right away */
3666 hci_sched_sco(hdev);
3667 hci_sched_esco(hdev);
3671 if (cnt != hdev->acl_cnt)
3672 hci_prio_recalculate(hdev, ACL_LINK);
3675 static void hci_sched_acl_blk(struct hci_dev *hdev)
3677 unsigned int cnt = hdev->block_cnt;
3678 struct hci_chan *chan;
3679 struct sk_buff *skb;
3683 BT_DBG("%s", hdev->name);
3685 if (hdev->dev_type == HCI_AMP)
3690 __check_timeout(hdev, cnt, type);
3692 while (hdev->block_cnt > 0 &&
3693 (chan = hci_chan_sent(hdev, type, &quote))) {
3694 u32 priority = (skb_peek(&chan->data_q))->priority;
3695 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3698 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3699 skb->len, skb->priority);
3701 /* Stop if priority has changed */
3702 if (skb->priority < priority)
3705 skb = skb_dequeue(&chan->data_q);
3707 blocks = __get_blocks(hdev, skb);
3708 if (blocks > hdev->block_cnt)
3711 hci_conn_enter_active_mode(chan->conn,
3712 bt_cb(skb)->force_active);
3714 hci_send_frame(hdev, skb);
3715 hdev->acl_last_tx = jiffies;
3717 hdev->block_cnt -= blocks;
3720 chan->sent += blocks;
3721 chan->conn->sent += blocks;
3725 if (cnt != hdev->block_cnt)
3726 hci_prio_recalculate(hdev, type);
3729 static void hci_sched_acl(struct hci_dev *hdev)
3731 BT_DBG("%s", hdev->name);
3733 /* No ACL link over BR/EDR controller */
3734 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3737 /* No AMP link over AMP controller */
3738 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3741 switch (hdev->flow_ctl_mode) {
3742 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3743 hci_sched_acl_pkt(hdev);
3746 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3747 hci_sched_acl_blk(hdev);
3752 static void hci_sched_le(struct hci_dev *hdev)
3754 struct hci_chan *chan;
3755 struct sk_buff *skb;
3756 int quote, cnt, tmp;
3758 BT_DBG("%s", hdev->name);
3760 if (!hci_conn_num(hdev, LE_LINK))
3763 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3765 __check_timeout(hdev, cnt, LE_LINK);
3768 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3769 u32 priority = (skb_peek(&chan->data_q))->priority;
3770 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3771 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3772 skb->len, skb->priority);
3774 /* Stop if priority has changed */
3775 if (skb->priority < priority)
3778 skb = skb_dequeue(&chan->data_q);
3780 hci_send_frame(hdev, skb);
3781 hdev->le_last_tx = jiffies;
3787 /* Send pending SCO packets right away */
3788 hci_sched_sco(hdev);
3789 hci_sched_esco(hdev);
3796 hdev->acl_cnt = cnt;
3799 hci_prio_recalculate(hdev, LE_LINK);
3803 static void hci_sched_iso(struct hci_dev *hdev)
3805 struct hci_conn *conn;
3806 struct sk_buff *skb;
3809 BT_DBG("%s", hdev->name);
3811 if (!hci_conn_num(hdev, ISO_LINK))
3814 cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3815 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3816 while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3817 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3818 BT_DBG("skb %p len %d", skb, skb->len);
3819 hci_send_frame(hdev, skb);
3822 if (conn->sent == ~0)
3829 static void hci_tx_work(struct work_struct *work)
3831 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3832 struct sk_buff *skb;
3834 BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3835 hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3837 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3838 /* Schedule queues and send stuff to HCI driver */
3839 hci_sched_sco(hdev);
3840 hci_sched_esco(hdev);
3841 hci_sched_iso(hdev);
3842 hci_sched_acl(hdev);
3847 /* Send any queued raw (unknown type) packets */
3847 while ((skb = skb_dequeue(&hdev->raw_q)))
3848 hci_send_frame(hdev, skb);
3851 /* ----- HCI RX task (incoming data processing) ----- */
3853 /* ACL data packet */
3854 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3856 struct hci_acl_hdr *hdr = (void *) skb->data;
3857 struct hci_conn *conn;
3858 __u16 handle, flags;
3860 skb_pull(skb, HCI_ACL_HDR_SIZE);
3862 handle = __le16_to_cpu(hdr->handle);
3863 flags = hci_flags(handle);
3864 handle = hci_handle(handle);
3866 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3869 hdev->stat.acl_rx++;
3872 conn = hci_conn_hash_lookup_handle(hdev, handle);
3873 hci_dev_unlock(hdev);
3876 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3878 /* Send to upper protocol */
3879 l2cap_recv_acldata(conn, skb, flags);
3882 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3889 /* SCO data packet */
3890 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3892 struct hci_sco_hdr *hdr = (void *) skb->data;
3893 struct hci_conn *conn;
3894 __u16 handle, flags;
3896 skb_pull(skb, HCI_SCO_HDR_SIZE);
3898 handle = __le16_to_cpu(hdr->handle);
3899 flags = hci_flags(handle);
3900 handle = hci_handle(handle);
3902 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3905 hdev->stat.sco_rx++;
3908 conn = hci_conn_hash_lookup_handle(hdev, handle);
3909 hci_dev_unlock(hdev);
3912 /* Send to upper protocol */
3913 bt_cb(skb)->sco.pkt_status = flags & 0x03;
3914 sco_recv_scodata(conn, skb);
3917 bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3924 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3926 struct hci_iso_hdr *hdr;
3927 struct hci_conn *conn;
3928 __u16 handle, flags;
3930 hdr = skb_pull_data(skb, sizeof(*hdr));
3932 bt_dev_err(hdev, "ISO packet too small");
3936 handle = __le16_to_cpu(hdr->handle);
3937 flags = hci_flags(handle);
3938 handle = hci_handle(handle);
3940 bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
3944 conn = hci_conn_hash_lookup_handle(hdev, handle);
3945 hci_dev_unlock(hdev);
3948 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
3953 /* Send to upper protocol */
3954 iso_recv(conn, skb, flags);
3961 static bool hci_req_is_complete(struct hci_dev *hdev)
3963 struct sk_buff *skb;
3965 skb = skb_peek(&hdev->cmd_q);
3969 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
3972 static void hci_resend_last(struct hci_dev *hdev)
3974 struct hci_command_hdr *sent;
3975 struct sk_buff *skb;
3978 if (!hdev->sent_cmd)
3981 sent = (void *) hdev->sent_cmd->data;
3982 opcode = __le16_to_cpu(sent->opcode);
3983 if (opcode == HCI_OP_RESET)
3986 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3990 skb_queue_head(&hdev->cmd_q, skb);
3991 queue_work(hdev->workqueue, &hdev->cmd_work);
3994 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
3995 hci_req_complete_t *req_complete,
3996 hci_req_complete_skb_t *req_complete_skb)
3998 struct sk_buff *skb;
3999 unsigned long flags;
4001 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4003 /* If the completed command doesn't match the last one that was
4004 * sent, we need to do special handling of it. */
4006 if (!hci_sent_cmd_data(hdev, opcode)) {
4007 /* Some CSR based controllers generate a spontaneous
4008 * reset complete event during init and any pending
4009 * command will never be completed. In such a case we
4010 * need to resend whatever was the last sent command. */
4013 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4014 hci_resend_last(hdev);
4019 /* If we reach this point this event matches the last command sent */
4020 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4022 /* If the command succeeded and there are still more commands in
4023 * this request, the request is not yet complete. */
4025 if (!status && !hci_req_is_complete(hdev))
4028 /* If this was the last command in a request the complete
4029 * callback would be found in hdev->sent_cmd instead of the
4030 * command queue (hdev->cmd_q). */
4032 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4033 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4037 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4038 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4042 /* Remove all pending commands belonging to this request */
4043 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4044 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4045 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4046 __skb_queue_head(&hdev->cmd_q, skb);
4050 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4051 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4053 *req_complete = bt_cb(skb)->hci.req_complete;
4054 dev_kfree_skb_irq(skb);
4056 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4059 static void hci_rx_work(struct work_struct *work)
4061 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4062 struct sk_buff *skb;
4064 BT_DBG("%s", hdev->name);
4066 /* The kcov_remote functions are used to collect packet-parsing
4067 * coverage information from this background thread and to associate
4068 * it with the thread of the syscall which originally injected
4069 * the packet. This helps with fuzzing the kernel. */
4071 for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4072 kcov_remote_start_common(skb_get_kcov_handle(skb));
4074 /* Send copy to monitor */
4075 hci_send_to_monitor(hdev, skb);
4077 if (atomic_read(&hdev->promisc)) {
4078 /* Send copy to the sockets */
4079 hci_send_to_sock(hdev, skb);
4082 /* If the device has been opened in HCI_USER_CHANNEL,
4083 * userspace has exclusive access to the device.
4084 * While the device is in HCI_INIT, we still need to process
4085 * the data packets so that the driver can
4086 * complete its setup(). */
4088 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4089 !test_bit(HCI_INIT, &hdev->flags)) {
4094 if (test_bit(HCI_INIT, &hdev->flags)) {
4095 /* Don't process data packets in these states. */
4096 switch (hci_skb_pkt_type(skb)) {
4097 case HCI_ACLDATA_PKT:
4098 case HCI_SCODATA_PKT:
4099 case HCI_ISODATA_PKT:
4106 switch (hci_skb_pkt_type(skb)) {
4108 BT_DBG("%s Event packet", hdev->name);
4109 hci_event_packet(hdev, skb);
4112 case HCI_ACLDATA_PKT:
4113 BT_DBG("%s ACL data packet", hdev->name);
4114 hci_acldata_packet(hdev, skb);
4117 case HCI_SCODATA_PKT:
4118 BT_DBG("%s SCO data packet", hdev->name);
4119 hci_scodata_packet(hdev, skb);
4122 case HCI_ISODATA_PKT:
4123 BT_DBG("%s ISO data packet", hdev->name);
4124 hci_isodata_packet(hdev, skb);
4134 static void hci_cmd_work(struct work_struct *work)
4136 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4137 struct sk_buff *skb;
4139 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4140 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4142 /* Send queued commands */
4143 if (atomic_read(&hdev->cmd_cnt)) {
4144 skb = skb_dequeue(&hdev->cmd_q);
4148 kfree_skb(hdev->sent_cmd);
4150 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4151 if (hdev->sent_cmd) {
4153 if (hci_req_status_pend(hdev))
4154 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
4155 atomic_dec(&hdev->cmd_cnt);
4157 res = hci_send_frame(hdev, skb);
4159 __hci_cmd_sync_cancel(hdev, -res);
4162 if (test_bit(HCI_RESET, &hdev->flags) ||
4163 hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4164 cancel_delayed_work(&hdev->cmd_timer);
4166 queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4170 skb_queue_head(&hdev->cmd_q, skb);
4171 queue_work(hdev->workqueue, &hdev->cmd_work);