2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
30 #include <linux/rfkill.h>
32 #include <net/bluetooth/bluetooth.h>
33 #include <net/bluetooth/hci_core.h>
35 #define AUTO_OFF_TIMEOUT 2000
/* Forward declarations for the per-device work handlers defined later in
 * this file (RX, command and TX work).
 */
37 static void hci_rx_work(struct work_struct *work);
38 static void hci_cmd_work(struct work_struct *work);
39 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, protected by hci_dev_list_lock. */
42 LIST_HEAD(hci_dev_list);
43 DEFINE_RWLOCK(hci_dev_list_lock);
45 /* HCI callback list */
46 LIST_HEAD(hci_cb_list);
47 DEFINE_RWLOCK(hci_cb_list_lock);
49 /* ---- HCI notifications ---- */
/* Forward a device lifecycle event to the HCI socket layer. */
51 static void hci_notify(struct hci_dev *hdev, int event)
53 hci_sock_dev_event(hdev, event);
56 /* ---- HCI requests ---- */
/* Command-complete hook for synchronous requests.  During HCI_INIT a
 * completion that does not match init_last_cmd re-queues a clone of the
 * last sent command instead of finishing the request (CSR controllers can
 * emit a spontaneous Reset complete during init).  Otherwise it records the
 * result and wakes the sleeper in __hci_request().
 * NOTE(review): braces/blank lines appear lost in extraction in this span.
 */
58 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
60 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
62 /* If this is the init phase check if the completed command matches
63 * the last init command, and if not just return.
65 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
66 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
67 u16 opcode = __le16_to_cpu(sent->opcode);
70 /* Some CSR based controllers generate a spontaneous
71 * reset complete event during init and any pending
72 * command will never be completed. In such a case we
73 * need to resend whatever was the last sent
77 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
/* Clone rather than re-queue: hdev->sent_cmd stays owned by the device. */
80 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
82 skb_queue_head(&hdev->cmd_q, skb);
83 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Wake up whoever is sleeping in __hci_request() on this device. */
89 if (hdev->req_status == HCI_REQ_PEND) {
90 hdev->req_result = result;
91 hdev->req_status = HCI_REQ_DONE;
92 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with the given errno value and wake
 * the waiter.  Used when the device is going down (see hci_dev_do_close).
 */
96 static void hci_req_cancel(struct hci_dev *hdev, int err)
98 BT_DBG("%s err 0x%2.2x", hdev->name, err);
100 if (hdev->req_status == HCI_REQ_PEND) {
101 hdev->req_result = err;
102 hdev->req_status = HCI_REQ_CANCELED;
103 wake_up_interruptible(&hdev->req_wait_q);
107 /* Execute request and wait for completion. */
/* Core of the synchronous request machinery: marks the request pending,
 * runs the req() callback (which queues HCI commands), then sleeps
 * interruptibly until hci_req_complete()/hci_req_cancel() wakes it or the
 * timeout expires.  Caller must hold the req lock (see hci_request()).
 * NOTE(review): several lines (err declaration, req() invocation, case
 * labels, default branch) appear lost in extraction.
 */
108 static int __hci_request(struct hci_dev *hdev,
109 void (*req)(struct hci_dev *hdev, unsigned long opt),
110 unsigned long opt, __u32 timeout)
112 DECLARE_WAITQUEUE(wait, current);
115 BT_DBG("%s start", hdev->name);
117 hdev->req_status = HCI_REQ_PEND;
119 add_wait_queue(&hdev->req_wait_q, &wait);
120 set_current_state(TASK_INTERRUPTIBLE);
123 schedule_timeout(timeout);
125 remove_wait_queue(&hdev->req_wait_q, &wait);
/* Interrupted by a signal: report -EINTR to the caller. */
127 if (signal_pending(current))
130 switch (hdev->req_status) {
/* HCI_REQ_DONE: translate the HCI status code into a negative errno. */
132 err = -bt_to_errno(hdev->req_result);
135 case HCI_REQ_CANCELED:
/* req_result already holds a positive errno from hci_req_cancel(). */
136 err = -hdev->req_result;
144 hdev->req_status = hdev->req_result = 0;
146 BT_DBG("%s end: err %d", hdev->name, err);
/* Public wrapper: refuses requests on a downed device and serializes all
 * requests per device via the req lock.
 */
151 static int hci_request(struct hci_dev *hdev,
152 void (*req)(struct hci_dev *hdev, unsigned long opt),
153 unsigned long opt, __u32 timeout)
157 if (!test_bit(HCI_UP, &hdev->flags))
160 /* Serialize all requests */
162 ret = __hci_request(hdev, req, opt, timeout);
163 hci_req_unlock(hdev);
/* Request callback: issue an HCI Reset and flag it so the event handler
 * knows a reset is in flight.
 */
168 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
170 BT_DBG("%s %ld", hdev->name, opt);
173 set_bit(HCI_RESET, &hdev->flags);
174 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
177 static void bredr_init(struct hci_dev *hdev)
179 struct hci_cp_delete_stored_link_key cp;
183 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
185 /* Mandatory initialization */
188 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
189 set_bit(HCI_RESET, &hdev->flags);
190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
193 /* Read Local Supported Features */
194 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
196 /* Read Local Version */
197 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
199 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
200 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
202 /* Read BD Address */
203 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
205 /* Read Class of Device */
206 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
208 /* Read Local Name */
209 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
211 /* Read Voice Setting */
212 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
214 /* Optional initialization */
216 /* Clear Event Filters */
217 flt_type = HCI_FLT_CLEAR_ALL;
218 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
220 /* Connection accept timeout ~20 secs */
221 param = cpu_to_le16(0x7d00);
222 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
224 bacpy(&cp.bdaddr, BDADDR_ANY);
226 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
/* Init-request callback for AMP controllers: block-based flow control,
 * then reset and read version/AMP info.
 */
229 static void amp_init(struct hci_dev *hdev)
231 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
234 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
236 /* Read Local Version */
237 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
239 /* Read Local AMP Info */
240 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
/* Top-level init request: drain driver-supplied bootstrap commands into the
 * command queue, then dispatch to bredr_init()/amp_init() by device type.
 * NOTE(review): the case labels for HCI_BREDR/HCI_AMP appear lost in
 * extraction.
 */
243 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
247 BT_DBG("%s %ld", hdev->name, opt);
249 /* Driver initialization */
251 /* Special commands */
252 while ((skb = skb_dequeue(&hdev->driver_init))) {
253 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
254 skb->dev = (void *) hdev;
256 skb_queue_tail(&hdev->cmd_q, skb);
257 queue_work(hdev->workqueue, &hdev->cmd_work);
259 skb_queue_purge(&hdev->driver_init);
261 switch (hdev->dev_type) {
271 BT_ERR("Unknown device type %d", hdev->dev_type);
/* LE-specific init request: just query the LE buffer size. */
277 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
279 BT_DBG("%s", hdev->name);
281 /* Read LE buffer size */
282 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
/* Request callback: write the scan-enable byte (opt carries the value). */
285 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
289 BT_DBG("%s %x", hdev->name, scan);
291 /* Inquiry and Page scans */
292 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request callback: write the authentication-enable byte from opt. */
295 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
299 BT_DBG("%s %x", hdev->name, auth);
302 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request callback: write the encryption-mode byte from opt. */
305 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
309 BT_DBG("%s %x", hdev->name, encrypt);
312 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request callback: write the default link policy (16-bit LE) from opt. */
315 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
317 __le16 policy = cpu_to_le16(opt);
319 BT_DBG("%s %x", hdev->name, policy);
321 /* Default link policy */
322 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
325 /* Get HCI device by index.
326 * Device is held on return. */
/* Walks the global device list under the read lock; hci_dev_hold() takes a
 * reference the caller must drop with hci_dev_put().  Returns NULL if no
 * device with that id exists (or index is invalid).
 */
327 struct hci_dev *hci_dev_get(int index)
329 struct hci_dev *hdev = NULL, *d;
336 read_lock(&hci_dev_list_lock);
337 list_for_each_entry(d, &hci_dev_list, list) {
338 if (d->id == index) {
339 hdev = hci_dev_hold(d);
343 read_unlock(&hci_dev_list_lock);
347 /* ---- Inquiry support ---- */
/* True while discovery is in the FINDING or RESOLVING state. */
349 bool hci_discovery_active(struct hci_dev *hdev)
351 struct discovery_state *discov = &hdev->discovery;
353 switch (discov->state) {
354 case DISCOVERY_FINDING:
355 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the STOPPED/FINDING edges.  No-op if the state is unchanged.
 */
363 void hci_discovery_set_state(struct hci_dev *hdev, int state)
365 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
367 if (hdev->discovery.state == state)
371 case DISCOVERY_STOPPED:
/* Suppress the "stopped" event when discovery never actually started. */
372 if (hdev->discovery.state != DISCOVERY_STARTING)
373 mgmt_discovering(hdev, 0);
375 case DISCOVERY_STARTING:
377 case DISCOVERY_FINDING:
378 mgmt_discovering(hdev, 1);
380 case DISCOVERY_RESOLVING:
382 case DISCOVERY_STOPPING:
386 hdev->discovery.state = state;
/* Drop every cached inquiry entry and reset the unknown/resolve sublists.
 * NOTE(review): the per-entry list_del/kfree lines appear lost in
 * extraction.
 */
389 static void inquiry_cache_flush(struct hci_dev *hdev)
391 struct discovery_state *cache = &hdev->discovery;
392 struct inquiry_entry *p, *n;
394 list_for_each_entry_safe(p, n, &cache->all, all) {
399 INIT_LIST_HEAD(&cache->unknown);
400 INIT_LIST_HEAD(&cache->resolve);
/* Find a cache entry by bdaddr in the full list; NULL if absent. */
403 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
406 struct discovery_state *cache = &hdev->discovery;
407 struct inquiry_entry *e;
409 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
411 list_for_each_entry(e, &cache->all, all) {
412 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry whose remote name is still unknown; NULL if absent. */
419 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
422 struct discovery_state *cache = &hdev->discovery;
423 struct inquiry_entry *e;
425 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
427 list_for_each_entry(e, &cache->unknown, list) {
428 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the resolve list; BDADDR_ANY matches by name_state
 * instead of address.
 */
435 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
439 struct discovery_state *cache = &hdev->discovery;
440 struct inquiry_entry *e;
442 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
444 list_for_each_entry(e, &cache->resolve, list) {
445 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
447 if (!bacmp(&e->data.bdaddr, bdaddr))
/* (Re)insert an entry into the resolve list ordered by descending RSSI
 * magnitude, skipping entries already being resolved (NAME_PENDING).
 */
454 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
455 struct inquiry_entry *ie)
457 struct discovery_state *cache = &hdev->discovery;
458 struct list_head *pos = &cache->resolve;
459 struct inquiry_entry *p;
463 list_for_each_entry(p, &cache->resolve, list) {
464 if (p->name_state != NAME_PENDING &&
465 abs(p->data.rssi) >= abs(ie->data.rssi))
470 list_add(&ie->list, pos);
/* Merge an inquiry result into the cache: update an existing entry (and
 * its resolve-list position on RSSI change) or allocate a new one, track
 * name_state transitions, and report via *ssp whether the peer uses SSP.
 * Returns whether a name-resolution round is still needed (per the
 * NAME_NOT_KNOWN check at the end).
 * NOTE(review): several control-flow lines (update/done labels, returns)
 * appear lost in extraction.
 */
473 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
474 bool name_known, bool *ssp)
476 struct discovery_state *cache = &hdev->discovery;
477 struct inquiry_entry *ie;
479 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
482 *ssp = data->ssp_mode;
484 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
486 if (ie->data.ssp_mode && ssp)
489 if (ie->name_state == NAME_NEEDED &&
490 data->rssi != ie->data.rssi) {
491 ie->data.rssi = data->rssi;
492 hci_inquiry_cache_update_resolve(hdev, ie);
498 /* Entry not in the cache. Add new one. */
499 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
503 list_add(&ie->all, &cache->all);
506 ie->name_state = NAME_KNOWN;
508 ie->name_state = NAME_NOT_KNOWN;
509 list_add(&ie->list, &cache->unknown);
513 if (name_known && ie->name_state != NAME_KNOWN &&
514 ie->name_state != NAME_PENDING) {
515 ie->name_state = NAME_KNOWN;
519 memcpy(&ie->data, data, sizeof(*data));
520 ie->timestamp = jiffies;
521 cache->timestamp = jiffies;
523 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to num cached entries into buf as struct inquiry_info records;
 * returns the number copied.  Must not sleep (called under hdev lock).
 */
529 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
531 struct discovery_state *cache = &hdev->discovery;
532 struct inquiry_info *info = (struct inquiry_info *) buf;
533 struct inquiry_entry *e;
536 list_for_each_entry(e, &cache->all, all) {
537 struct inquiry_data *data = &e->data;
542 bacpy(&info->bdaddr, &data->bdaddr);
543 info->pscan_rep_mode = data->pscan_rep_mode;
544 info->pscan_period_mode = data->pscan_period_mode;
545 info->pscan_mode = data->pscan_mode;
546 memcpy(info->dev_class, data->dev_class, 3);
547 info->clock_offset = data->clock_offset;
553 BT_DBG("cache %p, copied %d", cache, copied);
/* Request callback: start an HCI Inquiry with the lap/length/num_rsp taken
 * from the hci_inquiry_req passed through opt.  Skipped if an inquiry is
 * already running.
 */
557 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
559 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
560 struct hci_cp_inquiry cp;
562 BT_DBG("%s", hdev->name);
564 if (test_bit(HCI_INQUIRY, &hdev->flags))
568 memcpy(&cp.lap, &ir->lap, 3);
569 cp.length = ir->length;
570 cp.num_rsp = ir->num_rsp;
571 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl: run an inquiry if the cache is stale/empty or a flush
 * was requested, then dump the cache into a kernel buffer and copy both
 * the updated request header and the results back to user space.
 * NOTE(review): timeout setup, error paths and the final put/free lines
 * appear lost in extraction.
 */
574 int hci_inquiry(void __user *arg)
576 __u8 __user *ptr = arg;
577 struct hci_inquiry_req ir;
578 struct hci_dev *hdev;
579 int err = 0, do_inquiry = 0, max_rsp;
583 if (copy_from_user(&ir, ptr, sizeof(ir)))
586 hdev = hci_dev_get(ir.dev_id);
591 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
592 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
593 inquiry_cache_flush(hdev);
596 hci_dev_unlock(hdev);
/* ir.length is in 1.28s units upstream; here scaled via msecs. */
598 timeo = ir.length * msecs_to_jiffies(2000);
601 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
606 /* for unlimited number of responses we will use buffer with
609 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
611 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
612 * copy it to the user space.
614 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
621 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
622 hci_dev_unlock(hdev);
624 BT_DBG("num_rsp %d", ir.num_rsp);
626 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
628 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
641 /* ---- HCI ioctl helpers ---- */
/* Bring an HCI device up: refuses unregistering or rfkill-blocked devices,
 * opens the transport, runs the init request sequence (unless the device
 * is raw), and on failure tears down the work/queues it started.
 * Returns 0 on success or a negative errno.
 * NOTE(review): the req-lock acquisition, error labels and several cleanup
 * lines appear lost in extraction.
 */
643 int hci_dev_open(__u16 dev)
645 struct hci_dev *hdev;
648 hdev = hci_dev_get(dev);
652 BT_DBG("%s %p", hdev->name, hdev);
656 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
661 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
666 if (test_bit(HCI_UP, &hdev->flags)) {
671 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
672 set_bit(HCI_RAW, &hdev->flags);
674 /* Treat all non BR/EDR controllers as raw devices if
675 enable_hs is not set */
676 if (hdev->dev_type != HCI_BREDR && !enable_hs)
677 set_bit(HCI_RAW, &hdev->flags);
/* Driver-provided transport open; non-zero means the device failed. */
679 if (hdev->open(hdev)) {
684 if (!test_bit(HCI_RAW, &hdev->flags)) {
685 atomic_set(&hdev->cmd_cnt, 1);
686 set_bit(HCI_INIT, &hdev->flags);
687 hdev->init_last_cmd = 0;
689 ret = __hci_request(hdev, hci_init_req, 0,
690 msecs_to_jiffies(HCI_INIT_TIMEOUT));
692 if (lmp_host_le_capable(hdev))
693 ret = __hci_request(hdev, hci_le_init_req, 0,
694 msecs_to_jiffies(HCI_INIT_TIMEOUT));
696 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify, and report power-on over mgmt. */
701 set_bit(HCI_UP, &hdev->flags);
702 hci_notify(hdev, HCI_DEV_UP);
703 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
705 mgmt_powered(hdev, 1);
706 hci_dev_unlock(hdev);
709 /* Init failed, cleanup */
710 flush_work(&hdev->tx_work);
711 flush_work(&hdev->cmd_work);
712 flush_work(&hdev->rx_work);
714 skb_queue_purge(&hdev->cmd_q);
715 skb_queue_purge(&hdev->rx_q);
720 if (hdev->sent_cmd) {
721 kfree_skb(hdev->sent_cmd);
722 hdev->sent_cmd = NULL;
730 hci_req_unlock(hdev);
/* Bring the device down: cancel pending requests and timers, flush work
 * and queues, flush the inquiry cache and connections, optionally send a
 * final reset (HCI_QUIRK_RESET_ON_CLOSE), close the transport and report
 * power-off over mgmt.  Idempotent if the device is already down.
 * NOTE(review): several lock/flush/close lines appear lost in extraction.
 */
735 static int hci_dev_do_close(struct hci_dev *hdev)
737 BT_DBG("%s %p", hdev->name, hdev);
739 cancel_work_sync(&hdev->le_scan);
741 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and bail out. */
744 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
745 del_timer_sync(&hdev->cmd_timer);
746 hci_req_unlock(hdev);
750 /* Flush RX and TX works */
751 flush_work(&hdev->tx_work);
752 flush_work(&hdev->rx_work);
754 if (hdev->discov_timeout > 0) {
755 cancel_delayed_work(&hdev->discov_off);
756 hdev->discov_timeout = 0;
757 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
760 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
761 cancel_delayed_work(&hdev->service_cache);
763 cancel_delayed_work_sync(&hdev->le_scan_disable);
766 inquiry_cache_flush(hdev);
767 hci_conn_hash_flush(hdev);
768 hci_dev_unlock(hdev);
770 hci_notify(hdev, HCI_DEV_DOWN);
776 skb_queue_purge(&hdev->cmd_q);
777 atomic_set(&hdev->cmd_cnt, 1);
/* Give the controller a final reset unless it is raw or lacks the quirk. */
778 if (!test_bit(HCI_RAW, &hdev->flags) &&
779 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
780 set_bit(HCI_INIT, &hdev->flags);
781 __hci_request(hdev, hci_reset_req, 0,
782 msecs_to_jiffies(250));
783 clear_bit(HCI_INIT, &hdev->flags);
787 flush_work(&hdev->cmd_work);
790 skb_queue_purge(&hdev->rx_q);
791 skb_queue_purge(&hdev->cmd_q);
792 skb_queue_purge(&hdev->raw_q);
794 /* Drop last sent command */
795 if (hdev->sent_cmd) {
796 del_timer_sync(&hdev->cmd_timer);
797 kfree_skb(hdev->sent_cmd);
798 hdev->sent_cmd = NULL;
801 /* After this point our queues are empty
802 * and no tasks are scheduled. */
/* Only announce power-off if it wasn't an automatic power-down. */
805 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
807 mgmt_powered(hdev, 0);
808 hci_dev_unlock(hdev);
/* Clear controller-derived state so a reopen starts clean. */
814 memset(hdev->eir, 0, sizeof(hdev->eir));
815 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
817 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl entry: cancel a pending auto-power-off, then close. */
823 int hci_dev_close(__u16 dev)
825 struct hci_dev *hdev;
828 hdev = hci_dev_get(dev);
832 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
833 cancel_delayed_work(&hdev->power_off);
835 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop queued traffic and state, then issue an HCI
 * Reset (unless the device is raw).  Device must be up.
 */
841 int hci_dev_reset(__u16 dev)
843 struct hci_dev *hdev;
846 hdev = hci_dev_get(dev);
852 if (!test_bit(HCI_UP, &hdev->flags))
856 skb_queue_purge(&hdev->rx_q);
857 skb_queue_purge(&hdev->cmd_q);
860 inquiry_cache_flush(hdev);
861 hci_conn_hash_flush(hdev);
862 hci_dev_unlock(hdev);
867 atomic_set(&hdev->cmd_cnt, 1);
868 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
870 if (!test_bit(HCI_RAW, &hdev->flags))
871 ret = __hci_request(hdev, hci_reset_req, 0,
872 msecs_to_jiffies(HCI_INIT_TIMEOUT));
875 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's transfer statistics. */
880 int hci_dev_reset_stat(__u16 dev)
882 struct hci_dev *hdev;
885 hdev = hci_dev_get(dev);
889 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the per-device HCISET* ioctls: runs the matching request
 * (auth/encrypt/scan/linkpol) or directly updates hdev fields (link mode,
 * packet type, ACL/SCO MTU).  dev_opt packs two __u16 values for the MTU
 * cases: high half = mtu, low half = packet count.
 * NOTE(review): the switch statement and case labels appear lost in
 * extraction.
 */
896 int hci_dev_cmd(unsigned int cmd, void __user *arg)
898 struct hci_dev *hdev;
899 struct hci_dev_req dr;
902 if (copy_from_user(&dr, arg, sizeof(dr)))
905 hdev = hci_dev_get(dr.dev_id);
911 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
912 msecs_to_jiffies(HCI_INIT_TIMEOUT));
916 if (!lmp_encrypt_capable(hdev)) {
921 if (!test_bit(HCI_AUTH, &hdev->flags)) {
922 /* Auth must be enabled first */
923 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
924 msecs_to_jiffies(HCI_INIT_TIMEOUT));
929 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
930 msecs_to_jiffies(HCI_INIT_TIMEOUT));
934 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
935 msecs_to_jiffies(HCI_INIT_TIMEOUT));
939 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
940 msecs_to_jiffies(HCI_INIT_TIMEOUT));
944 hdev->link_mode = ((__u16) dr.dev_opt) &
945 (HCI_LM_MASTER | HCI_LM_ACCEPT);
949 hdev->pkt_type = (__u16) dr.dev_opt;
953 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
954 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
958 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
959 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: copy id/flags for every registered device into a
 * user-supplied hci_dev_list_req.  Touching the list also cancels any
 * pending auto-off and marks non-mgmt devices pairable.
 * NOTE(review): kfree(dl) and some bounds handling appear lost in
 * extraction.
 */
971 int hci_get_dev_list(void __user *arg)
973 struct hci_dev *hdev;
974 struct hci_dev_list_req *dl;
975 struct hci_dev_req *dr;
976 int n = 0, size, err;
979 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the allocation stays within two pages. */
982 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
985 size = sizeof(*dl) + dev_num * sizeof(*dr);
987 dl = kzalloc(size, GFP_KERNEL);
993 read_lock(&hci_dev_list_lock);
994 list_for_each_entry(hdev, &hci_dev_list, list) {
995 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
996 cancel_delayed_work(&hdev->power_off);
998 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
999 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1001 (dr + n)->dev_id = hdev->id;
1002 (dr + n)->dev_opt = hdev->flags;
1007 read_unlock(&hci_dev_list_lock);
/* Only copy back as many entries as were actually filled in. */
1010 size = sizeof(*dl) + n * sizeof(*dr);
1012 err = copy_to_user(arg, dl, size);
1015 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a hci_dev_info snapshot for one device. */
1018 int hci_get_dev_info(void __user *arg)
1020 struct hci_dev *hdev;
1021 struct hci_dev_info di;
1024 if (copy_from_user(&di, arg, sizeof(di)))
1027 hdev = hci_dev_get(di.dev_id);
1031 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1032 cancel_delayed_work_sync(&hdev->power_off);
1034 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1035 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1037 strcpy(di.name, hdev->name);
1038 di.bdaddr = hdev->bdaddr;
1039 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1040 di.flags = hdev->flags;
1041 di.pkt_type = hdev->pkt_type;
1042 di.acl_mtu = hdev->acl_mtu;
1043 di.acl_pkts = hdev->acl_pkts;
1044 di.sco_mtu = hdev->sco_mtu;
1045 di.sco_pkts = hdev->sco_pkts;
1046 di.link_policy = hdev->link_policy;
1047 di.link_mode = hdev->link_mode;
1049 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1050 memcpy(&di.features, &hdev->features, sizeof(di.features));
1052 if (copy_to_user(arg, &di, sizeof(di)))
1060 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: when the switch blocks radio, force the device down. */
1062 static int hci_rfkill_set_block(void *data, bool blocked)
1064 struct hci_dev *hdev = data;
1066 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1071 hci_dev_do_close(hdev);
1076 static const struct rfkill_ops hci_rfkill_ops = {
1077 .set_block = hci_rfkill_set_block,
/* power_on work: open the device; if it is in the auto-off window, arm the
 * delayed power_off; once out of SETUP, announce the index over mgmt.
 */
1080 static void hci_power_on(struct work_struct *work)
1082 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1084 BT_DBG("%s", hdev->name);
1086 if (hci_dev_open(hdev->id) < 0)
1089 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1090 schedule_delayed_work(&hdev->power_off,
1091 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1093 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1094 mgmt_index_added(hdev);
/* Delayed power_off work: simply close the device. */
1097 static void hci_power_off(struct work_struct *work)
1099 struct hci_dev *hdev = container_of(work, struct hci_dev,
1102 BT_DBG("%s", hdev->name);
1104 hci_dev_do_close(hdev);
/* discov_off work: end the discoverable window by restoring page-scan only
 * and clearing the timeout.  Runs under the hdev lock.
 */
1107 static void hci_discov_off(struct work_struct *work)
1109 struct hci_dev *hdev;
1110 u8 scan = SCAN_PAGE;
1112 hdev = container_of(work, struct hci_dev, discov_off.work);
1114 BT_DBG("%s", hdev->name);
1118 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1120 hdev->discov_timeout = 0;
1122 hci_dev_unlock(hdev);
/* Free every registered UUID on the device.
 * NOTE(review): the list_del/kfree lines appear lost in extraction.
 */
1125 int hci_uuids_clear(struct hci_dev *hdev)
1127 struct list_head *p, *n;
1129 list_for_each_safe(p, n, &hdev->uuids) {
1130 struct bt_uuid *uuid;
1132 uuid = list_entry(p, struct bt_uuid, list);
/* Free every stored BR/EDR link key. */
1141 int hci_link_keys_clear(struct hci_dev *hdev)
1143 struct list_head *p, *n;
1145 list_for_each_safe(p, n, &hdev->link_keys) {
1146 struct link_key *key;
1148 key = list_entry(p, struct link_key, list);
/* Free every stored SMP long-term key. */
1157 int hci_smp_ltks_clear(struct hci_dev *hdev)
1159 struct smp_ltk *k, *tmp;
1161 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Look up a stored link key by remote address; NULL if none. */
1169 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1173 list_for_each_entry(k, &hdev->link_keys, list)
1174 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a link key should survive disconnection, based on key
 * type and both sides' authentication requirements (bonding intent).
 */
1180 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1181 u8 key_type, u8 old_key_type)
/* Legacy (pre-SSP) key types below 0x03 are always stored. */
1184 if (key_type < 0x03)
1187 /* Debug keys are insecure so don't store them persistently */
1188 if (key_type == HCI_LK_DEBUG_COMBINATION)
1191 /* Changed combination key and there's no previous one */
1192 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1195 /* Security mode 3 case */
1199 /* Neither local nor remote side had no-bonding as requirement */
1200 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1203 /* Local side had dedicated bonding as requirement */
1204 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1207 /* Remote side had dedicated bonding as requirement */
1208 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1211 /* If none of the above criteria match, then don't store the key
/* Look up an LTK by EDiv and 8-byte Rand; NULL if no match. */
1216 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1220 list_for_each_entry(k, &hdev->long_term_keys, list) {
1221 if (k->ediv != ediv ||
1222 memcmp(rand, k->rand, sizeof(k->rand)))
1230 EXPORT_SYMBOL(hci_find_ltk);
/* Look up an LTK by peer address and address type; NULL if no match. */
1232 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1237 list_for_each_entry(k, &hdev->long_term_keys, list)
1238 if (addr_type == k->bdaddr_type &&
1239 bacmp(bdaddr, &k->bdaddr) == 0)
1244 EXPORT_SYMBOL(hci_find_ltk_by_addr);
/* Store or update a link key; applies the buggy-controller workaround for
 * spurious "changed combination" keys, and for new keys reports
 * persistence via mgmt and flags the connection for key flushing.
 * NOTE(review): allocation failure check and some assignments appear lost
 * in extraction.
 */
1246 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1247 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1249 struct link_key *key, *old_key;
1253 old_key = hci_find_link_key(hdev, bdaddr);
1255 old_key_type = old_key->type;
/* No prior key: 0xff marks "no previous key type". */
1258 old_key_type = conn ? conn->key_type : 0xff;
1259 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1262 list_add(&key->list, &hdev->link_keys);
1265 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1267 /* Some buggy controller combinations generate a changed
1268 * combination key for legacy pairing even when there's no
1270 if (type == HCI_LK_CHANGED_COMBINATION &&
1271 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1272 type = HCI_LK_COMBINATION;
1274 conn->key_type = type;
1277 bacpy(&key->bdaddr, bdaddr);
1278 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1279 key->pin_len = pin_len;
1281 if (type == HCI_LK_CHANGED_COMBINATION)
1282 key->type = old_key_type;
1289 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1291 mgmt_new_link_key(hdev, key, persistent);
1294 conn->flush_key = !persistent;
/* Store or update an SMP key (STK or LTK) for a peer; for new LTKs the
 * mgmt layer is notified.  Rejects types that are neither STK nor LTK.
 * NOTE(review): allocation failure check and some assignments appear lost
 * in extraction.
 */
1299 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1300 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1303 struct smp_ltk *key, *old_key;
1305 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1308 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1312 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1315 list_add(&key->list, &hdev->long_term_keys);
1318 bacpy(&key->bdaddr, bdaddr);
1319 key->bdaddr_type = addr_type;
1320 memcpy(key->val, tk, sizeof(key->val));
1321 key->authenticated = authenticated;
1323 key->enc_size = enc_size;
1325 memcpy(key->rand, rand, sizeof(key->rand));
1330 if (type & HCI_SMP_LTK)
1331 mgmt_new_ltk(hdev, key, 1);
/* Delete the link key stored for bdaddr, if any. */
1336 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1338 struct link_key *key;
1340 key = hci_find_link_key(hdev, bdaddr);
1344 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1346 list_del(&key->list);
/* Delete every LTK stored for bdaddr. */
1352 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1354 struct smp_ltk *k, *tmp;
1356 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1357 if (bacmp(bdaddr, &k->bdaddr))
1360 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1369 /* HCI command timer function */
/* Fires when a sent command got no completion in time: log, restore the
 * command credit and kick the command work so the queue keeps draining.
 */
1370 static void hci_cmd_timer(unsigned long arg)
1372 struct hci_dev *hdev = (void *) arg;
1374 BT_ERR("%s command tx timeout", hdev->name);
1375 atomic_set(&hdev->cmd_cnt, 1);
1376 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote OOB pairing data by address; NULL if none. */
1379 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1382 struct oob_data *data;
1384 list_for_each_entry(data, &hdev->remote_oob_data, list)
1385 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove the remote OOB data entry for bdaddr, if present. */
1391 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1393 struct oob_data *data;
1395 data = hci_find_remote_oob_data(hdev, bdaddr);
1399 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1401 list_del(&data->list);
/* Free all stored remote OOB data entries. */
1407 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1409 struct oob_data *data, *n;
1411 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1412 list_del(&data->list);
/* Store (or refresh) the remote OOB hash/randomizer for bdaddr. */
1419 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1422 struct oob_data *data;
1424 data = hci_find_remote_oob_data(hdev, bdaddr);
1427 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1431 bacpy(&data->bdaddr, bdaddr);
1432 list_add(&data->list, &hdev->remote_oob_data);
1435 memcpy(data->hash, hash, sizeof(data->hash));
1436 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1438 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
/* Find a blacklist entry by address; NULL if the peer is not blocked. */
1443 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1445 struct bdaddr_list *b;
1447 list_for_each_entry(b, &hdev->blacklist, list)
1448 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Free every blacklist entry.
 * NOTE(review): the list_del/kfree lines appear lost in extraction.
 */
1454 int hci_blacklist_clear(struct hci_dev *hdev)
1456 struct list_head *p, *n;
1458 list_for_each_safe(p, n, &hdev->blacklist) {
1459 struct bdaddr_list *b;
1461 b = list_entry(p, struct bdaddr_list, list);
/* Block a peer address: rejects BDADDR_ANY and duplicates, then notifies
 * the mgmt layer.
 */
1470 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1472 struct bdaddr_list *entry;
1474 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1477 if (hci_blacklist_lookup(hdev, bdaddr))
1480 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1484 bacpy(&entry->bdaddr, bdaddr);
1486 list_add(&entry->list, &hdev->blacklist);
1488 return mgmt_device_blocked(hdev, bdaddr, type);
/* Unblock one peer, or clear the whole list when given BDADDR_ANY. */
1491 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1493 struct bdaddr_list *entry;
1495 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1496 return hci_blacklist_clear(hdev);
1498 entry = hci_blacklist_lookup(hdev, bdaddr);
1502 list_del(&entry->list);
1505 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Request callback: program LE scan type/interval/window from the
 * le_scan_params passed through opt.
 */
1508 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1510 struct le_scan_params *param = (struct le_scan_params *) opt;
1511 struct hci_cp_le_set_scan_param cp;
1513 memset(&cp, 0, sizeof(cp));
1514 cp.type = param->type;
1515 cp.interval = cpu_to_le16(param->interval);
1516 cp.window = cpu_to_le16(param->window);
1518 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
/* Request callback: enable LE scanning.
 * NOTE(review): the cp.enable assignment appears lost in extraction.
 */
1521 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1523 struct hci_cp_le_set_scan_enable cp;
1525 memset(&cp, 0, sizeof(cp));
1528 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1531 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1532 u16 window, int timeout)
1534 long timeo = msecs_to_jiffies(3000);
1535 struct le_scan_params param;
1538 BT_DBG("%s", hdev->name);
1540 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1541 return -EINPROGRESS;
1544 param.interval = interval;
1545 param.window = window;
1549 err = __hci_request(hdev, le_scan_param_req, (unsigned long) ¶m,
1552 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1554 hci_req_unlock(hdev);
1559 schedule_delayed_work(&hdev->le_scan_disable,
1560 msecs_to_jiffies(timeout));
/* Cancel a running LE scan: if the disable work was still pending, send
 * the disable command directly (enable byte stays 0 from memset).
 */
1565 int hci_cancel_le_scan(struct hci_dev *hdev)
1567 BT_DBG("%s", hdev->name);
1569 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1572 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1573 struct hci_cp_le_set_scan_enable cp;
1575 /* Send HCI command to disable LE Scan */
1576 memset(&cp, 0, sizeof(cp));
1577 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Delayed work: turn LE scanning off after the requested scan duration. */
1583 static void le_scan_disable_work(struct work_struct *work)
1585 struct hci_dev *hdev = container_of(work, struct hci_dev,
1586 le_scan_disable.work);
1587 struct hci_cp_le_set_scan_enable cp;
1589 BT_DBG("%s", hdev->name);
1591 memset(&cp, 0, sizeof(cp));
1593 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Work wrapper: run hci_do_le_scan() with the parameters stashed in
 * hdev->le_scan_params by hci_le_scan().
 */
1596 static void le_scan_work(struct work_struct *work)
1598 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1599 struct le_scan_params *param = &hdev->le_scan_params;
1601 BT_DBG("%s", hdev->name);
1603 hci_do_le_scan(hdev, param->type, param->interval, param->window,
/* Public entry: record the scan parameters and queue the scan on the
 * long-running system workqueue (the request machinery sleeps).  Returns
 * -EINPROGRESS if scan work is already queued or running.
 */
1607 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1610 struct le_scan_params *param = &hdev->le_scan_params;
1612 BT_DBG("%s", hdev->name);
1614 if (work_busy(&hdev->le_scan))
1615 return -EINPROGRESS;
1618 param->interval = interval;
1619 param->window = window;
1620 param->timeout = timeout;
1622 queue_work(system_long_wq, &hdev->le_scan);
1627 /* Alloc HCI device */
/* Allocate and initialize a new hci_dev: default packet types, link
 * mode and sniff intervals; locks; the various management lists; the
 * rx/cmd/tx/power/le-scan work items and delayed work; packet queues;
 * the request wait queue; the command timer; sysfs, discovery and
 * connection-hash state.  Caller later frees via hci_free_dev(). */
1628 struct hci_dev *hci_alloc_dev(void)
1630 struct hci_dev *hdev;
1632 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1636 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1637 hdev->esco_type = (ESCO_HV1);
1638 hdev->link_mode = (HCI_LM_ACCEPT);
1639 hdev->io_capability = 0x03; /* No Input No Output */
1641 hdev->sniff_max_interval = 800;
1642 hdev->sniff_min_interval = 80;
1644 mutex_init(&hdev->lock);
1645 mutex_init(&hdev->req_lock);
1647 INIT_LIST_HEAD(&hdev->mgmt_pending);
1648 INIT_LIST_HEAD(&hdev->blacklist);
1649 INIT_LIST_HEAD(&hdev->uuids);
1650 INIT_LIST_HEAD(&hdev->link_keys);
1651 INIT_LIST_HEAD(&hdev->long_term_keys);
1652 INIT_LIST_HEAD(&hdev->remote_oob_data);
1654 INIT_WORK(&hdev->rx_work, hci_rx_work);
1655 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1656 INIT_WORK(&hdev->tx_work, hci_tx_work);
1657 INIT_WORK(&hdev->power_on, hci_power_on);
1658 INIT_WORK(&hdev->le_scan, le_scan_work);
1660 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1661 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1662 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1664 skb_queue_head_init(&hdev->driver_init);
1665 skb_queue_head_init(&hdev->rx_q);
1666 skb_queue_head_init(&hdev->cmd_q);
1667 skb_queue_head_init(&hdev->raw_q);
1669 init_waitqueue_head(&hdev->req_wait_q);
1671 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1673 hci_init_sysfs(hdev);
1674 discovery_init(hdev);
1675 hci_conn_hash_init(hdev);
1679 EXPORT_SYMBOL(hci_alloc_dev);
1681 /* Free HCI device */
/* Release a device allocated by hci_alloc_dev().  Purges the
 * driver_init queue and drops the embedded struct device reference;
 * the actual memory is freed by the device release callback. */
1682 void hci_free_dev(struct hci_dev *hdev)
1684 skb_queue_purge(&hdev->driver_init);
1686 /* will free via device release */
1687 put_device(&hdev->dev);
1689 EXPORT_SYMBOL(hci_free_dev);
1691 /* Register HCI device */
/* Register a controller with the HCI core: pick the first free hciX
 * id (AMP devices start at 1 so index 0 stays a BR/EDR controller),
 * add it to hci_dev_list, create its workqueue, sysfs entries and an
 * rfkill switch, mark it for auto-power-on/setup and kick power_on.
 * On workqueue allocation failure it unwinds the list insertion.
 * NOTE(review): several error-path lines are elided in this view. */
1692 int hci_register_dev(struct hci_dev *hdev)
1694 struct list_head *head, *p;
1697 if (!hdev->open || !hdev->close)
1700 write_lock(&hci_dev_list_lock);
1702 /* Do not allow HCI_AMP devices to register at index 0,
1703 * so the index can be used as the AMP controller ID.
1705 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1706 head = &hci_dev_list;
1708 /* Find first available device id */
1709 list_for_each(p, &hci_dev_list) {
1710 int nid = list_entry(p, struct hci_dev, list)->id;
1718 sprintf(hdev->name, "hci%d", id);
1721 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1723 list_add(&hdev->list, head);
1725 write_unlock(&hci_dev_list_lock);
1727 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1729 if (!hdev->workqueue) {
1734 error = hci_add_sysfs(hdev);
1738 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1739 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1742 if (rfkill_register(hdev->rfkill) < 0) {
1743 rfkill_destroy(hdev->rfkill);
1744 hdev->rfkill = NULL; /* rfkill is optional; continue without it */
1748 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1749 set_bit(HCI_SETUP, &hdev->dev_flags);
1750 schedule_work(&hdev->power_on);
1752 hci_notify(hdev, HCI_DEV_REG);
1758 destroy_workqueue(hdev->workqueue);
1760 write_lock(&hci_dev_list_lock);
1761 list_del(&hdev->list);
1762 write_unlock(&hci_dev_list_lock);
1766 EXPORT_SYMBOL(hci_register_dev);
1768 /* Unregister HCI device */
1769 void hci_unregister_dev(struct hci_dev *hdev)
1773 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1775 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1777 write_lock(&hci_dev_list_lock);
1778 list_del(&hdev->list);
1779 write_unlock(&hci_dev_list_lock);
1781 hci_dev_do_close(hdev);
1783 for (i = 0; i < NUM_REASSEMBLY; i++)
1784 kfree_skb(hdev->reassembly[i]);
1786 if (!test_bit(HCI_INIT, &hdev->flags) &&
1787 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1789 mgmt_index_removed(hdev);
1790 hci_dev_unlock(hdev);
1793 /* mgmt_index_removed should take care of emptying the
1795 BUG_ON(!list_empty(&hdev->mgmt_pending));
1797 hci_notify(hdev, HCI_DEV_UNREG);
1800 rfkill_unregister(hdev->rfkill);
1801 rfkill_destroy(hdev->rfkill);
1804 hci_del_sysfs(hdev);
1806 destroy_workqueue(hdev->workqueue);
1809 hci_blacklist_clear(hdev);
1810 hci_uuids_clear(hdev);
1811 hci_link_keys_clear(hdev);
1812 hci_smp_ltks_clear(hdev);
1813 hci_remote_oob_data_clear(hdev);
1814 hci_dev_unlock(hdev);
1818 EXPORT_SYMBOL(hci_unregister_dev);
1820 /* Suspend HCI device */
1821 int hci_suspend_dev(struct hci_dev *hdev)
1823 hci_notify(hdev, HCI_DEV_SUSPEND);
1826 EXPORT_SYMBOL(hci_suspend_dev);
1828 /* Resume HCI device */
1829 int hci_resume_dev(struct hci_dev *hdev)
1831 hci_notify(hdev, HCI_DEV_RESUME);
1834 EXPORT_SYMBOL(hci_resume_dev);
1836 /* Receive frame from HCI drivers */
1837 int hci_recv_frame(struct sk_buff *skb)
1839 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1840 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1841 && !test_bit(HCI_INIT, &hdev->flags))) {
1847 bt_cb(skb)->incoming = 1;
1850 __net_timestamp(skb);
1852 skb_queue_tail(&hdev->rx_q, skb);
1853 queue_work(hdev->workqueue, &hdev->rx_work);
1857 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a fragmented HCI packet of the given type into
 * hdev->reassembly[index].  On the first fragment an skb sized for
 * the packet type's maximum is allocated; subsequent fragments are
 * appended until scb->expect (derived from the packet header length
 * field) reaches zero, at which point the complete frame is handed
 * to hci_recv_frame().  Frames whose declared length exceeds the
 * skb tailroom are dropped.
 * NOTE(review): return statements and some braces are elided here. */
1859 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1860 int count, __u8 index)
1865 struct sk_buff *skb;
1866 struct bt_skb_cb *scb;
1868 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1869 index >= NUM_REASSEMBLY)
1872 skb = hdev->reassembly[index];
1876 case HCI_ACLDATA_PKT:
1877 len = HCI_MAX_FRAME_SIZE;
1878 hlen = HCI_ACL_HDR_SIZE;
1881 len = HCI_MAX_EVENT_SIZE;
1882 hlen = HCI_EVENT_HDR_SIZE;
1884 case HCI_SCODATA_PKT:
1885 len = HCI_MAX_SCO_SIZE;
1886 hlen = HCI_SCO_HDR_SIZE;
1890 skb = bt_skb_alloc(len, GFP_ATOMIC);
1894 scb = (void *) skb->cb;
1896 scb->pkt_type = type;
1898 skb->dev = (void *) hdev;
1899 hdev->reassembly[index] = skb;
1903 scb = (void *) skb->cb;
1904 len = min_t(uint, scb->expect, count); /* copy at most what's expected */
1906 memcpy(skb_put(skb, len), data, len);
1915 if (skb->len == HCI_EVENT_HDR_SIZE) {
1916 struct hci_event_hdr *h = hci_event_hdr(skb);
1917 scb->expect = h->plen;
1919 if (skb_tailroom(skb) < scb->expect) {
1921 hdev->reassembly[index] = NULL;
1927 case HCI_ACLDATA_PKT:
1928 if (skb->len == HCI_ACL_HDR_SIZE) {
1929 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1930 scb->expect = __le16_to_cpu(h->dlen);
1932 if (skb_tailroom(skb) < scb->expect) {
1934 hdev->reassembly[index] = NULL;
1940 case HCI_SCODATA_PKT:
1941 if (skb->len == HCI_SCO_HDR_SIZE) {
1942 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1943 scb->expect = h->dlen;
1945 if (skb_tailroom(skb) < scb->expect) {
1947 hdev->reassembly[index] = NULL;
1954 if (scb->expect == 0) {
1955 /* Complete frame */
1957 bt_cb(skb)->pkt_type = type;
1958 hci_recv_frame(skb);
1960 hdev->reassembly[index] = NULL;
/* Feed a buffer of driver data into the per-packet-type reassembly
 * slot (index = type - 1), looping until all bytes are consumed. */
1968 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1972 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1976 rem = hci_reassembly(hdev, type, data, count, type - 1);
1980 data += (count - rem); /* advance past consumed bytes */
1986 EXPORT_SYMBOL(hci_recv_fragment);
/* Dedicated reassembly slot for byte-stream drivers (e.g. UART),
 * where the packet-type indicator arrives in-band. */
1988 #define STREAM_REASSEMBLY 0
/* Feed raw stream bytes into reassembly: the first byte of each
 * frame selects the packet type, then hci_reassembly() consumes the
 * rest, looping until the whole buffer is used. */
1990 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1996 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1999 struct { char type; } *pkt;
2001 /* Start of the frame */
2008 type = bt_cb(skb)->pkt_type;
2010 rem = hci_reassembly(hdev, type, data, count,
2015 data += (count - rem);
2021 EXPORT_SYMBOL(hci_recv_stream_fragment);
2023 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback structure on hci_cb_list. */
2025 int hci_register_cb(struct hci_cb *cb)
2027 BT_DBG("%p name %s", cb, cb->name);
2029 write_lock(&hci_cb_list_lock);
2030 list_add(&cb->list, &hci_cb_list);
2031 write_unlock(&hci_cb_list_lock);
2035 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered callback from hci_cb_list. */
2037 int hci_unregister_cb(struct hci_cb *cb)
2039 BT_DBG("%p name %s", cb, cb->name);
2041 write_lock(&hci_cb_list_lock);
2042 list_del(&cb->list);
2043 write_unlock(&hci_cb_list_lock);
2047 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing frame to the driver: timestamp it, mirror a copy
 * to the monitor channel and (in promiscuous mode) to raw sockets,
 * then call the driver's send hook. */
2049 static int hci_send_frame(struct sk_buff *skb)
2051 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2058 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2061 __net_timestamp(skb); /* record TX time */
2063 /* Send copy to monitor */
2064 hci_send_to_monitor(hdev, skb);
2066 if (atomic_read(&hdev->promisc)) {
2067 /* Send copy to the sockets */
2068 hci_send_to_sock(hdev, skb);
2071 /* Get rid of skb owner, prior to sending to the driver. */
2074 return hdev->send(skb);
2077 /* Send HCI command */
2078 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2080 int len = HCI_COMMAND_HDR_SIZE + plen;
2081 struct hci_command_hdr *hdr;
2082 struct sk_buff *skb;
2084 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2086 skb = bt_skb_alloc(len, GFP_ATOMIC);
2088 BT_ERR("%s no memory for command", hdev->name);
2092 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2093 hdr->opcode = cpu_to_le16(opcode);
2097 memcpy(skb_put(skb, plen), param, plen);
2099 BT_DBG("skb len %d", skb->len);
2101 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2102 skb->dev = (void *) hdev;
2104 if (test_bit(HCI_INIT, &hdev->flags))
2105 hdev->init_last_cmd = opcode;
2107 skb_queue_tail(&hdev->cmd_q, skb);
2108 queue_work(hdev->workqueue, &hdev->cmd_work);
2113 /* Get data from the previously sent command */
2114 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2116 struct hci_command_hdr *hdr;
2118 if (!hdev->sent_cmd)
2121 hdr = (void *) hdev->sent_cmd->data;
2123 if (hdr->opcode != cpu_to_le16(opcode))
2126 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2128 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (handle+flags packed little-endian,
 * then data length) to an outgoing skb. */
2132 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2134 struct hci_acl_hdr *hdr;
2137 skb_push(skb, HCI_ACL_HDR_SIZE);
2138 skb_reset_transport_header(skb);
2139 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2140 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2141 hdr->dlen = cpu_to_le16(len);
/* Queue an ACL packet (possibly with an skb frag_list of
 * continuation fragments) on the given queue.  The head fragment
 * keeps the caller's flags; continuations have ACL_START cleared.
 * All fragments are enqueued atomically under the queue lock so the
 * scheduler never interleaves another packet between them. */
2144 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2145 struct sk_buff *skb, __u16 flags)
2147 struct hci_dev *hdev = conn->hdev;
2148 struct sk_buff *list;
2150 skb->len = skb_headlen(skb);
2153 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2154 hci_add_acl_hdr(skb, conn->handle, flags);
2156 list = skb_shinfo(skb)->frag_list;
2158 /* Non fragmented */
2159 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2161 skb_queue_tail(queue, skb);
2164 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2166 skb_shinfo(skb)->frag_list = NULL;
2168 /* Queue all fragments atomically */
2169 spin_lock(&queue->lock);
2171 __skb_queue_tail(queue, skb);
2173 flags &= ~ACL_START; /* continuations are not start fragments */
2176 skb = list; list = list->next;
2178 skb->dev = (void *) hdev;
2179 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2180 hci_add_acl_hdr(skb, conn->handle, flags);
2182 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2184 __skb_queue_tail(queue, skb);
2187 spin_unlock(&queue->lock);
/* Public entry point for sending ACL data on a channel: queue the
 * skb on the channel's data_q and kick the TX scheduler. */
2191 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2193 struct hci_conn *conn = chan->conn;
2194 struct hci_dev *hdev = conn->hdev;
2196 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2198 skb->dev = (void *) hdev;
2200 hci_queue_acl(conn, &chan->data_q, skb, flags);
2202 queue_work(hdev->workqueue, &hdev->tx_work);
2204 EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data: prepend the SCO header (handle + length), queue on
 * the connection's data_q and kick the TX scheduler. */
2207 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2209 struct hci_dev *hdev = conn->hdev;
2210 struct hci_sco_hdr hdr;
2212 BT_DBG("%s len %d", hdev->name, skb->len);
2214 hdr.handle = cpu_to_le16(conn->handle);
2215 hdr.dlen = skb->len;
2217 skb_push(skb, HCI_SCO_HDR_SIZE);
2218 skb_reset_transport_header(skb);
2219 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2221 skb->dev = (void *) hdev;
2222 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2224 skb_queue_tail(&conn->data_q, skb);
2225 queue_work(hdev->workqueue, &hdev->tx_work);
2227 EXPORT_SYMBOL(hci_send_sco);
2229 /* ---- HCI TX task (outgoing data) ---- */
2231 /* HCI Connection scheduler */
2231 /* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and
 * the fewest in-flight packets (fairness), and compute its TX quota
 * from the controller's free buffer count for that link type. */
2232 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2235 struct hci_conn_hash *h = &hdev->conn_hash;
2236 struct hci_conn *conn = NULL, *c;
2237 unsigned int num = 0, min = ~0;
2239 /* We don't have to lock device here. Connections are always
2240 * added and removed with TX task disabled. */
2244 list_for_each_entry_rcu(c, &h->list, list) {
2245 if (c->type != type || skb_queue_empty(&c->data_q))
2248 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2253 if (c->sent < min) {
2258 if (hci_conn_num(hdev, type) == num)
2267 switch (conn->type) {
2269 cnt = hdev->acl_cnt;
2273 cnt = hdev->sco_cnt;
2276 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; /* LE may share ACL buffers */
2280 BT_ERR("Unknown link type");
2288 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: disconnect every connection of the given link
 * type that still has unacknowledged packets (reason 0x13 = remote
 * user terminated connection). */
2292 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2294 struct hci_conn_hash *h = &hdev->conn_hash;
2297 BT_ERR("%s link tx timeout", hdev->name);
2301 /* Kill stalled connections */
2302 list_for_each_entry_rcu(c, &h->list, list) {
2303 if (c->type == type && c->sent) {
2304 BT_ERR("%s killing stalled connection %s",
2305 hdev->name, batostr(&c->dst));
2306 hci_acl_disconn(c, 0x13);
/* Channel-level scheduler: among connections of the given type,
 * pick the channel whose head skb has the highest priority; ties are
 * broken by the fewest packets in flight on the owning connection.
 * The quota is computed from the controller's free buffers.
 * NOTE(review): interior lines are elided in this view. */
2313 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2316 struct hci_conn_hash *h = &hdev->conn_hash;
2317 struct hci_chan *chan = NULL;
2318 unsigned int num = 0, min = ~0, cur_prio = 0;
2319 struct hci_conn *conn;
2320 int cnt, q, conn_num = 0;
2322 BT_DBG("%s", hdev->name);
2326 list_for_each_entry_rcu(conn, &h->list, list) {
2327 struct hci_chan *tmp;
2329 if (conn->type != type)
2332 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2337 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2338 struct sk_buff *skb;
2340 if (skb_queue_empty(&tmp->data_q))
2343 skb = skb_peek(&tmp->data_q);
2344 if (skb->priority < cur_prio)
2347 if (skb->priority > cur_prio) {
2350 cur_prio = skb->priority; /* new best priority resets the tie-break */
2355 if (conn->sent < min) {
2361 if (hci_conn_num(hdev, type) == conn_num)
2370 switch (chan->conn->type) {
2372 cnt = hdev->acl_cnt;
2376 cnt = hdev->sco_cnt;
2379 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2383 BT_ERR("Unknown link type");
2388 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a scheduling round, promote the head
 * skb of waiting channels toward HCI_PRIO_MAX - 1 so low-priority
 * traffic eventually gets serviced. */
2392 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2394 struct hci_conn_hash *h = &hdev->conn_hash;
2395 struct hci_conn *conn;
2398 BT_DBG("%s", hdev->name);
2402 list_for_each_entry_rcu(conn, &h->list, list) {
2403 struct hci_chan *chan;
2405 if (conn->type != type)
2408 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2413 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2414 struct sk_buff *skb;
2421 if (skb_queue_empty(&chan->data_q))
2424 skb = skb_peek(&chan->data_q);
2425 if (skb->priority >= HCI_PRIO_MAX - 1)
2428 skb->priority = HCI_PRIO_MAX - 1; /* cap at one below max */
2430 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2434 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks this ACL packet occupies
 * (payload only — the ACL header is excluded), rounded up. */
2442 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2444 /* Calculate count of blocks used by this packet */
2445 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller has no free ACL buffers (cnt == 0) and the last
 * ACL TX is older than HCI_ACL_TX_TIMEOUT, assume the link stalled
 * and kill stalled ACL connections.  Skipped in HCI_RAW mode. */
2448 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2450 if (!test_bit(HCI_RAW, &hdev->flags)) {
2451 /* ACL tx timeout must be longer than maximum
2452 * link supervision timeout (40.9 seconds) */
2453 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2454 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2455 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while ACL buffer credits remain, drain
 * the best channel (per hci_chan_sent) up to its quota, stopping a
 * channel early if its head-skb priority drops.  If anything was
 * sent, rebalance priorities via hci_prio_recalculate(). */
2459 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2461 unsigned int cnt = hdev->acl_cnt;
2462 struct hci_chan *chan;
2463 struct sk_buff *skb;
2466 __check_timeout(hdev, cnt);
2468 while (hdev->acl_cnt &&
2469 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2470 u32 priority = (skb_peek(&chan->data_q))->priority;
2471 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2472 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2473 skb->len, skb->priority);
2475 /* Stop if priority has changed */
2476 if (skb->priority < priority)
2479 skb = skb_dequeue(&chan->data_q);
2481 hci_conn_enter_active_mode(chan->conn,
2482 bt_cb(skb)->force_active);
2484 hci_send_frame(skb);
2485 hdev->acl_last_tx = jiffies; /* feeds the stall-detection timeout */
2493 if (cnt != hdev->acl_cnt)
2494 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (for controllers using block flow
 * control): like hci_sched_acl_pkt but accounting in data blocks —
 * a packet is only sent if its block count fits in block_cnt, and
 * sent/credit counters advance by blocks rather than packets. */
2497 static void hci_sched_acl_blk(struct hci_dev *hdev)
2499 unsigned int cnt = hdev->block_cnt;
2500 struct hci_chan *chan;
2501 struct sk_buff *skb;
2504 __check_timeout(hdev, cnt);
2506 while (hdev->block_cnt > 0 &&
2507 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2508 u32 priority = (skb_peek(&chan->data_q))->priority;
2509 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2512 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2513 skb->len, skb->priority);
2515 /* Stop if priority has changed */
2516 if (skb->priority < priority)
2519 skb = skb_dequeue(&chan->data_q);
2521 blocks = __get_blocks(hdev, skb);
2522 if (blocks > hdev->block_cnt)
2525 hci_conn_enter_active_mode(chan->conn,
2526 bt_cb(skb)->force_active);
2528 hci_send_frame(skb);
2529 hdev->acl_last_tx = jiffies;
2531 hdev->block_cnt -= blocks;
2534 chan->sent += blocks;
2535 chan->conn->sent += blocks;
2539 if (cnt != hdev->block_cnt)
2540 hci_prio_recalculate(hdev, ACL_LINK);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode. */
2543 static void hci_sched_acl(struct hci_dev *hdev)
2545 BT_DBG("%s", hdev->name);
2547 if (!hci_conn_num(hdev, ACL_LINK))
2550 switch (hdev->flow_ctl_mode) {
2551 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2552 hci_sched_acl_pkt(hdev);
2555 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2556 hci_sched_acl_blk(hdev);
/* SCO scheduler: drain the least-busy SCO connection up to its
 * quota while SCO buffer credits remain. */
2562 static void hci_sched_sco(struct hci_dev *hdev)
2564 struct hci_conn *conn;
2565 struct sk_buff *skb;
2568 BT_DBG("%s", hdev->name);
2570 if (!hci_conn_num(hdev, SCO_LINK))
2573 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2574 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2575 BT_DBG("skb %p len %d", skb, skb->len);
2576 hci_send_frame(skb);
2579 if (conn->sent == ~0) /* guard against counter wrap */
/* eSCO scheduler: identical to hci_sched_sco but for ESCO_LINK
 * connections (shares the SCO buffer credit count). */
2585 static void hci_sched_esco(struct hci_dev *hdev)
2587 struct hci_conn *conn;
2588 struct sk_buff *skb;
2591 BT_DBG("%s", hdev->name);
2593 if (!hci_conn_num(hdev, ESCO_LINK))
2596 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2598 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2599 BT_DBG("skb %p len %d", skb, skb->len);
2600 hci_send_frame(skb);
2603 if (conn->sent == ~0)
/* LE scheduler: like the ACL packet scheduler but using LE buffer
 * credits when the controller reports them (le_pkts), otherwise
 * falling back to the shared ACL credit pool.  Includes its own
 * 45-second stall detection (skipped in HCI_RAW mode). */
2609 static void hci_sched_le(struct hci_dev *hdev)
2611 struct hci_chan *chan;
2612 struct sk_buff *skb;
2613 int quote, cnt, tmp;
2615 BT_DBG("%s", hdev->name);
2617 if (!hci_conn_num(hdev, LE_LINK))
2620 if (!test_bit(HCI_RAW, &hdev->flags)) {
2621 /* LE tx timeout must be longer than maximum
2622 * link supervision timeout (40.9 seconds) */
2623 if (!hdev->le_cnt && hdev->le_pkts &&
2624 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2625 hci_link_tx_to(hdev, LE_LINK);
2628 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2630 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2631 u32 priority = (skb_peek(&chan->data_q))->priority;
2632 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2633 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2634 skb->len, skb->priority);
2636 /* Stop if priority has changed */
2637 if (skb->priority < priority)
2640 skb = skb_dequeue(&chan->data_q);
2642 hci_send_frame(skb);
2643 hdev->le_last_tx = jiffies;
2654 hdev->acl_cnt = cnt; /* shared-pool case: write back remaining credits */
2657 hci_prio_recalculate(hdev, LE_LINK);
/* TX work handler: run each link-type scheduler (ACL, SCO, eSCO,
 * and — per the elided call site — LE), then flush any raw-typed
 * packets straight to the driver. */
2660 static void hci_tx_work(struct work_struct *work)
2662 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2663 struct sk_buff *skb;
2665 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2666 hdev->sco_cnt, hdev->le_cnt);
2668 /* Schedule queues and send stuff to HCI driver */
2670 hci_sched_acl(hdev);
2672 hci_sched_sco(hdev);
2674 hci_sched_esco(hdev);
2678 /* Send next queued raw (unknown type) packet */
2679 while ((skb = skb_dequeue(&hdev->raw_q)))
2680 hci_send_frame(skb);
2683 /* ----- HCI RX task (incoming data processing) ----- */
2685 /* ACL data packet */
2685 /* ACL data packet */
/* Process one inbound ACL data packet: strip the ACL header, unpack
 * handle/flags, look up the connection, re-enter active mode, tell
 * mgmt about the first data on a connection (when HCI_MGMT is set),
 * and pass the payload up to L2CAP.  Packets for unknown handles are
 * logged and dropped (drop path elided in this view). */
2686 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2688 struct hci_acl_hdr *hdr = (void *) skb->data;
2689 struct hci_conn *conn;
2690 __u16 handle, flags;
2692 skb_pull(skb, HCI_ACL_HDR_SIZE);
2694 handle = __le16_to_cpu(hdr->handle);
2695 flags = hci_flags(handle); /* upper bits carry the PB/BC flags */
2696 handle = hci_handle(handle);
2698 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2701 hdev->stat.acl_rx++;
2704 conn = hci_conn_hash_lookup_handle(hdev, handle);
2705 hci_dev_unlock(hdev);
2708 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2711 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2712 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2713 mgmt_device_connected(hdev, &conn->dst, conn->type,
2714 conn->dst_type, 0, NULL, 0,
2716 hci_dev_unlock(hdev)
2718 /* Send to upper protocol */
2719 l2cap_recv_acldata(conn, skb, flags);
2722 BT_ERR("%s ACL packet for unknown connection handle %d",
2723 hdev->name, handle);
2729 /* SCO data packet */
2730 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2732 struct hci_sco_hdr *hdr = (void *) skb->data;
2733 struct hci_conn *conn;
2736 skb_pull(skb, HCI_SCO_HDR_SIZE);
2738 handle = __le16_to_cpu(hdr->handle);
2740 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2742 hdev->stat.sco_rx++;
2745 conn = hci_conn_hash_lookup_handle(hdev, handle);
2746 hci_dev_unlock(hdev);
2749 /* Send to upper protocol */
2750 sco_recv_scodata(conn, skb);
2753 BT_ERR("%s SCO packet for unknown connection handle %d",
2754 hdev->name, handle);
/* RX work handler: drain rx_q, mirroring each frame to the monitor
 * (and to raw sockets in promiscuous mode).  In HCI_RAW mode frames
 * bypass processing; during HCI_INIT data packets are discarded.
 * Otherwise dispatch by packet type to the event/ACL/SCO handlers. */
2760 static void hci_rx_work(struct work_struct *work)
2762 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2763 struct sk_buff *skb;
2765 BT_DBG("%s", hdev->name);
2767 while ((skb = skb_dequeue(&hdev->rx_q))) {
2768 /* Send copy to monitor */
2769 hci_send_to_monitor(hdev, skb);
2771 if (atomic_read(&hdev->promisc)) {
2772 /* Send copy to the sockets */
2773 hci_send_to_sock(hdev, skb);
2776 if (test_bit(HCI_RAW, &hdev->flags)) {
2781 if (test_bit(HCI_INIT, &hdev->flags)) {
2782 /* Don't process data packets in this states. */
2783 switch (bt_cb(skb)->pkt_type) {
2784 case HCI_ACLDATA_PKT:
2785 case HCI_SCODATA_PKT:
2792 switch (bt_cb(skb)->pkt_type) {
2794 BT_DBG("%s Event packet", hdev->name);
2795 hci_event_packet(hdev, skb);
2798 case HCI_ACLDATA_PKT:
2799 BT_DBG("%s ACL data packet", hdev->name);
2800 hci_acldata_packet(hdev, skb);
2803 case HCI_SCODATA_PKT:
2804 BT_DBG("%s SCO data packet", hdev->name);
2805 hci_scodata_packet(hdev, skb);
/* Command work handler: when a command credit is available, dequeue
 * the next command, keep a clone in sent_cmd (for hci_sent_cmd_data
 * and retransmission bookkeeping), send it and (re)arm cmd_timer —
 * except during reset, when the timer is cancelled.  If the clone
 * allocation fails, the command is re-queued and the work re-armed. */
2815 static void hci_cmd_work(struct work_struct *work)
2817 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2818 struct sk_buff *skb;
2820 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2822 /* Send queued commands */
2823 if (atomic_read(&hdev->cmd_cnt)) {
2824 skb = skb_dequeue(&hdev->cmd_q);
2828 kfree_skb(hdev->sent_cmd); /* drop clone of the previous command */
2830 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2831 if (hdev->sent_cmd) {
2832 atomic_dec(&hdev->cmd_cnt);
2833 hci_send_frame(skb);
2834 if (test_bit(HCI_RESET, &hdev->flags))
2835 del_timer(&hdev->cmd_timer);
2837 mod_timer(&hdev->cmd_timer,
2838 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2840 skb_queue_head(&hdev->cmd_q, skb); /* clone failed: retry later */
2841 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Start a BR/EDR inquiry with the General Inquiry Access Code for
 * the given length; flushes the inquiry cache first.  Returns
 * -EINPROGRESS if an inquiry is already running. */
2846 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2848 /* General inquiry access code (GIAC) */
2849 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2850 struct hci_cp_inquiry cp;
2852 BT_DBG("%s", hdev->name);
2854 if (test_bit(HCI_INQUIRY, &hdev->flags))
2855 return -EINPROGRESS;
2857 inquiry_cache_flush(hdev);
2859 memset(&cp, 0, sizeof(cp));
2860 memcpy(&cp.lap, lap, sizeof(cp.lap));
2863 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Cancel a running inquiry; a no-op (elided early return) when no
 * inquiry is in progress. */
2866 int hci_cancel_inquiry(struct hci_dev *hdev)
2868 BT_DBG("%s", hdev->name);
2870 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2873 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2876 u8 bdaddr_to_le(u8 bdaddr_type)
2878 switch (bdaddr_type) {
2879 case BDADDR_LE_PUBLIC:
2880 return ADDR_LE_DEV_PUBLIC;
2883 /* Fallback to LE Random address type */
2884 return ADDR_LE_DEV_RANDOM;