2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
/* Auto power-off delay in milliseconds (used with msecs_to_jiffies below) */
55 #define AUTO_OFF_TIMEOUT 2000
/* Forward declarations for the RX/CMD/TX workqueue handlers */
57 static void hci_rx_work(struct work_struct *work);
58 static void hci_cmd_work(struct work_struct *work);
59 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, guarded by hci_dev_list_lock */
62 LIST_HEAD(hci_dev_list);
63 DEFINE_RWLOCK(hci_dev_list_lock);
65 /* HCI callback list */
66 LIST_HEAD(hci_cb_list);
67 DEFINE_RWLOCK(hci_cb_list_lock);
69 /* ---- HCI notifications ---- */
/* Forward a device state event (up/down/reg/unreg) to the HCI socket layer */
71 static void hci_notify(struct hci_dev *hdev, int event)
73 hci_sock_dev_event(hdev, event);
76 /* ---- HCI requests ---- */
/* Called when a command completes: match it against the pending synchronous
 * request and wake the waiter. During init, a mismatched completion (e.g. a
 * spontaneous reset from buggy CSR controllers) causes a resend instead. */
78 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
82 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
86 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
96 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
/* Re-queue a clone of the last sent command at the head of cmd_q */
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Complete a pending synchronous request, if any */
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with the given error code */
115 static void hci_req_cancel(struct hci_dev *hdev, int err)
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
126 /* Execute request and wait for completion. */
/* Run @req and sleep (interruptibly, up to @timeout jiffies) until
 * hci_req_complete()/hci_req_cancel() wakes us. Returns 0 on success,
 * negative errno on failure/cancel/signal. Caller holds the request lock. */
127 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
128 unsigned long opt, __u32 timeout)
130 DECLARE_WAITQUEUE(wait, current);
133 BT_DBG("%s start", hdev->name);
135 hdev->req_status = HCI_REQ_PEND;
137 add_wait_queue(&hdev->req_wait_q, &wait);
138 set_current_state(TASK_INTERRUPTIBLE);
141 schedule_timeout(timeout);
143 remove_wait_queue(&hdev->req_wait_q, &wait);
/* Interrupted by a signal before the request finished */
145 if (signal_pending(current))
148 switch (hdev->req_status) {
/* HCI status codes are translated to negative errnos */
150 err = -bt_to_errno(hdev->req_result);
153 case HCI_REQ_CANCELED:
154 err = -hdev->req_result;
/* Reset request state for the next caller */
162 hdev->req_status = hdev->req_result = 0;
164 BT_DBG("%s end: err %d", hdev->name, err);
/* Locked wrapper around __hci_request; fails unless the device is up */
169 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
170 unsigned long opt, __u32 timeout)
174 if (!test_bit(HCI_UP, &hdev->flags))
177 /* Serialize all requests */
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
/* Request callback: issue an HCI Reset and mark the reset in progress */
185 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
187 BT_DBG("%s %ld", hdev->name, opt);
190 set_bit(HCI_RESET, &hdev->flags);
191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
/* Controller bring-up sequence for BR/EDR devices: optional reset, then the
 * mandatory reads (features, version, buffer size, BD address, class, name,
 * voice setting) followed by optional setup (event filter, CA timeout,
 * delete stored link keys). */
194 static void bredr_init(struct hci_dev *hdev)
196 struct hci_cp_delete_stored_link_key cp;
200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
202 /* Mandatory initialization */
205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
210 /* Read Local Supported Features */
211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
213 /* Read Local Version */
214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
219 /* Read BD Address */
220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
228 /* Read Voice Setting */
229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
231 /* Optional initialization */
233 /* Clear Event Filters */
234 flt_type = HCI_FLT_CLEAR_ALL;
235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
237 /* Connection accept timeout ~20 secs */
238 param = cpu_to_le16(0x7d00);
/* Fixed mis-encoded "&param" (was HTML-entity-garbled as "¶m") */
239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
241 bacpy(&cp.bdaddr, BDADDR_ANY);
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
/* Bring-up sequence for AMP controllers: block-based flow control, reset,
 * and a local-version read only */
246 static void amp_init(struct hci_dev *hdev)
248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
/* Init request callback: flush driver-supplied "special" command skbs into
 * cmd_q first, then dispatch per device type (BR/EDR vs AMP). */
257 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
261 BT_DBG("%s %ld", hdev->name, opt);
263 /* Driver initialization */
265 /* Special commands */
266 while ((skb = skb_dequeue(&hdev->driver_init))) {
267 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
268 skb->dev = (void *) hdev;
270 skb_queue_tail(&hdev->cmd_q, skb);
271 queue_work(hdev->workqueue, &hdev->cmd_work);
273 skb_queue_purge(&hdev->driver_init);
275 switch (hdev->dev_type) {
285 BT_ERR("Unknown device type %d", hdev->dev_type);
/* LE-specific init: query the LE ACL buffer size */
291 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
293 BT_DBG("%s", hdev->name);
295 /* Read LE buffer size */
296 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
/* Request callbacks taking a single byte/word option packed into @opt */
299 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
303 BT_DBG("%s %x", hdev->name, scan)
305 /* Inquiry and Page scans */
306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Enable/disable link-level authentication */
309 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
313 BT_DBG("%s %x", hdev->name, auth);
316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Enable/disable link-level encryption */
319 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
323 BT_DBG("%s %x", hdev->name, encrypt);
326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Set the default link policy (little-endian 16-bit policy word) */
329 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
331 __le16 policy = cpu_to_le16(opt);
333 BT_DBG("%s %x", hdev->name, policy);
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
339 /* Get HCI device by index.
340 * Device is held on return. */
/* Walks hci_dev_list under the read lock; returns the matching device with
 * its refcount taken via hci_dev_hold(), or NULL if not found. */
341 struct hci_dev *hci_dev_get(int index)
343 struct hci_dev *hdev = NULL, *d;
350 read_lock(&hci_dev_list_lock);
351 list_for_each_entry(d, &hci_dev_list, list) {
352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
357 read_unlock(&hci_dev_list_lock);
361 /* ---- Inquiry support ---- */
/* True while a discovery is in the FINDING or RESOLVING phase */
363 bool hci_discovery_active(struct hci_dev *hdev)
365 struct discovery_state *discov = &hdev->discovery;
367 switch (discov->state) {
368 case DISCOVERY_FINDING:
369 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine, emitting mgmt "discovering"
 * events on the STOPPED and FINDING edges; no-op if state is unchanged. */
377 void hci_discovery_set_state(struct hci_dev *hdev, int state)
379 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
381 if (hdev->discovery.state == state)
385 case DISCOVERY_STOPPED:
/* STARTING -> STOPPED means discovery never actually began; don't signal */
386 if (hdev->discovery.state != DISCOVERY_STARTING)
387 mgmt_discovering(hdev, 0);
388 hdev->discovery.type = 0;
390 case DISCOVERY_STARTING:
392 case DISCOVERY_FINDING:
393 mgmt_discovering(hdev, 1);
395 case DISCOVERY_RESOLVING:
397 case DISCOVERY_STOPPING:
401 hdev->discovery.state = state;
/* Drop every cached inquiry entry and reset the cache lists/state */
404 static void inquiry_cache_flush(struct hci_dev *hdev)
406 struct discovery_state *cache = &hdev->discovery;
407 struct inquiry_entry *p, *n;
409 list_for_each_entry_safe(p, n, &cache->all, all) {
414 INIT_LIST_HEAD(&cache->unknown);
415 INIT_LIST_HEAD(&cache->resolve);
416 cache->state = DISCOVERY_STOPPED;
/* Find a cache entry by bdaddr on the "all" list; NULL if absent */
419 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
421 struct discovery_state *cache = &hdev->discovery;
422 struct inquiry_entry *e;
424 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
426 list_for_each_entry(e, &cache->all, all) {
427 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Same lookup, but only entries whose remote name is still unknown */
434 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
437 struct discovery_state *cache = &hdev->discovery;
438 struct inquiry_entry *e;
440 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
442 list_for_each_entry(e, &cache->unknown, list) {
443 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Lookup on the resolve list; BDADDR_ANY matches any entry in @state */
450 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
454 struct discovery_state *cache = &hdev->discovery;
455 struct inquiry_entry *e;
457 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
459 list_for_each_entry(e, &cache->resolve, list) {
460 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
462 if (!bacmp(&e->data.bdaddr, bdaddr))
/* (Re)insert @ie into the resolve list, kept ordered by |RSSI| so the
 * strongest signals are name-resolved first */
469 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
470 struct inquiry_entry *ie)
472 struct discovery_state *cache = &hdev->discovery;
473 struct list_head *pos = &cache->resolve;
474 struct inquiry_entry *p;
478 list_for_each_entry(p, &cache->resolve, list) {
479 if (p->name_state != NAME_PENDING &&
480 abs(p->data.rssi) >= abs(ie->data.rssi))
485 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-result entry in the discovery cache.
 * Updates *ssp from the result, tracks name_state transitions, and
 * reorders the resolve list when RSSI changes. Return value (cut off in
 * this listing) reflects whether a name request is still needed. */
488 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
489 bool name_known, bool *ssp)
491 struct discovery_state *cache = &hdev->discovery;
492 struct inquiry_entry *ie;
494 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
497 *ssp = data->ssp_mode;
499 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
501 if (ie->data.ssp_mode && ssp)
/* RSSI changed while a name is still needed: re-sort the resolve list */
504 if (ie->name_state == NAME_NEEDED &&
505 data->rssi != ie->data.rssi) {
506 ie->data.rssi = data->rssi;
507 hci_inquiry_cache_update_resolve(hdev, ie);
513 /* Entry not in the cache. Add new one. */
514 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
518 list_add(&ie->all, &cache->all);
521 ie->name_state = NAME_KNOWN;
523 ie->name_state = NAME_NOT_KNOWN;
524 list_add(&ie->list, &cache->unknown);
/* Name just became known: promote the entry */
528 if (name_known && ie->name_state != NAME_KNOWN &&
529 ie->name_state != NAME_PENDING) {
530 ie->name_state = NAME_KNOWN;
534 memcpy(&ie->data, data, sizeof(*data));
535 ie->timestamp = jiffies;
536 cache->timestamp = jiffies;
538 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to @num cached entries into @buf as struct inquiry_info records;
 * returns the number copied. Must not sleep (caller holds the dev lock). */
544 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
546 struct discovery_state *cache = &hdev->discovery;
547 struct inquiry_info *info = (struct inquiry_info *) buf;
548 struct inquiry_entry *e;
551 list_for_each_entry(e, &cache->all, all) {
552 struct inquiry_data *data = &e->data;
557 bacpy(&info->bdaddr, &data->bdaddr);
558 info->pscan_rep_mode = data->pscan_rep_mode;
559 info->pscan_period_mode = data->pscan_period_mode;
560 info->pscan_mode = data->pscan_mode;
561 memcpy(info->dev_class, data->dev_class, 3);
562 info->clock_offset = data->clock_offset;
568 BT_DBG("cache %p, copied %d", cache, copied);
/* Request callback: start an Inquiry with the LAP/length/num_rsp from the
 * user's hci_inquiry_req (passed via @opt); no-op if already inquiring */
572 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
574 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
575 struct hci_cp_inquiry cp;
577 BT_DBG("%s", hdev->name);
579 if (test_bit(HCI_INQUIRY, &hdev->flags))
583 memcpy(&cp.lap, &ir->lap, 3);
584 cp.length = ir->length;
585 cp.num_rsp = ir->num_rsp;
586 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl backend: optionally flush a stale cache, run the inquiry
 * synchronously (length units of ~2s each), then dump results to userspace
 * via a temporary kernel buffer (cache_dump cannot sleep). */
589 int hci_inquiry(void __user *arg)
591 __u8 __user *ptr = arg;
592 struct hci_inquiry_req ir;
593 struct hci_dev *hdev;
594 int err = 0, do_inquiry = 0, max_rsp;
598 if (copy_from_user(&ir, ptr, sizeof(ir)))
601 hdev = hci_dev_get(ir.dev_id);
/* Flush if the cache is old, empty, or the caller demands it */
606 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
607 inquiry_cache_empty(hdev) ||
608 ir.flags & IREQ_CACHE_FLUSH) {
609 inquiry_cache_flush(hdev);
612 hci_dev_unlock(hdev);
614 timeo = ir.length * msecs_to_jiffies(2000);
617 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
622 /* for unlimited number of responses we will use buffer with 255 entries */
623 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
625 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
626 * copy it to the user space.
628 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
635 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
636 hci_dev_unlock(hdev);
638 BT_DBG("num_rsp %d", ir.num_rsp);
/* Write back the updated request header, then the entries themselves */
640 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
642 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
655 /* ---- HCI ioctl helpers ---- */
/* Bring an HCI device up: refuse if rfkill-blocked or already up, mark raw
 * devices, call the driver open(), run the init request sequence, and on
 * failure tear everything back down. */
657 int hci_dev_open(__u16 dev)
659 struct hci_dev *hdev;
662 hdev = hci_dev_get(dev);
666 BT_DBG("%s %p", hdev->name, hdev);
/* Hardware kill switch engaged: don't touch the device */
670 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
675 if (test_bit(HCI_UP, &hdev->flags)) {
680 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
681 set_bit(HCI_RAW, &hdev->flags);
683 /* Treat all non BR/EDR controllers as raw devices if
684 enable_hs is not set */
685 if (hdev->dev_type != HCI_BREDR && !enable_hs)
686 set_bit(HCI_RAW, &hdev->flags);
688 if (hdev->open(hdev)) {
/* Non-raw devices get the full HCI init sequence (plus LE init when the
 * host claims LE support) under the HCI_INIT flag */
693 if (!test_bit(HCI_RAW, &hdev->flags)) {
694 atomic_set(&hdev->cmd_cnt, 1);
695 set_bit(HCI_INIT, &hdev->flags);
696 hdev->init_last_cmd = 0;
698 ret = __hci_request(hdev, hci_init_req, 0,
699 msecs_to_jiffies(HCI_INIT_TIMEOUT));
701 if (lmp_host_le_capable(hdev))
702 ret = __hci_request(hdev, hci_le_init_req, 0,
703 msecs_to_jiffies(HCI_INIT_TIMEOUT));
705 clear_bit(HCI_INIT, &hdev->flags);
/* Success path: mark up, notify, and report power state to mgmt */
710 set_bit(HCI_UP, &hdev->flags);
711 hci_notify(hdev, HCI_DEV_UP);
712 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
714 mgmt_powered(hdev, 1);
715 hci_dev_unlock(hdev);
718 /* Init failed, cleanup */
719 flush_work(&hdev->tx_work);
720 flush_work(&hdev->cmd_work);
721 flush_work(&hdev->rx_work);
723 skb_queue_purge(&hdev->cmd_q);
724 skb_queue_purge(&hdev->rx_q);
729 if (hdev->sent_cmd) {
730 kfree_skb(hdev->sent_cmd);
731 hdev->sent_cmd = NULL;
739 hci_req_unlock(hdev);
/* Tear a device down: cancel pending work/requests, flush queues and
 * caches, optionally send a final reset (HCI_QUIRK_NO_RESET devices only),
 * drop the last sent command, and report power-off to mgmt. */
744 static int hci_dev_do_close(struct hci_dev *hdev)
746 BT_DBG("%s %p", hdev->name, hdev);
748 cancel_work_sync(&hdev->le_scan);
750 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command timer and bail out */
753 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
754 del_timer_sync(&hdev->cmd_timer);
755 hci_req_unlock(hdev);
759 /* Flush RX and TX works */
760 flush_work(&hdev->tx_work);
761 flush_work(&hdev->rx_work);
763 if (hdev->discov_timeout > 0) {
764 cancel_delayed_work(&hdev->discov_off);
765 hdev->discov_timeout = 0;
766 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
769 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
770 cancel_delayed_work(&hdev->service_cache);
772 cancel_delayed_work_sync(&hdev->le_scan_disable);
775 inquiry_cache_flush(hdev);
776 hci_conn_hash_flush(hdev);
777 hci_dev_unlock(hdev);
779 hci_notify(hdev, HCI_DEV_DOWN);
785 skb_queue_purge(&hdev->cmd_q);
786 atomic_set(&hdev->cmd_cnt, 1);
/* Devices that skip reset on open get one on close instead */
787 if (!test_bit(HCI_RAW, &hdev->flags) &&
788 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
789 set_bit(HCI_INIT, &hdev->flags);
790 __hci_request(hdev, hci_reset_req, 0,
791 msecs_to_jiffies(250));
792 clear_bit(HCI_INIT, &hdev->flags);
796 flush_work(&hdev->cmd_work);
799 skb_queue_purge(&hdev->rx_q);
800 skb_queue_purge(&hdev->cmd_q);
801 skb_queue_purge(&hdev->raw_q);
803 /* Drop last sent command */
804 if (hdev->sent_cmd) {
805 del_timer_sync(&hdev->cmd_timer);
806 kfree_skb(hdev->sent_cmd);
807 hdev->sent_cmd = NULL;
810 /* After this point our queues are empty
811 * and no tasks are scheduled. */
/* Only report power-off if this wasn't an auto-off shutdown */
814 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
816 mgmt_powered(hdev, 0);
817 hci_dev_unlock(hdev);
823 memset(hdev->eir, 0, sizeof(hdev->eir));
824 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
826 hci_req_unlock(hdev);
/* ioctl entry point: resolve the index, cancel a pending auto power-off,
 * and close the device */
832 int hci_dev_close(__u16 dev)
834 struct hci_dev *hdev;
837 hdev = hci_dev_get(dev);
841 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
842 cancel_delayed_work(&hdev->power_off);
844 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop queued traffic, flush caches and connections,
 * zero the flow-control counters, and (for non-raw devices) issue an HCI
 * Reset synchronously. */
850 int hci_dev_reset(__u16 dev)
852 struct hci_dev *hdev;
855 hdev = hci_dev_get(dev);
861 if (!test_bit(HCI_UP, &hdev->flags))
865 skb_queue_purge(&hdev->rx_q);
866 skb_queue_purge(&hdev->cmd_q);
869 inquiry_cache_flush(hdev);
870 hci_conn_hash_flush(hdev);
871 hci_dev_unlock(hdev);
876 atomic_set(&hdev->cmd_cnt, 1);
877 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
879 if (!test_bit(HCI_RAW, &hdev->flags))
880 ret = __hci_request(hdev, hci_reset_req, 0,
881 msecs_to_jiffies(HCI_INIT_TIMEOUT));
884 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's statistics counters */
889 int hci_dev_reset_stat(__u16 dev)
891 struct hci_dev *hdev;
894 hdev = hci_dev_get(dev);
898 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* ioctls: copies a hci_dev_req from userspace
 * and either runs the matching synchronous request (auth/encrypt/scan/
 * link policy) or updates hdev fields directly (link mode, pkt type,
 * ACL/SCO MTU and packet counts). */
905 int hci_dev_cmd(unsigned int cmd, void __user *arg)
907 struct hci_dev *hdev;
908 struct hci_dev_req dr;
911 if (copy_from_user(&dr, arg, sizeof(dr)))
914 hdev = hci_dev_get(dr.dev_id);
920 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
921 msecs_to_jiffies(HCI_INIT_TIMEOUT));
925 if (!lmp_encrypt_capable(hdev)) {
930 if (!test_bit(HCI_AUTH, &hdev->flags)) {
931 /* Auth must be enabled first */
932 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
933 msecs_to_jiffies(HCI_INIT_TIMEOUT));
938 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
939 msecs_to_jiffies(HCI_INIT_TIMEOUT));
943 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
944 msecs_to_jiffies(HCI_INIT_TIMEOUT));
948 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
949 msecs_to_jiffies(HCI_INIT_TIMEOUT));
953 hdev->link_mode = ((__u16) dr.dev_opt) &
954 (HCI_LM_MASTER | HCI_LM_ACCEPT);
958 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs two __u16 values: [0]=packet count, [1]=MTU */
962 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
963 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
967 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
968 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: build a bounded (id, flags) list of all registered
 * devices and copy it to userspace. Walking the list also cancels pending
 * auto-off work and marks non-mgmt devices pairable. */
980 int hci_get_dev_list(void __user *arg)
982 struct hci_dev *hdev;
983 struct hci_dev_list_req *dl;
984 struct hci_dev_req *dr;
985 int n = 0, size, err;
988 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the allocation stays within two pages */
991 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
994 size = sizeof(*dl) + dev_num * sizeof(*dr);
996 dl = kzalloc(size, GFP_KERNEL);
1002 read_lock(&hci_dev_list_lock);
1003 list_for_each_entry(hdev, &hci_dev_list, list) {
1004 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1005 cancel_delayed_work(&hdev->power_off);
1007 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1008 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1010 (dr + n)->dev_id = hdev->id;
1011 (dr + n)->dev_opt = hdev->flags;
1016 read_unlock(&hci_dev_list_lock);
/* Shrink the copy size to the number of devices actually found */
1019 size = sizeof(*dl) + n * sizeof(*dr);
1021 err = copy_to_user(arg, dl, size);
1024 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a hci_dev_info snapshot for one device */
1027 int hci_get_dev_info(void __user *arg)
1029 struct hci_dev *hdev;
1030 struct hci_dev_info di;
1033 if (copy_from_user(&di, arg, sizeof(di)))
1036 hdev = hci_dev_get(di.dev_id);
1040 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1041 cancel_delayed_work_sync(&hdev->power_off);
1043 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1044 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1046 strcpy(di.name, hdev->name);
1047 di.bdaddr = hdev->bdaddr;
/* Low nibble = bus type, high nibble = device type */
1048 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1049 di.flags = hdev->flags;
1050 di.pkt_type = hdev->pkt_type;
1051 di.acl_mtu = hdev->acl_mtu;
1052 di.acl_pkts = hdev->acl_pkts;
1053 di.sco_mtu = hdev->sco_mtu;
1054 di.sco_pkts = hdev->sco_pkts;
1055 di.link_policy = hdev->link_policy;
1056 di.link_mode = hdev->link_mode;
1058 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1059 memcpy(&di.features, &hdev->features, sizeof(di.features));
1061 if (copy_to_user(arg, &di, sizeof(di)))
1069 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: close the device when the kill switch blocks it */
1071 static int hci_rfkill_set_block(void *data, bool blocked)
1073 struct hci_dev *hdev = data;
1075 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1080 hci_dev_do_close(hdev);
/* Ops table registered with the rfkill subsystem */
1085 static const struct rfkill_ops hci_rfkill_ops = {
1086 .set_block = hci_rfkill_set_block,
1089 /* Alloc HCI device */
1089 /* Alloc HCI device */
1090 struct hci_dev *hci_alloc_dev(void)
1092 struct hci_dev *hdev;
/* Zeroed allocation; sysfs and the driver_init queue are set up here,
 * the rest of the fields are initialized in hci_register_dev() */
1094 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1098 hci_init_sysfs(hdev);
1099 skb_queue_head_init(&hdev->driver_init);
1103 EXPORT_SYMBOL(hci_alloc_dev);
1105 /* Free HCI device */
1106 void hci_free_dev(struct hci_dev *hdev)
1108 skb_queue_purge(&hdev->driver_init);
1110 /* will free via device release */
1111 put_device(&hdev->dev);
1113 EXPORT_SYMBOL(hci_free_dev);
/* Deferred power-on: open the device; if it's in the auto-off state,
 * schedule a delayed power-off (AUTO_OFF_TIMEOUT ms); on first setup,
 * announce the new index to mgmt. */
1115 static void hci_power_on(struct work_struct *work)
1117 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1119 BT_DBG("%s", hdev->name);
1121 if (hci_dev_open(hdev->id) < 0)
1124 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1125 schedule_delayed_work(&hdev->power_off,
1126 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1128 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1129 mgmt_index_added(hdev);
/* Deferred power-off: simply close the device */
1132 static void hci_power_off(struct work_struct *work)
1134 struct hci_dev *hdev = container_of(work, struct hci_dev,
1137 BT_DBG("%s", hdev->name);
1139 hci_dev_do_close(hdev);
/* Delayed work: end the discoverable period by dropping inquiry scan
 * (page scan only) and clearing the timeout */
1142 static void hci_discov_off(struct work_struct *work)
1144 struct hci_dev *hdev;
1145 u8 scan = SCAN_PAGE;
1147 hdev = container_of(work, struct hci_dev, discov_off.work);
1149 BT_DBG("%s", hdev->name);
1153 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1155 hdev->discov_timeout = 0;
1157 hci_dev_unlock(hdev);
/* Free every registered UUID entry on the device */
1160 int hci_uuids_clear(struct hci_dev *hdev)
1162 struct list_head *p, *n;
1164 list_for_each_safe(p, n, &hdev->uuids) {
1165 struct bt_uuid *uuid;
1167 uuid = list_entry(p, struct bt_uuid, list);
/* Free every stored BR/EDR link key */
1176 int hci_link_keys_clear(struct hci_dev *hdev)
1178 struct list_head *p, *n;
1180 list_for_each_safe(p, n, &hdev->link_keys) {
1181 struct link_key *key;
1183 key = list_entry(p, struct link_key, list);
/* Free every stored SMP long-term key */
1192 int hci_smp_ltks_clear(struct hci_dev *hdev)
1194 struct smp_ltk *k, *tmp;
1196 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Find a stored link key by remote bdaddr; NULL if absent */
1204 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1208 list_for_each_entry(k, &hdev->link_keys, list)
1209 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on key type and both sides' authentication requirements. */
1215 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1216 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) are always persistent */
1219 if (key_type < 0x03)
1222 /* Debug keys are insecure so don't store them persistently */
1223 if (key_type == HCI_LK_DEBUG_COMBINATION)
1226 /* Changed combination key and there's no previous one */
1227 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1230 /* Security mode 3 case */
1234 /* Neither local nor remote side had no-bonding as requirement */
1235 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1238 /* Local side had dedicated bonding as requirement */
1239 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1242 /* Remote side had dedicated bonding as requirement */
1243 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1246 /* If none of the above criteria match, then don't store the key
/* Find an LTK by its EDIV and 8-byte Rand values; NULL if absent */
1251 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1255 list_for_each_entry(k, &hdev->long_term_keys, list) {
1256 if (k->ediv != ediv ||
1257 memcmp(rand, k->rand, sizeof(k->rand)))
1265 EXPORT_SYMBOL(hci_find_ltk);
/* Find an LTK by remote address and address type; NULL if absent */
1267 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1272 list_for_each_entry(k, &hdev->long_term_keys, list)
1273 if (addr_type == k->bdaddr_type &&
1274 bacmp(bdaddr, &k->bdaddr) == 0)
1279 EXPORT_SYMBOL(hci_find_ltk_by_addr);
/* Store (or update) a BR/EDR link key. Works around controllers that
 * report a "changed combination" key when no previous key existed, then
 * notifies mgmt with the computed persistence; non-persistent keys for
 * new pairings are unlinked again afterwards. */
1281 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1282 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1284 struct link_key *key, *old_key;
1285 u8 old_key_type, persistent;
1287 old_key = hci_find_link_key(hdev, bdaddr);
1289 old_key_type = old_key->type;
/* No existing key: allocate a fresh entry and link it in */
1292 old_key_type = conn ? conn->key_type : 0xff;
1293 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1296 list_add(&key->list, &hdev->link_keys);
1299 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1301 /* Some buggy controller combinations generate a changed
1302 * combination key for legacy pairing even when there's no
1304 if (type == HCI_LK_CHANGED_COMBINATION &&
1305 (!conn || conn->remote_auth == 0xff) &&
1306 old_key_type == 0xff) {
1307 type = HCI_LK_COMBINATION;
1309 conn->key_type = type;
1312 bacpy(&key->bdaddr, bdaddr);
1313 memcpy(key->val, val, 16);
1314 key->pin_len = pin_len;
/* A changed-combination key keeps the old key's recorded type */
1316 if (type == HCI_LK_CHANGED_COMBINATION)
1317 key->type = old_key_type;
1324 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1326 mgmt_new_link_key(hdev, key, persistent);
1329 list_del(&key->list);
/* Store (or update) an SMP key. Only STK/LTK types are accepted; new LTKs
 * are reported to mgmt. Reuses an existing entry for the same address if
 * one exists, otherwise allocates and links a new one. */
1336 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1337 int new_key, u8 authenticated, u8 tk[16],
1338 u8 enc_size, u16 ediv, u8 rand[8])
1340 struct smp_ltk *key, *old_key;
1342 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1345 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1349 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1352 list_add(&key->list, &hdev->long_term_keys);
1355 bacpy(&key->bdaddr, bdaddr);
1356 key->bdaddr_type = addr_type;
1357 memcpy(key->val, tk, sizeof(key->val));
1358 key->authenticated = authenticated;
1360 key->enc_size = enc_size;
1362 memcpy(key->rand, rand, sizeof(key->rand));
1367 if (type & HCI_SMP_LTK)
1368 mgmt_new_ltk(hdev, key, 1);
/* Remove the stored link key for @bdaddr, if any */
1373 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1375 struct link_key *key;
1377 key = hci_find_link_key(hdev, bdaddr);
1381 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1383 list_del(&key->list);
/* Remove all stored LTKs matching @bdaddr */
1389 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1391 struct smp_ltk *k, *tmp;
1393 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1394 if (bacmp(bdaddr, &k->bdaddr))
1397 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1406 /* HCI command timer function */
/* Fires when a sent command got no response: log, restore the command
 * credit, and kick the command work so the queue keeps draining. */
1407 static void hci_cmd_timer(unsigned long arg)
1409 struct hci_dev *hdev = (void *) arg;
1411 BT_ERR("%s command tx timeout", hdev->name);
1412 atomic_set(&hdev->cmd_cnt, 1);
1413 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Find stored remote OOB pairing data by bdaddr; NULL if absent */
1416 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1419 struct oob_data *data;
1421 list_for_each_entry(data, &hdev->remote_oob_data, list)
1422 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove the remote OOB data entry for @bdaddr, if any */
1428 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1430 struct oob_data *data;
1432 data = hci_find_remote_oob_data(hdev, bdaddr);
1436 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1438 list_del(&data->list);
/* Free all stored remote OOB data entries */
1444 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1446 struct oob_data *data, *n;
1448 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1449 list_del(&data->list);
/* Store (or update) the remote OOB hash+randomizer for @bdaddr */
1456 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1459 struct oob_data *data;
1461 data = hci_find_remote_oob_data(hdev, bdaddr);
1464 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1468 bacpy(&data->bdaddr, bdaddr);
1469 list_add(&data->list, &hdev->remote_oob_data);
1472 memcpy(data->hash, hash, sizeof(data->hash));
1473 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1475 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
/* Find a blacklist entry by bdaddr; NULL if absent */
1480 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1483 struct bdaddr_list *b;
1485 list_for_each_entry(b, &hdev->blacklist, list)
1486 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Free every blacklist entry */
1492 int hci_blacklist_clear(struct hci_dev *hdev)
1494 struct list_head *p, *n;
1496 list_for_each_safe(p, n, &hdev->blacklist) {
1497 struct bdaddr_list *b;
1499 b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr to the blacklist; rejects BDADDR_ANY and duplicates,
 * then notifies mgmt that the device is blocked */
1508 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1510 struct bdaddr_list *entry;
1512 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1515 if (hci_blacklist_lookup(hdev, bdaddr))
1518 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1522 bacpy(&entry->bdaddr, bdaddr);
1524 list_add(&entry->list, &hdev->blacklist);
1526 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove @bdaddr from the blacklist; BDADDR_ANY clears the whole list */
1529 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1531 struct bdaddr_list *entry;
1533 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1534 return hci_blacklist_clear(hdev);
1536 entry = hci_blacklist_lookup(hdev, bdaddr);
1540 list_del(&entry->list);
1543 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Deferred work: clear the LE advertising cache under the dev lock */
1546 static void hci_clear_adv_cache(struct work_struct *work)
1548 struct hci_dev *hdev = container_of(work, struct hci_dev,
1553 hci_adv_entries_clear(hdev);
1555 hci_dev_unlock(hdev);
/* Free every cached advertising entry */
1558 int hci_adv_entries_clear(struct hci_dev *hdev)
1560 struct adv_entry *entry, *tmp;
1562 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1563 list_del(&entry->list);
1567 BT_DBG("%s adv cache cleared", hdev->name);
/* Find a cached advertising entry by bdaddr; NULL if absent */
1572 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1574 struct adv_entry *entry;
1576 list_for_each_entry(entry, &hdev->adv_entries, list)
1577 if (bacmp(bdaddr, &entry->bdaddr) == 0)
/* Only ADV_IND and ADV_DIRECT_IND advertisements accept connections */
1583 static inline int is_connectable_adv(u8 evt_type)
1585 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
/* Cache the address/type from a connectable advertising report;
 * duplicates are silently ignored */
1591 int hci_add_adv_entry(struct hci_dev *hdev,
1592 struct hci_ev_le_advertising_info *ev)
1594 struct adv_entry *entry;
1596 if (!is_connectable_adv(ev->evt_type))
1599 /* Only new entries should be added to adv_entries. So, if
1600 * bdaddr was found, don't add it. */
1601 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1604 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1608 bacpy(&entry->bdaddr, &ev->bdaddr);
1609 entry->bdaddr_type = ev->bdaddr_type;
1611 list_add(&entry->list, &hdev->adv_entries);
1613 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1614 batostr(&entry->bdaddr), entry->bdaddr_type);
/* Request callback: program LE scan type/interval/window from the
 * le_scan_params struct passed via @opt */
1619 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1621 struct le_scan_params *param = (struct le_scan_params *) opt;
1622 struct hci_cp_le_set_scan_param cp;
1624 memset(&cp, 0, sizeof(cp));
1625 cp.type = param->type;
1626 cp.interval = cpu_to_le16(param->interval);
1627 cp.window = cpu_to_le16(param->window);
1629 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
/* Request callback: enable LE scanning */
1632 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1634 struct hci_cp_le_set_scan_enable cp;
1636 memset(&cp, 0, sizeof(cp));
1639 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Synchronously configure and enable an LE scan, then schedule the
 * delayed disable after @timeout ms. -EINPROGRESS if already scanning. */
1642 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1643 u16 window, int timeout)
1645 long timeo = msecs_to_jiffies(3000);
1646 struct le_scan_params param;
1649 BT_DBG("%s", hdev->name);
1651 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1652 return -EINPROGRESS;
1655 param.interval = interval;
1656 param.window = window;
/* Fixed mis-encoded "&param" (was HTML-entity-garbled as "¶m") */
1660 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1663 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1665 hci_req_unlock(hdev);
1670 schedule_delayed_work(&hdev->le_scan_disable,
1671 msecs_to_jiffies(timeout));
/* Delayed work scheduled by hci_do_le_scan(): stop the LE scan by sending
 * LE_SET_SCAN_ENABLE with a zeroed (disable) parameter block. */
1676 static void le_scan_disable_work(struct work_struct *work)
1678 struct hci_dev *hdev = container_of(work, struct hci_dev,
1679 le_scan_disable.work);
1680 struct hci_cp_le_set_scan_enable cp;
1682 BT_DBG("%s", hdev->name);
1684 memset(&cp, 0, sizeof(cp));
1686 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Workqueue entry point for hci_le_scan(): runs the blocking
 * hci_do_le_scan() with the parameters stashed in hdev->le_scan_params. */
1689 static void le_scan_work(struct work_struct *work)
1691 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1692 struct le_scan_params *param = &hdev->le_scan_params;
1694 BT_DBG("%s", hdev->name);
1696 hci_do_le_scan(hdev, param->type, param->interval,
1697 param->window, param->timeout);
/* Asynchronous LE scan entry point: store the scan parameters on hdev and
 * queue le_scan_work on system_long_wq (the scan blocks for its duration,
 * hence the long-running workqueue). Returns -EINPROGRESS if the work is
 * already busy.
 * NOTE(review): param->type assignment and final return dropped by
 * extraction. */
1700 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1703 struct le_scan_params *param = &hdev->le_scan_params;
1705 BT_DBG("%s", hdev->name);
1707 if (work_busy(&hdev->le_scan))
1708 return -EINPROGRESS;
1711 param->interval = interval;
1712 param->window = window;
1713 param->timeout = timeout;
1715 queue_work(system_long_wq, &hdev->le_scan);
1720 /* Register HCI device */
/* Allocate an id, initialize all hdev state (locks, queues, work items,
 * lists), create the per-device workqueue, add sysfs/rfkill entries and
 * schedule the initial power-on. Fails if the driver did not supply both
 * open and close callbacks.
 * NOTE(review): fragment — error-path labels (the code after the
 * destroy_workqueue at the bottom is the unwind path), several returns
 * and brace lines were dropped by extraction. */
1721 int hci_register_dev(struct hci_dev *hdev)
1723 struct list_head *head = &hci_dev_list, *p;
1726 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1728 if (!hdev->open || !hdev->close)
1731 /* Do not allow HCI_AMP devices to register at index 0,
1732 * so the index can be used as the AMP controller ID.
1734 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1736 write_lock(&hci_dev_list_lock);
1738 /* Find first available device id */
1739 list_for_each(p, &hci_dev_list) {
1740 if (list_entry(p, struct hci_dev, list)->id != id)
1745 sprintf(hdev->name, "hci%d", id);
1747 list_add_tail(&hdev->list, head);
1749 mutex_init(&hdev->lock);
/* Controller defaults: basic packet types, HV1 eSCO, accept-only link
 * mode, NoInputNoOutput IO capability, standard sniff intervals. */
1752 hdev->dev_flags = 0;
1753 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1754 hdev->esco_type = (ESCO_HV1);
1755 hdev->link_mode = (HCI_LM_ACCEPT);
1756 hdev->io_capability = 0x03; /* No Input No Output */
1758 hdev->idle_timeout = 0;
1759 hdev->sniff_max_interval = 800;
1760 hdev->sniff_min_interval = 80;
1762 INIT_WORK(&hdev->rx_work, hci_rx_work);
1763 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1764 INIT_WORK(&hdev->tx_work, hci_tx_work);
1767 skb_queue_head_init(&hdev->rx_q);
1768 skb_queue_head_init(&hdev->cmd_q);
1769 skb_queue_head_init(&hdev->raw_q);
1771 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1773 for (i = 0; i < NUM_REASSEMBLY; i++)
1774 hdev->reassembly[i] = NULL;
1776 init_waitqueue_head(&hdev->req_wait_q);
1777 mutex_init(&hdev->req_lock);
1779 discovery_init(hdev);
1781 hci_conn_hash_init(hdev);
1783 INIT_LIST_HEAD(&hdev->mgmt_pending);
1785 INIT_LIST_HEAD(&hdev->blacklist);
1787 INIT_LIST_HEAD(&hdev->uuids);
1789 INIT_LIST_HEAD(&hdev->link_keys);
1790 INIT_LIST_HEAD(&hdev->long_term_keys);
1792 INIT_LIST_HEAD(&hdev->remote_oob_data);
1794 INIT_LIST_HEAD(&hdev->adv_entries);
1796 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1797 INIT_WORK(&hdev->power_on, hci_power_on);
1798 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1800 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1802 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1804 atomic_set(&hdev->promisc, 0);
1806 INIT_WORK(&hdev->le_scan, le_scan_work);
1808 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1810 write_unlock(&hci_dev_list_lock);
/* Per-device ordered workqueue used for rx/tx/cmd work items. */
1812 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1814 if (!hdev->workqueue) {
1819 error = hci_add_sysfs(hdev);
/* rfkill registration is best-effort: on failure the rfkill handle is
 * destroyed and cleared, but device registration continues. */
1823 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1824 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1826 if (rfkill_register(hdev->rfkill) < 0) {
1827 rfkill_destroy(hdev->rfkill);
1828 hdev->rfkill = NULL;
1832 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1833 set_bit(HCI_SETUP, &hdev->dev_flags);
1834 schedule_work(&hdev->power_on);
1836 hci_notify(hdev, HCI_DEV_REG);
/* Error unwind: tear down workqueue and unlink from hci_dev_list. */
1842 destroy_workqueue(hdev->workqueue);
1844 write_lock(&hci_dev_list_lock);
1845 list_del(&hdev->list);
1846 write_unlock(&hci_dev_list_lock);
1852 /* Unregister HCI device */
/* Tear down a registered device: unlink from the global list, close it,
 * free reassembly buffers, notify mgmt (unless still in INIT/SETUP),
 * drop rfkill and sysfs entries, cancel pending adv work, destroy the
 * workqueue and clear all per-device caches under hdev lock. */
1853 void hci_unregister_dev(struct hci_dev *hdev)
1857 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1859 write_lock(&hci_dev_list_lock);
1860 list_del(&hdev->list);
1861 write_unlock(&hci_dev_list_lock);
1863 hci_dev_do_close(hdev);
1865 for (i = 0; i < NUM_REASSEMBLY; i++)
1866 kfree_skb(hdev->reassembly[i]);
/* Only tell mgmt the index went away if userspace ever saw it
 * (i.e. the device finished init/setup). */
1868 if (!test_bit(HCI_INIT, &hdev->flags) &&
1869 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1871 mgmt_index_removed(hdev);
1872 hci_dev_unlock(hdev);
1875 /* mgmt_index_removed should take care of emptying the
1877 BUG_ON(!list_empty(&hdev->mgmt_pending));
1879 hci_notify(hdev, HCI_DEV_UNREG);
1882 rfkill_unregister(hdev->rfkill);
1883 rfkill_destroy(hdev->rfkill);
1886 hci_del_sysfs(hdev);
1888 cancel_delayed_work_sync(&hdev->adv_work);
1890 destroy_workqueue(hdev->workqueue);
/* Purge all cached state (keys, blacklist, OOB data, adv cache). */
1893 hci_blacklist_clear(hdev);
1894 hci_uuids_clear(hdev);
1895 hci_link_keys_clear(hdev);
1896 hci_smp_ltks_clear(hdev);
1897 hci_remote_oob_data_clear(hdev);
1898 hci_adv_entries_clear(hdev);
1899 hci_dev_unlock(hdev);
1905 /* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered notifiers; no other state
 * change is visible here. */
1906 int hci_suspend_dev(struct hci_dev *hdev)
1908 hci_notify(hdev, HCI_DEV_SUSPEND);
1913 /* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered notifiers; mirror of
 * hci_suspend_dev(). */
1914 int hci_resume_dev(struct hci_dev *hdev)
1916 hci_notify(hdev, HCI_DEV_RESUME);
1921 /* Receive frame from HCI drivers */
/* Driver-facing RX entry: the owning hdev travels in skb->dev. Frames are
 * rejected unless the device is UP or still in INIT; accepted frames are
 * marked incoming, timestamped, queued on rx_q and handed to rx_work. */
1922 int hci_recv_frame(struct sk_buff *skb)
1924 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1925 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1926 && !test_bit(HCI_INIT, &hdev->flags))) {
1932 bt_cb(skb)->incoming = 1;
/* Time stamp */
1935 __net_timestamp(skb);
1937 skb_queue_tail(&hdev->rx_q, skb);
1938 queue_work(hdev->workqueue, &hdev->rx_work);
/* Core packet reassembler: accumulate 'count' bytes of 'data' into the
 * per-slot skb hdev->reassembly[index] for the given HCI packet type.
 * A fresh skb is allocated on first fragment; once the header is complete
 * scb->expect is loaded with the payload length from the type-specific
 * header, and when it drops to zero the finished frame is delivered via
 * hci_recv_frame().
 * NOTE(review): fragment — switch labels for HCI_EVENT_PKT, kfree_skb on
 * tailroom overflow, the outer while loop and return statements were
 * dropped by extraction; tokens below preserved byte-for-byte. */
1944 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1945 int count, __u8 index)
1950 struct sk_buff *skb;
1951 struct bt_skb_cb *scb;
/* Validate packet type range and reassembly slot index. */
1953 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1954 index >= NUM_REASSEMBLY)
1957 skb = hdev->reassembly[index];
/* Pick max frame size and header length per packet type. */
1961 case HCI_ACLDATA_PKT:
1962 len = HCI_MAX_FRAME_SIZE;
1963 hlen = HCI_ACL_HDR_SIZE;
1966 len = HCI_MAX_EVENT_SIZE;
1967 hlen = HCI_EVENT_HDR_SIZE;
1969 case HCI_SCODATA_PKT:
1970 len = HCI_MAX_SCO_SIZE;
1971 hlen = HCI_SCO_HDR_SIZE;
/* First fragment: allocate the reassembly skb and tag its type. */
1975 skb = bt_skb_alloc(len, GFP_ATOMIC);
1979 scb = (void *) skb->cb;
1981 scb->pkt_type = type;
1983 skb->dev = (void *) hdev;
1984 hdev->reassembly[index] = skb;
1988 scb = (void *) skb->cb;
1989 len = min_t(uint, scb->expect, count);
1991 memcpy(skb_put(skb, len), data, len);
/* Header complete for an event packet: expect plen more bytes. */
2000 if (skb->len == HCI_EVENT_HDR_SIZE) {
2001 struct hci_event_hdr *h = hci_event_hdr(skb);
2002 scb->expect = h->plen;
2004 if (skb_tailroom(skb) < scb->expect) {
2006 hdev->reassembly[index] = NULL;
2012 case HCI_ACLDATA_PKT:
2013 if (skb->len == HCI_ACL_HDR_SIZE) {
2014 struct hci_acl_hdr *h = hci_acl_hdr(skb);
/* ACL dlen is little-endian on the wire. */
2015 scb->expect = __le16_to_cpu(h->dlen);
2017 if (skb_tailroom(skb) < scb->expect) {
2019 hdev->reassembly[index] = NULL;
2025 case HCI_SCODATA_PKT:
2026 if (skb->len == HCI_SCO_HDR_SIZE) {
2027 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2028 scb->expect = h->dlen;
2030 if (skb_tailroom(skb) < scb->expect) {
2032 hdev->reassembly[index] = NULL;
2039 if (scb->expect == 0) {
2040 /* Complete frame */
2042 bt_cb(skb)->pkt_type = type;
2043 hci_recv_frame(skb);
2045 hdev->reassembly[index] = NULL;
/* Feed typed fragments into the reassembler; the slot index is derived
 * from the packet type (type - 1). Advances 'data' past consumed bytes.
 * NOTE(review): the surrounding loop and return were dropped by
 * extraction. */
2053 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2057 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2061 rem = hci_reassembly(hdev, type, data, count, type - 1);
2065 data += (count - rem);
2073 #define STREAM_REASSEMBLY 0
/* Stream (untyped byte stream) variant of fragment reception: the first
 * byte of each frame carries the packet type, which is read from the
 * stream at a frame boundary and then fed into the shared reassembler
 * using the dedicated STREAM_REASSEMBLY slot.
 * NOTE(review): fragment — the type-extraction path and loop structure
 * were dropped by extraction. */
2075 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2081 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2084 struct { char type; } *pkt;
2086 /* Start of the frame */
2093 type = bt_cb(skb)->pkt_type;
2095 rem = hci_reassembly(hdev, type, data, count,
2100 data += (count - rem);
2108 /* ---- Interface to upper protocols ---- */
/* Add an upper-protocol callback to the global hci_cb_list under the
 * list's write lock. */
2110 int hci_register_cb(struct hci_cb *cb)
2112 BT_DBG("%p name %s", cb, cb->name);
2114 write_lock(&hci_cb_list_lock);
2115 list_add(&cb->list, &hci_cb_list);
2116 write_unlock(&hci_cb_list_lock);
/* Remove an upper-protocol callback from hci_cb_list; mirror of
 * hci_register_cb(). */
2122 int hci_unregister_cb(struct hci_cb *cb)
2124 BT_DBG("%p name %s", cb, cb->name);
2126 write_lock(&hci_cb_list_lock);
2127 list_del(&cb->list);
2128 write_unlock(&hci_cb_list_lock);
/* Hand one outgoing frame to the driver: timestamp it, mirror a copy to
 * the monitor socket (and to raw sockets when promisc is set), strip skb
 * ownership and invoke the driver's send callback. */
2134 static int hci_send_frame(struct sk_buff *skb)
2136 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2143 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2146 __net_timestamp(skb);
2148 /* Send copy to monitor */
2149 hci_send_to_monitor(hdev, skb);
2151 if (atomic_read(&hdev->promisc)) {
2152 /* Send copy to the sockets */
2153 hci_send_to_sock(hdev, skb);
2156 /* Get rid of skb owner, prior to sending to the driver. */
2159 return hdev->send(skb);
2162 /* Send HCI command */
/* Build an HCI command skb (header + optional parameter block), tag it
 * HCI_COMMAND_PKT and queue it on cmd_q for cmd_work to transmit. While
 * in HCI_INIT, the opcode is remembered in init_last_cmd so the init
 * state machine can match completions. Returns -ENOMEM-style failure on
 * allocation error (error path dropped by extraction). */
2163 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2165 int len = HCI_COMMAND_HDR_SIZE + plen;
2166 struct hci_command_hdr *hdr;
2167 struct sk_buff *skb;
2169 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2171 skb = bt_skb_alloc(len, GFP_ATOMIC);
2173 BT_ERR("%s no memory for command", hdev->name);
2177 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2178 hdr->opcode = cpu_to_le16(opcode);
2182 memcpy(skb_put(skb, plen), param, plen);
2184 BT_DBG("skb len %d", skb->len);
2186 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2187 skb->dev = (void *) hdev;
2189 if (test_bit(HCI_INIT, &hdev->flags))
2190 hdev->init_last_cmd = opcode;
2192 skb_queue_tail(&hdev->cmd_q, skb);
2193 queue_work(hdev->workqueue, &hdev->cmd_work);
2198 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command if its
 * opcode matches (compared in wire/little-endian form); NULL paths for a
 * missing sent_cmd or opcode mismatch were dropped by extraction. */
2199 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2201 struct hci_command_hdr *hdr;
2203 if (!hdev->sent_cmd)
2206 hdr = (void *) hdev->sent_cmd->data;
2208 if (hdr->opcode != cpu_to_le16(opcode))
2211 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2213 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL header to skb: handle+flags packed into the 16-bit
 * handle field and the (pre-push) payload length, both little-endian.
 * NOTE(review): the local 'len' capture before the push was dropped by
 * extraction. */
2217 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2219 struct hci_acl_hdr *hdr;
2222 skb_push(skb, HCI_ACL_HDR_SIZE);
2223 skb_reset_transport_header(skb);
2224 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2225 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2226 hdr->dlen = cpu_to_le16(len);
/* Queue an ACL skb (and any frag_list continuation fragments) onto the
 * given data queue. Non-fragmented skbs are queued directly; fragmented
 * ones are detached from frag_list, each continuation gets its own ACL
 * header with ACL_START cleared, and all are appended atomically under
 * the queue lock so the fragments stay contiguous. */
2229 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2230 struct sk_buff *skb, __u16 flags)
2232 struct hci_dev *hdev = conn->hdev;
2233 struct sk_buff *list;
2235 list = skb_shinfo(skb)->frag_list;
2237 /* Non fragmented */
2238 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2240 skb_queue_tail(queue, skb);
2243 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2245 skb_shinfo(skb)->frag_list = NULL;
2247 /* Queue all fragments atomically */
2248 spin_lock(&queue->lock);
2250 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
2252 flags &= ~ACL_START;
2255 skb = list; list = list->next;
2257 skb->dev = (void *) hdev;
2258 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2259 hci_add_acl_hdr(skb, conn->handle, flags);
2261 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2263 __skb_queue_tail(queue, skb);
2266 spin_unlock(&queue->lock);
/* Public ACL TX entry: stamp the skb with its owner/type, add the ACL
 * header for the channel's connection handle, queue it on the channel's
 * data_q and kick tx_work to schedule transmission. */
2270 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2272 struct hci_conn *conn = chan->conn;
2273 struct hci_dev *hdev = conn->hdev;
2275 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2277 skb->dev = (void *) hdev;
2278 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2279 hci_add_acl_hdr(skb, conn->handle, flags);
2281 hci_queue_acl(conn, &chan->data_q, skb, flags);
2283 queue_work(hdev->workqueue, &hdev->tx_work);
/* Public SCO TX entry: build the SCO header (handle little-endian, dlen
 * as-is since it is a single byte field on the wire per struct
 * hci_sco_hdr), prepend it, queue on the connection's data_q and kick
 * tx_work. */
2288 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2290 struct hci_dev *hdev = conn->hdev;
2291 struct hci_sco_hdr hdr;
2293 BT_DBG("%s len %d", hdev->name, skb->len);
2295 hdr.handle = cpu_to_le16(conn->handle);
2296 hdr.dlen = skb->len;
2298 skb_push(skb, HCI_SCO_HDR_SIZE);
2299 skb_reset_transport_header(skb);
2300 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2302 skb->dev = (void *) hdev;
2303 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2305 skb_queue_tail(&conn->data_q, skb);
2306 queue_work(hdev->workqueue, &hdev->tx_work);
2310 /* ---- HCI TX task (outgoing data) ---- */
2312 /* HCI Connection scheduler */
/* Pick the connection of the given link type with pending data and the
 * fewest in-flight packets (min over c->sent); compute its quote from the
 * controller's free buffer count for that link type. Iteration is RCU —
 * the rcu_read_lock/unlock lines were dropped by extraction.
 * NOTE(review): quote computation (*quote = ...) also dropped. */
2313 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2315 struct hci_conn_hash *h = &hdev->conn_hash;
2316 struct hci_conn *conn = NULL, *c;
2317 int num = 0, min = ~0;
2319 /* We don't have to lock device here. Connections are always
2320 * added and removed with TX task disabled. */
2324 list_for_each_entry_rcu(c, &h->list, list) {
2325 if (c->type != type || skb_queue_empty(&c->data_q))
2328 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2333 if (c->sent < min) {
/* Early exit once every connection of this type was examined. */
2338 if (hci_conn_num(hdev, type) == num)
2347 switch (conn->type) {
2349 cnt = hdev->acl_cnt;
2353 cnt = hdev->sco_cnt;
/* LE shares ACL buffers when the controller reports no LE MTU. */
2356 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2360 BT_ERR("Unknown link type");
2368 BT_DBG("conn %p quote %d", conn, *quote);
/* TX-timeout handler: disconnect every connection of the given link type
 * that still has unacked packets (c->sent), using reason 0x13 (remote
 * user terminated connection). RCU list walk; lock lines dropped by
 * extraction. */
2372 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2374 struct hci_conn_hash *h = &hdev->conn_hash;
2377 BT_ERR("%s link tx timeout", hdev->name);
2381 /* Kill stalled connections */
2382 list_for_each_entry_rcu(c, &h->list, list) {
2383 if (c->type == type && c->sent) {
2384 BT_ERR("%s killing stalled connection %s",
2385 hdev->name, batostr(&c->dst));
2386 hci_acl_disconn(c, 0x13);
/* Channel-level scheduler: among connections of the given type, pick the
 * channel whose head-of-queue skb has the highest priority (cur_prio),
 * breaking ties by fewest packets in flight on the owning connection.
 * The quote is derived from the controller buffer count for the link
 * type, scaled by priority (the q computation lines were dropped by
 * extraction). RCU iteration; lock lines also dropped. */
2393 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2396 struct hci_conn_hash *h = &hdev->conn_hash;
2397 struct hci_chan *chan = NULL;
2398 int num = 0, min = ~0, cur_prio = 0;
2399 struct hci_conn *conn;
2400 int cnt, q, conn_num = 0;
2402 BT_DBG("%s", hdev->name);
2406 list_for_each_entry_rcu(conn, &h->list, list) {
2407 struct hci_chan *tmp;
2409 if (conn->type != type)
2412 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2417 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2418 struct sk_buff *skb;
2420 if (skb_queue_empty(&tmp->data_q))
2423 skb = skb_peek(&tmp->data_q);
/* Skip channels below the best priority seen so far; a higher
 * priority resets the search (counters re-seeded). */
2424 if (skb->priority < cur_prio)
2427 if (skb->priority > cur_prio) {
2430 cur_prio = skb->priority;
2435 if (conn->sent < min) {
2441 if (hci_conn_num(hdev, type) == conn_num)
2450 switch (chan->conn->type) {
2452 cnt = hdev->acl_cnt;
2456 cnt = hdev->sco_cnt;
2459 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2463 BT_ERR("Unknown link type");
2468 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass run after a TX round: promote the head skb of
 * every still-queued channel of this link type toward HCI_PRIO_MAX - 1 so
 * lower-priority traffic eventually gets scheduled. RCU list walk; lock
 * lines and the skip conditions between 2494 and 2501 were dropped by
 * extraction. */
2472 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2474 struct hci_conn_hash *h = &hdev->conn_hash;
2475 struct hci_conn *conn;
2478 BT_DBG("%s", hdev->name);
2482 list_for_each_entry_rcu(conn, &h->list, list) {
2483 struct hci_chan *chan;
2485 if (conn->type != type)
2488 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2493 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2494 struct sk_buff *skb;
2501 if (skb_queue_empty(&chan->data_q))
2504 skb = skb_peek(&chan->data_q);
/* Already at (or above) the promotion ceiling — nothing to do. */
2505 if (skb->priority >= HCI_PRIO_MAX - 1)
2508 skb->priority = HCI_PRIO_MAX - 1;
2510 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2514 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks an ACL packet occupies: payload bytes
 * (skb length minus the ACL header) divided by block_len, rounded up. */
2522 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2524 /* Calculate count of blocks used by this packet */
2525 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller reports no free ACL buffers (cnt == 0) and nothing
 * has been acked since acl_last_tx + HCI_ACL_TX_TIMEOUT, treat the ACL
 * link as stalled and kill its connections. Skipped entirely for RAW
 * devices, where flow control is bypassed. */
2528 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2530 if (!test_bit(HCI_RAW, &hdev->flags)) {
2531 /* ACL tx timeout must be longer than maximum
2532 * link supervision timeout (40.9 seconds) */
2533 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2534 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2535 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while free ACL buffers remain, drain up to
 * 'quote' skbs from the best channel (hci_chan_sent), stopping early if a
 * lower-priority skb reaches the head of the queue. Sent/accounting
 * decrements after hci_send_frame were dropped by extraction. Ends with a
 * priority recalculation if anything was sent. */
2539 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2541 unsigned int cnt = hdev->acl_cnt;
2542 struct hci_chan *chan;
2543 struct sk_buff *skb;
2546 __check_timeout(hdev, cnt);
2548 while (hdev->acl_cnt &&
2549 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
2550 u32 priority = (skb_peek(&chan->data_q))->priority;
2551 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2552 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2553 skb->len, skb->priority);
2555 /* Stop if priority has changed */
2556 if (skb->priority < priority)
2559 skb = skb_dequeue(&chan->data_q);
2561 hci_conn_enter_active_mode(chan->conn,
2562 bt_cb(skb)->force_active);
2564 hci_send_frame(skb);
2565 hdev->acl_last_tx = jiffies;
/* Something was transmitted this round — rebalance priorities. */
2573 if (cnt != hdev->acl_cnt)
2574 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control counted in data blocks rather
 * than packets): same shape as hci_sched_acl_pkt but each skb consumes
 * __get_blocks() units from block_cnt, and a packet larger than the
 * remaining block budget stops the inner loop. */
2577 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2579 unsigned int cnt = hdev->block_cnt;
2580 struct hci_chan *chan;
2581 struct sk_buff *skb;
2584 __check_timeout(hdev, cnt);
2586 while (hdev->block_cnt > 0 &&
2587 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
2588 u32 priority = (skb_peek(&chan->data_q))->priority;
2589 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2592 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2593 skb->len, skb->priority);
2595 /* Stop if priority has changed */
2596 if (skb->priority < priority)
2599 skb = skb_dequeue(&chan->data_q);
2601 blocks = __get_blocks(hdev, skb);
/* Not enough free blocks for this packet — defer it. */
2602 if (blocks > hdev->block_cnt)
2605 hci_conn_enter_active_mode(chan->conn,
2606 bt_cb(skb)->force_active);
2608 hci_send_frame(skb);
2609 hdev->acl_last_tx = jiffies;
2611 hdev->block_cnt -= blocks;
2614 chan->sent += blocks;
2615 chan->conn->sent += blocks;
2619 if (cnt != hdev->block_cnt)
2620 hci_prio_recalculate(hdev, ACL_LINK);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode; no-op when there are
 * no ACL connections. */
2623 static inline void hci_sched_acl(struct hci_dev *hdev)
2625 BT_DBG("%s", hdev->name);
2627 if (!hci_conn_num(hdev, ACL_LINK))
2630 switch (hdev->flow_ctl_mode) {
2631 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2632 hci_sched_acl_pkt(hdev);
2635 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2636 hci_sched_acl_blk(hdev);
/* SCO scheduler: round-robin via hci_low_sent while SCO buffers remain;
 * conn->sent saturates to ~0 as a wrap guard (reset line dropped by
 * extraction). */
2642 static inline void hci_sched_sco(struct hci_dev *hdev)
2644 struct hci_conn *conn;
2645 struct sk_buff *skb;
2648 BT_DBG("%s", hdev->name);
2650 if (!hci_conn_num(hdev, SCO_LINK))
2653 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
2654 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2655 BT_DBG("skb %p len %d", skb, skb->len);
2656 hci_send_frame(skb);
2659 if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco but iterates
 * ESCO_LINK connections (shares the sco_cnt buffer pool). */
2665 static inline void hci_sched_esco(struct hci_dev *hdev)
2667 struct hci_conn *conn;
2668 struct sk_buff *skb;
2671 BT_DBG("%s", hdev->name);
2673 if (!hci_conn_num(hdev, ESCO_LINK))
2676 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) {
2677 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2678 BT_DBG("skb %p len %d", skb, skb->len);
2679 hci_send_frame(skb);
2682 if (conn->sent == ~0)
/* LE scheduler: like the ACL packet scheduler but with its own stall
 * detection (45 s after le_last_tx when le_cnt is exhausted). When the
 * controller advertises no dedicated LE buffers (le_pkts == 0), LE
 * traffic borrows the ACL buffer pool — hence acl_cnt is written back at
 * the end in that case. Per-send accounting lines were dropped by
 * extraction. */
2688 static inline void hci_sched_le(struct hci_dev *hdev)
2690 struct hci_chan *chan;
2691 struct sk_buff *skb;
2692 int quote, cnt, tmp;
2694 BT_DBG("%s", hdev->name);
2696 if (!hci_conn_num(hdev, LE_LINK))
2699 if (!test_bit(HCI_RAW, &hdev->flags)) {
2700 /* LE tx timeout must be longer than maximum
2701 * link supervision timeout (40.9 seconds) */
2702 if (!hdev->le_cnt && hdev->le_pkts &&
2703 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2704 hci_link_tx_to(hdev, LE_LINK);
2707 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2709 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
2710 u32 priority = (skb_peek(&chan->data_q))->priority;
2711 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2712 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2713 skb->len, skb->priority);
2715 /* Stop if priority has changed */
2716 if (skb->priority < priority)
2719 skb = skb_dequeue(&chan->data_q);
2721 hci_send_frame(skb);
2722 hdev->le_last_tx = jiffies;
/* Shared-buffer mode: return the remaining budget to acl_cnt. */
2733 hdev->acl_cnt = cnt;
2736 hci_prio_recalculate(hdev, LE_LINK);
/* TX work item: run every link-type scheduler in turn (ACL, SCO, eSCO —
 * the LE call between 2753 and 2757 was dropped by extraction), then
 * flush any raw/unknown-type packets straight to the driver. */
2739 static void hci_tx_work(struct work_struct *work)
2741 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2742 struct sk_buff *skb;
2744 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2745 hdev->sco_cnt, hdev->le_cnt);
2747 /* Schedule queues and send stuff to HCI driver */
2749 hci_sched_acl(hdev);
2751 hci_sched_sco(hdev);
2753 hci_sched_esco(hdev);
2757 /* Send next queued raw (unknown type) packet */
2758 while ((skb = skb_dequeue(&hdev->raw_q)))
2759 hci_send_frame(skb);
2762 /* ----- HCI RX task (incoming data processing) ----- */
2764 /* ACL data packet */
/* Demultiplex an incoming ACL packet: strip the header, unpack handle
 * and flags, look up the connection (hash lookup bracketed by hdev
 * lock/unlock — the lock line was dropped by extraction), wake the link
 * out of sniff mode and pass the payload up to L2CAP. Unknown handles
 * are logged and the skb dropped (kfree path dropped by extraction). */
2765 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2767 struct hci_acl_hdr *hdr = (void *) skb->data;
2768 struct hci_conn *conn;
2769 __u16 handle, flags;
2771 skb_pull(skb, HCI_ACL_HDR_SIZE);
2773 handle = __le16_to_cpu(hdr->handle);
2774 flags = hci_flags(handle);
2775 handle = hci_handle(handle);
2777 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2779 hdev->stat.acl_rx++;
2782 conn = hci_conn_hash_lookup_handle(hdev, handle);
2783 hci_dev_unlock(hdev);
2786 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2788 /* Send to upper protocol */
2789 l2cap_recv_acldata(conn, skb, flags);
2792 BT_ERR("%s ACL packet for unknown connection handle %d",
2793 hdev->name, handle);
2799 /* SCO data packet */
/* SCO counterpart of hci_acldata_packet: strip header, resolve handle to
 * a connection and deliver the payload to the SCO layer; unknown handles
 * are logged and dropped. */
2800 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2802 struct hci_sco_hdr *hdr = (void *) skb->data;
2803 struct hci_conn *conn;
2806 skb_pull(skb, HCI_SCO_HDR_SIZE);
2808 handle = __le16_to_cpu(hdr->handle);
2810 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2812 hdev->stat.sco_rx++;
2815 conn = hci_conn_hash_lookup_handle(hdev, handle);
2816 hci_dev_unlock(hdev);
2819 /* Send to upper protocol */
2820 sco_recv_scodata(conn, skb);
2823 BT_ERR("%s SCO packet for unknown connection handle %d",
2824 hdev->name, handle);
/* RX work item: drain rx_q, mirroring each frame to the monitor (and raw
 * sockets when promisc). RAW devices short-circuit processing; during
 * HCI_INIT data packets are discarded (only events are processed).
 * Otherwise dispatch by packet type to the event/ACL/SCO handlers; the
 * default kfree_skb path was dropped by extraction. */
2830 static void hci_rx_work(struct work_struct *work)
2832 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2833 struct sk_buff *skb;
2835 BT_DBG("%s", hdev->name);
2837 while ((skb = skb_dequeue(&hdev->rx_q))) {
2838 /* Send copy to monitor */
2839 hci_send_to_monitor(hdev, skb);
2841 if (atomic_read(&hdev->promisc)) {
2842 /* Send copy to the sockets */
2843 hci_send_to_sock(hdev, skb);
2846 if (test_bit(HCI_RAW, &hdev->flags)) {
2851 if (test_bit(HCI_INIT, &hdev->flags)) {
2852 /* Don't process data packets in this states. */
2853 switch (bt_cb(skb)->pkt_type) {
2854 case HCI_ACLDATA_PKT:
2855 case HCI_SCODATA_PKT:
/* Process frame (HCI_EVENT_PKT label dropped by extraction). */
2862 switch (bt_cb(skb)->pkt_type) {
2864 BT_DBG("%s Event packet", hdev->name);
2865 hci_event_packet(hdev, skb);
2868 case HCI_ACLDATA_PKT:
2869 BT_DBG("%s ACL data packet", hdev->name);
2870 hci_acldata_packet(hdev, skb);
2873 case HCI_SCODATA_PKT:
2874 BT_DBG("%s SCO data packet", hdev->name);
2875 hci_scodata_packet(hdev, skb);
/* Command work item: when the controller has command credits (cmd_cnt),
 * dequeue one command, keep a clone in sent_cmd for later matching by
 * hci_sent_cmd_data(), send it and (re)arm the command timeout — except
 * for HCI_Reset, whose completion time is unbounded, so the timer is
 * deleted instead. If the clone allocation fails the command is requeued
 * at the head and the work rescheduled. */
2885 static void hci_cmd_work(struct work_struct *work)
2887 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2888 struct sk_buff *skb;
2890 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2892 /* Send queued commands */
2893 if (atomic_read(&hdev->cmd_cnt)) {
2894 skb = skb_dequeue(&hdev->cmd_q);
2898 kfree_skb(hdev->sent_cmd);
2900 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2901 if (hdev->sent_cmd) {
2902 atomic_dec(&hdev->cmd_cnt);
2903 hci_send_frame(skb);
2904 if (test_bit(HCI_RESET, &hdev->flags))
2905 del_timer(&hdev->cmd_timer);
2907 mod_timer(&hdev->cmd_timer,
2908 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2910 skb_queue_head(&hdev->cmd_q, skb);
2911 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Start a BR/EDR inquiry with the General Inquiry Access Code for
 * 'length' * 1.28 s. Returns -EINPROGRESS when an inquiry is already
 * running; flushes the inquiry cache first. The cp.length assignment was
 * dropped by extraction. */
2916 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2918 /* General inquiry access code (GIAC) */
2919 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2920 struct hci_cp_inquiry cp;
2922 BT_DBG("%s", hdev->name);
2924 if (test_bit(HCI_INQUIRY, &hdev->flags))
2925 return -EINPROGRESS;
2927 inquiry_cache_flush(hdev);
2929 memset(&cp, 0, sizeof(cp));
2930 memcpy(&cp.lap, lap, sizeof(cp.lap));
2933 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Cancel a running inquiry; a no-op (early return, dropped by
 * extraction) when HCI_INQUIRY is not set. */
2936 int hci_cancel_inquiry(struct hci_dev *hdev)
2938 BT_DBG("%s", hdev->name);
2940 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2943 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);