/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
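
/* Default controller bring-up, run with HCI_INIT set: drain any
 * driver-queued setup commands first, then issue the mandatory and
 * optional HCI configuration commands. */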
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
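
/* HCIINQUIRY ioctl helper: run a fresh inquiry if the cache is stale or a
 * flush was requested, then copy the cached responses to user space. */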
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
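
/* Shut the device down: cancel any pending request, kill the RX/TX/cmd
 * tasklets, flush all queues, reset the controller and finally call the
 * driver's close callback. */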
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);
	return ret;
}
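
/* Device-control ioctls (HCISETAUTH, HCISETSCAN, ...): most of them are
 * translated into a synchronous HCI request against the controller. */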
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
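
/* HCIGETDEVLIST ioctl helper: report the id and flags of up to dev_num
 * registered devices. */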
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
#ifdef FEATURE_DELAYED_HCI_UNREGISTER
	hdev->workqueue = NULL;
#endif

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
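
/* Reassemble one HCI packet from driver fragments: bt_skb_cb tracks how
 * many bytes are still expected, the payload length is read from the
 * packet header once it is complete, and a finished skb is handed to
 * hci_recv_frame(). Returns the number of input bytes not consumed. */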
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
						type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
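
/* Stream transports deliver a raw byte stream with no packet boundaries;
 * the first byte of each frame is the packet type indicator. */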
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
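
/* Hand one frame to the driver, mirroring it to any promiscuous HCI
 * sockets first. */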
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
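
/* ACL credits have not come back for too long; assume they are lost and
 * disconnect the links that still have unacknowledged packets. */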
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
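
/* TX tasklet: run the ACL/SCO/eSCO schedulers, then flush the raw queue. */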
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
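
/* RX tasklet: dispatch queued frames to the event handler or to the
 * upper-protocol data paths; data packets are dropped while HCI_INIT
 * is set. */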
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
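
/* Command tasklet: transmit queued commands one at a time, gated by the
 * cmd_cnt credit the controller returns in command complete/status events;
 * a clone of the outgoing command is kept in sent_cmd for
 * hci_sent_cmd_data(). */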
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}