2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
44 #ifdef CONFIG_SLEEP_MONITOR
45 #include <linux/power/sleep_monitor.h>
/* Forward declarations for the deferred-work handlers driving the HCI
 * RX, command and TX paths (bodies not visible in this chunk). */
48 static void hci_rx_work(struct work_struct *work);
49 static void hci_cmd_work(struct work_struct *work);
50 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, guarded by its rwlock. */
53 LIST_HEAD(hci_dev_list);
54 DEFINE_RWLOCK(hci_dev_list_lock);
56 /* HCI callback list */
57 LIST_HEAD(hci_cb_list);
58 DEFINE_RWLOCK(hci_cb_list_lock);
60 /* HCI ID Numbering */
/* IDA used to hand out unique hci%d device indexes. */
61 static DEFINE_IDA(hci_index_ida);
64 /* ---- HCI notifications ---- */
/* Tizen-only atomic notifier chain for HCI device events; forwarded
 * from hci_notify() below.
 * NOTE(review): braces and the closing #endif are missing from this
 * dump — do not treat as compilable as-is. */
66 #ifdef CONFIG_TIZEN_WIP
67 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* Register @nb on the HCI notifier chain. */
69 int hci_register_notifier(struct notifier_block *nb)
71 return atomic_notifier_chain_register(&hci_notifier, nb);
/* Remove @nb from the HCI notifier chain. */
74 int hci_unregister_notifier(struct notifier_block *nb)
76 return atomic_notifier_chain_unregister(&hci_notifier, nb);
80 /* ----- HCI requests ----- */
/* Lifecycle states of a synchronous HCI request (hdev->req_status). */
82 #define HCI_REQ_DONE 0
83 #define HCI_REQ_PEND 1
84 #define HCI_REQ_CANCELED 2
/* Serialize synchronous requests against a single device. */
86 #define hci_req_lock(d) mutex_lock(&d->req_lock)
87 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
89 /* ---- HCI notifications ---- */
/* Fan a device event out to HCI sockets and, on Tizen builds, to the
 * hci_notifier chain (only REG/UNREG/WRITE events are forwarded). */
91 static void hci_notify(struct hci_dev *hdev, int event)
93 hci_sock_dev_event(hdev, event);
94 #ifdef CONFIG_TIZEN_WIP
95 if (event == HCI_DEV_REG || event == HCI_DEV_UNREG
96 || event == HCI_DEV_WRITE)
97 atomic_notifier_call_chain(&hci_notifier, event, hdev);
101 /* ---- HCI debugfs entries ---- */
/* debugfs read handler: report 'Y'/'N' for the HCI_DUT_MODE flag.
 * NOTE(review): the local buf[] declaration is missing from this dump. */
103 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
104 size_t count, loff_t *ppos)
106 struct hci_dev *hdev = file->private_data;
109 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
112 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* debugfs write handler: parse a boolean from userspace and toggle
 * Device Under Test mode by sending HCI_OP_ENABLE_DUT_MODE (enable) or
 * HCI_OP_RESET (disable) synchronously.  Requires the device to be up;
 * no-op when the flag already matches the requested state. */
115 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
116 size_t count, loff_t *ppos)
118 struct hci_dev *hdev = file->private_data;
121 size_t buf_size = min(count, (sizeof(buf)-1));
125 if (!test_bit(HCI_UP, &hdev->flags))
128 if (copy_from_user(buf, user_buf, buf_size))
131 buf[buf_size] = '\0';
132 if (strtobool(buf, &enable))
135 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
140 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
143 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
145 hci_req_unlock(hdev);
/* First status byte of the response is mapped to a -errno value. */
150 err = -bt_to_errno(skb->data[0]);
/* On success, flip the flag so subsequent reads see the new mode. */
156 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
/* file_operations wiring for the "dut_mode" debugfs entry. */
161 static const struct file_operations dut_mode_fops = {
163 .read = dut_mode_read,
164 .write = dut_mode_write,
165 .llseek = default_llseek,
168 /* ---- HCI requests ---- */
/* Completion hook for synchronous requests: record @result, mark the
 * request done and wake the waiter sleeping on req_wait_q. */
170 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode)
172 BT_DBG("%s result 0x%2.2x", hdev->name, result);
174 if (hdev->req_status == HCI_REQ_PEND) {
175 hdev->req_result = result;
176 hdev->req_status = HCI_REQ_DONE;
177 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with @err and wake the waiter. */
181 static void hci_req_cancel(struct hci_dev *hdev, int err)
183 BT_DBG("%s err 0x%2.2x", hdev->name, err);
185 if (hdev->req_status == HCI_REQ_PEND) {
186 hdev->req_result = err;
187 hdev->req_status = HCI_REQ_CANCELED;
188 wake_up_interruptible(&hdev->req_wait_q);
/* Take ownership of hdev->recv_evt and validate it as the response for
 * @opcode — either the requested @event or a Command Complete whose
 * embedded opcode matches.  Returns the skb with the event (and, for
 * Command Complete, the cmd_complete header) pulled, or
 * ERR_PTR(-ENODATA) when no matching event is available. */
192 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
195 struct hci_ev_cmd_complete *ev;
196 struct hci_event_hdr *hdr;
201 skb = hdev->recv_evt;
202 hdev->recv_evt = NULL;
204 hci_dev_unlock(hdev);
207 return ERR_PTR(-ENODATA);
209 if (skb->len < sizeof(*hdr)) {
210 BT_ERR("Too short HCI event");
214 hdr = (void *) skb->data;
215 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* When the caller asked for a specific event, match on it directly. */
218 if (hdr->evt != event)
223 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
224 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
228 if (skb->len < sizeof(*ev)) {
229 BT_ERR("Too short cmd_complete event");
233 ev = (void *) skb->data;
234 skb_pull(skb, sizeof(*ev));
236 if (opcode == __le16_to_cpu(ev->opcode))
239 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
240 __le16_to_cpu(ev->opcode));
244 return ERR_PTR(-ENODATA);
/* Send a single HCI command and sleep (interruptibly, up to @timeout)
 * until its completion event arrives.  Returns the response skb from
 * hci_get_cmd_complete(), or an ERR_PTR on run failure, signal
 * (-EINTR) or command error status. */
247 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
248 const void *param, u8 event, u32 timeout)
250 DECLARE_WAITQUEUE(wait, current);
251 struct hci_request req;
254 BT_DBG("%s", hdev->name);
256 hci_req_init(&req, hdev);
258 hci_req_add_ev(&req, opcode, plen, param, event);
260 hdev->req_status = HCI_REQ_PEND;
/* Queue on the wait queue before running the request so the wake-up
 * from hci_req_sync_complete() cannot be missed. */
262 add_wait_queue(&hdev->req_wait_q, &wait);
263 set_current_state(TASK_INTERRUPTIBLE);
265 err = hci_req_run(&req, hci_req_sync_complete);
267 remove_wait_queue(&hdev->req_wait_q, &wait);
268 set_current_state(TASK_RUNNING);
272 schedule_timeout(timeout);
274 remove_wait_queue(&hdev->req_wait_q, &wait);
276 if (signal_pending(current))
277 return ERR_PTR(-EINTR);
279 switch (hdev->req_status) {
281 err = -bt_to_errno(hdev->req_result);
284 case HCI_REQ_CANCELED:
285 err = -hdev->req_result;
/* Reset request bookkeeping for the next synchronous caller. */
293 hdev->req_status = hdev->req_result = 0;
295 BT_DBG("%s end: err %d", hdev->name, err);
300 return hci_get_cmd_complete(hdev, opcode, event);
302 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command matched by Command Complete
 * only (event argument of 0). */
304 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
305 const void *param, u32 timeout)
307 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
309 EXPORT_SYMBOL(__hci_cmd_sync);
311 /* Execute request and wait for completion. */
/* Build a request via @func(req, opt), run it, and sleep until
 * completion, timeout or signal.  Caller must already hold
 * hdev->req_lock (see hci_req_sync() below). */
312 static int __hci_req_sync(struct hci_dev *hdev,
313 void (*func)(struct hci_request *req,
315 unsigned long opt, __u32 timeout)
317 struct hci_request req;
318 DECLARE_WAITQUEUE(wait, current);
321 BT_DBG("%s start", hdev->name);
323 hci_req_init(&req, hdev);
325 hdev->req_status = HCI_REQ_PEND;
329 add_wait_queue(&hdev->req_wait_q, &wait);
330 set_current_state(TASK_INTERRUPTIBLE);
332 err = hci_req_run(&req, hci_req_sync_complete);
334 hdev->req_status = 0;
336 remove_wait_queue(&hdev->req_wait_q, &wait);
337 set_current_state(TASK_RUNNING);
339 /* ENODATA means the HCI request command queue is empty.
340 * This can happen when a request with conditionals doesn't
341 * trigger any commands to be sent. This is normal behavior
342 * and should not trigger an error return.
350 schedule_timeout(timeout);
352 remove_wait_queue(&hdev->req_wait_q, &wait);
354 if (signal_pending(current))
357 switch (hdev->req_status) {
359 err = -bt_to_errno(hdev->req_result);
362 case HCI_REQ_CANCELED:
363 err = -hdev->req_result;
/* Clear bookkeeping for the next synchronous request. */
371 hdev->req_status = hdev->req_result = 0;
373 BT_DBG("%s end: err %d", hdev->name, err);
/* Public wrapper around __hci_req_sync(): fails when the device is not
 * up, and takes req_lock to serialize all synchronous requests. */
378 static int hci_req_sync(struct hci_dev *hdev,
379 void (*req)(struct hci_request *req,
381 unsigned long opt, __u32 timeout)
385 if (!test_bit(HCI_UP, &hdev->flags))
388 /* Serialize all requests */
390 ret = __hci_req_sync(hdev, req, opt, timeout);
391 hci_req_unlock(hdev);
/* Request builder: flag the device as resetting and queue HCI_Reset. */
396 static void hci_reset_req(struct hci_request *req, unsigned long opt)
398 BT_DBG("%s %ld", req->hdev->name, opt);
401 set_bit(HCI_RESET, &req->hdev->flags);
402 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR controllers: select packet-based flow
 * control and queue the basic identity reads. */
405 static void bredr_init(struct hci_request *req)
407 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
409 /* Read Local Supported Features */
410 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
412 /* Read Local Version */
413 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
415 /* Read BD Address */
416 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: select block-based flow control
 * and queue the AMP-specific discovery commands. */
419 static void amp_init(struct hci_request *req)
421 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
423 /* Read Local Version */
424 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
426 /* Read Local Supported Commands */
427 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
429 /* Read Local Supported Features */
430 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
432 /* Read Local AMP Info */
433 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
435 /* Read Data Blk size */
436 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
438 /* Read Flow Control Mode */
439 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
441 /* Read Location Data */
442 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Stage-1 request: optional reset (unless the RESET_ON_CLOSE quirk
 * says it already happened), then dispatch to bredr_init()/amp_init()
 * based on hdev->dev_type.  Case bodies are missing from this dump. */
445 static void hci_init1_req(struct hci_request *req, unsigned long opt)
447 struct hci_dev *hdev = req->hdev;
449 BT_DBG("%s %ld", hdev->name, opt);
452 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
453 hci_reset_req(req, 0);
455 switch (hdev->dev_type) {
465 BT_ERR("Unknown device type %d", hdev->dev_type);
/* Stage-2 BR/EDR setup: buffer-size and identity reads, event filter
 * reset and the ~20 s connection-accept timeout (0x7d00 slots).
 * NOTE(review): local declarations (flt_type, param) are missing from
 * this dump. */
470 static void bredr_setup(struct hci_request *req)
475 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
476 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
478 /* Read Class of Device */
479 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
481 /* Read Local Name */
482 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
484 /* Read Voice Setting */
485 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
487 /* Read Number of Supported IAC */
488 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
490 /* Read Current IAC LAP */
491 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
493 /* Clear Event Filters */
494 flt_type = HCI_FLT_CLEAR_ALL;
495 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
497 /* Connection accept timeout ~20 secs */
498 param = cpu_to_le16(0x7d00);
/* NOTE(review): "¶m" below is mojibake for "&param" (HTML entity
 * &para; corruption) — must be restored before compiling. */
499 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
/* Stage-2 LE setup: read LE buffer sizes, features, states and white
 * list size, clear the white list, and implicitly enable LE on
 * LE-only (non-BR/EDR-capable) controllers. */
502 static void le_setup(struct hci_request *req)
504 struct hci_dev *hdev = req->hdev;
506 /* Read LE Buffer Size */
507 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
509 /* Read LE Local Supported Features */
510 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
512 /* Read LE Supported States */
513 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
515 /* Read LE White List Size */
516 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
518 /* Clear LE White List */
519 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
521 /* LE-only controllers have LE implicitly enabled */
522 if (!lmp_bredr_capable(hdev))
523 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Build and queue the HCI Set Event Mask command.  Starts from a
 * BR/EDR default mask, switches to a minimal LE-only default when the
 * controller lacks BR/EDR, then enables optional event bits gated on
 * the corresponding LMP feature support. */
526 static void hci_setup_event_mask(struct hci_request *req)
528 struct hci_dev *hdev = req->hdev;
530 /* The second byte is 0xff instead of 0x9f (two reserved bits
531 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
534 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
536 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
537 * any event mask for pre 1.2 devices.
539 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
542 if (lmp_bredr_capable(hdev)) {
543 events[4] |= 0x01; /* Flow Specification Complete */
544 events[4] |= 0x02; /* Inquiry Result with RSSI */
545 events[4] |= 0x04; /* Read Remote Extended Features Complete */
546 events[5] |= 0x08; /* Synchronous Connection Complete */
547 events[5] |= 0x10; /* Synchronous Connection Changed */
549 /* Use a different default for LE-only devices */
550 memset(events, 0, sizeof(events));
551 events[0] |= 0x10; /* Disconnection Complete */
552 events[1] |= 0x08; /* Read Remote Version Information Complete */
553 events[1] |= 0x20; /* Command Complete */
554 events[1] |= 0x40; /* Command Status */
555 events[1] |= 0x80; /* Hardware Error */
556 events[2] |= 0x04; /* Number of Completed Packets */
557 events[3] |= 0x02; /* Data Buffer Overflow */
559 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
560 events[0] |= 0x80; /* Encryption Change */
561 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* Remaining bits are feature-gated for BR/EDR controllers. */
565 if (lmp_inq_rssi_capable(hdev))
566 events[4] |= 0x02; /* Inquiry Result with RSSI */
568 if (lmp_sniffsubr_capable(hdev))
569 events[5] |= 0x20; /* Sniff Subrating */
571 if (lmp_pause_enc_capable(hdev))
572 events[5] |= 0x80; /* Encryption Key Refresh Complete */
574 if (lmp_ext_inq_capable(hdev))
575 events[5] |= 0x40; /* Extended Inquiry Result */
577 if (lmp_no_flush_capable(hdev))
578 events[7] |= 0x01; /* Enhanced Flush Complete */
580 if (lmp_lsto_capable(hdev))
581 events[6] |= 0x80; /* Link Supervision Timeout Changed */
583 if (lmp_ssp_capable(hdev)) {
584 events[6] |= 0x01; /* IO Capability Request */
585 events[6] |= 0x02; /* IO Capability Response */
586 events[6] |= 0x04; /* User Confirmation Request */
587 events[6] |= 0x08; /* User Passkey Request */
588 events[6] |= 0x10; /* Remote OOB Data Request */
589 events[6] |= 0x20; /* Simple Pairing Complete */
590 events[7] |= 0x04; /* User Passkey Notification */
591 events[7] |= 0x08; /* Keypress Notification */
592 events[7] |= 0x10; /* Remote Host Supported
593 * Features Notification
597 if (lmp_le_capable(hdev))
598 events[7] |= 0x20; /* LE Meta-Event */
600 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Stage-2 request: run bredr_setup()/le_setup() per capability, read
 * supported commands on >=1.2 controllers, and configure SSP/EIR,
 * inquiry mode, TX power and extended features.  Several branch
 * bodies and declarations are missing from this dump. */
603 static void hci_init2_req(struct hci_request *req, unsigned long opt)
605 struct hci_dev *hdev = req->hdev;
607 if (lmp_bredr_capable(hdev))
610 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
612 if (lmp_le_capable(hdev))
615 /* All Bluetooth 1.2 and later controllers should support the
616 * HCI command for reading the local supported commands.
618 * Unfortunately some controllers indicate Bluetooth 1.2 support,
619 * but do not have support for this command. If that is the case,
620 * the driver can quirk the behavior and skip reading the local
621 * supported commands.
623 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
624 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
625 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
627 if (lmp_ssp_capable(hdev)) {
628 /* When SSP is available, then the host features page
629 * should also be available as well. However some
630 * controllers list the max_page as 0 as long as SSP
631 * has not been enabled. To achieve proper debugging
632 * output, force the minimum max_page to 1 at least.
634 hdev->max_page = 0x01;
636 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
639 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
640 sizeof(mode), &mode);
642 struct hci_cp_write_eir cp;
/* SSP disabled: clear any cached EIR data on host and controller. */
644 memset(hdev->eir, 0, sizeof(hdev->eir));
645 memset(&cp, 0, sizeof(cp));
647 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
651 if (lmp_inq_rssi_capable(hdev) ||
652 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
655 /* If Extended Inquiry Result events are supported, then
656 * they are clearly preferred over Inquiry Result with RSSI
659 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
661 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
664 if (lmp_inq_tx_pwr_capable(hdev))
665 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
667 if (lmp_ext_feat_capable(hdev)) {
668 struct hci_cp_read_local_ext_features cp;
671 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
675 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
677 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Compose the default link policy from the controller's LMP feature
 * bits (role switch, hold, sniff, park) and queue the write. */
682 static void hci_setup_link_policy(struct hci_request *req)
684 struct hci_dev *hdev = req->hdev;
685 struct hci_cp_write_def_link_policy cp;
688 if (lmp_rswitch_capable(hdev))
689 link_policy |= HCI_LP_RSWITCH;
690 if (lmp_hold_capable(hdev))
691 link_policy |= HCI_LP_HOLD;
692 if (lmp_sniff_capable(hdev))
693 link_policy |= HCI_LP_SNIFF;
694 if (lmp_park_capable(hdev))
695 link_policy |= HCI_LP_PARK;
697 cp.policy = cpu_to_le16(link_policy);
698 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported when the host-side LE enablement
 * differs from the controller state; skipped on LE-only devices. */
701 static void hci_set_le_support(struct hci_request *req)
703 struct hci_dev *hdev = req->hdev;
704 struct hci_cp_write_le_host_supported cp;
706 /* LE-only devices do not support explicit enablement */
707 if (!lmp_bredr_capable(hdev))
710 memset(&cp, 0, sizeof(cp));
712 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
/* Only send the command when the desired value differs. */
717 if (cp.le != lmp_host_le_capable(hdev))
718 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue event mask page 2: CSB master/slave events and the
 * Authenticated Payload Timeout Expired event, each feature-gated. */
722 static void hci_set_event_mask_page_2(struct hci_request *req)
724 struct hci_dev *hdev = req->hdev;
725 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
727 /* If Connectionless Slave Broadcast master role is supported
728 * enable all necessary events for it.
730 if (lmp_csb_master_capable(hdev)) {
731 events[1] |= 0x40; /* Triggered Clock Capture */
732 events[1] |= 0x80; /* Synchronization Train Complete */
733 events[2] |= 0x10; /* Slave Page Response Timeout */
734 events[2] |= 0x20; /* CSB Channel Map Change */
737 /* If Connectionless Slave Broadcast slave role is supported
738 * enable all necessary events for it.
740 if (lmp_csb_slave_capable(hdev)) {
741 events[2] |= 0x01; /* Synchronization Train Received */
742 events[2] |= 0x02; /* CSB Receive */
743 events[2] |= 0x04; /* CSB Timeout */
744 events[2] |= 0x08; /* Truncated Page Complete */
747 /* Enable Authenticated Payload Timeout Expired event if supported */
748 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
751 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Stage-3 request: event mask, stored link keys, link policy, page
 * scan reads, the LE event mask and LE feature reads, and extended
 * feature pages beyond page 1.  Command-bitmask checks (hdev->commands)
 * gate optional commands per the controller's advertised support. */
754 static void hci_init3_req(struct hci_request *req, unsigned long opt)
756 struct hci_dev *hdev = req->hdev;
759 hci_setup_event_mask(req);
/* Bit 5 of byte 6 = Read Stored Link Key supported. */
761 if (hdev->commands[6] & 0x20) {
762 struct hci_cp_read_stored_link_key cp;
764 bacpy(&cp.bdaddr, BDADDR_ANY);
766 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
769 if (hdev->commands[5] & 0x10)
770 hci_setup_link_policy(req);
772 if (hdev->commands[8] & 0x01)
773 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
775 /* Some older Broadcom based Bluetooth 1.2 controllers do not
776 * support the Read Page Scan Type command. Check support for
777 * this command in the bit mask of supported commands.
779 if (hdev->commands[13] & 0x01)
780 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
782 if (lmp_le_capable(hdev)) {
785 memset(events, 0, sizeof(events));
/* LE event mask bits are gated on the LE feature page. */
788 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
789 events[0] |= 0x10; /* LE Long Term Key Request */
791 /* If controller supports the Connection Parameters Request
792 * Link Layer Procedure, enable the corresponding event.
794 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
795 events[0] |= 0x20; /* LE Remote Connection
799 /* If the controller supports the Data Length Extension
800 * feature, enable the corresponding event.
802 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
803 events[0] |= 0x40; /* LE Data Length Change */
805 /* If the controller supports Extended Scanner Filter
806 * Policies, enable the correspondig event.
808 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
809 events[1] |= 0x04; /* LE Direct Advertising
813 /* If the controller supports the LE Read Local P-256
814 * Public Key command, enable the corresponding event.
816 if (hdev->commands[34] & 0x02)
817 events[0] |= 0x80; /* LE Read Local P-256
818 * Public Key Complete
821 /* If the controller supports the LE Generate DHKey
822 * command, enable the corresponding event.
824 if (hdev->commands[34] & 0x04)
825 events[1] |= 0x01; /* LE Generate DHKey Complete */
827 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
830 if (hdev->commands[25] & 0x40) {
831 /* Read LE Advertising Channel TX Power */
832 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
835 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
836 /* Read LE Maximum Data Length */
837 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
839 /* Read LE Suggested Default Data Length */
840 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
843 hci_set_le_support(req);
846 /* Read features beyond page 1 if available */
847 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
848 struct hci_cp_read_local_ext_features cp;
851 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Stage-4 request: delete stored link keys (where genuinely
 * supported), event mask page 2, codec list, MWS transport config,
 * sync train params, and (Tizen) Secure Connections enablement. */
856 static void hci_init4_req(struct hci_request *req, unsigned long opt)
858 struct hci_dev *hdev = req->hdev;
860 /* Some Broadcom based Bluetooth controllers do not support the
861 * Delete Stored Link Key command. They are clearly indicating its
862 * absence in the bit mask of supported commands.
864 * Check the supported commands and only if the the command is marked
865 * as supported send it. If not supported assume that the controller
866 * does not have actual support for stored link keys which makes this
867 * command redundant anyway.
869 * Some controllers indicate that they support handling deleting
870 * stored link keys, but they don't. The quirk lets a driver
871 * just disable this command.
873 if (hdev->commands[6] & 0x80 &&
874 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
875 struct hci_cp_delete_stored_link_key cp;
877 bacpy(&cp.bdaddr, BDADDR_ANY);
878 cp.delete_all = 0x01;
879 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
883 /* Set event mask page 2 if the HCI command for it is supported */
884 if (hdev->commands[22] & 0x04)
885 hci_set_event_mask_page_2(req);
887 /* Read local codec list if the HCI command is supported */
888 if (hdev->commands[29] & 0x20)
889 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
891 /* Get MWS transport configuration if the HCI command is supported */
892 if (hdev->commands[30] & 0x08)
893 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
895 /* Check for Synchronization Train support */
896 if (lmp_sync_train_capable(hdev))
897 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
899 /* Disable Secure connection implementation now */
900 #ifdef CONFIG_TIZEN_WIP
901 /* Enable Secure Connections if supported and configured */
902 if ((lmp_sc_capable(hdev) ||
903 test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags)) &&
904 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
907 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
908 sizeof(support), &support);
/* Run the four synchronous init stages for a configured controller.
 * AMP controllers stop after stage 1; BR/EDR/LE run stages 2-4.  In
 * setup/config phase, also create the debugfs entries. */
913 static int __hci_init(struct hci_dev *hdev)
917 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
921 /* The Device Under Test (DUT) mode is special and available for
922 * all controller types. So just create it early on.
924 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
925 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
929 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
930 * BR/EDR/LE type controllers. AMP controllers only need the
933 if (hdev->dev_type != HCI_BREDR)
936 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
940 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
944 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
948 /* This function is only called when the controller is actually in
949 * configured state. When the controller is marked as unconfigured,
950 * this initialization procedure is not run.
952 * It means that it is possible that a controller runs through its
953 * setup phase and then discovers missing settings. If that is the
954 * case, then this function will not be called. It then will only
955 * be called during the config phase.
957 * So only when in setup phase or config phase, create the debugfs
958 * entries and register the SMP channels.
960 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
961 !test_bit(HCI_CONFIG, &hdev->dev_flags))
964 hci_debugfs_create_common(hdev);
966 if (lmp_bredr_capable(hdev))
967 hci_debugfs_create_bredr(hdev);
969 if (lmp_le_capable(hdev))
970 hci_debugfs_create_le(hdev);
/* Minimal init for unconfigured controllers: optional reset, version
 * read, and BD address read only when the driver can set one. */
975 static void hci_init0_req(struct hci_request *req, unsigned long opt)
977 struct hci_dev *hdev = req->hdev;
979 BT_DBG("%s %ld", hdev->name, opt);
982 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
983 hci_reset_req(req, 0);
985 /* Read Local Version */
986 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
988 /* Read BD Address */
989 if (hdev->set_bdaddr)
990 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Run hci_init0_req for an unconfigured controller; skipped entirely
 * for raw devices (HCI_QUIRK_RAW_DEVICE). */
993 static int __hci_unconf_init(struct hci_dev *hdev)
997 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1000 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
/* Request builder: write the scan enable value passed via @opt.
 * NOTE(review): the local "scan" declaration is missing from dump. */
1007 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1011 BT_DBG("%s %x", req->hdev->name, scan);
1013 /* Inquiry and Page scans */
1014 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder: write the authentication enable value from @opt. */
1017 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1021 BT_DBG("%s %x", req->hdev->name, auth);
1023 /* Authentication */
1024 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder: write the encryption mode value from @opt. */
1027 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1031 BT_DBG("%s %x", req->hdev->name, encrypt);
1034 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder: write the default link policy from @opt (passed as
 * a host-order u16, converted to little-endian here). */
1037 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1039 __le16 policy = cpu_to_le16(opt);
1041 BT_DBG("%s %x", req->hdev->name, policy);
1043 /* Default link policy */
1044 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1047 /* Get HCI device by index.
1048 * Device is held on return. */
/* Linear search of hci_dev_list under the read lock; takes a
 * reference via hci_dev_hold() on the match.  Caller must put it. */
1049 struct hci_dev *hci_dev_get(int index)
1051 struct hci_dev *hdev = NULL, *d;
1053 BT_DBG("%d", index);
1058 read_lock(&hci_dev_list_lock);
1059 list_for_each_entry(d, &hci_dev_list, list) {
1060 if (d->id == index) {
1061 hdev = hci_dev_hold(d);
1065 read_unlock(&hci_dev_list_lock);
1069 /* ---- Inquiry support ---- */
1071 bool hci_discovery_active(struct hci_dev *hdev)
1073 struct discovery_state *discov = &hdev->discovery;
1075 switch (discov->state) {
1076 case DISCOVERY_FINDING:
1077 case DISCOVERY_RESOLVING:
/* Transition the BR/EDR discovery state machine and emit mgmt
 * "discovering" events on STOPPED/FINDING transitions.  Background
 * scan update is compiled out on Tizen builds (#ifndef). */
1085 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1087 int old_state = hdev->discovery.state;
1089 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1091 if (old_state == state)
1094 hdev->discovery.state = state;
1097 case DISCOVERY_STOPPED:
1098 #ifndef CONFIG_TIZEN_WIP
1099 hci_update_background_scan(hdev);
1102 if (old_state != DISCOVERY_STARTING)
1103 mgmt_discovering(hdev, 0);
1105 case DISCOVERY_STARTING:
1107 case DISCOVERY_FINDING:
1108 mgmt_discovering(hdev, 1);
1110 case DISCOVERY_RESOLVING:
1112 case DISCOVERY_STOPPING:
1116 #ifdef CONFIG_TIZEN_WIP
1117 /* BEGIN TIZEN_Bluetooth :: Seperate LE discovery */
/* Tizen-only: LE counterpart of hci_discovery_active(), using the
 * separate hdev->le_discovery state. */
1118 bool hci_le_discovery_active(struct hci_dev *hdev)
1120 struct discovery_state *discov = &hdev->le_discovery;
1122 switch (discov->state) {
1123 case DISCOVERY_FINDING:
1124 case DISCOVERY_RESOLVING:
/* Tizen-only: transition the LE discovery state machine and emit
 * mgmt_le_discovering events.  Note the state is assigned at the end
 * here, unlike the BR/EDR variant which assigns before the switch. */
1132 void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
1134 BT_DBG("%s state %u -> %u", hdev->name, hdev->le_discovery.state, state);
1136 if (hdev->le_discovery.state == state)
1140 case DISCOVERY_STOPPED:
1141 hci_update_background_scan(hdev);
1143 if (hdev->le_discovery.state != DISCOVERY_STARTING)
1144 mgmt_le_discovering(hdev, 0);
1146 case DISCOVERY_STARTING:
1148 case DISCOVERY_FINDING:
1149 mgmt_le_discovering(hdev, 1);
1151 case DISCOVERY_RESOLVING:
1153 case DISCOVERY_STOPPING:
1157 hdev->le_discovery.state = state;
1159 /* END TIZEN_Bluetooth */
/* Drop every entry from the inquiry cache and re-init the sub-lists.
 * NOTE(review): the per-entry free inside the loop and the re-init of
 * cache->all are missing from this dump. */
1162 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1164 struct discovery_state *cache = &hdev->discovery;
1165 struct inquiry_entry *p, *n;
1167 list_for_each_entry_safe(p, n, &cache->all, all) {
1172 INIT_LIST_HEAD(&cache->unknown);
1173 INIT_LIST_HEAD(&cache->resolve);
/* Find a cache entry by BD address across all discovered devices;
 * returns NULL when absent (return paths truncated in this dump). */
1176 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1179 struct discovery_state *cache = &hdev->discovery;
1180 struct inquiry_entry *e;
1182 BT_DBG("cache %p, %pMR", cache, bdaddr);
1184 list_for_each_entry(e, &cache->all, all) {
1185 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry whose remote name is still unknown, by BD address. */
1192 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1195 struct discovery_state *cache = &hdev->discovery;
1196 struct inquiry_entry *e;
1198 BT_DBG("cache %p, %pMR", cache, bdaddr);
1200 list_for_each_entry(e, &cache->unknown, list) {
1201 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the resolve list: a BDADDR_ANY query matches the
 * first entry in the given name @state, otherwise match by address. */
1208 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1212 struct discovery_state *cache = &hdev->discovery;
1213 struct inquiry_entry *e;
1215 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1217 list_for_each_entry(e, &cache->resolve, list) {
1218 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1220 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the resolve list ordered by descending |RSSI|,
 * so stronger signals get their names resolved first; entries with a
 * pending name lookup are not displaced. */
1227 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1228 struct inquiry_entry *ie)
1230 struct discovery_state *cache = &hdev->discovery;
1231 struct list_head *pos = &cache->resolve;
1232 struct inquiry_entry *p;
1234 list_del(&ie->list);
1236 list_for_each_entry(p, &cache->resolve, list) {
1237 if (p->name_state != NAME_PENDING &&
1238 abs(p->data.rssi) >= abs(ie->data.rssi))
1243 list_add(&ie->list, pos);
/* Insert or refresh an inquiry-cache entry for @data and compute the
 * MGMT device-found flags (legacy pairing / confirm-name).  Existing
 * entries get an RSSI refresh and possible resolve-list reorder; new
 * ones are kzalloc'd and classified by name knowledge. */
1246 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1249 struct discovery_state *cache = &hdev->discovery;
1250 struct inquiry_entry *ie;
1253 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* A fresh inquiry result invalidates any stored OOB data. */
1255 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1257 if (!data->ssp_mode)
1258 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1260 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1262 if (!ie->data.ssp_mode)
1263 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1265 if (ie->name_state == NAME_NEEDED &&
1266 data->rssi != ie->data.rssi) {
1267 ie->data.rssi = data->rssi;
1268 hci_inquiry_cache_update_resolve(hdev, ie);
1274 /* Entry not in the cache. Add new one. */
1275 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
/* Allocation failure path: report confirm-name (truncated here). */
1277 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1281 list_add(&ie->all, &cache->all);
1284 ie->name_state = NAME_KNOWN;
1286 ie->name_state = NAME_NOT_KNOWN;
1287 list_add(&ie->list, &cache->unknown);
1291 if (name_known && ie->name_state != NAME_KNOWN &&
1292 ie->name_state != NAME_PENDING) {
1293 ie->name_state = NAME_KNOWN;
1294 list_del(&ie->list);
1297 memcpy(&ie->data, data, sizeof(*data));
1298 ie->timestamp = jiffies;
1299 cache->timestamp = jiffies;
1301 if (ie->name_state == NAME_NOT_KNOWN)
1302 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Serialize up to @num cache entries into @buf as inquiry_info
 * records; returns the number copied.  The num-limit check, info
 * increment and copied counter lines are missing from this dump. */
1308 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1310 struct discovery_state *cache = &hdev->discovery;
1311 struct inquiry_info *info = (struct inquiry_info *) buf;
1312 struct inquiry_entry *e;
1315 list_for_each_entry(e, &cache->all, all) {
1316 struct inquiry_data *data = &e->data;
1321 bacpy(&info->bdaddr, &data->bdaddr);
1322 info->pscan_rep_mode = data->pscan_rep_mode;
1323 info->pscan_period_mode = data->pscan_period_mode;
1324 info->pscan_mode = data->pscan_mode;
1325 memcpy(info->dev_class, data->dev_class, 3);
1326 info->clock_offset = data->clock_offset;
1332 BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder: queue HCI_Inquiry with LAP/length/num_rsp taken
 * from the hci_inquiry_req passed through @opt; bails out when an
 * inquiry is already in progress. */
1336 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1338 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1339 struct hci_dev *hdev = req->hdev;
1340 struct hci_cp_inquiry cp;
1342 BT_DBG("%s", hdev->name);
1344 if (test_bit(HCI_INQUIRY, &hdev->flags))
1348 memcpy(&cp.lap, &ir->lap, 3);
1349 cp.length = ir->length;
1350 cp.num_rsp = ir->num_rsp;
1351 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1354 #ifdef CONFIG_TIZEN_WIP
/* Tizen-only wait_on_bit action for the old kernel API: abort the
 * wait when a signal is pending. */
1355 static int wait_inquiry(void *word)
1358 return signal_pending(current);
/* HCIINQUIRY ioctl handler: validate the device state, optionally flush a
 * stale inquiry cache, run a synchronous inquiry, wait for it to finish,
 * then copy the cached results back to userspace.
 * Returns 0 on success or a negative errno (-EFAULT, -EBUSY, -EOPNOTSUPP,
 * -EINTR, ...). NOTE(review): many interior lines (error-path gotos,
 * hci_dev_lock calls, buffer declarations, cleanup/put) are elided in this
 * excerpt.
 */
1362 int hci_inquiry(void __user *arg)
1364 __u8 __user *ptr = arg;
1365 struct hci_inquiry_req ir;
1366 struct hci_dev *hdev;
1367 int err = 0, do_inquiry = 0, max_rsp;
1371 if (copy_from_user(&ir, ptr, sizeof(ir)))
1374 hdev = hci_dev_get(ir.dev_id);
/* Reject devices in states where a legacy ioctl inquiry is not valid. */
1378 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1383 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1388 if (hdev->dev_type != HCI_BREDR) {
1393 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Flush and re-run the inquiry when the cache is stale, empty, or the
 * caller explicitly asked for a flush.
 */
1399 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1400 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1401 hci_inquiry_cache_flush(hdev);
1404 hci_dev_unlock(hdev);
/* ir.length is in units of 1.28s per spec; 2000ms here adds headroom. */
1406 timeo = ir.length * msecs_to_jiffies(2000);
1409 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1414 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1415 * cleared). If it is interrupted by a signal, return -EINTR.
1417 #ifdef CONFIG_TIZEN_WIP
1418 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1419 TASK_INTERRUPTIBLE))
1421 /* Signature of the function "wait_on_bit" is changed in latest kernel.
1422 * So, if the kernel is migrated to latest, the code below should be enabled
1424 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1425 TASK_INTERRUPTIBLE))
1430 /* for unlimited number of responses we will use buffer with
1433 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1435 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1436 * copy it to the user space.
1438 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1445 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1446 hci_dev_unlock(hdev);
1448 BT_DBG("num_rsp %d", ir.num_rsp);
/* Copy the header first; only then the variable-length result array. */
1450 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1452 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path for an HCI device: run rfkill/address sanity checks,
 * call the driver's open() and setup() callbacks, perform HCI init, and on
 * success set HCI_UP and notify mgmt. On any init failure, flush work
 * queues, purge queues, drop the last sent command and close the transport.
 * Returns 0 on success or a negative errno.
 * NOTE(review): interior lines (hci_req_lock, several gotos/returns, the
 * driver close() call on the error path) are elided in this excerpt.
 */
1465 static int hci_dev_do_open(struct hci_dev *hdev)
1469 BT_DBG("%s %p", hdev->name, hdev);
1473 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1478 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1479 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1480 /* Check for rfkill but allow the HCI setup stage to
1481 * proceed (which in itself doesn't cause any RF activity).
1483 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1488 /* Check for valid public address or a configured static
1489 * random address, but let the HCI setup proceed to
1490 * be able to determine if there is a public address
1493 * In case of user channel usage, it is not important
1494 * if a public address or static random address is
1497 * This check is only valid for BR/EDR controllers
1498 * since AMP controllers do not have an address.
1500 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1501 hdev->dev_type == HCI_BREDR &&
1502 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1503 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1504 ret = -EADDRNOTAVAIL;
/* Already up: nothing to do. */
1509 if (test_bit(HCI_UP, &hdev->flags)) {
/* Driver transport open; failure aborts the whole bring-up. */
1514 if (hdev->open(hdev)) {
1519 atomic_set(&hdev->cmd_cnt, 1);
1520 set_bit(HCI_INIT, &hdev->flags);
1522 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1524 ret = hdev->setup(hdev);
1526 /* The transport driver can set these quirks before
1527 * creating the HCI device or in its setup callback.
1529 * In case any of them is set, the controller has to
1530 * start up as unconfigured.
1532 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1533 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1534 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
1536 /* For an unconfigured controller it is required to
1537 * read at least the version information provided by
1538 * the Read Local Version Information command.
1540 * If the set_bdaddr driver callback is provided, then
1541 * also the original Bluetooth public device address
1542 * will be read using the Read BD Address command.
1544 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
1545 ret = __hci_unconf_init(hdev);
1548 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
1549 /* If public address change is configured, ensure that
1550 * the address gets programmed. If the driver does not
1551 * support changing the public address, fail the power
1554 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1556 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1558 ret = -EADDRNOTAVAIL;
/* Full HCI init only for configured, non-user-channel devices. */
1562 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1563 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1564 ret = __hci_init(hdev);
1567 clear_bit(HCI_INIT, &hdev->flags);
/* Success: mark the device up and inform listeners/mgmt. */
1571 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1572 set_bit(HCI_UP, &hdev->flags);
1573 hci_notify(hdev, HCI_DEV_UP);
1574 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1575 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
1576 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1577 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1578 hdev->dev_type == HCI_BREDR) {
1580 mgmt_powered(hdev, 1);
1581 hci_dev_unlock(hdev);
1584 /* Init failed, cleanup */
1585 flush_work(&hdev->tx_work);
1586 flush_work(&hdev->cmd_work);
1587 flush_work(&hdev->rx_work);
1589 skb_queue_purge(&hdev->cmd_q);
1590 skb_queue_purge(&hdev->rx_q);
1595 if (hdev->sent_cmd) {
1596 kfree_skb(hdev->sent_cmd);
1597 hdev->sent_cmd = NULL;
/* Keep only HCI_RAW; every other runtime flag is reset on failure. */
1601 hdev->flags &= BIT(HCI_RAW);
1605 hci_req_unlock(hdev);
1609 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry: resolve the device, reject normal power-up of
 * unconfigured devices, cancel a pending auto-power-off, wait for setup
 * to finish, then delegate to hci_dev_do_open().
 * NOTE(review): the error-path goto labels and hci_dev_put are elided in
 * this excerpt.
 */
1611 int hci_dev_open(__u16 dev)
1613 struct hci_dev *hdev;
1616 hdev = hci_dev_get(dev);
1620 /* Devices that are marked as unconfigured can only be powered
1621 * up as user channel. Trying to bring them up as normal devices
1622 * will result into a failure. Only user channel operation is
1625 * When this function is called for a user channel, the flag
1626 * HCI_USER_CHANNEL will be set first before attempting to
1629 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1630 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1635 /* We need to ensure that no other power on/off work is pending
1636 * before proceeding to call hci_dev_do_open. This is
1637 * particularly important if the setup procedure has not yet
1640 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1641 cancel_delayed_work(&hdev->power_off);
1643 /* After this call it is guaranteed that the setup procedure
1644 * has finished. This means that error conditions like RFKILL
1645 * or no valid public or static random address apply.
1647 flush_workqueue(hdev->req_workqueue);
1649 /* For controllers not using the management interface and that
1650 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1651 * so that pairing works for them. Once the management interface
1652 * is in use this bit will be cleared again and userspace has
1653 * to explicitly enable it.
1655 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1656 !test_bit(HCI_MGMT, &hdev->dev_flags))
1657 set_bit(HCI_BONDABLE, &hdev->dev_flags);
1659 err = hci_dev_do_open(hdev);
1666 /* This function requires the caller holds hdev->lock */
/* Drop and release any connection attached to each LE connection
 * parameter entry, then remove the entry from the pending-action list.
 * NOTE(review): the 'if (p->conn)' guard and conn = NULL reset appear
 * elided in this excerpt.
 */
1667 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1669 struct hci_conn_params *p;
1671 list_for_each_entry(p, &hdev->le_conn_params, list) {
1673 hci_conn_drop(p->conn);
1674 hci_conn_put(p->conn);
1677 list_del_init(&p->action);
1680 BT_DBG("All LE pending actions cleared");
/* Core power-off path: cancel pending work, flush RX/TX work, clear
 * discoverable state, flush caches and connections, optionally issue an
 * HCI Reset (quirk-dependent), purge all queues, drop the last sent
 * command, reset runtime flags and close the driver transport.
 * NOTE(review): interior lines (hci_req_lock, hci_dev_lock, the driver
 * flush()/close() calls, return statements) are elided in this excerpt.
 */
1683 static int hci_dev_do_close(struct hci_dev *hdev)
1685 BT_DBG("%s %p", hdev->name, hdev);
1687 cancel_delayed_work(&hdev->power_off);
1689 hci_req_cancel(hdev, ENODEV);
/* Not up: only the command timer needs cancelling. */
1692 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1693 cancel_delayed_work_sync(&hdev->cmd_timer);
1694 hci_req_unlock(hdev);
1698 /* Flush RX and TX works */
1699 flush_work(&hdev->tx_work);
1700 flush_work(&hdev->rx_work);
1702 if (hdev->discov_timeout > 0) {
1703 cancel_delayed_work(&hdev->discov_off);
1704 hdev->discov_timeout = 0;
1705 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1706 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1709 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1710 cancel_delayed_work(&hdev->service_cache);
1712 cancel_delayed_work_sync(&hdev->le_scan_disable);
1714 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1715 cancel_delayed_work_sync(&hdev->rpa_expired);
1717 /* Avoid potential lockdep warnings from the *_flush() calls by
1718 * ensuring the workqueue is empty up front.
1720 drain_workqueue(hdev->workqueue);
/* Only signal powered-off to mgmt when this is a real power-off,
 * not the auto-off timer firing.
 */
1724 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1725 if (hdev->dev_type == HCI_BREDR)
1726 mgmt_powered(hdev, 0);
1729 hci_inquiry_cache_flush(hdev);
1730 hci_pend_le_actions_clear(hdev);
1731 hci_conn_hash_flush(hdev);
1732 hci_dev_unlock(hdev);
1734 hci_notify(hdev, HCI_DEV_DOWN);
1740 skb_queue_purge(&hdev->cmd_q);
1741 atomic_set(&hdev->cmd_cnt, 1);
/* Some controllers need an explicit HCI Reset before close. */
1742 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1743 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
1744 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1745 set_bit(HCI_INIT, &hdev->flags);
1746 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1747 clear_bit(HCI_INIT, &hdev->flags);
1750 /* flush cmd work */
1751 flush_work(&hdev->cmd_work);
1754 skb_queue_purge(&hdev->rx_q);
1755 skb_queue_purge(&hdev->cmd_q);
1756 skb_queue_purge(&hdev->raw_q);
1758 /* Drop last sent command */
1759 if (hdev->sent_cmd) {
1760 cancel_delayed_work_sync(&hdev->cmd_timer);
1761 kfree_skb(hdev->sent_cmd);
1762 hdev->sent_cmd = NULL;
1765 kfree_skb(hdev->recv_evt);
1766 hdev->recv_evt = NULL;
1768 /* After this point our queues are empty
1769 * and no tasks are scheduled. */
/* Keep only HCI_RAW; persistent dev_flags survive a power cycle. */
1773 hdev->flags &= BIT(HCI_RAW);
1774 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1776 /* Controller radio is available but is currently powered down */
1777 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1779 memset(hdev->eir, 0, sizeof(hdev->eir));
1780 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1781 bacpy(&hdev->random_addr, BDADDR_ANY);
1783 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl entry: resolve the device, reject user-channel
 * devices, cancel a pending auto-power-off and delegate to
 * hci_dev_do_close(). NOTE(review): error paths / hci_dev_put elided.
 */
1789 int hci_dev_close(__u16 dev)
1791 struct hci_dev *hdev;
1794 hdev = hci_dev_get(dev);
1798 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1803 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1804 cancel_delayed_work(&hdev->power_off);
1806 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: purge queues, drain the workqueue, flush caches and
 * connections, reset flow-control counters and issue a synchronous HCI
 * Reset. Fails for devices that are down, user-channel or unconfigured.
 * NOTE(review): lock/unlock calls, gotos and the return value are elided
 * in this excerpt.
 */
1813 int hci_dev_reset(__u16 dev)
1815 struct hci_dev *hdev;
1818 hdev = hci_dev_get(dev);
1824 if (!test_bit(HCI_UP, &hdev->flags)) {
1829 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1834 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1840 skb_queue_purge(&hdev->rx_q);
1841 skb_queue_purge(&hdev->cmd_q);
1843 /* Avoid potential lockdep warnings from the *_flush() calls by
1844 * ensuring the workqueue is empty up front.
1846 drain_workqueue(hdev->workqueue);
1849 hci_inquiry_cache_flush(hdev);
1850 hci_conn_hash_flush(hdev);
1851 hci_dev_unlock(hdev);
/* Reset command credit and per-transport packet counters. */
1856 atomic_set(&hdev->cmd_cnt, 1);
1857 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1859 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1862 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's statistics counters. Rejected for
 * user-channel and unconfigured devices. NOTE(review): error assignments
 * and hci_dev_put are elided in this excerpt.
 */
1867 int hci_dev_reset_stat(__u16 dev)
1869 struct hci_dev *hdev;
1872 hdev = hci_dev_get(dev);
1876 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1881 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1886 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a legacy HCISETSCAN change into the mgmt-visible CONNECTABLE /
 * DISCOVERABLE flags and, if mgmt is in use and something changed,
 * refresh advertising data and emit New Settings.
 * NOTE(review): 'else' lines and the hci_dev_lock/unlock pairing are
 * elided in this excerpt.
 */
1893 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1895 bool conn_changed, discov_changed;
1897 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1899 if ((scan & SCAN_PAGE))
1900 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1903 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1906 if ((scan & SCAN_INQUIRY)) {
1907 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
1910 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1911 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
/* Nothing to propagate unless the management interface is active. */
1915 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1918 if (conn_changed || discov_changed) {
1919 /* In case this was disabled through mgmt */
1920 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1922 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1923 mgmt_update_adv_data(hdev);
1925 mgmt_new_settings(hdev);
/* Dispatcher for legacy HCISET* ioctls (auth, encrypt, scan, link policy,
 * link mode, packet type, ACL/SCO MTU). Copies a hci_dev_req from
 * userspace and applies the requested setting, rejecting user-channel,
 * unconfigured, non-BR/EDR and BR/EDR-disabled devices.
 * NOTE(review): the switch statement itself, several case labels, error
 * returns and hci_dev_put are elided in this excerpt.
 */
1929 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1931 struct hci_dev *hdev;
1932 struct hci_dev_req dr;
1935 if (copy_from_user(&dr, arg, sizeof(dr)))
1938 hdev = hci_dev_get(dr.dev_id);
1942 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1947 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1952 if (hdev->dev_type != HCI_BREDR) {
1957 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* HCISETAUTH */
1964 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
/* HCISETENCRYPT: encryption requires the controller capability and
 * authentication to already be enabled.
 */
1969 if (!lmp_encrypt_capable(hdev)) {
1974 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1975 /* Auth must be enabled first */
1976 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1982 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
/* HCISETSCAN */
1987 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1990 /* Ensure that the connectable and discoverable states
1991 * get correctly modified as this was a non-mgmt change.
1994 hci_update_scan_state(hdev, dr.dev_opt);
/* HCISETLINKPOL */
1998 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2002 case HCISETLINKMODE:
2003 hdev->link_mode = ((__u16) dr.dev_opt) &
2004 (HCI_LM_MASTER | HCI_LM_ACCEPT);
/* HCISETPTYPE */
2008 hdev->pkt_type = (__u16) dr.dev_opt;
/* HCISETACLMTU: dev_opt packs MTU in the high half, pkts in the low. */
2012 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2013 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCISETSCOMTU: same packing as ACL. */
2017 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2018 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: read the requested device count from userspace,
 * allocate a bounded response buffer, fill it with (dev_id, flags) pairs
 * for every registered device, and copy it back.
 * NOTE(review): dev_num declaration, -EFAULT/-EINVAL/-ENOMEM returns and
 * the loop's count/limit handling are elided in this excerpt.
 */
2031 int hci_get_dev_list(void __user *arg)
2033 struct hci_dev *hdev;
2034 struct hci_dev_list_req *dl;
2035 struct hci_dev_req *dr;
2036 int n = 0, size, err;
2039 if (get_user(dev_num, (__u16 __user *) arg))
/* Cap the request so the allocation stays within two pages. */
2042 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2045 size = sizeof(*dl) + dev_num * sizeof(*dr);
2047 dl = kzalloc(size, GFP_KERNEL);
2053 read_lock(&hci_dev_list_lock);
2054 list_for_each_entry(hdev, &hci_dev_list, list) {
2055 unsigned long flags = hdev->flags;
2057 /* When the auto-off is configured it means the transport
2058 * is running, but in that case still indicate that the
2059 * device is actually down.
2061 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2062 flags &= ~BIT(HCI_UP);
2064 (dr + n)->dev_id = hdev->id;
2065 (dr + n)->dev_opt = flags;
2070 read_unlock(&hci_dev_list_lock);
/* Shrink to the number of devices actually found. */
2073 size = sizeof(*dl) + n * sizeof(*dr);
2075 err = copy_to_user(arg, dl, size);
2078 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a hci_dev_info snapshot (name, address, bus
 * type, flags, MTUs, link policy/mode, stats, features) for the requested
 * device and copy it to userspace. For LE-only controllers the ACL fields
 * carry the LE buffer parameters. NOTE(review): 'else' lines, di.flags
 * assignment and the -EFAULT/return path are elided in this excerpt.
 */
2083 int hci_get_dev_info(void __user *arg)
2085 struct hci_dev *hdev;
2086 struct hci_dev_info di;
2087 unsigned long flags;
2088 if (copy_from_user(&di, arg, sizeof(di)))
2091 hdev = hci_dev_get(di.dev_id);
2095 /* When the auto-off is configured it means the transport
2096 * is running, but in that case still indicate that the
2097 * device is actually down.
2099 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2100 flags = hdev->flags & ~BIT(HCI_UP);
2102 flags = hdev->flags;
2104 strcpy(di.name, hdev->name);
2105 di.bdaddr = hdev->bdaddr;
/* Low nibble: bus type; bits 4-5: device type. */
2106 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2108 di.pkt_type = hdev->pkt_type;
2109 if (lmp_bredr_capable(hdev)) {
2110 di.acl_mtu = hdev->acl_mtu;
2111 di.acl_pkts = hdev->acl_pkts;
2112 di.sco_mtu = hdev->sco_mtu;
2113 di.sco_pkts = hdev->sco_pkts;
/* LE-only: report LE buffer info in the ACL fields. */
2115 di.acl_mtu = hdev->le_mtu;
2116 di.acl_pkts = hdev->le_pkts;
2120 di.link_policy = hdev->link_policy;
2121 di.link_mode = hdev->link_mode;
2123 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2124 memcpy(&di.features, &hdev->features, sizeof(di.features));
2126 if (copy_to_user(arg, &di, sizeof(di)))
2134 /* ---- Interface to HCI drivers ---- */
/* rfkill_ops.set_block callback: on block, flag the device RFKILLED and
 * close it unless setup/config is still running; on unblock, clear the
 * flag. User-channel devices are exempt. NOTE(review): the 'if (blocked)'
 * / 'else' framing and return statements are elided in this excerpt.
 */
2136 static int hci_rfkill_set_block(void *data, bool blocked)
2138 struct hci_dev *hdev = data;
2140 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2142 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2146 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2147 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2148 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2149 hci_dev_do_close(hdev);
2151 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table registered for each HCI device. */
2157 static const struct rfkill_ops hci_rfkill_ops = {
2158 .set_block = hci_rfkill_set_block,
/* Deferred power-on work item: open the device, report a power-on failure
 * to mgmt, re-check conditions ignored during setup (rfkill, missing
 * address) and power back off if they still hold, arm the auto-off timer,
 * and finally clear SETUP/CONFIG while emitting the appropriate mgmt
 * Index Added event. NOTE(review): several gotos, lock calls and the
 * 'return' after the error branch are elided in this excerpt.
 */
2161 static void hci_power_on(struct work_struct *work)
2163 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2166 BT_DBG("%s", hdev->name);
2168 err = hci_dev_do_open(hdev);
2171 mgmt_set_powered_failed(hdev, err);
2172 hci_dev_unlock(hdev);
2176 /* During the HCI setup phase, a few error conditions are
2177 * ignored and they need to be checked now. If they are still
2178 * valid, it is important to turn the device back off.
2180 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2181 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2182 (hdev->dev_type == HCI_BREDR &&
2183 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2184 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2185 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2186 hci_dev_do_close(hdev);
2187 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2188 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2189 HCI_AUTO_OFF_TIMEOUT);
2192 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2193 /* For unconfigured devices, set the HCI_RAW flag
2194 * so that userspace can easily identify them.
2196 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2197 set_bit(HCI_RAW, &hdev->flags);
2199 /* For fully configured devices, this will send
2200 * the Index Added event. For unconfigured devices,
2201 * it will send Unconfigured Index Added event.
2203 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2204 * and no event will be sent.
2206 mgmt_index_added(hdev);
2207 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2208 /* When the controller is now configured, then it
2209 * is important to clear the HCI_RAW flag.
2211 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2212 clear_bit(HCI_RAW, &hdev->flags);
2214 /* Powering on the controller with HCI_CONFIG set only
2215 * happens with the transition from unconfigured to
2216 * configured. This will send the Index Added event.
2218 mgmt_index_added(hdev);
/* Deferred power-off work item: close the device and tear down SMP. */
2222 static void hci_power_off(struct work_struct *work)
2224 struct hci_dev *hdev = container_of(work, struct hci_dev,
2227 BT_DBG("%s", hdev->name);
2229 hci_dev_do_close(hdev);
2231 smp_unregister(hdev);
/* Delayed work: the discoverable timeout expired — let mgmt turn
 * discoverability off.
 */
2234 static void hci_discov_off(struct work_struct *work)
2236 struct hci_dev *hdev;
2238 hdev = container_of(work, struct hci_dev, discov_off.work);
2240 BT_DBG("%s", hdev->name);
2242 mgmt_discoverable_timeout(hdev);
/* Remove and free every registered service UUID on the device.
 * NOTE(review): the kfree(uuid) line is elided in this excerpt.
 */
2245 void hci_uuids_clear(struct hci_dev *hdev)
2247 struct bt_uuid *uuid, *tmp;
2249 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2250 list_del(&uuid->list);
/* RCU-safely unlink and free all stored BR/EDR link keys. */
2255 void hci_link_keys_clear(struct hci_dev *hdev)
2257 struct link_key *key;
2259 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2260 list_del_rcu(&key->list);
2261 kfree_rcu(key, rcu);
/* RCU-safely unlink all stored SMP Long Term Keys.
 * NOTE(review): the kfree_rcu of each entry is elided in this excerpt.
 */
2265 void hci_smp_ltks_clear(struct hci_dev *hdev)
2269 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2270 list_del_rcu(&k->list);
/* RCU-safely unlink all stored Identity Resolving Keys.
 * NOTE(review): the kfree_rcu of each entry is elided in this excerpt.
 */
2275 void hci_smp_irks_clear(struct hci_dev *hdev)
2279 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2280 list_del_rcu(&k->list);
/* Look up the stored BR/EDR link key for 'bdaddr'; NULL if none.
 * NOTE(review): rcu_read_lock/unlock and the return lines are elided.
 */
2285 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2290 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2291 if (bacmp(bdaddr, &k->bdaddr) == 0) {
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and the local/remote authentication requirements
 * of the pairing. The Tizen patch additionally persists authenticated
 * P192 combination keys. NOTE(review): the true/false returns on each
 * branch and the final return are elided in this excerpt.
 */
2301 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2302 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) from pre-SSP pairing. */
2305 if (key_type < 0x03)
2308 /* Debug keys are insecure so don't store them persistently */
2309 if (key_type == HCI_LK_DEBUG_COMBINATION)
2312 /* Changed combination key and there's no previous one */
2313 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2316 /* Security mode 3 case */
2320 /* BR/EDR key derived using SC from an LE link */
2321 if (conn->type == LE_LINK)
2324 /* Neither local nor remote side had no-bonding as requirement */
2325 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2328 /* Local side had dedicated bonding as requirement */
2329 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2332 /* Remote side had dedicated bonding as requirement */
2333 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2336 #ifdef CONFIG_TIZEN_WIP
2337 /* In case of auth_type '0x01'. It is authenticated by MITM, so store it */
2338 if (key_type == HCI_LK_AUTH_COMBINATION_P192)
2341 /* If none of the above criteria match, then don't store the key
/* Map an SMP LTK type to the HCI connection role it applies to:
 * a plain SMP_LTK is used when we are master, otherwise slave.
 */
2346 static u8 ltk_role(u8 type)
2348 if (type == SMP_LTK)
2349 return HCI_ROLE_MASTER;
2351 return HCI_ROLE_SLAVE;
/* Find the Long Term Key matching address, address type and role. Secure
 * Connections LTKs match either role. NOTE(review): rcu locking, the
 * 'continue', and the return statements are elided in this excerpt.
 */
2354 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2355 u8 addr_type, u8 role)
2360 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2361 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2364 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
/* Resolve a Resolvable Private Address to its IRK. First try a cached
 * exact RPA match, then cryptographically test each stored IRK and cache
 * the RPA on a hit. NOTE(review): rcu locking and the return statements
 * are elided in this excerpt.
 */
2374 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2376 struct smp_irk *irk;
2379 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2380 if (!bacmp(&irk->rpa, rpa)) {
2386 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2387 if (smp_irk_matches(hdev, irk->val, rpa)) {
2388 bacpy(&irk->rpa, rpa);
/* Look up an IRK by identity address and address type. Random addresses
 * must be static random (top two bits 0b11) to qualify as identities.
 * NOTE(review): rcu locking and return statements are elided.
 */
2398 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401 struct smp_irk *irk;
2403 /* Identity Address must be public or static random */
2404 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2408 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2409 if (addr_type == irk->addr_type &&
2410 bacmp(bdaddr, &irk->bdaddr) == 0) {
/* Store (or update) a BR/EDR link key for 'bdaddr'. Reuses an existing
 * entry when present, otherwise allocates and links a new one. Works
 * around controllers that report a "changed combination" key for fresh
 * legacy pairings. On success *persistent tells the caller whether the
 * key should be kept across power cycles. NOTE(review): the old-key reuse
 * branch, NULL-check after kzalloc, key->type assignment and return are
 * elided in this excerpt.
 */
2420 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2421 bdaddr_t *bdaddr, u8 *val, u8 type,
2422 u8 pin_len, bool *persistent)
2424 struct link_key *key, *old_key;
2427 old_key = hci_find_link_key(hdev, bdaddr);
2429 old_key_type = old_key->type;
/* No previous key: 0xff means "unknown old type". */
2432 old_key_type = conn ? conn->key_type : 0xff;
2433 key = kzalloc(sizeof(*key), GFP_KERNEL);
2436 list_add_rcu(&key->list, &hdev->link_keys);
2439 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2441 /* Some buggy controller combinations generate a changed
2442 * combination key for legacy pairing even when there's no
2444 if (type == HCI_LK_CHANGED_COMBINATION &&
2445 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2446 type = HCI_LK_COMBINATION;
2448 conn->key_type = type;
2451 bacpy(&key->bdaddr, bdaddr);
2452 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2453 key->pin_len = pin_len;
2455 if (type == HCI_LK_CHANGED_COMBINATION)
2456 key->type = old_key_type;
2461 *persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP Long Term Key for the given identity address.
 * Reuses a matching existing entry (same address/type/role) or allocates
 * a new one. NOTE(review): the old-key reuse branch, NULL-check, type/
 * rand/ediv assignments and return are elided in this excerpt.
 */
2467 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2468 u8 addr_type, u8 type, u8 authenticated,
2469 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2471 struct smp_ltk *key, *old_key;
2472 u8 role = ltk_role(type);
2474 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2478 key = kzalloc(sizeof(*key), GFP_KERNEL);
2481 list_add_rcu(&key->list, &hdev->long_term_keys);
2484 bacpy(&key->bdaddr, bdaddr);
2485 key->bdaddr_type = addr_type;
2486 memcpy(key->val, tk, sizeof(key->val));
2487 key->authenticated = authenticated;
2490 key->enc_size = enc_size;
/* Store (or update) an Identity Resolving Key for the given identity
 * address, recording the last known RPA alongside it. NOTE(review): the
 * lookup-hit branch, NULL-check after kzalloc and the return are elided.
 */
2496 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2497 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2499 struct smp_irk *irk;
2501 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2503 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2507 bacpy(&irk->bdaddr, bdaddr);
2508 irk->addr_type = addr_type;
2510 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2513 memcpy(irk->val, val, 16);
2514 bacpy(&irk->rpa, rpa);
/* Delete the stored BR/EDR link key for 'bdaddr'; 0 on success.
 * NOTE(review): the -ENOENT return for a missing key is elided.
 */
2519 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2521 struct link_key *key;
2523 key = hci_find_link_key(hdev, bdaddr);
2527 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2529 list_del_rcu(&key->list);
2530 kfree_rcu(key, rcu);
/* Delete all Long Term Keys matching address and address type; returns
 * 0 if at least one was removed, -ENOENT otherwise. NOTE(review): the
 * 'continue', kfree_rcu and removed++ lines are elided in this excerpt.
 */
2535 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2540 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2541 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2544 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2546 list_del_rcu(&k->list);
2551 return removed ? 0 : -ENOENT;
/* Delete the Identity Resolving Key(s) matching address and address
 * type. NOTE(review): the 'continue' and kfree_rcu lines are elided.
 */
2554 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2558 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2559 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2562 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2564 list_del_rcu(&k->list);
2569 #ifdef CONFIG_TIZEN_WIP
/* Tizen extension: toggle per-device RPA resolution support on the IRK
 * entry for the given identity address. NOTE(review): the error return
 * for a missing IRK and the success return are elided in this excerpt.
 */
2570 int hci_set_rpa_res_support(struct hci_dev *hdev, bdaddr_t *bdaddr,
2571 u8 addr_type, u8 enabled)
2573 struct smp_irk *irk;
2575 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2579 irk->rpa_res_support = enabled;
2584 /* Timeout Error Event is being handled */
/* Tizen extension: log a hardware TX timeout and report it to mgmt. */
2585 static void hci_tx_timeout_error_evt(struct hci_dev *hdev)
2587 BT_ERR("%s H/W TX Timeout error", hdev->name);
2589 mgmt_tx_timeout_error(hdev);
2593 /* HCI command timer function */
/* Fires when the controller fails to answer the last HCI command in
 * time: log the stuck opcode (if known), raise the Tizen TX-timeout
 * event, restore the command credit and kick the command work so queued
 * commands can proceed. NOTE(review): the 'else' framing around the two
 * BT_ERR paths is elided in this excerpt.
 */
2594 static void hci_cmd_timeout(struct work_struct *work)
2596 struct hci_dev *hdev = container_of(work, struct hci_dev,
2599 if (hdev->sent_cmd) {
2600 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2601 u16 opcode = __le16_to_cpu(sent->opcode);
2603 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2605 BT_ERR("%s command tx timeout", hdev->name);
2608 #ifdef CONFIG_TIZEN_WIP
2609 hci_tx_timeout_error_evt(hdev);
/* Unblock the command queue: pretend one credit is available again. */
2611 atomic_set(&hdev->cmd_cnt, 1);
2612 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Find stored remote Out-Of-Band pairing data for (bdaddr, bdaddr_type);
 * NULL when absent. NOTE(review): the 'continue'/return lines are elided.
 */
2615 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2616 bdaddr_t *bdaddr, u8 bdaddr_type)
2618 struct oob_data *data;
2620 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2621 if (bacmp(bdaddr, &data->bdaddr) != 0)
2623 if (data->bdaddr_type != bdaddr_type)
/* Delete stored remote OOB data for the given address; 0 on success.
 * NOTE(review): the -ENOENT path and kfree(data) are elided.
 */
2631 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2634 struct oob_data *data;
2636 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2640 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2642 list_del(&data->list);
/* Remove and free every stored remote OOB data entry.
 * NOTE(review): the kfree(data) line is elided in this excerpt.
 */
2648 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2650 struct oob_data *data, *n;
2652 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2653 list_del(&data->list);
/* Store (or update) remote OOB pairing data. Either the P-192 or P-256
 * hash/randomizer pair may be NULL, in which case that half is zeroed.
 * NOTE(review): the ENOMEM check after kmalloc and the return statement
 * are elided in this excerpt.
 */
2658 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2659 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2660 u8 *hash256, u8 *rand256)
2662 struct oob_data *data;
2664 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2666 data = kmalloc(sizeof(*data), GFP_KERNEL);
2670 bacpy(&data->bdaddr, bdaddr);
2671 data->bdaddr_type = bdaddr_type;
2672 list_add(&data->list, &hdev->remote_oob_data);
2675 if (hash192 && rand192) {
2676 memcpy(data->hash192, hash192, sizeof(data->hash192));
2677 memcpy(data->rand192, rand192, sizeof(data->rand192));
2679 memset(data->hash192, 0, sizeof(data->hash192));
2680 memset(data->rand192, 0, sizeof(data->rand192));
2683 if (hash256 && rand256) {
2684 memcpy(data->hash256, hash256, sizeof(data->hash256));
2685 memcpy(data->rand256, rand256, sizeof(data->rand256));
2687 memset(data->hash256, 0, sizeof(data->hash256));
2688 memset(data->rand256, 0, sizeof(data->rand256));
2691 BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Linear search of a bdaddr_list for an (address, type) match; NULL when
 * not found. NOTE(review): the return lines are elided in this excerpt.
 */
2696 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2697 bdaddr_t *bdaddr, u8 type)
2699 struct bdaddr_list *b;
2701 list_for_each_entry(b, bdaddr_list, list) {
2702 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove and free every entry of a bdaddr_list.
 * NOTE(review): the list_del/kfree lines are elided in this excerpt.
 */
2709 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2711 struct list_head *p, *n;
2713 list_for_each_safe(p, n, bdaddr_list) {
2714 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add (bdaddr, type) to a bdaddr_list. Rejects the wildcard BDADDR_ANY
 * and duplicate entries. NOTE(review): the -EBADF/-EEXIST/-ENOMEM returns
 * are elided in this excerpt.
 */
2721 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2723 struct bdaddr_list *entry;
2725 if (!bacmp(bdaddr, BDADDR_ANY))
2728 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2731 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2735 bacpy(&entry->bdaddr, bdaddr);
2736 entry->bdaddr_type = type;
2738 list_add(&entry->list, list);
/* Delete one (bdaddr, type) entry from the list; BDADDR_ANY clears the
 * whole list. NOTE(review): the -ENOENT path, kfree and returns are
 * elided in this excerpt.
 */
2743 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2745 struct bdaddr_list *entry;
2747 if (!bacmp(bdaddr, BDADDR_ANY)) {
2748 hci_bdaddr_list_clear(list);
2752 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2756 list_del(&entry->list);
2762 /* This function requires the caller holds hdev->lock */
/* Find the LE connection parameters stored for an identity address;
 * NULL for non-identity addresses or when no entry exists.
 * NOTE(review): the return statements are elided in this excerpt.
 */
2763 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2764 bdaddr_t *addr, u8 addr_type)
2766 struct hci_conn_params *params;
2768 /* The conn params list only contains identity addresses */
2769 if (!hci_is_identity_address(addr, addr_type))
2772 list_for_each_entry(params, &hdev->le_conn_params, list) {
2773 if (bacmp(&params->addr, addr) == 0 &&
2774 params->addr_type == addr_type) {
2782 /* This function requires the caller holds hdev->lock */
/* Like hci_conn_params_lookup but walks a pending-action list (linked
 * through the 'action' member). NOTE(review): return statements elided.
 */
2783 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2784 bdaddr_t *addr, u8 addr_type)
2786 struct hci_conn_params *param;
2788 /* The list only contains identity addresses */
2789 if (!hci_is_identity_address(addr, addr_type))
2792 list_for_each_entry(param, list, action) {
2793 if (bacmp(&param->addr, addr) == 0 &&
2794 param->addr_type == addr_type)
2801 /* This function requires the caller holds hdev->lock */
/* Get-or-create the LE connection parameter entry for an identity
 * address, initialising a new entry from the controller defaults with
 * auto-connect disabled. NOTE(review): the existing-entry return and the
 * final return are elided in this excerpt.
 */
2802 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2803 bdaddr_t *addr, u8 addr_type)
2805 struct hci_conn_params *params;
2807 if (!hci_is_identity_address(addr, addr_type))
2810 params = hci_conn_params_lookup(hdev, addr, addr_type);
2814 params = kzalloc(sizeof(*params), GFP_KERNEL);
2816 BT_ERR("Out of memory");
2820 bacpy(&params->addr, addr);
2821 params->addr_type = addr_type;
2823 list_add(&params->list, &hdev->le_conn_params);
2824 INIT_LIST_HEAD(&params->action);
/* Seed per-connection values from the device-wide defaults. */
2826 params->conn_min_interval = hdev->le_conn_min_interval;
2827 params->conn_max_interval = hdev->le_conn_max_interval;
2828 params->conn_latency = hdev->le_conn_latency;
2829 params->supervision_timeout = hdev->le_supv_timeout;
2830 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2832 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Release one connection-parameter entry: drop/put any attached
 * connection, unlink it from both lists and free it.
 * NOTE(review): the 'if (params->conn)' guard and kfree are elided.
 */
2837 static void hci_conn_params_free(struct hci_conn_params *params)
2840 hci_conn_drop(params->conn);
2841 hci_conn_put(params->conn);
2844 list_del(&params->action);
2845 list_del(&params->list);
2849 /* This function requires the caller holds hdev->lock */
/* Delete the parameter entry for one identity address and refresh the
 * background scan, which may depend on the removed entry.
 * NOTE(review): the early return for a missing entry is elided.
 */
2850 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2852 struct hci_conn_params *params;
2854 params = hci_conn_params_lookup(hdev, addr, addr_type);
2858 hci_conn_params_free(params);
2860 hci_update_background_scan(hdev);
2862 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2865 /* This function requires the caller holds hdev->lock */
/* Drop every parameter entry whose auto-connect policy is DISABLED,
 * keeping entries that still drive automatic connections.
 * NOTE(review): the kfree of each removed entry is elided.
 */
2866 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2868 struct hci_conn_params *params, *tmp;
2870 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2871 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2873 list_del(&params->list);
2877 BT_DBG("All LE disabled connection parameters were removed");
2880 /* This function requires the caller holds hdev->lock */
/* Free every LE connection-parameter entry, then refresh the background
 * scan to reflect the now-empty list.
 */
2881 void hci_conn_params_clear_all(struct hci_dev *hdev)
2883 struct hci_conn_params *params, *tmp;
2885 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2886 hci_conn_params_free(params);
2888 hci_update_background_scan(hdev);
2890 BT_DBG("All LE connection parameters were removed");
/* Completion callback for the interleaved-discovery inquiry: on error,
 * log it and mark discovery stopped. NOTE(review): the status check and
 * hci_dev_lock are elided in this excerpt.
 */
2893 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2896 BT_ERR("Failed to start inquiry: status %d", status);
2899 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2900 hci_dev_unlock(hdev);
/* Runs after the LE scan has been disabled. For LE-only discovery the
 * procedure is done; for interleaved discovery a general (GIAC) BR/EDR
 * inquiry is started next. The Tizen build tracks LE discovery state
 * separately via hci_le_discovery_set_state().
 * NOTE(review): the status check, lock calls, 'break's and '#else'/'#endif'
 * framing are elided in this excerpt.
 */
2905 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2908 /* General inquiry access code (GIAC) */
2909 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2910 struct hci_request req;
2911 struct hci_cp_inquiry cp;
2915 BT_ERR("Failed to disable LE scanning: status %d", status);
2919 switch (hdev->discovery.type) {
2920 case DISCOV_TYPE_LE:
2922 #ifdef CONFIG_TIZEN_WIP
2923 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
2925 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2927 hci_dev_unlock(hdev);
2930 case DISCOV_TYPE_INTERLEAVED:
2931 hci_req_init(&req, hdev);
2933 memset(&cp, 0, sizeof(cp));
2934 memcpy(&cp.lap, lap, sizeof(cp.lap));
2935 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2936 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2940 hci_inquiry_cache_flush(hdev);
2942 err = hci_req_run(&req, inquiry_complete);
2944 BT_ERR("Inquiry request failed: err %d", err);
2945 #ifdef CONFIG_TIZEN_WIP
2946 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
2948 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2952 hci_dev_unlock(hdev);
/* Delayed-work handler (hdev->le_scan_disable): build and run a request
 * that turns LE scanning off; completion is handled by
 * le_scan_disable_work_complete().
 */
2957 static void le_scan_disable_work(struct work_struct *work)
2959 struct hci_dev *hdev = container_of(work, struct hci_dev,
2960 le_scan_disable.work);
2961 struct hci_request req;
2964 BT_DBG("%s", hdev->name);
2966 hci_req_init(&req, hdev);
2968 hci_req_add_le_scan_disable(&req);
2970 err = hci_req_run(&req, le_scan_disable_work_complete);
2972 BT_ERR("Disable LE scanning request failed: err %d", err);
2975 /* Copy the Identity Address of the controller.
2977 * If the controller has a public BD_ADDR, then by default use that one.
2978 * If this is a LE only controller without a public address, default to
2979 * the static random address.
2981 * For debugging purposes it is possible to force controllers with a
2982 * public address to use the static random address instead.
2984 * In case BR/EDR has been disabled on a dual-mode controller and
2985 * userspace has configured a static address, then that address
2986 * becomes the identity address instead of the public BR/EDR address.
/* Outputs via *bdaddr / *bdaddr_type: either the static random address
 * (ADDR_LE_DEV_RANDOM) or the public address (ADDR_LE_DEV_PUBLIC).
 */
2988 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2991 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
2992 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2993 (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
2994 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2995 bacpy(bdaddr, &hdev->static_addr);
2996 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2998 bacpy(bdaddr, &hdev->bdaddr);
2999 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3003 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev: default packet types and
 * timing parameters, all bookkeeping lists, work items, queues and the
 * command timer.  Returns the new device (NULL-check on kzalloc is
 * elided in this excerpt).  Note the Tizen block re-assigns the sniff
 * intervals set a few lines earlier.
 */
3004 struct hci_dev *hci_alloc_dev(void)
3006 struct hci_dev *hdev;
3008 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3012 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3013 hdev->esco_type = (ESCO_HV1);
3014 hdev->link_mode = (HCI_LM_ACCEPT);
3015 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3016 hdev->io_capability = 0x03; /* No Input No Output */
3017 hdev->manufacturer = 0xffff; /* Default to internal use */
3018 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3019 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3021 hdev->sniff_max_interval = 800;
3022 hdev->sniff_min_interval = 80;
3024 hdev->le_adv_channel_map = 0x07;
3025 hdev->le_adv_min_interval = 0x0800;
3026 hdev->le_adv_max_interval = 0x0800;
3028 #ifdef CONFIG_TIZEN_WIP
/* Tizen overrides: wider sniff window and automatic sniff after idle. */
3029 hdev->sniff_max_interval = 800;
3030 hdev->sniff_min_interval = 400;
3032 /* automatically enable sniff mode for connection */
3033 hdev->idle_timeout = TIZEN_SNIFF_TIMEOUT * 1000; /* 2 Second */
3035 hdev->adv_filter_policy = 0x00;
3036 hdev->adv_type = 0x00;
3038 hdev->le_scan_type = LE_SCAN_PASSIVE;
3039 hdev->le_scan_interval = 0x0060;
3040 hdev->le_scan_window = 0x0030;
3041 hdev->le_conn_min_interval = 0x0028;
3042 hdev->le_conn_max_interval = 0x0038;
3043 hdev->le_conn_latency = 0x0000;
3044 #ifdef CONFIG_TIZEN_WIP
3045 hdev->le_supv_timeout = 0x0258; /* 6000 msec */
3047 hdev->le_supv_timeout = 0x002a; /* 420 msec */
3049 hdev->le_def_tx_len = 0x001b;
3050 hdev->le_def_tx_time = 0x0148;
3051 hdev->le_max_tx_len = 0x001b;
3052 hdev->le_max_tx_time = 0x0148;
3053 hdev->le_max_rx_len = 0x001b;
3054 hdev->le_max_rx_time = 0x0148;
3056 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3057 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3058 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3059 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3061 mutex_init(&hdev->lock);
3062 mutex_init(&hdev->req_lock);
3064 INIT_LIST_HEAD(&hdev->mgmt_pending);
3065 INIT_LIST_HEAD(&hdev->blacklist);
3066 INIT_LIST_HEAD(&hdev->whitelist);
3067 INIT_LIST_HEAD(&hdev->uuids);
3068 INIT_LIST_HEAD(&hdev->link_keys);
3069 INIT_LIST_HEAD(&hdev->long_term_keys);
3070 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3071 INIT_LIST_HEAD(&hdev->remote_oob_data);
3072 INIT_LIST_HEAD(&hdev->le_white_list);
3073 INIT_LIST_HEAD(&hdev->le_conn_params);
3074 INIT_LIST_HEAD(&hdev->pend_le_conns);
3075 INIT_LIST_HEAD(&hdev->pend_le_reports);
3076 INIT_LIST_HEAD(&hdev->conn_hash.list);
3078 INIT_WORK(&hdev->rx_work, hci_rx_work);
3079 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3080 INIT_WORK(&hdev->tx_work, hci_tx_work);
3081 INIT_WORK(&hdev->power_on, hci_power_on);
3083 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3084 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3085 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3087 skb_queue_head_init(&hdev->rx_q);
3088 skb_queue_head_init(&hdev->cmd_q);
3089 skb_queue_head_init(&hdev->raw_q);
3091 init_waitqueue_head(&hdev->req_wait_q);
3093 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3095 hci_init_sysfs(hdev);
3096 discovery_init(hdev);
3100 EXPORT_SYMBOL(hci_alloc_dev);
3102 /* Free HCI device */
/* Drop the last reference; the struct is actually freed by the device
 * release callback, not here.
 */
3103 void hci_free_dev(struct hci_dev *hdev)
3105 /* will free via device release */
3106 put_device(&hdev->dev);
3108 EXPORT_SYMBOL(hci_free_dev);
3110 #ifdef CONFIG_SLEEP_MONITOR
/* Packed status snapshot reported to the sleep monitor: one byte of
 * adapter flags plus up to three per-connection bytes (7-bit handle,
 * top bit set when the link is NOT in sniff mode).  The struct tag line
 * is elided in this excerpt.
 */
3112 unsigned char common;
3113 unsigned char device[3];
/* Sleep-monitor read callback: encode adapter flags and active
 * connection state into *raw_val and return a coarse activity state.
 * NOTE(review): the idx increment / loop-bound lines and the return
 * statements are elided here - confirm bounds of device[3] in the full
 * source.
 */
3116 int bt_get_sleep_monitor_cb(void *priv, unsigned int *raw_val,
3117 int check_level, int caller_type)
3120 struct hci_dev *hdev = (struct hci_dev*)priv;
3122 struct _sleep_pkt sleep_pkt = {0, };
3123 struct hci_conn *conn = NULL;
/* Bit-pack adapter-wide flags into sleep_pkt.common. */
3126 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags))
3127 sleep_pkt.common |= 0x01;
3129 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
3130 sleep_pkt.common |= 0x02;
3132 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3133 sleep_pkt.common |= 0x04;
3135 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3136 sleep_pkt.common |= 0x08;
3138 if (test_bit(HCI_ISCAN, &hdev->flags))
3139 sleep_pkt.common |= 0x10;
3141 if (test_bit(HCI_PSCAN, &hdev->flags))
3142 sleep_pkt.common |= 0x20;
3145 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
3147 hci_dev_unlock(hdev);
3151 sleep_pkt.device[idx] |= (0x7f & conn->handle);
3152 if (conn->mode != HCI_CM_SNIFF)
3153 sleep_pkt.device[idx] |= 0x80;
3157 hci_dev_unlock(hdev);
3160 memcpy(raw_val, &sleep_pkt, sizeof(unsigned int));
3162 if (test_bit(HCI_INQUIRY, &hdev->flags))
3165 if (test_bit(HCI_ISCAN, &hdev->flags) ||
3166 test_bit(HCI_PSCAN, &hdev->flags))
3181 BT_DBG("%s: check_level[%d], state[%d], raw[%X]\n", __func__,
3182 check_level, state, *raw_val);
/* Ops table registered with the sleep-monitor framework; only the read
 * callback is populated here.
 */
3187 static struct sleep_monitor_ops bt_sleep_monitor_ops = {
3188 .read_cb_func = bt_get_sleep_monitor_cb,
3192 /* Register HCI device */
/* Register a fully-allocated hci_dev with the core: allocate an index
 * (AMP devices never get index 0), create the two ordered workqueues,
 * debugfs dir, sysfs device and rfkill switch, add it to hci_dev_list
 * and kick the power-on work.  Error paths unwind via the labels at the
 * bottom (several goto lines are elided in this excerpt).
 */
3193 int hci_register_dev(struct hci_dev *hdev)
3197 if (!hdev->open || !hdev->close || !hdev->send)
3200 /* Do not allow HCI_AMP devices to register at index 0,
3201 * so the index can be used as the AMP controller ID.
3203 switch (hdev->dev_type) {
3205 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3208 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3217 sprintf(hdev->name, "hci%d", id);
3220 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Single-threaded (max_active=1) high-priority workqueues keep rx/tx
 * and request processing strictly ordered. */
3222 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3223 WQ_MEM_RECLAIM, 1, hdev->name);
3224 if (!hdev->workqueue) {
3229 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3230 WQ_MEM_RECLAIM, 1, hdev->name);
3231 if (!hdev->req_workqueue) {
3232 destroy_workqueue(hdev->workqueue);
3237 if (!IS_ERR_OR_NULL(bt_debugfs))
3238 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3240 dev_set_name(&hdev->dev, "%s", hdev->name);
3242 error = device_add(&hdev->dev);
/* rfkill registration is best-effort: on failure the device still
 * registers, just without an rfkill switch. */
3246 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3247 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3250 if (rfkill_register(hdev->rfkill) < 0) {
3251 rfkill_destroy(hdev->rfkill);
3252 hdev->rfkill = NULL;
3256 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3257 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3259 set_bit(HCI_SETUP, &hdev->dev_flags);
3260 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3262 if (hdev->dev_type == HCI_BREDR) {
3263 /* Assume BR/EDR support until proven otherwise (such as
3264 * through reading supported features during init.
3266 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3269 write_lock(&hci_dev_list_lock);
3270 list_add(&hdev->list, &hci_dev_list);
3271 write_unlock(&hci_dev_list_lock);
3273 /* Devices that are marked for raw-only usage are unconfigured
3274 * and should not be included in normal operation.
3276 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3277 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3279 hci_notify(hdev, HCI_DEV_REG);
3282 queue_work(hdev->req_workqueue, &hdev->power_on);
3284 #ifdef CONFIG_SLEEP_MONITOR
3285 sleep_monitor_register_ops(hdev, &bt_sleep_monitor_ops,
/* Error unwind labels (partially elided in this excerpt). */
3291 destroy_workqueue(hdev->workqueue);
3292 destroy_workqueue(hdev->req_workqueue);
3294 ida_simple_remove(&hci_index_ida, hdev->id);
3298 EXPORT_SYMBOL(hci_register_dev);
3300 /* Unregister HCI device */
/* Tear down a registered device: unlink from hci_dev_list, close it,
 * free reassembly buffers, notify mgmt (unless still in SETUP/CONFIG),
 * unregister rfkill/sysfs/debugfs, destroy workqueues and clear all
 * per-device key/address lists, finally releasing the index.
 */
3301 void hci_unregister_dev(struct hci_dev *hdev)
3305 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3307 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3311 write_lock(&hci_dev_list_lock);
3312 list_del(&hdev->list);
3313 write_unlock(&hci_dev_list_lock);
3315 hci_dev_do_close(hdev);
3317 for (i = 0; i < NUM_REASSEMBLY; i++)
3318 kfree_skb(hdev->reassembly[i]);
3320 cancel_work_sync(&hdev->power_on);
/* Only announce removal to mgmt for devices that completed setup. */
3322 if (!test_bit(HCI_INIT, &hdev->flags) &&
3323 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3324 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
3326 mgmt_index_removed(hdev);
3327 hci_dev_unlock(hdev);
3330 /* mgmt_index_removed should take care of emptying the
3332 BUG_ON(!list_empty(&hdev->mgmt_pending));
3334 hci_notify(hdev, HCI_DEV_UNREG);
3337 rfkill_unregister(hdev->rfkill);
3338 rfkill_destroy(hdev->rfkill);
3341 smp_unregister(hdev);
3343 device_del(&hdev->dev);
3345 debugfs_remove_recursive(hdev->debugfs);
3347 destroy_workqueue(hdev->workqueue);
3348 destroy_workqueue(hdev->req_workqueue);
/* Purge all persistent per-device state under hdev->lock (the matching
 * hci_dev_lock() call is elided in this excerpt). */
3351 hci_bdaddr_list_clear(&hdev->blacklist);
3352 hci_bdaddr_list_clear(&hdev->whitelist);
3353 hci_uuids_clear(hdev);
3354 hci_link_keys_clear(hdev);
3355 hci_smp_ltks_clear(hdev);
3356 hci_smp_irks_clear(hdev);
3357 hci_remote_oob_data_clear(hdev);
3358 hci_bdaddr_list_clear(&hdev->le_white_list);
3359 hci_conn_params_clear_all(hdev);
3360 hci_discovery_filter_clear(hdev);
3361 hci_dev_unlock(hdev);
3365 ida_simple_remove(&hci_index_ida, id);
3367 #ifdef CONFIG_SLEEP_MONITOR
3368 sleep_monitor_unregister_ops(SLEEP_MONITOR_BT);
3371 EXPORT_SYMBOL(hci_unregister_dev);
3373 /* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered notifiers; no other state is
 * touched here.
 */
3374 int hci_suspend_dev(struct hci_dev *hdev)
3376 hci_notify(hdev, HCI_DEV_SUSPEND);
3379 EXPORT_SYMBOL(hci_suspend_dev);
3381 /* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered notifiers. */
3382 int hci_resume_dev(struct hci_dev *hdev)
3384 hci_notify(hdev, HCI_DEV_RESUME);
3387 EXPORT_SYMBOL(hci_resume_dev);
3389 /* Reset HCI device */
/* Inject a synthetic HCI Hardware Error event (code 0x10 implied by
 * HCI_EV_HARDWARE_ERROR, payload 0x01/0x00) into the RX path so the
 * upper stack performs a full reset of this controller.
 */
3390 int hci_reset_dev(struct hci_dev *hdev)
3392 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3393 struct sk_buff *skb;
3395 skb = bt_skb_alloc(3, GFP_ATOMIC);
3399 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3400 memcpy(skb_put(skb, 3), hw_err, 3);
3402 /* Send Hardware Error to upper stack */
3403 return hci_recv_frame(hdev, skb);
3407 /* Receive frame from HCI drivers */
/* Entry point for frames coming up from the transport driver.  Frames
 * are only accepted while the device is UP or initializing; accepted
 * skbs are timestamped, queued on rx_q and handed to hci_rx_work.
 */
3408 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3410 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3411 && !test_bit(HCI_INIT, &hdev->flags))) {
3417 bt_cb(skb)->incoming = 1;
3419 /* Time stamp */
3420 __net_timestamp(skb);
3422 skb_queue_tail(&hdev->rx_q, skb);
3423 queue_work(hdev->workqueue, &hdev->rx_work);
3427 EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble an HCI packet of the given type from a byte
 * stream.  hdev->reassembly[index] holds the partial skb between calls;
 * scb->expect counts the bytes still needed.  Once the header arrives,
 * expect is extended by the header's payload length; when it reaches 0
 * the finished skb is fed back through hci_recv_frame().
 * NOTE(review): the return statements and the outer while loop driving
 * repeated header/payload phases are elided in this excerpt.
 */
3429 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3430 int count, __u8 index)
3435 struct sk_buff *skb;
3436 struct bt_skb_cb *scb;
3438 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3439 index >= NUM_REASSEMBLY)
3442 skb = hdev->reassembly[index];
/* No partial packet yet: pick max size/header length per type and
 * allocate a fresh reassembly skb. */
3446 case HCI_ACLDATA_PKT:
3447 len = HCI_MAX_FRAME_SIZE;
3448 hlen = HCI_ACL_HDR_SIZE;
3451 len = HCI_MAX_EVENT_SIZE;
3452 hlen = HCI_EVENT_HDR_SIZE;
3454 case HCI_SCODATA_PKT:
3455 len = HCI_MAX_SCO_SIZE;
3456 hlen = HCI_SCO_HDR_SIZE;
3460 skb = bt_skb_alloc(len, GFP_ATOMIC);
3464 scb = (void *) skb->cb;
3466 scb->pkt_type = type;
3468 hdev->reassembly[index] = skb;
/* Copy as much of the input as is still expected. */
3472 scb = (void *) skb->cb;
3473 len = min_t(uint, scb->expect, count);
3475 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length; drop the packet if it
 * would not fit the allocated skb. */
3484 if (skb->len == HCI_EVENT_HDR_SIZE) {
3485 struct hci_event_hdr *h = hci_event_hdr(skb);
3486 scb->expect = h->plen;
3488 if (skb_tailroom(skb) < scb->expect) {
3490 hdev->reassembly[index] = NULL;
3496 case HCI_ACLDATA_PKT:
3497 if (skb->len == HCI_ACL_HDR_SIZE) {
3498 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3499 scb->expect = __le16_to_cpu(h->dlen);
3501 if (skb_tailroom(skb) < scb->expect) {
3503 hdev->reassembly[index] = NULL;
3509 case HCI_SCODATA_PKT:
3510 if (skb->len == HCI_SCO_HDR_SIZE) {
3511 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3512 scb->expect = h->dlen;
3514 if (skb_tailroom(skb) < scb->expect) {
3516 hdev->reassembly[index] = NULL;
3523 if (scb->expect == 0) {
3524 /* Complete frame */
3526 bt_cb(skb)->pkt_type = type;
3527 hci_recv_frame(hdev, skb);
3529 hdev->reassembly[index] = NULL;
3537 #define STREAM_REASSEMBLY 0
/* Reassemble HCI packets from a raw byte stream (e.g. a UART driver):
 * the first byte of each frame is the packet-type indicator, after which
 * hci_reassembly() consumes the remainder using the dedicated
 * STREAM_REASSEMBLY slot.  Loop/termination lines are elided in this
 * excerpt.
 */
3539 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3545 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3548 struct { char type; } *pkt;
3550 /* Start of the frame */
3557 type = bt_cb(skb)->pkt_type;
3559 rem = hci_reassembly(hdev, type, data, count,
3564 data += (count - rem);
3570 EXPORT_SYMBOL(hci_recv_stream_fragment);
3572 /* ---- Interface to upper protocols ---- */
/* Add an upper-protocol callback set to the global hci_cb_list under
 * its writer lock.
 */
3574 int hci_register_cb(struct hci_cb *cb)
3576 BT_DBG("%p name %s", cb, cb->name);
3578 write_lock(&hci_cb_list_lock);
3579 list_add(&cb->list, &hci_cb_list);
3580 write_unlock(&hci_cb_list_lock);
3584 EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback set from hci_cb_list under its
 * writer lock.
 */
3586 int hci_unregister_cb(struct hci_cb *cb)
3588 BT_DBG("%p name %s", cb, cb->name);
3590 write_lock(&hci_cb_list_lock);
3591 list_del(&cb->list);
3592 write_unlock(&hci_cb_list_lock);
3596 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outbound skb to the transport driver: timestamp it, mirror a
 * copy to the monitor socket (and to raw sockets when in promiscuous
 * mode), then call hdev->send().  Failures are only logged - flow
 * control happens in the schedulers above this.
 */
3598 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3602 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3605 __net_timestamp(skb);
3607 /* Send copy to monitor */
3608 hci_send_to_monitor(hdev, skb);
3610 if (atomic_read(&hdev->promisc)) {
3611 /* Send copy to the sockets */
3612 hci_send_to_sock(hdev, skb);
3615 /* Get rid of skb owner, prior to sending to the driver. */
3618 #ifdef CONFIG_TIZEN_WIP
3619 hci_notify(hdev, HCI_DEV_WRITE);
3622 err = hdev->send(hdev, skb);
3624 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
/* True while a synchronous HCI request is outstanding on this device. */
3629 bool hci_req_pending(struct hci_dev *hdev)
3631 return (hdev->req_status == HCI_REQ_PEND);
3634 /* Send HCI command */
/* Build a stand-alone HCI command skb (opcode + plen bytes of param),
 * mark it as the start of a single-command request and queue it on
 * cmd_q for hci_cmd_work to transmit.
 */
3635 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3638 struct sk_buff *skb;
3640 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3642 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3644 BT_ERR("%s no memory for command", hdev->name);
3648 /* Stand-alone HCI commands must be flagged as
3649 * single-command requests.
3651 bt_cb(skb)->req.start = true;
3653 skb_queue_tail(&hdev->cmd_q, skb);
3654 queue_work(hdev->workqueue, &hdev->cmd_work);
3659 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last transmitted
 * command, or NULL-ish (elided return) if none was sent or the opcode
 * does not match.  The pointer aliases hdev->sent_cmd - do not hold it
 * past the next command transmission.
 */
3660 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3662 struct hci_command_hdr *hdr;
3664 if (!hdev->sent_cmd)
3667 hdr = (void *) hdev->sent_cmd->data;
3669 if (hdr->opcode != cpu_to_le16(opcode))
3672 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3674 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to skb: handle+flags packed into one LE16
 * and the current payload length as dlen.
 */
3678 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3680 struct hci_acl_hdr *hdr;
3683 skb_push(skb, HCI_ACL_HDR_SIZE);
3684 skb_reset_transport_header(skb);
3685 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3686 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3687 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to an (optionally fragmented) skb and append it to the
 * channel's TX queue.  For BR/EDR the connection handle is used; for AMP
 * the hci_chan handle.  A frag_list skb is split so each fragment gets
 * its own continuation header (ACL_START cleared), and all fragments are
 * queued atomically under the queue lock.
 */
3690 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3691 struct sk_buff *skb, __u16 flags)
3693 struct hci_conn *conn = chan->conn;
3694 struct hci_dev *hdev = conn->hdev;
3695 struct sk_buff *list;
3697 skb->len = skb_headlen(skb);
3700 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3702 switch (hdev->dev_type) {
3704 hci_add_acl_hdr(skb, conn->handle, flags);
3707 hci_add_acl_hdr(skb, chan->handle, flags);
3710 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3714 list = skb_shinfo(skb)->frag_list;
3716 /* Non fragmented */
3717 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3719 skb_queue_tail(queue, skb);
3722 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3724 skb_shinfo(skb)->frag_list = NULL;
3726 /* Queue all fragments atomically. We need to use spin_lock_bh
3727 * here because of 6LoWPAN links, as there this function is
3728 * called from softirq and using normal spin lock could cause
3731 spin_lock_bh(&queue->lock);
3733 __skb_queue_tail(queue, skb);
3735 flags &= ~ACL_START;
/* Walk the detached fragment chain, queueing each continuation. */
3738 skb = list; list = list->next;
3740 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3741 hci_add_acl_hdr(skb, conn->handle, flags);
3743 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3745 __skb_queue_tail(queue, skb);
3748 spin_unlock_bh(&queue->lock);
/* Queue ACL data on the channel and schedule the TX work to drain it. */
3752 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3754 struct hci_dev *hdev = chan->conn->hdev;
3756 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3758 hci_queue_acl(chan, &chan->data_q, skb, flags);
3760 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (LE16 handle, byte dlen) to skb, queue it on the
 * connection's data queue and schedule the TX work.
 */
3764 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3766 struct hci_dev *hdev = conn->hdev;
3767 struct hci_sco_hdr hdr;
3769 BT_DBG("%s len %d", hdev->name, skb->len);
3771 hdr.handle = cpu_to_le16(conn->handle);
3772 hdr.dlen = skb->len;
3774 skb_push(skb, HCI_SCO_HDR_SIZE);
3775 skb_reset_transport_header(skb);
3776 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3778 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3780 skb_queue_tail(&conn->data_q, skb);
3781 queue_work(hdev->workqueue, &hdev->tx_work);
3784 /* ---- HCI TX task (outgoing data) ---- */
3786 /* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets (fairness), and compute *quote = how many
 * packets it may send from the remaining controller buffer count.
 * Iteration is RCU-protected; quote math lines are elided here.
 */
3787 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3790 struct hci_conn_hash *h = &hdev->conn_hash;
3791 struct hci_conn *conn = NULL, *c;
3792 unsigned int num = 0, min = ~0;
3794 /* We don't have to lock device here. Connections are always
3795 * added and removed with TX task disabled. */
3799 list_for_each_entry_rcu(c, &h->list, list) {
3800 if (c->type != type || skb_queue_empty(&c->data_q))
3803 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3808 if (c->sent < min) {
3813 if (hci_conn_num(hdev, type) == num)
/* Select the per-type credit pool; LE falls back to the ACL pool when
 * the controller reports no dedicated LE buffers. */
3822 switch (conn->type) {
3824 cnt = hdev->acl_cnt;
3828 cnt = hdev->sco_cnt;
3831 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3835 BT_ERR("Unknown link type");
3843 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler for one link type: forcibly disconnect every
 * connection of that type that still has unacknowledged packets.
 */
3847 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3849 struct hci_conn_hash *h = &hdev->conn_hash;
3852 BT_ERR("%s link tx timeout", hdev->name);
3856 /* Kill stalled connections */
3857 list_for_each_entry_rcu(c, &h->list, list) {
3858 if (c->type == type && c->sent) {
3859 BT_ERR("%s killing stalled connection %pMR",
3860 hdev->name, &c->dst);
3861 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among all channels of connections of the
 * given type, pick one carrying the highest-priority queued skb; ties
 * are broken toward the connection with the fewest in-flight packets.
 * *quote receives the send budget for the chosen channel (quota math
 * lines elided in this excerpt).
 */
3868 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3871 struct hci_conn_hash *h = &hdev->conn_hash;
3872 struct hci_chan *chan = NULL;
3873 unsigned int num = 0, min = ~0, cur_prio = 0;
3874 struct hci_conn *conn;
3875 int cnt, q, conn_num = 0;
3877 BT_DBG("%s", hdev->name);
3881 list_for_each_entry_rcu(conn, &h->list, list) {
3882 struct hci_chan *tmp;
3884 if (conn->type != type)
3887 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3892 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3893 struct sk_buff *skb;
3895 if (skb_queue_empty(&tmp->data_q))
/* Strictly prefer higher skb priority; reset the fairness
 * bookkeeping whenever a higher priority level is found. */
3898 skb = skb_peek(&tmp->data_q);
3899 if (skb->priority < cur_prio)
3902 if (skb->priority > cur_prio) {
3905 cur_prio = skb->priority;
3910 if (conn->sent < min) {
3916 if (hci_conn_num(hdev, type) == conn_num)
/* Same credit-pool selection as hci_low_sent(), plus AMP blocks. */
3925 switch (chan->conn->type) {
3927 cnt = hdev->acl_cnt;
3930 cnt = hdev->block_cnt;
3934 cnt = hdev->sco_cnt;
3937 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3941 BT_ERR("Unknown link type");
3946 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a send round, bump the priority of skbs
 * waiting on channels of this link type toward HCI_PRIO_MAX-1 so
 * lower-priority traffic eventually gets scheduled.
 */
3950 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3952 struct hci_conn_hash *h = &hdev->conn_hash;
3953 struct hci_conn *conn;
3956 BT_DBG("%s", hdev->name);
3960 list_for_each_entry_rcu(conn, &h->list, list) {
3961 struct hci_chan *chan;
3963 if (conn->type != type)
3966 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3971 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3972 struct sk_buff *skb;
3979 if (skb_queue_empty(&chan->data_q))
3982 skb = skb_peek(&chan->data_q);
3983 if (skb->priority >= HCI_PRIO_MAX - 1)
3986 skb->priority = HCI_PRIO_MAX - 1;
3988 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3992 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by this ACL packet
 * (payload only - the ACL header is excluded), rounded up.
 */
4000 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4002 /* Calculate count of blocks used by this packet */
4003 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller has no free ACL credits and nothing was sent for
 * HCI_ACL_TX_TIMEOUT, treat the ACL link as stalled and kill stuck
 * connections.  Skipped for unconfigured controllers.
 */
4006 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4008 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4009 /* ACL tx timeout must be longer than maximum
4010 * link supervision timeout (40.9 seconds) */
4011 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4012 HCI_ACL_TX_TIMEOUT))
4013 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while credits remain, repeatedly pick the
 * best channel via hci_chan_sent() and transmit up to its quote,
 * stopping early if a higher-priority skb ceases to lead the queue.
 * Afterwards, rebalance priorities if anything was sent.
 */
4017 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4019 unsigned int cnt = hdev->acl_cnt;
4020 struct hci_chan *chan;
4021 struct sk_buff *skb;
4024 __check_timeout(hdev, cnt);
4026 while (hdev->acl_cnt &&
4027 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
4028 u32 priority = (skb_peek(&chan->data_q))->priority;
4029 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4030 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4031 skb->len, skb->priority);
4033 /* Stop if priority has changed */
4034 if (skb->priority < priority)
4037 skb = skb_dequeue(&chan->data_q);
4039 hci_conn_enter_active_mode(chan->conn,
4040 bt_cb(skb)->force_active);
4042 hci_send_frame(hdev, skb);
4043 hdev->acl_last_tx = jiffies;
4051 if (cnt != hdev->acl_cnt)
4052 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control counted in buffer blocks, as
 * used by AMP controllers): like hci_sched_acl_pkt() but each packet
 * consumes __get_blocks() credits, and packets too large for the
 * remaining blocks end the inner loop.
 * NOTE(review): the "type = ..." assignments for HCI_AMP vs BR/EDR are
 * elided in this excerpt.
 */
4055 static void hci_sched_acl_blk(struct hci_dev *hdev)
4057 unsigned int cnt = hdev->block_cnt;
4058 struct hci_chan *chan;
4059 struct sk_buff *skb;
4063 __check_timeout(hdev, cnt);
4065 BT_DBG("%s", hdev->name);
4067 if (hdev->dev_type == HCI_AMP)
4072 while (hdev->block_cnt > 0 &&
4073 (chan = hci_chan_sent(hdev, type, "e))) {
4074 u32 priority = (skb_peek(&chan->data_q))->priority;
4075 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4078 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4079 skb->len, skb->priority);
4081 /* Stop if priority has changed */
4082 if (skb->priority < priority)
4085 skb = skb_dequeue(&chan->data_q);
4087 blocks = __get_blocks(hdev, skb);
4088 if (blocks > hdev->block_cnt)
4091 hci_conn_enter_active_mode(chan->conn,
4092 bt_cb(skb)->force_active);
4094 hci_send_frame(hdev, skb);
4095 hdev->acl_last_tx = jiffies;
4097 hdev->block_cnt -= blocks;
4100 chan->sent += blocks;
4101 chan->conn->sent += blocks;
4105 if (cnt != hdev->block_cnt)
4106 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet-based or block-based variant
 * according to the controller's flow-control mode; bail out early when
 * there are no links of the relevant type for this controller class.
 */
4109 static void hci_sched_acl(struct hci_dev *hdev)
4111 BT_DBG("%s", hdev->name);
4113 /* No ACL link over BR/EDR controller */
4114 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4117 /* No AMP link over AMP controller */
4118 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4121 switch (hdev->flow_ctl_mode) {
4122 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4123 hci_sched_acl_pkt(hdev);
4126 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4127 hci_sched_acl_blk(hdev);
/* SCO scheduler: drain queued SCO skbs per connection within the quote
 * granted by hci_low_sent(); conn->sent == ~0 marks an untracked link
 * (elided reset line follows in the full source).
 */
4133 static void hci_sched_sco(struct hci_dev *hdev)
4135 struct hci_conn *conn;
4136 struct sk_buff *skb;
4139 BT_DBG("%s", hdev->name);
4141 if (!hci_conn_num(hdev, SCO_LINK))
4144 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
4145 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4146 BT_DBG("skb %p len %d", skb, skb->len);
4147 hci_send_frame(hdev, skb);
4150 if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco() but for
 * ESCO_LINK connections; shares the sco_cnt credit pool.
 */
4156 static void hci_sched_esco(struct hci_dev *hdev)
4158 struct hci_conn *conn;
4159 struct sk_buff *skb;
4162 BT_DBG("%s", hdev->name);
4164 if (!hci_conn_num(hdev, ESCO_LINK))
4167 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4169 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4170 BT_DBG("skb %p len %d", skb, skb->len);
4171 hci_send_frame(hdev, skb);
4174 if (conn->sent == ~0)
/* LE scheduler: stall detection (45s with no credits), then the same
 * priority-aware channel loop as the ACL path.  Controllers without a
 * dedicated LE buffer pool (le_pkts == 0) borrow ACL credits, so the
 * final count is written back to acl_cnt in that case (the le_cnt
 * branch is elided in this excerpt).
 */
4180 static void hci_sched_le(struct hci_dev *hdev)
4182 struct hci_chan *chan;
4183 struct sk_buff *skb;
4184 int quote, cnt, tmp;
4186 BT_DBG("%s", hdev->name);
4188 if (!hci_conn_num(hdev, LE_LINK))
4191 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4192 /* LE tx timeout must be longer than maximum
4193 * link supervision timeout (40.9 seconds) */
4194 if (!hdev->le_cnt && hdev->le_pkts &&
4195 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4196 hci_link_tx_to(hdev, LE_LINK);
4199 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4201 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
4202 u32 priority = (skb_peek(&chan->data_q))->priority;
4203 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4204 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4205 skb->len, skb->priority);
4207 /* Stop if priority has changed */
4208 if (skb->priority < priority)
4211 skb = skb_dequeue(&chan->data_q);
4213 hci_send_frame(hdev, skb);
4214 hdev->le_last_tx = jiffies;
4225 hdev->acl_cnt = cnt;
4228 hci_prio_recalculate(hdev, LE_LINK);
/* TX work item: run all per-link-type schedulers (skipped entirely when
 * the device is owned by a user-channel socket), then flush raw_q
 * packets straight to the driver.
 */
4231 static void hci_tx_work(struct work_struct *work)
4233 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4234 struct sk_buff *skb;
4236 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4237 hdev->sco_cnt, hdev->le_cnt);
4239 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4240 /* Schedule queues and send stuff to HCI driver */
4241 hci_sched_acl(hdev);
4242 hci_sched_sco(hdev);
4243 hci_sched_esco(hdev);
4247 /* Send next queued raw (unknown type) packet */
4248 while ((skb = skb_dequeue(&hdev->raw_q)))
4249 hci_send_frame(hdev, skb);
4252 /* ----- HCI RX task (incoming data processing) ----- */
4254 /* ACL data packet */
/* Demultiplex an inbound ACL data skb: unpack handle/flags, look up the
 * connection (briefly under hdev lock), wake it from sniff, and pass
 * the payload to L2CAP.  Unknown handles are logged and the skb dropped
 * (kfree_skb line elided in this excerpt).
 */
4255 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4257 struct hci_acl_hdr *hdr = (void *) skb->data;
4258 struct hci_conn *conn;
4259 __u16 handle, flags;
4261 skb_pull(skb, HCI_ACL_HDR_SIZE);
4263 handle = __le16_to_cpu(hdr->handle);
4264 flags = hci_flags(handle);
4265 handle = hci_handle(handle);
4267 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4270 hdev->stat.acl_rx++;
4273 conn = hci_conn_hash_lookup_handle(hdev, handle);
4274 hci_dev_unlock(hdev);
4277 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4279 /* Send to upper protocol */
4280 l2cap_recv_acldata(conn, skb, flags);
4283 BT_ERR("%s ACL packet for unknown connection handle %d",
4284 hdev->name, handle);
4290 /* SCO data packet */
/* Demultiplex an inbound SCO data skb to its connection and hand it to
 * the SCO socket layer; unknown handles are logged and dropped.
 */
4291 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4293 struct hci_sco_hdr *hdr = (void *) skb->data;
4294 struct hci_conn *conn;
4297 skb_pull(skb, HCI_SCO_HDR_SIZE);
4299 handle = __le16_to_cpu(hdr->handle);
4301 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4303 hdev->stat.sco_rx++;
4306 conn = hci_conn_hash_lookup_handle(hdev, handle);
4307 hci_dev_unlock(hdev);
4310 /* Send to upper protocol */
4311 sco_recv_scodata(conn, skb);
4314 BT_ERR("%s SCO packet for unknown connection handle %d",
4315 hdev->name, handle);
/* A request is complete when the next queued command starts a new
 * request (req.start set) - or when cmd_q is empty (elided branch).
 */
4321 static bool hci_req_is_complete(struct hci_dev *hdev)
4323 struct sk_buff *skb;
4325 skb = skb_peek(&hdev->cmd_q);
4329 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q.
 * HCI_OP_RESET is never resent - see the CSR-controller workaround in
 * hci_req_cmd_complete().
 */
4332 static void hci_resend_last(struct hci_dev *hdev)
4334 struct hci_command_hdr *sent;
4335 struct sk_buff *skb;
4338 if (!hdev->sent_cmd)
4341 sent = (void *) hdev->sent_cmd->data;
4342 opcode = __le16_to_cpu(sent->opcode);
4343 if (opcode == HCI_OP_RESET)
4346 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4350 skb_queue_head(&hdev->cmd_q, skb);
4351 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command completes: decide whether the whole request it
 * belongs to is finished, locate the request's completion callback
 * (either on sent_cmd or on a queued skb), purge the request's remaining
 * commands from cmd_q, and invoke the callback once.
 */
4354 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4356 hci_req_complete_t req_complete = NULL;
4357 struct sk_buff *skb;
4358 unsigned long flags;
4360 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4362 /* If the completed command doesn't match the last one that was
4363 * sent we need to do special handling of it.
4365 if (!hci_sent_cmd_data(hdev, opcode)) {
4366 /* Some CSR based controllers generate a spontaneous
4367 * reset complete event during init and any pending
4368 * command will never be completed. In such a case we
4369 * need to resend whatever was the last sent
4372 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4373 hci_resend_last(hdev);
4378 /* If the command succeeded and there's still more commands in
4379 * this request the request is not yet complete.
4381 if (!status && !hci_req_is_complete(hdev))
4384 /* If this was the last command in a request the complete
4385 * callback would be found in hdev->sent_cmd instead of the
4386 * command queue (hdev->cmd_q).
4388 if (hdev->sent_cmd) {
4389 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4392 /* We must set the complete callback to NULL to
4393 * avoid calling the callback more than once if
4394 * this function gets called again.
4396 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4402 /* Remove all pending commands belonging to this request */
4403 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4404 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4405 if (bt_cb(skb)->req.start) {
/* Start of the NEXT request - put it back and stop purging. */
4406 __skb_queue_head(&hdev->cmd_q, skb);
4410 req_complete = bt_cb(skb)->req.complete;
4413 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
/* On error, report the failing opcode; on success, HCI_OP_NOP. */
4417 req_complete(hdev, status, status ? opcode : HCI_OP_NOP);
/* RX work item: drain rx_q, mirroring each skb to the monitor (and raw
 * sockets when promiscuous), dropping everything while a user channel
 * owns the device, filtering out data packets during init, and finally
 * dispatching by packet type to the event/ACL/SCO handlers.
 */
4420 static void hci_rx_work(struct work_struct *work)
4422 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4423 struct sk_buff *skb;
4425 BT_DBG("%s", hdev->name);
4427 while ((skb = skb_dequeue(&hdev->rx_q))) {
4428 /* Send copy to monitor */
4429 hci_send_to_monitor(hdev, skb);
4431 if (atomic_read(&hdev->promisc)) {
4432 /* Send copy to the sockets */
4433 hci_send_to_sock(hdev, skb);
4436 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4441 if (test_bit(HCI_INIT, &hdev->flags)) {
4442 /* Don't process data packets in this states. */
4443 switch (bt_cb(skb)->pkt_type) {
4444 case HCI_ACLDATA_PKT:
4445 case HCI_SCODATA_PKT:
4452 switch (bt_cb(skb)->pkt_type) {
4454 BT_DBG("%s Event packet", hdev->name);
4455 hci_event_packet(hdev, skb);
4458 case HCI_ACLDATA_PKT:
4459 BT_DBG("%s ACL data packet", hdev->name);
4460 hci_acldata_packet(hdev, skb);
4463 case HCI_SCODATA_PKT:
4464 BT_DBG("%s SCO data packet", hdev->name);
4465 hci_scodata_packet(hdev, skb);
4475 static void hci_cmd_work(struct work_struct *work)
4477 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4478 struct sk_buff *skb;
4480 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4481 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4483 /* Send queued commands */
4484 if (atomic_read(&hdev->cmd_cnt)) {
4485 skb = skb_dequeue(&hdev->cmd_q);
4489 kfree_skb(hdev->sent_cmd);
4491 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4492 if (hdev->sent_cmd) {
4493 atomic_dec(&hdev->cmd_cnt);
4494 hci_send_frame(hdev, skb);
4495 if (test_bit(HCI_RESET, &hdev->flags))
4496 cancel_delayed_work(&hdev->cmd_timer);
4498 schedule_delayed_work(&hdev->cmd_timer,
4501 skb_queue_head(&hdev->cmd_q, skb);
4502 queue_work(hdev->workqueue, &hdev->cmd_work);