2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/rfkill.h>
30 #include <linux/debugfs.h>
31 #include <linux/crypto.h>
32 #include <linux/property.h>
33 #include <linux/suspend.h>
34 #include <linux/wait.h>
35 #include <asm/unaligned.h>
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 #include <net/bluetooth/mgmt.h>
42 #include "hci_request.h"
43 #include "hci_debugfs.h"
/* Forward declarations of the RX / command / TX work handlers that are
 * attached to each hci_dev's workqueues (definitions are later in the file).
 */
49 static void hci_rx_work(struct work_struct *work);
50 static void hci_cmd_work(struct work_struct *work);
51 static void hci_tx_work(struct work_struct *work);
/* Global list of registered HCI devices, protected by the rwlock below. */
54 LIST_HEAD(hci_dev_list);
55 DEFINE_RWLOCK(hci_dev_list_lock);
57 /* HCI callback list */
58 LIST_HEAD(hci_cb_list);
59 DEFINE_MUTEX(hci_cb_list_lock);
61 /* HCI ID Numbering */
62 static DEFINE_IDA(hci_index_ida);
64 /* ---- HCI debugfs entries ---- */
/* debugfs "dut_mode" read handler: reports 'Y' or 'N' depending on whether
 * the HCI_DUT_MODE device flag is set.
 * NOTE(review): this extract appears to be missing lines here (e.g. the
 * local buf[] declaration) — verify against the full source.
 */
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
69 struct hci_dev *hdev = file->private_data;
72 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* debugfs "dut_mode" write handler: parse a boolean from userspace and,
 * if the value actually changes and the device is up, send the synchronous
 * HCI command to enter DUT mode (or HCI Reset to leave it), then toggle
 * the HCI_DUT_MODE flag.
 */
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
81 struct hci_dev *hdev = file->private_data;
86 if (!test_bit(HCI_UP, &hdev->flags))
89 err = kstrtobool_from_user(user_buf, count, &enable);
/* No-op when the requested state matches the current flag. */
93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
96 hci_req_sync_lock(hdev);
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
103 hci_req_sync_unlock(hdev);
110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
/* File operations for the "dut_mode" debugfs entry. */
115 static const struct file_operations dut_mode_fops = {
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
/* debugfs "vendor_diag" read handler: reports 'Y' or 'N' depending on
 * whether the HCI_VENDOR_DIAG flag is set.
 */
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
125 struct hci_dev *hdev = file->private_data;
128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* debugfs "vendor_diag" write handler: parse a boolean and program vendor
 * diagnostics via the driver's set_diag() callback, updating the
 * HCI_VENDOR_DIAG flag to match. When the diag setting is non-persistent
 * and the transport is down (or in user channel mode), only the flag is
 * recorded and programming is deferred until power-on.
 */
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
137 struct hci_dev *hdev = file->private_data;
141 err = kstrtobool_from_user(user_buf, count, &enable);
145 /* When the diagnostic flags are not persistent and the transport
146 * is not active or in user channel operation, then there is no need
147 * for the vendor callback. Instead just store the desired value and
148 * the setting will be programmed when the controller gets powered on.
 */
150 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
151 (!test_bit(HCI_RUNNING, &hdev->flags) ||
152 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
155 hci_req_sync_lock(hdev);
156 err = hdev->set_diag(hdev, enable);
157 hci_req_sync_unlock(hdev);
164 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
166 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
/* File operations for the "vendor_diag" debugfs entry. */
171 static const struct file_operations vendor_diag_fops = {
173 .read = vendor_diag_read,
174 .write = vendor_diag_write,
175 .llseek = default_llseek,
/* Create the two debugfs entries ("dut_mode" and "vendor_diag") that are
 * available even for unconfigured controllers.
 */
178 static void hci_debugfs_create_basic(struct hci_dev *hdev)
180 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
184 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
/* Queue an HCI Reset command and mark the device as resetting. */
188 static int hci_reset_req(struct hci_request *req, unsigned long opt)
190 BT_DBG("%s %ld", req->hdev->name, opt);
193 set_bit(HCI_RESET, &req->hdev->flags);
194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-one init for BR/EDR (and dual-mode) controllers: select
 * packet-based flow control and queue the basic identity reads.
 */
198 static void bredr_init(struct hci_request *req)
200 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
202 /* Read Local Supported Features */
203 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
205 /* Read Local Version */
206 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
208 /* Read BD Address */
209 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-one init for AMP controllers: select block-based flow control and
 * queue the AMP-specific information reads.
 */
212 static void amp_init1(struct hci_request *req)
214 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
216 /* Read Local Version */
217 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
219 /* Read Local Supported Commands */
220 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
222 /* Read Local AMP Info */
223 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
225 /* Read Data Blk size */
226 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
228 /* Read Flow Control Mode */
229 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
231 /* Read Location Data */
232 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Stage-two init for AMP controllers. */
235 static int amp_init2(struct hci_request *req)
237 /* Read Local Supported Features. Not all AMP controllers
238 * support this so it's placed conditionally in the second
 * stage init.
 */
241 if (req->hdev->commands[14] & 0x20)
242 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
/* First init stage: optionally reset the controller, then dispatch to the
 * per-device-type stage-one setup (bredr_init / amp_init1).
 */
247 static int hci_init1_req(struct hci_request *req, unsigned long opt)
249 struct hci_dev *hdev = req->hdev;
251 BT_DBG("%s %ld", hdev->name, opt);
/* Only reset here when the driver does not reset on close. */
254 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
255 hci_reset_req(req, 0);
257 switch (hdev->dev_type) {
265 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
272 static void bredr_setup(struct hci_request *req)
277 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
278 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
280 /* Read Class of Device */
281 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
283 /* Read Local Name */
284 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
286 /* Read Voice Setting */
287 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
289 /* Read Number of Supported IAC */
290 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
292 /* Read Current IAC LAP */
293 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
295 /* Clear Event Filters */
296 flt_type = HCI_FLT_CLEAR_ALL;
297 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
299 /* Connection accept timeout ~20 secs */
300 param = cpu_to_le16(0x7d00);
301 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
/* LE stage-two setup: queue LE capability reads; LE-only controllers get
 * the HCI_LE_ENABLED flag set implicitly.
 */
304 static void le_setup(struct hci_request *req)
306 struct hci_dev *hdev = req->hdev;
308 /* Read LE Buffer Size */
309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
311 /* Read LE Local Supported Features */
312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
314 /* Read LE Supported States */
315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
317 /* LE-only controllers have LE implicitly enabled */
318 if (!lmp_bredr_capable(hdev))
319 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* Build and queue the HCI Set Event Mask command, enabling only the events
 * that the controller's LMP features and supported-commands bitmap say it
 * can generate.
 */
322 static void hci_setup_event_mask(struct hci_request *req)
324 struct hci_dev *hdev = req->hdev;
326 /* The second byte is 0xff instead of 0x9f (two reserved bits
327 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
 * command otherwise.
 */
330 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
332 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
333 * any event mask for pre 1.2 devices.
 */
335 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
338 if (lmp_bredr_capable(hdev)) {
339 events[4] |= 0x01; /* Flow Specification Complete */
341 /* Use a different default for LE-only devices */
342 memset(events, 0, sizeof(events));
343 events[1] |= 0x20; /* Command Complete */
344 events[1] |= 0x40; /* Command Status */
345 events[1] |= 0x80; /* Hardware Error */
347 /* If the controller supports the Disconnect command, enable
348 * the corresponding event. In addition enable packet flow
349 * control related events.
 */
351 if (hdev->commands[0] & 0x20) {
352 events[0] |= 0x10; /* Disconnection Complete */
353 events[2] |= 0x04; /* Number of Completed Packets */
354 events[3] |= 0x02; /* Data Buffer Overflow */
357 /* If the controller supports the Read Remote Version
358 * Information command, enable the corresponding event.
 */
360 if (hdev->commands[2] & 0x80)
361 events[1] |= 0x08; /* Read Remote Version Information Complete */
365 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
366 events[0] |= 0x80; /* Encryption Change */
367 events[5] |= 0x80; /* Encryption Key Refresh Complete */
371 if (lmp_inq_rssi_capable(hdev) ||
372 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
373 events[4] |= 0x02; /* Inquiry Result with RSSI */
375 if (lmp_ext_feat_capable(hdev))
376 events[4] |= 0x04; /* Read Remote Extended Features Complete */
378 if (lmp_esco_capable(hdev)) {
379 events[5] |= 0x08; /* Synchronous Connection Complete */
380 events[5] |= 0x10; /* Synchronous Connection Changed */
383 if (lmp_sniffsubr_capable(hdev))
384 events[5] |= 0x20; /* Sniff Subrating */
386 if (lmp_pause_enc_capable(hdev))
387 events[5] |= 0x80; /* Encryption Key Refresh Complete */
389 if (lmp_ext_inq_capable(hdev))
390 events[5] |= 0x40; /* Extended Inquiry Result */
392 if (lmp_no_flush_capable(hdev))
393 events[7] |= 0x01; /* Enhanced Flush Complete */
395 if (lmp_lsto_capable(hdev))
396 events[6] |= 0x80; /* Link Supervision Timeout Changed */
398 if (lmp_ssp_capable(hdev)) {
399 events[6] |= 0x01; /* IO Capability Request */
400 events[6] |= 0x02; /* IO Capability Response */
401 events[6] |= 0x04; /* User Confirmation Request */
402 events[6] |= 0x08; /* User Passkey Request */
403 events[6] |= 0x10; /* Remote OOB Data Request */
404 events[6] |= 0x20; /* Simple Pairing Complete */
405 events[7] |= 0x04; /* User Passkey Notification */
406 events[7] |= 0x08; /* Keypress Notification */
407 events[7] |= 0x10; /* Remote Host Supported
408 * Features Notification
 */
412 if (lmp_le_capable(hdev))
413 events[7] |= 0x20; /* LE Meta-Event */
415 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* Second init stage: AMP devices branch to amp_init2(); BR/EDR and LE
 * capable devices get their respective setup, then common capability
 * reads and SSP/EIR/inquiry-mode/auth configuration are queued.
 */
418 static int hci_init2_req(struct hci_request *req, unsigned long opt)
420 struct hci_dev *hdev = req->hdev;
422 if (hdev->dev_type == HCI_AMP)
423 return amp_init2(req);
425 if (lmp_bredr_capable(hdev))
428 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
430 if (lmp_le_capable(hdev))
433 /* All Bluetooth 1.2 and later controllers should support the
434 * HCI command for reading the local supported commands.
436 * Unfortunately some controllers indicate Bluetooth 1.2 support,
437 * but do not have support for this command. If that is the case,
438 * the driver can quirk the behavior and skip reading the local
439 * supported commands.
 */
441 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
442 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
443 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
445 if (lmp_ssp_capable(hdev)) {
446 /* When SSP is available, then the host features page
447 * should also be available as well. However some
448 * controllers list the max_page as 0 as long as SSP
449 * has not been enabled. To achieve proper debugging
450 * output, force the minimum max_page to 1 at least.
 */
452 hdev->max_page = 0x01;
454 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
457 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
458 sizeof(mode), &mode);
460 struct hci_cp_write_eir cp;
/* Clear any stale EIR data when SSP is not enabled. */
462 memset(hdev->eir, 0, sizeof(hdev->eir));
463 memset(&cp, 0, sizeof(cp));
465 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
469 if (lmp_inq_rssi_capable(hdev) ||
470 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
473 /* If Extended Inquiry Result events are supported, then
474 * they are clearly preferred over Inquiry Result with RSSI
 */
477 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
479 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
482 if (lmp_inq_tx_pwr_capable(hdev))
483 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
485 if (lmp_ext_feat_capable(hdev)) {
486 struct hci_cp_read_local_ext_features cp;
489 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
493 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
495 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and queue the write command.
 */
502 static void hci_setup_link_policy(struct hci_request *req)
504 struct hci_dev *hdev = req->hdev;
505 struct hci_cp_write_def_link_policy cp;
508 if (lmp_rswitch_capable(hdev))
509 link_policy |= HCI_LP_RSWITCH;
510 if (lmp_hold_capable(hdev))
511 link_policy |= HCI_LP_HOLD;
512 if (lmp_sniff_capable(hdev))
513 link_policy |= HCI_LP_SNIFF;
514 if (lmp_park_capable(hdev))
515 link_policy |= HCI_LP_PARK;
517 cp.policy = cpu_to_le16(link_policy);
518 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Queue Write LE Host Supported when the host-side LE enable state differs
 * from what the controller currently reports. Skipped for LE-only devices.
 */
521 static void hci_set_le_support(struct hci_request *req)
523 struct hci_dev *hdev = req->hdev;
524 struct hci_cp_write_le_host_supported cp;
526 /* LE-only devices do not support explicit enablement */
527 if (!lmp_bredr_capable(hdev))
530 memset(&cp, 0, sizeof(cp));
532 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
/* Only send the command if the desired state actually changes. */
537 if (cp.le != lmp_host_le_capable(hdev))
538 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and (only when it differs from the all-zero default) queue the
 * Set Event Mask Page 2 command for CPB and Authenticated Payload Timeout
 * related events.
 */
542 static void hci_set_event_mask_page_2(struct hci_request *req)
544 struct hci_dev *hdev = req->hdev;
545 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
546 bool changed = false;
548 /* If Connectionless Peripheral Broadcast central role is supported
549 * enable all necessary events for it.
 */
551 if (lmp_cpb_central_capable(hdev)) {
552 events[1] |= 0x40; /* Triggered Clock Capture */
553 events[1] |= 0x80; /* Synchronization Train Complete */
554 events[2] |= 0x10; /* Peripheral Page Response Timeout */
555 events[2] |= 0x20; /* CPB Channel Map Change */
559 /* If Connectionless Peripheral Broadcast peripheral role is supported
560 * enable all necessary events for it.
 */
562 if (lmp_cpb_peripheral_capable(hdev)) {
563 events[2] |= 0x01; /* Synchronization Train Received */
564 events[2] |= 0x02; /* CPB Receive */
565 events[2] |= 0x04; /* CPB Timeout */
566 events[2] |= 0x08; /* Truncated Page Complete */
570 /* Enable Authenticated Payload Timeout Expired event if supported */
571 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
576 /* Some Broadcom based controllers indicate support for Set Event
577 * Mask Page 2 command, but then actually do not support it. Since
578 * the default value is all bits set to zero, the command is only
579 * required if the event mask has to be changed. In case no change
580 * to the event mask is needed, skip this command.
 */
583 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
584 sizeof(events), events);
/* Third init stage (HCI_PRIMARY only): program the event mask, read stored
 * link keys and scan parameters, then — for LE capable controllers — build
 * the LE event mask from feature/command bits and queue the LE capability
 * reads (buffer/accept list/resolving list sizes, data length, etc.).
 */
587 static int hci_init3_req(struct hci_request *req, unsigned long opt)
589 struct hci_dev *hdev = req->hdev;
592 hci_setup_event_mask(req);
594 if (hdev->commands[6] & 0x20 &&
595 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
596 struct hci_cp_read_stored_link_key cp;
598 bacpy(&cp.bdaddr, BDADDR_ANY);
600 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
603 if (hdev->commands[5] & 0x10)
604 hci_setup_link_policy(req);
606 if (hdev->commands[8] & 0x01)
607 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
609 if (hdev->commands[18] & 0x04 &&
610 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
611 hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
613 /* Some older Broadcom based Bluetooth 1.2 controllers do not
614 * support the Read Page Scan Type command. Check support for
615 * this command in the bit mask of supported commands.
 */
617 if (hdev->commands[13] & 0x01)
618 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
620 if (lmp_le_capable(hdev)) {
623 memset(events, 0, sizeof(events));
625 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
626 events[0] |= 0x10; /* LE Long Term Key Request */
628 /* If controller supports the Connection Parameters Request
629 * Link Layer Procedure, enable the corresponding event.
 */
631 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
632 events[0] |= 0x20; /* LE Remote Connection Parameter Request */
636 /* If the controller supports the Data Length Extension
637 * feature, enable the corresponding event.
 */
639 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
640 events[0] |= 0x40; /* LE Data Length Change */
642 /* If the controller supports LL Privacy feature, enable
643 * the corresponding event.
 */
645 if (hdev->le_features[0] & HCI_LE_LL_PRIVACY)
646 events[1] |= 0x02; /* LE Enhanced Connection Complete */
650 /* If the controller supports Extended Scanner Filter
651 * Policies, enable the corresponding event.
 */
653 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
654 events[1] |= 0x04; /* LE Direct Advertising Report */
658 /* If the controller supports Channel Selection Algorithm #2
659 * feature, enable the corresponding event.
 */
661 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
662 events[2] |= 0x08; /* LE Channel Selection Algorithm */
666 /* If the controller supports the LE Set Scan Enable command,
667 * enable the corresponding advertising report event.
 */
669 if (hdev->commands[26] & 0x08)
670 events[0] |= 0x02; /* LE Advertising Report */
672 /* If the controller supports the LE Create Connection
673 * command, enable the corresponding event.
 */
675 if (hdev->commands[26] & 0x10)
676 events[0] |= 0x01; /* LE Connection Complete */
678 /* If the controller supports the LE Connection Update
679 * command, enable the corresponding event.
 */
681 if (hdev->commands[27] & 0x04)
682 events[0] |= 0x04; /* LE Connection Update Complete */
686 /* If the controller supports the LE Read Remote Used Features
687 * command, enable the corresponding event.
 */
689 if (hdev->commands[27] & 0x20)
690 events[0] |= 0x08; /* LE Read Remote Used Features Complete */
694 /* If the controller supports the LE Read Local P-256
695 * Public Key command, enable the corresponding event.
 */
697 if (hdev->commands[34] & 0x02)
698 events[0] |= 0x80; /* LE Read Local P-256
699 * Public Key Complete
 */
702 /* If the controller supports the LE Generate DHKey
703 * command, enable the corresponding event.
 */
705 if (hdev->commands[34] & 0x04)
706 events[1] |= 0x01; /* LE Generate DHKey Complete */
708 /* If the controller supports the LE Set Default PHY or
709 * LE Set PHY commands, enable the corresponding event.
 */
711 if (hdev->commands[35] & (0x20 | 0x40))
712 events[1] |= 0x08; /* LE PHY Update Complete */
714 /* If the controller supports LE Set Extended Scan Parameters
715 * and LE Set Extended Scan Enable commands, enable the
716 * corresponding event.
 */
718 if (use_ext_scan(hdev))
719 events[1] |= 0x10; /* LE Extended Advertising Report */
723 /* If the controller supports the LE Extended Advertising
724 * command, enable the corresponding event.
 */
726 if (ext_adv_capable(hdev))
727 events[2] |= 0x02; /* LE Advertising Set Terminated */
731 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
734 /* Read LE Advertising Channel TX Power */
735 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
736 /* HCI TS spec forbids mixing of legacy and extended
737 * advertising commands wherein READ_ADV_TX_POWER is
738 * also included. So do not call it if extended adv
739 * is supported otherwise controller will return
740 * COMMAND_DISALLOWED for extended commands.
 */
742 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
745 if ((hdev->commands[38] & 0x80) &&
746 !test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) {
747 /* Read LE Min/Max Tx Power*/
748 hci_req_add(req, HCI_OP_LE_READ_TRANSMIT_POWER,
752 if (hdev->commands[26] & 0x40) {
753 /* Read LE Accept List Size */
754 hci_req_add(req, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
758 if (hdev->commands[26] & 0x80) {
759 /* Clear LE Accept List */
760 hci_req_add(req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
763 if (hdev->commands[34] & 0x40) {
764 /* Read LE Resolving List Size */
765 hci_req_add(req, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
769 if (hdev->commands[34] & 0x20) {
770 /* Clear LE Resolving List */
771 hci_req_add(req, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL);
774 if (hdev->commands[35] & 0x04) {
775 __le16 rpa_timeout = cpu_to_le16(hdev->rpa_timeout);
777 /* Set RPA timeout */
778 hci_req_add(req, HCI_OP_LE_SET_RPA_TIMEOUT, 2,
782 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
783 /* Read LE Maximum Data Length */
784 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
786 /* Read LE Suggested Default Data Length */
787 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
790 if (ext_adv_capable(hdev)) {
791 /* Read LE Number of Supported Advertising Sets */
792 hci_req_add(req, HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
796 hci_set_le_support(req);
799 /* Read features beyond page 1 if available */
800 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
801 struct hci_cp_read_local_ext_features cp;
804 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Fourth init stage: delete stored link keys, program event mask page 2,
 * read codecs/pairing options/MWS config, enable Secure Connections,
 * configure erroneous data reporting for wideband speech, and set LE
 * default data length and PHY preferences — each gated on command support.
 */
811 static int hci_init4_req(struct hci_request *req, unsigned long opt)
813 struct hci_dev *hdev = req->hdev;
815 /* Some Broadcom based Bluetooth controllers do not support the
816 * Delete Stored Link Key command. They are clearly indicating its
817 * absence in the bit mask of supported commands.
819 * Check the supported commands and only if the command is marked
820 * as supported send it. If not supported assume that the controller
821 * does not have actual support for stored link keys which makes this
822 * command redundant anyway.
824 * Some controllers indicate that they support handling deleting
825 * stored link keys, but they don't. The quirk lets a driver
826 * just disable this command.
 */
828 if (hdev->commands[6] & 0x80 &&
829 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
830 struct hci_cp_delete_stored_link_key cp;
832 bacpy(&cp.bdaddr, BDADDR_ANY);
833 cp.delete_all = 0x01;
834 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
838 /* Set event mask page 2 if the HCI command for it is supported */
839 if (hdev->commands[22] & 0x04)
840 hci_set_event_mask_page_2(req);
842 /* Read local codec list if the HCI command is supported */
843 if (hdev->commands[29] & 0x20)
844 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
846 /* Read local pairing options if the HCI command is supported */
847 if (hdev->commands[41] & 0x08)
848 hci_req_add(req, HCI_OP_READ_LOCAL_PAIRING_OPTS, 0, NULL);
850 /* Get MWS transport configuration if the HCI command is supported */
851 if (hdev->commands[30] & 0x08)
852 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
854 /* Check for Synchronization Train support */
855 if (lmp_sync_train_capable(hdev))
856 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
858 /* Enable Secure Connections if supported and configured */
859 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
860 bredr_sc_enabled(hdev)) {
863 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
864 sizeof(support), &support);
867 /* Set erroneous data reporting if supported to the wideband speech
 * setting value.
 */
870 if (hdev->commands[18] & 0x08 &&
871 !test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) {
872 bool enabled = hci_dev_test_flag(hdev,
873 HCI_WIDEBAND_SPEECH_ENABLED);
/* Only write when the desired setting differs from the current one. */
876 (hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
877 struct hci_cp_write_def_err_data_reporting cp;
879 cp.err_data_reporting = enabled ?
880 ERR_DATA_REPORTING_ENABLED :
881 ERR_DATA_REPORTING_DISABLED;
883 hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
888 /* Set Suggested Default Data Length to maximum if supported */
889 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
890 struct hci_cp_le_write_def_data_len cp;
892 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
893 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
894 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
897 /* Set Default PHY parameters if command is supported */
898 if (hdev->commands[35] & 0x20) {
899 struct hci_cp_le_set_default_phy cp;
902 cp.tx_phys = hdev->le_tx_def_phys;
903 cp.rx_phys = hdev->le_rx_def_phys;
905 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
/* Run the full synchronous init sequence (stages 1-4) against a configured
 * controller. AMP controllers stop after stage 2; debugfs entries are only
 * created during the setup/config phases.
 */
911 static int __hci_init(struct hci_dev *hdev)
915 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
919 if (hci_dev_test_flag(hdev, HCI_SETUP))
920 hci_debugfs_create_basic(hdev);
922 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
926 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
927 * BR/EDR/LE type controllers. AMP controllers only need the
928 * first two stages of init.
 */
930 if (hdev->dev_type != HCI_PRIMARY)
933 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
937 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
941 /* This function is only called when the controller is actually in
942 * configured state. When the controller is marked as unconfigured,
943 * this initialization procedure is not run.
945 * It means that it is possible that a controller runs through its
946 * setup phase and then discovers missing settings. If that is the
947 * case, then this function will not be called. It then will only
948 * be called during the config phase.
950 * So only when in setup phase or config phase, create the debugfs
951 * entries and register the SMP channels.
 */
953 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
954 !hci_dev_test_flag(hdev, HCI_CONFIG))
957 hci_debugfs_create_common(hdev);
959 if (lmp_bredr_capable(hdev))
960 hci_debugfs_create_bredr(hdev);
962 if (lmp_le_capable(hdev))
963 hci_debugfs_create_le(hdev);
/* Minimal init for unconfigured controllers: optional reset, read local
 * version, and read the BD address only when the driver can set one.
 */
968 static int hci_init0_req(struct hci_request *req, unsigned long opt)
970 struct hci_dev *hdev = req->hdev;
972 BT_DBG("%s %ld", hdev->name, opt);
/* Only reset here when the driver does not reset on close. */
975 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
976 hci_reset_req(req, 0);
978 /* Read Local Version */
979 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
981 /* Read BD Address */
982 if (hdev->set_bdaddr)
983 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Synchronous init path for unconfigured controllers; raw devices are
 * skipped entirely.
 */
988 static int __hci_unconf_init(struct hci_dev *hdev)
992 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
995 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
999 if (hci_dev_test_flag(hdev, HCI_SETUP))
1000 hci_debugfs_create_basic(hdev);
/* Request callback: write the scan enable setting passed in via opt. */
1005 static int hci_scan_req(struct hci_request *req, unsigned long opt)
1009 BT_DBG("%s %x", req->hdev->name, scan);
1011 /* Inquiry and Page scans */
1012 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request callback: write the authentication enable setting from opt. */
1016 static int hci_auth_req(struct hci_request *req, unsigned long opt)
1020 BT_DBG("%s %x", req->hdev->name, auth);
1022 /* Authentication */
1023 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request callback: write the encryption mode setting from opt. */
1027 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
1031 BT_DBG("%s %x", req->hdev->name, encrypt);
1034 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request callback: write the default link policy passed in via opt. */
1038 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
1040 __le16 policy = cpu_to_le16(opt);
1042 BT_DBG("%s %x", req->hdev->name, policy);
1044 /* Default link policy */
1045 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1049 /* Get HCI device by index.
1050 * Device is held on return. */
1051 struct hci_dev *hci_dev_get(int index)
1053 struct hci_dev *hdev = NULL, *d;
1055 BT_DBG("%d", index);
/* Walk the global device list under the read lock; hci_dev_hold() takes
 * a reference that the caller is responsible for dropping.
 */
1060 read_lock(&hci_dev_list_lock);
1061 list_for_each_entry(d, &hci_dev_list, list) {
1062 if (d->id == index) {
1063 hdev = hci_dev_hold(d);
1067 read_unlock(&hci_dev_list_lock);
1071 /* ---- Inquiry support ---- */
/* Return true while BR/EDR discovery is in the FINDING or RESOLVING state. */
1073 bool hci_discovery_active(struct hci_dev *hdev)
1075 struct discovery_state *discov = &hdev->discovery;
1077 switch (discov->state) {
1078 case DISCOVERY_FINDING:
1079 case DISCOVERY_RESOLVING:
/* Transition the BR/EDR discovery state machine, notifying the management
 * interface on start/stop and re-evaluating background scanning when
 * discovery stops.
 */
1087 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1089 int old_state = hdev->discovery.state;
1091 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
/* Ignore no-op transitions. */
1093 if (old_state == state)
1096 hdev->discovery.state = state;
1099 case DISCOVERY_STOPPED:
1100 hci_update_background_scan(hdev);
1102 if (old_state != DISCOVERY_STARTING)
1103 mgmt_discovering(hdev, 0);
1105 case DISCOVERY_STARTING:
1107 case DISCOVERY_FINDING:
1108 mgmt_discovering(hdev, 1);
1110 case DISCOVERY_RESOLVING:
1112 case DISCOVERY_STOPPING:
/* Return true while LE discovery is in the FINDING or RESOLVING state. */
1118 bool hci_le_discovery_active(struct hci_dev *hdev)
1120 struct discovery_state *discov = &hdev->le_discovery;
1122 switch (discov->state) {
1123 case DISCOVERY_FINDING:
1124 case DISCOVERY_RESOLVING:
/* LE counterpart of hci_discovery_set_state(): transition the LE discovery
 * state machine and emit the corresponding mgmt notifications. Note the
 * state is stored after the switch here, so the STOPPED branch compares
 * against the previous state directly.
 */
1132 void hci_le_discovery_set_state(struct hci_dev *hdev, int state)
1134 BT_DBG("%s state %u -> %u", hdev->name,
1135 hdev->le_discovery.state, state);
1137 if (hdev->le_discovery.state == state)
1141 case DISCOVERY_STOPPED:
1142 hci_update_background_scan(hdev);
1144 if (hdev->le_discovery.state != DISCOVERY_STARTING)
1145 mgmt_le_discovering(hdev, 0);
1147 case DISCOVERY_STARTING:
1149 case DISCOVERY_FINDING:
1150 mgmt_le_discovering(hdev, 1);
1152 case DISCOVERY_RESOLVING:
1154 case DISCOVERY_STOPPING:
1158 hdev->le_discovery.state = state;
/* Drop every entry in the inquiry cache and reset its sub-lists. */
1162 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1164 struct discovery_state *cache = &hdev->discovery;
1165 struct inquiry_entry *p, *n;
1167 list_for_each_entry_safe(p, n, &cache->all, all) {
1172 INIT_LIST_HEAD(&cache->unknown);
1173 INIT_LIST_HEAD(&cache->resolve);
/* Find an inquiry cache entry by bdaddr in the full ("all") list. */
1176 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1179 struct discovery_state *cache = &hdev->discovery;
1180 struct inquiry_entry *e;
1182 BT_DBG("cache %p, %pMR", cache, bdaddr);
1184 list_for_each_entry(e, &cache->all, all) {
1185 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry by bdaddr among those whose remote name is still unknown. */
1192 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1195 struct discovery_state *cache = &hdev->discovery;
1196 struct inquiry_entry *e;
1198 BT_DBG("cache %p, %pMR", cache, bdaddr);
1200 list_for_each_entry(e, &cache->unknown, list) {
1201 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry in the resolve list: BDADDR_ANY matches the first entry in
 * the given name_state, otherwise match on the exact bdaddr.
 */
1208 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1212 struct discovery_state *cache = &hdev->discovery;
1213 struct inquiry_entry *e;
1215 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1217 list_for_each_entry(e, &cache->resolve, list) {
1218 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1220 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert an entry into the resolve list, keeping it ordered by
 * descending RSSI magnitude (pending-name entries stay in front).
 */
1227 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1228 struct inquiry_entry *ie)
1230 struct discovery_state *cache = &hdev->discovery;
1231 struct list_head *pos = &cache->resolve;
1232 struct inquiry_entry *p;
1234 list_del(&ie->list);
/* Find the insertion point: first non-pending entry with weaker RSSI. */
1236 list_for_each_entry(p, &cache->resolve, list) {
1237 if (p->name_state != NAME_PENDING &&
1238 abs(p->data.rssi) >= abs(ie->data.rssi))
1243 list_add(&ie->list, pos);
/* Insert or refresh an inquiry cache entry from fresh inquiry data and
 * compute the MGMT device-found flags (legacy pairing, confirm-name) to
 * report for it. Allocates a new entry when the bdaddr is not yet cached.
 */
1246 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1249 struct discovery_state *cache = &hdev->discovery;
1250 struct inquiry_entry *ie;
1253 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any remote OOB data for this address. */
1255 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1257 if (!data->ssp_mode)
1258 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1260 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1262 if (!ie->data.ssp_mode)
1263 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
/* RSSI changes re-sort the entry within the resolve list. */
1265 if (ie->name_state == NAME_NEEDED &&
1266 data->rssi != ie->data.rssi) {
1267 ie->data.rssi = data->rssi;
1268 hci_inquiry_cache_update_resolve(hdev, ie);
1274 /* Entry not in the cache. Add new one. */
1275 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1277 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1281 list_add(&ie->all, &cache->all);
1284 ie->name_state = NAME_KNOWN;
1286 ie->name_state = NAME_NOT_KNOWN;
1287 list_add(&ie->list, &cache->unknown);
1291 if (name_known && ie->name_state != NAME_KNOWN &&
1292 ie->name_state != NAME_PENDING) {
1293 ie->name_state = NAME_KNOWN;
1294 list_del(&ie->list);
1297 memcpy(&ie->data, data, sizeof(*data));
1298 ie->timestamp = jiffies;
1299 cache->timestamp = jiffies;
1301 if (ie->name_state == NAME_NOT_KNOWN)
1302 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info, returning the number copied.  Used by the
 * HCIINQUIRY ioctl path; caller must hold hdev->lock.
 */
1308 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1310 	struct discovery_state *cache = &hdev->discovery;
1311 	struct inquiry_info *info = (struct inquiry_info *) buf;
1312 	struct inquiry_entry *e;
1315 	list_for_each_entry(e, &cache->all, all) {
1316 		struct inquiry_data *data = &e->data;
1321 		bacpy(&info->bdaddr, &data->bdaddr);
1322 		info->pscan_rep_mode = data->pscan_rep_mode;
1323 		info->pscan_period_mode = data->pscan_period_mode;
1324 		info->pscan_mode = data->pscan_mode;
		/* Class of Device is a 3-byte field */
1325 		memcpy(info->dev_class, data->dev_class, 3);
1326 		info->clock_offset = data->clock_offset;
1332 	BT_DBG("cache %p, copied %d", cache, copied);
/* hci_request callback: queue an HCI_OP_INQUIRY command built from the
 * user-supplied hci_inquiry_req (@opt), unless an inquiry is already
 * running (HCI_INQUIRY flag set).
 */
1336 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1338 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1339 	struct hci_dev *hdev = req->hdev;
1340 	struct hci_cp_inquiry cp;
1342 	BT_DBG("%s", hdev->name);
1344 	if (test_bit(HCI_INQUIRY, &hdev->flags))
	/* LAP (Lower Address Part) is a 3-byte access code */
1348 	memcpy(&cp.lap, &ir->lap, 3);
1349 	cp.length = ir->length;
1350 	cp.num_rsp = ir->num_rsp;
1351 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl handler: validate the request, optionally flush a
 * stale cache, run a synchronous inquiry, then copy the cached results
 * back to userspace.
 * NOTE(review): error-path lines are elided in this chunk — confirm
 * unlock/put ordering against the full source.
 */
1356 int hci_inquiry(void __user *arg)
1358 	__u8 __user *ptr = arg;
1359 	struct hci_inquiry_req ir;
1360 	struct hci_dev *hdev;
1361 	int err = 0, do_inquiry = 0, max_rsp;
1365 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1368 	hdev = hci_dev_get(ir.dev_id);
	/* Inquiry via ioctl is refused for user-channel, unconfigured,
	 * non-primary, or BR/EDR-disabled controllers.
	 */
1372 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1377 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1382 	if (hdev->dev_type != HCI_PRIMARY) {
1387 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1392 	/* Restrict maximum inquiry length to 60 seconds */
1393 	if (ir.length > 60) {
	/* Flush the cache if it is stale, empty, or flush was requested */
1399 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1400 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1401 		hci_inquiry_cache_flush(hdev);
1404 	hci_dev_unlock(hdev);
	/* ir.length is in units of 1.28s; 2000ms gives headroom */
1406 	timeo = ir.length * msecs_to_jiffies(2000);
1409 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1414 		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1415 		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
1417 		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1418 				TASK_INTERRUPTIBLE)) {
1424 	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
1427 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1429 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
1430 	 * copy it to the user space.
	 */
1432 	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
1439 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1440 	hci_dev_unlock(hdev);
1442 	BT_DBG("num_rsp %d", ir.num_rsp);
	/* Write back the updated request header, then the result array */
1444 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1446 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1460  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
1461  *                                     (BD_ADDR) for a HCI device from
1462  *                                     a firmware node property.
1463  * @hdev:	The HCI device
1465  * Search the firmware node for 'local-bd-address'.
1467  * All-zero BD addresses are rejected, because those could be properties
1468  * that exist in the firmware tables, but were not updated by the firmware. For
1469  * example, the DTS could define 'local-bd-address', with zero BD addresses.
1471 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
1473 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
1477 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
1478 					    (u8 *)&ba, sizeof(ba));
	/* Reject read failure and the all-zero address (see above) */
1479 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
	/* On success, store it as the controller's public address */
1482 	bacpy(&hdev->public_addr, &ba);
/* Core power-on path for an HCI controller.  Under the request-sync
 * lock: validates preconditions (not unregistering, not rfkilled, has a
 * usable address), opens the transport, runs driver setup and HCI init,
 * and on success marks the device HCI_UP and notifies mgmt.  On init
 * failure, all work is flushed and queues purged before closing the
 * transport again.
 * NOTE(review): many interior lines (labels, gotos, close path) are
 * elided in this chunk — verify error-path ordering in full source.
 */
1485 static int hci_dev_do_open(struct hci_dev *hdev)
1489 	BT_DBG("%s %p", hdev->name, hdev);
1491 	hci_req_sync_lock(hdev);
1493 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1498 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1499 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1500 		/* Check for rfkill but allow the HCI setup stage to
1501 		 * proceed (which in itself doesn't cause any RF activity).
		 */
1503 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1508 		/* Check for valid public address or a configured static
1509 		 * random address, but let the HCI setup proceed to
1510 		 * be able to determine if there is a public address
		 *
1513 		 * In case of user channel usage, it is not important
1514 		 * if a public address or static random address is
		 *
1517 		 * This check is only valid for BR/EDR controllers
1518 		 * since AMP controllers do not have an address.
		 */
1520 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1521 		    hdev->dev_type == HCI_PRIMARY &&
1522 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1523 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1524 			ret = -EADDRNOTAVAIL;
	/* Already up: nothing to do */
1529 	if (test_bit(HCI_UP, &hdev->flags)) {
	/* Open the underlying transport (USB, UART, ...) */
1534 	if (hdev->open(hdev)) {
1539 	set_bit(HCI_RUNNING, &hdev->flags);
1540 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1542 	atomic_set(&hdev->cmd_cnt, 1);
1543 	set_bit(HCI_INIT, &hdev->flags);
1545 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
1546 	    test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
1547 		bool invalid_bdaddr;
1549 		hci_sock_dev_event(hdev, HCI_DEV_SETUP);
		/* Run the driver's vendor-specific setup routine */
1552 			ret = hdev->setup(hdev);
1554 		/* The transport driver can set the quirk to mark the
1555 		 * BD_ADDR invalid before creating the HCI device or in
1556 		 * its setup callback.
		 */
1558 		invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR,
1564 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) {
1565 			if (!bacmp(&hdev->public_addr, BDADDR_ANY))
1566 				hci_dev_get_bd_addr_from_property(hdev);
1568 			if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1570 				ret = hdev->set_bdaddr(hdev,
1571 						       &hdev->public_addr);
1573 				/* If setting of the BD_ADDR from the device
1574 				 * property succeeds, then treat the address
1575 				 * as valid even if the invalid BD_ADDR
1576 				 * quirk indicates otherwise.
				 */
1579 					invalid_bdaddr = false;
1584 		/* The transport driver can set these quirks before
1585 		 * creating the HCI device or in its setup callback.
		 *
1587 		 * For the invalid BD_ADDR quirk it is possible that
1588 		 * it becomes a valid address if the bootloader does
1589 		 * provide it (see above).
		 *
1591 		 * In case any of them is set, the controller has to
1592 		 * start up as unconfigured.
		 */
1594 		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1596 			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1598 		/* For an unconfigured controller it is required to
1599 		 * read at least the version information provided by
1600 		 * the Read Local Version Information command.
		 *
1602 		 * If the set_bdaddr driver callback is provided, then
1603 		 * also the original Bluetooth public device address
1604 		 * will be read using the Read BD Address command.
		 */
1606 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1607 			ret = __hci_unconf_init(hdev);
1610 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1611 		/* If public address change is configured, ensure that
1612 		 * the address gets programmed. If the driver does not
1613 		 * support changing the public address, fail the power
		 * on procedure.
		 */
1616 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1618 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1620 			ret = -EADDRNOTAVAIL;
	/* Full HCI init only for configured, non-user-channel devices */
1624 	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1625 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1626 		ret = __hci_init(hdev);
1627 		if (!ret && hdev->post_init)
1628 			ret = hdev->post_init(hdev);
1632 	/* If the HCI Reset command is clearing all diagnostic settings,
1633 	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
1636 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1637 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1638 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1639 		ret = hdev->set_diag(hdev, true);
1644 	clear_bit(HCI_INIT, &hdev->flags);
	/* Success: mark RPA expired so a fresh one gets generated */
1648 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1649 		hci_adv_instances_set_rpa_expired(hdev, true);
1650 		set_bit(HCI_UP, &hdev->flags);
1651 		hci_sock_dev_event(hdev, HCI_DEV_UP);
1652 		hci_leds_update_powered(hdev, true);
1653 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1654 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1655 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1656 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1657 		    hci_dev_test_flag(hdev, HCI_MGMT) &&
1658 		    hdev->dev_type == HCI_PRIMARY) {
1659 			ret = __hci_req_hci_power_on(hdev);
1660 			mgmt_power_on(hdev, ret);
1663 		/* Init failed, cleanup */
1664 		flush_work(&hdev->tx_work);
1666 		/* Since hci_rx_work() is possible to awake new cmd_work
1667 		 * it should be flushed first to avoid unexpected call of
		 * hci_cmd_work()
		 */
1670 		flush_work(&hdev->rx_work);
1671 		flush_work(&hdev->cmd_work);
1673 		skb_queue_purge(&hdev->cmd_q);
1674 		skb_queue_purge(&hdev->rx_q);
		/* Drop the last sent command and its timeout timer */
1679 		if (hdev->sent_cmd) {
1680 			cancel_delayed_work_sync(&hdev->cmd_timer);
1681 			kfree_skb(hdev->sent_cmd);
1682 			hdev->sent_cmd = NULL;
1685 		clear_bit(HCI_RUNNING, &hdev->flags);
1686 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
		/* Keep only HCI_RAW across the failed open */
1689 		hdev->flags &= BIT(HCI_RAW);
1693 	hci_req_sync_unlock(hdev);
1697 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl entry point: resolve the device id, reject normal
 * bring-up of unconfigured devices, cancel pending auto-off work, wait
 * for any in-flight setup to finish, then call hci_dev_do_open().
 */
1699 int hci_dev_open(__u16 dev)
1701 	struct hci_dev *hdev;
1704 	hdev = hci_dev_get(dev);
1708 	/* Devices that are marked as unconfigured can only be powered
1709 	 * up as user channel. Trying to bring them up as normal devices
1710 	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
1713 	 * When this function is called for a user channel, the flag
1714 	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
1717 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1718 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1723 	/* We need to ensure that no other power on/off work is pending
1724 	 * before proceeding to call hci_dev_do_open. This is
1725 	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
1728 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1729 		cancel_delayed_work(&hdev->power_off);
1731 	/* After this call it is guaranteed that the setup procedure
1732 	 * has finished. This means that error conditions like RFKILL
1733 	 * or no valid public or static random address apply.
	 */
1735 	flush_workqueue(hdev->req_workqueue);
1737 	/* For controllers not using the management interface and that
1738 	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1739 	 * so that pairing works for them. Once the management interface
1740 	 * is in use this bit will be cleared again and userspace has
1741 	 * to explicitly enable it.
	 */
1743 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1744 	    !hci_dev_test_flag(hdev, HCI_MGMT))
1745 		hci_dev_set_flag(hdev, HCI_BONDABLE);
1747 	err = hci_dev_do_open(hdev);
1754 /* This function requires the caller holds hdev->lock */
1755 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1757 	struct hci_conn_params *p;
	/* Drop pending LE connection attempts and their action entries */
1759 	list_for_each_entry(p, &hdev->le_conn_params, list) {
1761 			hci_conn_drop(p->conn);
1762 			hci_conn_put(p->conn);
1765 		list_del_init(&p->action);
1768 	BT_DBG("All LE pending actions cleared");
/* Core power-off path: cancel timers and pending requests, run the
 * driver's shutdown hook, flush RX/TX work, tear down discovery/mgmt
 * state and connections, optionally send HCI Reset, purge all queues,
 * and finally close the transport and reset hdev state.
 * NOTE(review): interior lines (locking, transport close) are elided in
 * this chunk — verify the exact sequence in full source.
 */
1771 int hci_dev_do_close(struct hci_dev *hdev)
1776 	BT_DBG("%s %p", hdev->name, hdev);
1778 	cancel_delayed_work(&hdev->power_off);
1779 	cancel_delayed_work(&hdev->ncmd_timer);
1781 	hci_request_cancel_all(hdev);
1782 	hci_req_sync_lock(hdev);
1784 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1785 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1786 	    test_bit(HCI_UP, &hdev->flags)) {
1787 		/* Execute vendor specific shutdown routine */
1789 			err = hdev->shutdown(hdev);
	/* Already down: just release the sync lock and bail */
1792 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1793 		cancel_delayed_work_sync(&hdev->cmd_timer);
1794 		hci_req_sync_unlock(hdev);
1798 	hci_leds_update_powered(hdev, false);
1800 	/* Flush RX and TX works */
1801 	flush_work(&hdev->tx_work);
1802 	flush_work(&hdev->rx_work);
1804 	if (hdev->discov_timeout > 0) {
1805 		hdev->discov_timeout = 0;
1806 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1807 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1810 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1811 		cancel_delayed_work(&hdev->service_cache);
1813 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
1814 		struct adv_info *adv_instance;
1816 		cancel_delayed_work_sync(&hdev->rpa_expired);
1818 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
1819 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1822 	/* Avoid potential lockdep warnings from the *_flush() calls by
1823 	 * ensuring the workqueue is empty up front.
	 */
1825 	drain_workqueue(hdev->workqueue);
1829 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1831 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
	/* Notify mgmt unless this is an automatic power-off */
1833 	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1834 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1835 	    hci_dev_test_flag(hdev, HCI_MGMT))
1836 		__mgmt_power_off(hdev);
1838 	hci_inquiry_cache_flush(hdev);
1839 	hci_pend_le_actions_clear(hdev);
1840 	hci_conn_hash_flush(hdev);
1841 	hci_dev_unlock(hdev);
1843 	smp_unregister(hdev);
1845 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1847 	aosp_do_close(hdev);
1848 	msft_do_close(hdev);
	/* Send HCI Reset on close if the controller quirk requires it */
1854 	skb_queue_purge(&hdev->cmd_q);
1855 	atomic_set(&hdev->cmd_cnt, 1);
1856 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1857 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1858 		set_bit(HCI_INIT, &hdev->flags);
1859 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1860 		clear_bit(HCI_INIT, &hdev->flags);
1863 	/* flush cmd  work */
1864 	flush_work(&hdev->cmd_work);
	/* Drop all remaining queued packets */
1867 	skb_queue_purge(&hdev->rx_q);
1868 	skb_queue_purge(&hdev->cmd_q);
1869 	skb_queue_purge(&hdev->raw_q);
1871 	/* Drop last sent command */
1872 	if (hdev->sent_cmd) {
1873 		cancel_delayed_work_sync(&hdev->cmd_timer);
1874 		kfree_skb(hdev->sent_cmd);
1875 		hdev->sent_cmd = NULL;
1878 	clear_bit(HCI_RUNNING, &hdev->flags);
1879 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
	/* Wake a suspend path that was waiting for power-down */
1881 	if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
1882 		wake_up(&hdev->suspend_wait_q);
1884 	/* After this point our queues are empty
1885 	 * and no tasks are scheduled. */
	/* Keep only HCI_RAW; clear all volatile device flags */
1889 	hdev->flags &= BIT(HCI_RAW);
1890 	hci_dev_clear_volatile_flags(hdev);
1892 	/* Controller radio is available but is currently powered down */
1893 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1895 	memset(hdev->eir, 0, sizeof(hdev->eir));
1896 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1897 	bacpy(&hdev->random_addr, BDADDR_ANY);
1899 	hci_req_sync_unlock(hdev);
/* HCIDEVDOWN ioctl entry point: reject user-channel devices, cancel a
 * pending auto power-off, then perform the actual close.
 */
1905 int hci_dev_close(__u16 dev)
1907 	struct hci_dev *hdev;
1910 	hdev = hci_dev_get(dev);
1914 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1919 	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1920 		cancel_delayed_work(&hdev->power_off);
1922 	err = hci_dev_do_close(hdev);
/* Perform an HCI Reset while the device stays up: drop queued packets,
 * flush discovery cache and connections, reset flow-control counters,
 * and run the synchronous HCI_Reset request.
 */
1929 static int hci_dev_do_reset(struct hci_dev *hdev)
1933 	BT_DBG("%s %p", hdev->name, hdev);
1935 	hci_req_sync_lock(hdev);
	/* Drop queues */
1938 	skb_queue_purge(&hdev->rx_q);
1939 	skb_queue_purge(&hdev->cmd_q);
1941 	/* Avoid potential lockdep warnings from the *_flush() calls by
1942 	 * ensuring the workqueue is empty up front.
	 */
1944 	drain_workqueue(hdev->workqueue);
1947 	hci_inquiry_cache_flush(hdev);
1948 	hci_conn_hash_flush(hdev);
1949 	hci_dev_unlock(hdev);
	/* Reset packet-based flow control counters */
1954 	atomic_set(&hdev->cmd_cnt, 1);
1955 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1957 	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1959 	hci_req_sync_unlock(hdev);
/* HCIDEVRESET ioctl entry point: validate device state (must be up,
 * not user-channel, not unconfigured) and delegate to
 * hci_dev_do_reset().
 */
1963 int hci_dev_reset(__u16 dev)
1965 	struct hci_dev *hdev;
1968 	hdev = hci_dev_get(dev);
1972 	if (!test_bit(HCI_UP, &hdev->flags)) {
1977 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1982 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1987 	err = hci_dev_do_reset(hdev);
/* HCIDEVRESTAT ioctl: zero the device's byte/error statistics counters
 * after the usual user-channel / unconfigured checks.
 */
1994 int hci_dev_reset_stat(__u16 dev)
1996 	struct hci_dev *hdev;
1999 	hdev = hci_dev_get(dev);
2003 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2008 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2013 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Mirror a raw scan-enable change (from the HCISETSCAN ioctl) into the
 * HCI_CONNECTABLE / HCI_DISCOVERABLE mgmt flags and, when mgmt is in
 * use, refresh advertising data and emit New Settings.
 */
2020 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2022 	bool conn_changed, discov_changed;
2024 	BT_DBG("%s scan 0x%02x", hdev->name, scan);
	/* SCAN_PAGE controls connectability */
2026 	if ((scan & SCAN_PAGE))
2027 		conn_changed = !hci_dev_test_and_set_flag(hdev,
2030 		conn_changed = hci_dev_test_and_clear_flag(hdev,
	/* SCAN_INQUIRY controls discoverability */
2033 	if ((scan & SCAN_INQUIRY)) {
2034 		discov_changed = !hci_dev_test_and_set_flag(hdev,
2037 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2038 		discov_changed = hci_dev_test_and_clear_flag(hdev,
2042 	if (!hci_dev_test_flag(hdev, HCI_MGMT))
2045 	if (conn_changed || discov_changed) {
2046 		/* In case this was disabled through mgmt */
2047 		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2049 		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2050 			hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
2052 		mgmt_new_settings(hdev);
/* Dispatcher for the legacy HCISET* ioctls: copies the request from
 * userspace, validates the device, then switches on @cmd to run the
 * matching synchronous HCI request or update hdev fields directly.
 * NOTE(review): switch labels are elided in this chunk — case bodies
 * below are in original order (AUTH, ENCRYPT, SCAN, LINKPOL, LINKMODE,
 * PTYPE, ACLMTU, SCOMTU); verify against full source.
 */
2056 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2058 	struct hci_dev *hdev;
2059 	struct hci_dev_req dr;
2062 	if (copy_from_user(&dr, arg, sizeof(dr)))
2065 	hdev = hci_dev_get(dr.dev_id);
2069 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
2074 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
2079 	if (hdev->dev_type != HCI_PRIMARY) {
2084 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
	/* HCISETAUTH */
2091 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2092 				   HCI_INIT_TIMEOUT, NULL);
	/* HCISETENCRYPT */
2096 		if (!lmp_encrypt_capable(hdev)) {
2101 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
2102 			/* Auth must be enabled first */
2103 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2104 					   HCI_INIT_TIMEOUT, NULL);
2109 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2110 				   HCI_INIT_TIMEOUT, NULL);
	/* HCISETSCAN */
2114 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2115 				   HCI_INIT_TIMEOUT, NULL);
2117 		/* Ensure that the connectable and discoverable states
2118 		 * get correctly modified as this was a non-mgmt change.
		 */
2121 			hci_update_scan_state(hdev, dr.dev_opt);
	/* HCISETLINKPOL */
2125 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2126 				   HCI_INIT_TIMEOUT, NULL);
2129 	case HCISETLINKMODE:
2130 		hdev->link_mode = ((__u16) dr.dev_opt) &
2131 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
	/* HCISETPTYPE */
2135 		if (hdev->pkt_type == (__u16) dr.dev_opt)
2138 		hdev->pkt_type = (__u16) dr.dev_opt;
2139 		mgmt_phy_configuration_changed(hdev, NULL);
	/* HCISETACLMTU: dev_opt packs MTU (high half) and pkt count */
2143 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2144 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
	/* HCISETSCOMTU: same packing for SCO */
2148 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2149 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: read the requested entry count, allocate a
 * kernel buffer, walk the global device list under the read lock
 * filling (id, flags) pairs, and copy the result back to userspace.
 */
2162 int hci_get_dev_list(void __user *arg)
2164 	struct hci_dev *hdev;
2165 	struct hci_dev_list_req *dl;
2166 	struct hci_dev_req *dr;
2167 	int n = 0, size, err;
2170 	if (get_user(dev_num, (__u16 __user *) arg))
	/* Bound the allocation to two pages' worth of entries */
2173 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2176 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2178 	dl = kzalloc(size, GFP_KERNEL);
2184 	read_lock(&hci_dev_list_lock);
2185 	list_for_each_entry(hdev, &hci_dev_list, list) {
2186 		unsigned long flags = hdev->flags;
2188 		/* When the auto-off is configured it means the transport
2189 		 * is running, but in that case still indicate that the
2190 		 * device is actually down.
		 */
2192 		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2193 			flags &= ~BIT(HCI_UP);
2195 		(dr + n)->dev_id  = hdev->id;
2196 		(dr + n)->dev_opt = flags;
2201 	read_unlock(&hci_dev_list_lock);
	/* Shrink the copy-back size to the entries actually filled */
2204 	size = sizeof(*dl) + n * sizeof(*dr);
2206 	err = copy_to_user(arg, dl, size);
2209 	return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot (name,
 * address, type, flags, MTUs, policies, stats, features) for one device
 * and copy it to userspace.
 */
2212 int hci_get_dev_info(void __user *arg)
2214 	struct hci_dev *hdev;
2215 	struct hci_dev_info di;
2216 	unsigned long flags;
2219 	if (copy_from_user(&di, arg, sizeof(di)))
2222 	hdev = hci_dev_get(di.dev_id);
2226 	/* When the auto-off is configured it means the transport
2227 	 * is running, but in that case still indicate that the
2228 	 * device is actually down.
	 */
2230 	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2231 		flags = hdev->flags & ~BIT(HCI_UP);
2233 		flags = hdev->flags;
2235 	strcpy(di.name, hdev->name);
2236 	di.bdaddr   = hdev->bdaddr;
	/* Encode bus in low nibble, dev_type in the next two bits */
2237 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2239 	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report LE buffer sizes in the ACL fields */
2240 	if (lmp_bredr_capable(hdev)) {
2241 		di.acl_mtu  = hdev->acl_mtu;
2242 		di.acl_pkts = hdev->acl_pkts;
2243 		di.sco_mtu  = hdev->sco_mtu;
2244 		di.sco_pkts = hdev->sco_pkts;
2246 		di.acl_mtu  = hdev->le_mtu;
2247 		di.acl_pkts = hdev->le_pkts;
2251 	di.link_policy = hdev->link_policy;
2252 	di.link_mode   = hdev->link_mode;
2254 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2255 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2257 	if (copy_to_user(arg, &di, sizeof(di)))
2265 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: mirror the block state into HCI_RFKILLED and power
 * the device down when blocked (unless it is still in setup/config, or
 * a user-channel owns it).
 */
2267 static int hci_rfkill_set_block(void *data, bool blocked)
2269 	struct hci_dev *hdev = data;
2271 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
	/* User channel manages the device itself; don't interfere */
2273 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2277 		hci_dev_set_flag(hdev, HCI_RFKILLED);
2278 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2279 		    !hci_dev_test_flag(hdev, HCI_CONFIG))
2280 			hci_dev_do_close(hdev);
2282 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
/* rfkill operations table registered for each HCI device */
2288 static const struct rfkill_ops hci_rfkill_ops = {
2289 	.set_block = hci_rfkill_set_block,
/* Deferred power-on work item.  If the device is already up under mgmt
 * with auto-off pending, just (re)run the mgmt power-on request.
 * Otherwise open the device, re-check conditions that were deferred
 * during setup (rfkill, unconfigured, missing address), arm the
 * auto-off timer, and emit the appropriate mgmt index events as the
 * device leaves the SETUP/CONFIG phases.
 */
2292 static void hci_power_on(struct work_struct *work)
2294 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2297 	BT_DBG("%s", hdev->name);
	/* Device already up under mgmt with auto-off armed: refresh only */
2299 	if (test_bit(HCI_UP, &hdev->flags) &&
2300 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
2301 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2302 		cancel_delayed_work(&hdev->power_off);
2303 		hci_req_sync_lock(hdev);
2304 		err = __hci_req_hci_power_on(hdev);
2305 		hci_req_sync_unlock(hdev);
2306 		mgmt_power_on(hdev, err);
2310 	err = hci_dev_do_open(hdev);
2313 		mgmt_set_powered_failed(hdev, err);
2314 		hci_dev_unlock(hdev);
2318 	/* During the HCI setup phase, a few error conditions are
2319 	 * ignored and they need to be checked now. If they are still
2320 	 * valid, it is important to turn the device back off.
	 */
2322 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2323 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2324 	    (hdev->dev_type == HCI_PRIMARY &&
2325 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2326 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2327 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2328 		hci_dev_do_close(hdev);
2329 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2330 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2331 				   HCI_AUTO_OFF_TIMEOUT);
2334 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2335 		/* For unconfigured devices, set the HCI_RAW flag
2336 		 * so that userspace can easily identify them.
		 */
2338 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2339 			set_bit(HCI_RAW, &hdev->flags);
2341 		/* For fully configured devices, this will send
2342 		 * the Index Added event. For unconfigured devices,
2343 		 * it will send Unconfigued Index Added event.
		 *
2345 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2346 		 * and no event will be send.
		 */
2348 		mgmt_index_added(hdev);
2349 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2350 		/* When the controller is now configured, then it
2351 		 * is important to clear the HCI_RAW flag.
		 */
2353 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2354 			clear_bit(HCI_RAW, &hdev->flags);
2356 		/* Powering on the controller with HCI_CONFIG set only
2357 		 * happens with the transition from unconfigured to
2358 		 * configured. This will send the Index Added event.
		 */
2360 		mgmt_index_added(hdev);
/* Deferred power-off work item (armed e.g. by the auto-off timer) */
2364 static void hci_power_off(struct work_struct *work)
2366 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2369 	BT_DBG("%s", hdev->name);
2371 	hci_dev_do_close(hdev);
/* Work item run after a controller hardware error: give the driver a
 * chance to handle it, log the error code, then recover with a full
 * close + reopen cycle.
 */
2374 static void hci_error_reset(struct work_struct *work)
2376 	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2378 	BT_DBG("%s", hdev->name);
	/* Prefer the driver's own hw_error handler if it provides one */
2381 		hdev->hw_error(hdev, hdev->hw_error_code);
2383 		bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2385 	if (hci_dev_do_close(hdev))
2388 	hci_dev_do_open(hdev);
/* Free every registered UUID entry on hdev->uuids */
2391 void hci_uuids_clear(struct hci_dev *hdev)
2393 	struct bt_uuid *uuid, *tmp;
2395 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2396 		list_del(&uuid->list);
/* Remove and RCU-free every stored BR/EDR link key */
2401 void hci_link_keys_clear(struct hci_dev *hdev)
2403 	struct link_key *key;
2405 	list_for_each_entry(key, &hdev->link_keys, list) {
2406 		list_del_rcu(&key->list);
		/* Defer the free until concurrent RCU readers are done */
2407 		kfree_rcu(key, rcu);
/* Remove and RCU-free every stored SMP Long Term Key */
2411 void hci_smp_ltks_clear(struct hci_dev *hdev)
2415 	list_for_each_entry(k, &hdev->long_term_keys, list) {
2416 		list_del_rcu(&k->list);
/* Remove and RCU-free every stored Identity Resolving Key */
2421 void hci_smp_irks_clear(struct hci_dev *hdev)
2425 	list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
2426 		list_del_rcu(&k->list);
/* Remove and RCU-free every entry on the blocked-keys list */
2431 void hci_blocked_keys_clear(struct hci_dev *hdev)
2433 	struct blocked_key *b;
2435 	list_for_each_entry(b, &hdev->blocked_keys, list) {
2436 		list_del_rcu(&b->list);
/* Return true if the 16-byte key @val of kind @type is on the device's
 * blocked-keys list (RCU-protected lookup).
 */
2441 bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
2443 	bool blocked = false;
2444 	struct blocked_key *b;
2447 	list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
2448 		if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
/* Look up a stored BR/EDR link key by address (RCU list walk).  A key
 * on the blocked list is reported and treated as absent.
 */
2458 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2463 	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2464 		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			/* Never hand out a key that has been blocked */
2467 			if (hci_is_blocked_key(hdev,
2468 					       HCI_BLOCKED_KEY_TYPE_LINKKEY,
2470 				bt_dev_warn_ratelimited(hdev,
2471 							"Link key blocked for %pMR",
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and both sides' authentication
 * requirements from the pairing.
 */
2484 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2485 			       u8 key_type, u8 old_key_type)
	/* Legacy (pre-SSP) key types below 0x03 */
2488 	if (key_type < 0x03)
2491 	/* Debug keys are insecure so don't store them persistently */
2492 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2495 	/* Changed combination key and there's no previous one */
2496 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2499 	/* Security mode 3 case */
2503 	/* BR/EDR key derived using SC from an LE link */
2504 	if (conn->type == LE_LINK)
2507 	/* Neither local nor remote side had no-bonding as requirement */
2508 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2511 	/* Local side had dedicated bonding as requirement */
2512 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2515 	/* Remote side had dedicated bonding as requirement */
2516 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2519 	/* If none of the above criteria match, then don't store the key
	 * persistently */
/* Map an SMP LTK type to the HCI connection role it applies to */
2524 static u8 ltk_role(u8 type)
2526 	if (type == SMP_LTK)
2527 		return HCI_ROLE_MASTER;
2529 	return HCI_ROLE_SLAVE;
/* Look up a Long Term Key by address, address type and role (RCU list
 * walk).  SC-generated LTKs match either role; blocked keys are
 * reported and treated as absent.
 */
2532 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2533 			     u8 addr_type, u8 role)
2538 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2539 		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
		/* SC keys are symmetric; legacy keys are role-specific */
2542 		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2545 			if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
2547 				bt_dev_warn_ratelimited(hdev,
2548 							"LTK blocked for %pMR",
/* Resolve a Resolvable Private Address to its IRK: first try an exact
 * match against each IRK's cached RPA, then try cryptographically
 * resolving @rpa with each stored IRK (caching the result on match).
 * Blocked IRKs are reported and treated as absent.
 */
2561 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2563 	struct smp_irk *irk_to_return = NULL;
2564 	struct smp_irk *irk;
	/* Fast path: RPA seen before and cached on the IRK */
2567 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2568 		if (!bacmp(&irk->rpa, rpa)) {
2569 			irk_to_return = irk;
	/* Slow path: run the resolve function against each IRK */
2574 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2575 		if (smp_irk_matches(hdev, irk->val, rpa)) {
2576 			bacpy(&irk->rpa, rpa);
2577 			irk_to_return = irk;
2583 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2584 						irk_to_return->val)) {
2585 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2586 					&irk_to_return->bdaddr);
2587 		irk_to_return = NULL;
2592 	return irk_to_return;
/* Look up an IRK by its identity address and address type.  Blocked
 * IRKs are reported and treated as absent.
 */
2595 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2598 	struct smp_irk *irk_to_return = NULL;
2599 	struct smp_irk *irk;
2601 	/* Identity Address must be public or static random */
2602 	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2606 	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2607 		if (addr_type == irk->addr_type &&
2608 		    bacmp(bdaddr, &irk->bdaddr) == 0) {
2609 			irk_to_return = irk;
2616 	if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
2617 						irk_to_return->val)) {
2618 		bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
2619 					&irk_to_return->bdaddr);
2620 		irk_to_return = NULL;
2625 	return irk_to_return;
/* Store (or update) a BR/EDR link key for @bdaddr.  Reuses an existing
 * entry if present, otherwise allocates a new one.  Works around buggy
 * controllers that report HCI_LK_CHANGED_COMBINATION without a prior
 * key.  On return, *persistent tells the caller whether the key should
 * survive a reboot (see hci_persistent_key()).
 */
2628 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2629 				  bdaddr_t *bdaddr, u8 *val, u8 type,
2630 				  u8 pin_len, bool *persistent)
2632 	struct link_key *key, *old_key;
2635 	old_key = hci_find_link_key(hdev, bdaddr);
2637 		old_key_type = old_key->type;
	/* No previous key: 0xff marks "no old key type" */
2640 		old_key_type = conn ? conn->key_type : 0xff;
2641 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2644 		list_add_rcu(&key->list, &hdev->link_keys);
2647 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2649 	/* Some buggy controller combinations generate a changed
2650 	 * combination key for legacy pairing even when there's no
	 * previous key */
2652 	if (type == HCI_LK_CHANGED_COMBINATION &&
2653 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2654 		type = HCI_LK_COMBINATION;
2656 			conn->key_type = type;
2659 	bacpy(&key->bdaddr, bdaddr);
2660 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2661 	key->pin_len = pin_len;
	/* A "changed" key keeps the original key's type */
2663 	if (type == HCI_LK_CHANGED_COMBINATION)
2664 		key->type = old_key_type;
2669 		*persistent = hci_persistent_key(hdev, conn, type,
/* Store (or update) an SMP Long Term Key for @bdaddr/@addr_type,
 * reusing an existing entry with the matching role when present.
 */
2675 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2676 			    u8 addr_type, u8 type, u8 authenticated,
2677 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2679 	struct smp_ltk *key, *old_key;
2680 	u8 role = ltk_role(type);
2682 	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	/* No existing entry: allocate and link a fresh one */
2686 		key = kzalloc(sizeof(*key), GFP_KERNEL);
2689 		list_add_rcu(&key->list, &hdev->long_term_keys);
2692 	bacpy(&key->bdaddr, bdaddr);
2693 	key->bdaddr_type = addr_type;
2694 	memcpy(key->val, tk, sizeof(key->val));
2695 	key->authenticated = authenticated;
2698 	key->enc_size = enc_size;
/* Store (or update) an Identity Resolving Key for @bdaddr/@addr_type,
 * recording the current RPA alongside it.
 */
2704 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2705 			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
2707 	struct smp_irk *irk;
2709 	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	/* No existing entry: allocate and link a fresh one */
2711 		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2715 		bacpy(&irk->bdaddr, bdaddr);
2716 		irk->addr_type = addr_type;
2718 		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2721 	memcpy(irk->val, val, 16);
2722 	bacpy(&irk->rpa, rpa);
/* Remove the stored BR/EDR link key for @bdaddr, if any */
2727 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2729 	struct link_key *key;
2731 	key = hci_find_link_key(hdev, bdaddr);
2735 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2737 	list_del_rcu(&key->list);
	/* RCU-deferred free: readers may still hold a reference */
2738 	kfree_rcu(key, rcu);
/* Remove every stored LTK matching @bdaddr/@bdaddr_type; returns
 * -ENOENT when none was found.
 */
2743 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2748 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2749 		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2752 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2754 		list_del_rcu(&k->list);
2759 	return removed ? 0 : -ENOENT;
/* Remove every stored IRK matching @bdaddr/@addr_type */
2762 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2766 	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2767 		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2770 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2772 		list_del_rcu(&k->list);
/* Return true if @bdaddr of @type has stored pairing material: a link
 * key for BR/EDR, or an LTK for LE (resolving the identity address via
 * IRK first when one exists).
 */
2777 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2780 	struct smp_irk *irk;
2783 	if (type == BDADDR_BREDR) {
2784 		if (hci_find_link_key(hdev, bdaddr))
2789 	/* Convert to HCI addr type which struct smp_ltk uses */
2790 	if (type == BDADDR_LE_PUBLIC)
2791 		addr_type = ADDR_LE_DEV_PUBLIC;
2793 		addr_type = ADDR_LE_DEV_RANDOM;
	/* Resolve to the identity address if we hold an IRK */
2795 	irk = hci_get_irk(hdev, bdaddr, addr_type);
2797 		bdaddr = &irk->bdaddr;
2798 		addr_type = irk->addr_type;
2802 	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2803 		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2813 /* HCI command timer function */
/* Fires when a sent HCI command got no completion in time: log the
 * stuck opcode, let the driver react via cmd_timeout, then force the
 * command queue to make progress again.
 */
2814 static void hci_cmd_timeout(struct work_struct *work)
2816 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2819 	if (hdev->sent_cmd) {
2820 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2821 		u16 opcode = __le16_to_cpu(sent->opcode);
2823 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2825 		bt_dev_err(hdev, "command tx timeout");
	/* Give the transport driver a chance to recover (e.g. reset) */
2828 	if (hdev->cmd_timeout)
2829 		hdev->cmd_timeout(hdev);
	/* Allow one command credit and kick the command work again */
2831 	atomic_set(&hdev->cmd_cnt, 1);
2832 	queue_work(hdev->workqueue, &hdev->cmd_work);
2835 /* HCI ncmd timer function */
/* Fired when the controller reported ncmd = 0 and never recovered.
 * Outside of HCI_INIT (which has its own timeout handling) this injects a
 * hardware error event via hci_reset_dev() to force a full stack reset.
 */
2836 static void hci_ncmd_timeout(struct work_struct *work)
2838 struct hci_dev *hdev = container_of(work, struct hci_dev,
2841 bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");
2843 /* During HCI_INIT phase no events can be injected if the ncmd timer
2844 * triggers since the procedure has its own timeout handling.
2846 if (test_bit(HCI_INIT, &hdev->flags))
2849 /* This is an irrecoverable state, inject hardware error event */
2850 hci_reset_dev(hdev);
/* Look up stored remote OOB pairing data by address and address type.
 * NOTE(review): the found/not-found returns are elided in this listing.
 */
2853 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2854 bdaddr_t *bdaddr, u8 bdaddr_type)
2856 struct oob_data *data;
2858 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2859 if (bacmp(bdaddr, &data->bdaddr) != 0)
2861 if (data->bdaddr_type != bdaddr_type)
/* Remove the remote OOB data entry for @bdaddr/@bdaddr_type, if any. */
2869 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2872 struct oob_data *data;
2874 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2878 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2880 list_del(&data->list);
/* Drop every stored remote OOB data entry (safe iteration while deleting). */
2886 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2888 struct oob_data *data, *n;
2890 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2891 list_del(&data->list);
/* Store (or update) remote OOB pairing data for @bdaddr.
 * A new entry is allocated when none exists yet. The 192-bit and 256-bit
 * hash/randomizer pairs are copied when both halves of a pair are given,
 * zeroed otherwise, and data->present encodes which pairs are valid:
 * 0x01 = 192 only, 0x02 = 256 only, 0x03 = both, 0x00 = none.
 */
2896 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2897 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2898 u8 *hash256, u8 *rand256)
2900 struct oob_data *data;
2902 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2904 data = kmalloc(sizeof(*data), GFP_KERNEL);
2908 bacpy(&data->bdaddr, bdaddr);
2909 data->bdaddr_type = bdaddr_type;
2910 list_add(&data->list, &hdev->remote_oob_data);
2913 if (hash192 && rand192) {
2914 memcpy(data->hash192, hash192, sizeof(data->hash192));
2915 memcpy(data->rand192, rand192, sizeof(data->rand192));
2916 if (hash256 && rand256)
2917 data->present = 0x03;
2919 memset(data->hash192, 0, sizeof(data->hash192));
2920 memset(data->rand192, 0, sizeof(data->rand192));
2921 if (hash256 && rand256)
2922 data->present = 0x02;
2924 data->present = 0x00;
2927 if (hash256 && rand256) {
2928 memcpy(data->hash256, hash256, sizeof(data->hash256));
2929 memcpy(data->rand256, rand256, sizeof(data->rand256));
2931 memset(data->hash256, 0, sizeof(data->hash256));
2932 memset(data->rand256, 0, sizeof(data->rand256));
2933 if (hash192 && rand192)
2934 data->present = 0x01;
2937 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2942 /* This function requires the caller holds hdev->lock */
/* Find the advertising instance with the given instance number, or fall
 * through to the (elided) not-found return.
 */
2943 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2945 struct adv_info *adv_instance;
2947 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2948 if (adv_instance->instance == instance)
2949 return adv_instance;
2955 /* This function requires the caller holds hdev->lock */
/* Return the advertising instance that follows @instance, wrapping from
 * the last list entry back to the first (round-robin rotation).
 */
2956 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2958 struct adv_info *cur_instance;
2960 cur_instance = hci_find_adv_instance(hdev, instance);
2964 if (cur_instance == list_last_entry(&hdev->adv_instances,
2965 struct adv_info, list))
2966 return list_first_entry(&hdev->adv_instances,
2967 struct adv_info, list);
2969 return list_next_entry(cur_instance, list);
2972 /* This function requires the caller holds hdev->lock */
/* Remove advertising instance @instance: cancel its expiry timer when it
 * is the currently advertised one, cancel its RPA-expiry work, unlink it
 * and free it, decrementing the instance count.
 */
2973 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2975 struct adv_info *adv_instance;
2977 adv_instance = hci_find_adv_instance(hdev, instance);
2981 BT_DBG("%s removing %dMR", hdev->name, instance);
2983 if (hdev->cur_adv_instance == instance) {
2984 if (hdev->adv_instance_timeout) {
2985 cancel_delayed_work(&hdev->adv_instance_expire);
2986 hdev->adv_instance_timeout = 0;
2988 hdev->cur_adv_instance = 0x00;
2991 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
2993 list_del(&adv_instance->list);
2994 kfree(adv_instance);
2996 hdev->adv_instance_cnt--;
/* Mark every advertising instance's RPA as expired (or not) in one pass. */
3001 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
3003 struct adv_info *adv_instance, *n;
3005 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
3006 adv_instance->rpa_expired = rpa_expired;
3009 /* This function requires the caller holds hdev->lock */
/* Tear down all advertising instances: cancel the shared expiry timer,
 * synchronously cancel each instance's RPA work, free every instance and
 * reset the bookkeeping counters.
 */
3010 void hci_adv_instances_clear(struct hci_dev *hdev)
3012 struct adv_info *adv_instance, *n;
3014 if (hdev->adv_instance_timeout) {
3015 cancel_delayed_work(&hdev->adv_instance_expire);
3016 hdev->adv_instance_timeout = 0;
3019 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
3020 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
3021 list_del(&adv_instance->list);
3022 kfree(adv_instance);
3025 hdev->adv_instance_cnt = 0;
3026 hdev->cur_adv_instance = 0x00;
/* Delayed-work callback: flag this instance's resolvable private address
 * as expired so it gets regenerated.
 */
3029 static void adv_instance_rpa_expired(struct work_struct *work)
3031 struct adv_info *adv_instance = container_of(work, struct adv_info,
3032 rpa_expired_cb.work);
3036 adv_instance->rpa_expired = true;
3039 /* This function requires the caller holds hdev->lock */
/* Add a new advertising instance or update an existing one.
 * On update the old adv/scan-rsp buffers are zeroed first; on add the
 * instance number must be within 1..le_num_of_adv_sets and the set count
 * below the controller limit. A duration of 0 falls back to the default
 * multi-advertising rotation duration.
 */
3040 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
3041 u16 adv_data_len, u8 *adv_data,
3042 u16 scan_rsp_len, u8 *scan_rsp_data,
3043 u16 timeout, u16 duration, s8 tx_power,
3044 u32 min_interval, u32 max_interval)
3046 struct adv_info *adv_instance;
3048 adv_instance = hci_find_adv_instance(hdev, instance);
3050 memset(adv_instance->adv_data, 0,
3051 sizeof(adv_instance->adv_data));
3052 memset(adv_instance->scan_rsp_data, 0,
3053 sizeof(adv_instance->scan_rsp_data));
3055 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
3056 instance < 1 || instance > hdev->le_num_of_adv_sets)
3059 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
3063 adv_instance->pending = true;
3064 adv_instance->instance = instance;
3065 list_add(&adv_instance->list, &hdev->adv_instances);
3066 hdev->adv_instance_cnt++;
3069 adv_instance->flags = flags;
3070 adv_instance->adv_data_len = adv_data_len;
3071 adv_instance->scan_rsp_len = scan_rsp_len;
3072 adv_instance->min_interval = min_interval;
3073 adv_instance->max_interval = max_interval;
3074 adv_instance->tx_power = tx_power;
3077 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3080 memcpy(adv_instance->scan_rsp_data,
3081 scan_rsp_data, scan_rsp_len);
3083 adv_instance->timeout = timeout;
3084 adv_instance->remaining_time = timeout;
3087 adv_instance->duration = hdev->def_multi_adv_rotation_duration;
3089 adv_instance->duration = duration;
3091 INIT_DELAYED_WORK(&adv_instance->rpa_expired_cb,
3092 adv_instance_rpa_expired);
3094 BT_DBG("%s for %dMR", hdev->name, instance);
3099 /* This function requires the caller holds hdev->lock */
/* Replace the advertising and/or scan-response payload of an existing
 * instance; the instance must already exist.
 */
3100 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
3101 u16 adv_data_len, u8 *adv_data,
3102 u16 scan_rsp_len, u8 *scan_rsp_data)
3104 struct adv_info *adv_instance;
3106 adv_instance = hci_find_adv_instance(hdev, instance);
3108 /* If advertisement doesn't exist, we can't modify its data */
3113 memset(adv_instance->adv_data, 0,
3114 sizeof(adv_instance->adv_data));
3115 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
3116 adv_instance->adv_data_len = adv_data_len;
3120 memset(adv_instance->scan_rsp_data, 0,
3121 sizeof(adv_instance->scan_rsp_data));
3122 memcpy(adv_instance->scan_rsp_data,
3123 scan_rsp_data, scan_rsp_len);
3124 adv_instance->scan_rsp_len = scan_rsp_len;
3130 /* This function requires the caller holds hdev->lock */
/* Free every registered advertisement monitor and destroy the IDR. */
3131 void hci_adv_monitors_clear(struct hci_dev *hdev)
3133 struct adv_monitor *monitor;
3136 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
3137 hci_free_adv_monitor(hdev, monitor);
3139 idr_destroy(&hdev->adv_monitors_idr);
3142 /* Frees the monitor structure and do some bookkeepings.
3143 * This function requires the caller holds hdev->lock.
 *
 * Frees each pattern, drops the IDR mapping (handle 0 means never
 * assigned), and if the monitor had been registered, decrements the
 * monitor count and notifies mgmt of the removal.
 */
3145 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
3147 struct adv_pattern *pattern;
3148 struct adv_pattern *tmp;
3153 list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
3154 list_del(&pattern->list);
3158 if (monitor->handle)
3159 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
3161 if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
3162 hdev->adv_monitors_cnt--;
3163 mgmt_adv_monitor_removed(hdev, monitor->handle);
/* Thin forwarder: report add-monitor completion status to mgmt. */
3169 int hci_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
3171 return mgmt_add_adv_patterns_monitor_complete(hdev, status);
/* Thin forwarder: report remove-monitor completion status to mgmt. */
3174 int hci_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
3176 return mgmt_remove_adv_monitor_complete(hdev, status);
3179 /* Assigns handle to a monitor, and if offloading is supported and power is on,
3180 * also attempts to forward the request to the controller.
3181 * Returns true if request is forwarded (result is pending), false otherwise.
3182 * This function requires the caller holds hdev->lock.
 *
 * Handle allocation uses an IDR in the range
 * [HCI_MIN_ADV_MONITOR_HANDLE, HCI_MIN_ADV_MONITOR_HANDLE + MAX).
 * With no offload extension the request stays host-side and only
 * background scanning is refreshed; with MSFT offload the pattern is
 * forwarded to the controller.
 */
3184 bool hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor,
3187 int min, max, handle;
3196 min = HCI_MIN_ADV_MONITOR_HANDLE;
3197 max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
3198 handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
3205 monitor->handle = handle;
3207 if (!hdev_is_powered(hdev))
3210 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3211 case HCI_ADV_MONITOR_EXT_NONE:
3212 hci_update_background_scan(hdev);
3213 bt_dev_dbg(hdev, "%s add monitor status %d", hdev->name, *err);
3214 /* Message was not forwarded to controller - not an error */
3216 case HCI_ADV_MONITOR_EXT_MSFT:
3217 *err = msft_add_monitor_pattern(hdev, monitor);
3218 bt_dev_dbg(hdev, "%s add monitor msft status %d", hdev->name,
3226 /* Attempts to tell the controller and free the monitor. If somehow the
3227 * controller doesn't have a corresponding handle, remove anyway.
3228 * Returns true if request is forwarded (result is pending), false otherwise.
3229 * This function requires the caller holds hdev->lock.
 *
 * -ENOENT from the MSFT path is downgraded to a warning: the monitor is
 * freed locally even though the controller had no matching handle.
 */
3231 static bool hci_remove_adv_monitor(struct hci_dev *hdev,
3232 struct adv_monitor *monitor,
3233 u16 handle, int *err)
3237 switch (hci_get_adv_monitor_offload_ext(hdev)) {
3238 case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
3240 case HCI_ADV_MONITOR_EXT_MSFT:
3241 *err = msft_remove_monitor(hdev, monitor, handle);
3245 /* In case no matching handle registered, just free the monitor */
3246 if (*err == -ENOENT)
3252 if (*err == -ENOENT)
3253 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
3255 hci_free_adv_monitor(hdev, monitor);
3261 /* Returns true if request is forwarded (result is pending), false otherwise.
3262 * This function requires the caller holds hdev->lock.
 *
 * Remove one monitor by handle; refresh background scanning when the
 * removal completed synchronously without error.
 */
3264 bool hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle, int *err)
3266 struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
3274 pending = hci_remove_adv_monitor(hdev, monitor, handle, err);
3275 if (!*err && !pending)
3276 hci_update_background_scan(hdev);
3278 bt_dev_dbg(hdev, "%s remove monitor handle %d, status %d, %spending",
3279 hdev->name, handle, *err, pending ? "" : "not ");
3284 /* Returns true if request is forwarded (result is pending), false otherwise.
3285 * This function requires the caller holds hdev->lock.
 *
 * Iterate the monitor IDR and remove each entry until either an error
 * occurs or a removal becomes pending on the controller.
 */
3287 bool hci_remove_all_adv_monitor(struct hci_dev *hdev, int *err)
3289 struct adv_monitor *monitor;
3290 int idr_next_id = 0;
3291 bool pending = false;
3292 bool update = false;
3296 while (!*err && !pending) {
3297 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
3301 pending = hci_remove_adv_monitor(hdev, monitor, 0, err);
3303 if (!*err && !pending)
3308 hci_update_background_scan(hdev);
3310 bt_dev_dbg(hdev, "%s remove all monitors status %d, %spending",
3311 hdev->name, *err, pending ? "" : "not ");
3316 /* This function requires the caller holds hdev->lock */
/* True when at least one advertisement monitor is registered. */
3317 bool hci_is_adv_monitoring(struct hci_dev *hdev)
3319 return !idr_is_empty(&hdev->adv_monitors_idr);
/* Report which monitor-offload extension the controller supports:
 * MSFT vendor extension if available, otherwise none (host-side only).
 */
3322 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
3324 if (msft_monitor_supported(hdev))
3325 return HCI_ADV_MONITOR_EXT_MSFT;
3327 return HCI_ADV_MONITOR_EXT_NONE;
/* Find a bdaddr_list entry matching address and type; not-found return
 * is elided in this listing.
 */
3330 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3331 bdaddr_t *bdaddr, u8 type)
3333 struct bdaddr_list *b;
3335 list_for_each_entry(b, bdaddr_list, list) {
3336 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Same lookup as hci_bdaddr_list_lookup but on the IRK-carrying variant. */
3343 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
3344 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
3347 struct bdaddr_list_with_irk *b;
3349 list_for_each_entry(b, bdaddr_list, list) {
3350 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Same lookup as hci_bdaddr_list_lookup but on the flags-carrying variant. */
3357 struct bdaddr_list_with_flags *
3358 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
3359 bdaddr_t *bdaddr, u8 type)
3361 struct bdaddr_list_with_flags *b;
3363 list_for_each_entry(b, bdaddr_list, list) {
3364 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove (and free, in the elided body) every entry in a bdaddr list. */
3371 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3373 struct bdaddr_list *b, *n;
3375 list_for_each_entry_safe(b, n, bdaddr_list, list) {
/* Add @bdaddr/@type to @list. BDADDR_ANY is rejected and duplicates are
 * refused (error returns elided in this listing).
 */
3381 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3383 struct bdaddr_list *entry;
3385 if (!bacmp(bdaddr, BDADDR_ANY))
3388 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3391 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3395 bacpy(&entry->bdaddr, bdaddr);
3396 entry->bdaddr_type = type;
3398 list_add(&entry->list, list);
/* Add an entry carrying optional 16-byte peer and local IRKs; same
 * BDADDR_ANY/duplicate checks as hci_bdaddr_list_add.
 */
3403 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3404 u8 type, u8 *peer_irk, u8 *local_irk)
3406 struct bdaddr_list_with_irk *entry;
3408 if (!bacmp(bdaddr, BDADDR_ANY))
3411 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3414 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3418 bacpy(&entry->bdaddr, bdaddr);
3419 entry->bdaddr_type = type;
3422 memcpy(entry->peer_irk, peer_irk, 16);
3425 memcpy(entry->local_irk, local_irk, 16);
3427 list_add(&entry->list, list);
/* Add an entry carrying per-device flags; same BDADDR_ANY/duplicate
 * checks as hci_bdaddr_list_add.
 */
3432 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3435 struct bdaddr_list_with_flags *entry;
3437 if (!bacmp(bdaddr, BDADDR_ANY))
3440 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3443 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3447 bacpy(&entry->bdaddr, bdaddr);
3448 entry->bdaddr_type = type;
3449 entry->current_flags = flags;
3451 list_add(&entry->list, list);
/* Delete @bdaddr/@type from @list; BDADDR_ANY clears the whole list. */
3456 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3458 struct bdaddr_list *entry;
3460 if (!bacmp(bdaddr, BDADDR_ANY)) {
3461 hci_bdaddr_list_clear(list);
3465 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3469 list_del(&entry->list);
/* IRK-list variant of hci_bdaddr_list_del; BDADDR_ANY clears the list. */
3475 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
3478 struct bdaddr_list_with_irk *entry;
3480 if (!bacmp(bdaddr, BDADDR_ANY)) {
3481 hci_bdaddr_list_clear(list);
3485 entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
3489 list_del(&entry->list);
/* Flags-list variant of hci_bdaddr_list_del; BDADDR_ANY clears the list. */
3495 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
3498 struct bdaddr_list_with_flags *entry;
3500 if (!bacmp(bdaddr, BDADDR_ANY)) {
3501 hci_bdaddr_list_clear(list);
3505 entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
3509 list_del(&entry->list);
3515 /* This function requires the caller holds hdev->lock */
/* Find LE connection parameters by address and address type.
 * NOTE(review): "¶ms" below is encoding mojibake for "&params" —
 * repair the extraction, do not treat it as real source.
 */
3516 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3517 bdaddr_t *addr, u8 addr_type)
3519 struct hci_conn_params *params;
3521 list_for_each_entry(params, &hdev->le_conn_params, list) {
3522 if (bacmp(¶ms->addr, addr) == 0 &&
3523 params->addr_type == addr_type) {
3531 /* This function requires the caller holds hdev->lock */
/* Find pending-LE-action params on an action list; resolved address
 * types are normalized to their unresolved counterparts first.
 * NOTE(review): "¶m" below is mojibake for "&param".
 */
3532 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3533 bdaddr_t *addr, u8 addr_type)
3535 struct hci_conn_params *param;
3537 switch (addr_type) {
3538 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3539 addr_type = ADDR_LE_DEV_PUBLIC;
3541 case ADDR_LE_DEV_RANDOM_RESOLVED:
3542 addr_type = ADDR_LE_DEV_RANDOM;
3546 list_for_each_entry(param, list, action) {
3547 if (bacmp(¶m->addr, addr) == 0 &&
3548 param->addr_type == addr_type)
3555 /* This function requires the caller holds hdev->lock */
/* Return existing LE connection parameters for @addr/@addr_type or
 * allocate a fresh entry initialized from the controller defaults with
 * auto-connect disabled. NOTE(review): "¶ms" is mojibake for "&params".
 */
3556 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3557 bdaddr_t *addr, u8 addr_type)
3559 struct hci_conn_params *params;
3561 params = hci_conn_params_lookup(hdev, addr, addr_type);
3565 params = kzalloc(sizeof(*params), GFP_KERNEL);
3567 bt_dev_err(hdev, "out of memory");
3571 bacpy(¶ms->addr, addr);
3572 params->addr_type = addr_type;
3574 list_add(¶ms->list, &hdev->le_conn_params);
3575 INIT_LIST_HEAD(¶ms->action);
3577 params->conn_min_interval = hdev->le_conn_min_interval;
3578 params->conn_max_interval = hdev->le_conn_max_interval;
3579 params->conn_latency = hdev->le_conn_latency;
3580 params->supervision_timeout = hdev->le_supv_timeout;
3581 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3583 BT_DBG("addr %pMR (type %u)", addr, addr_type);
/* Release one conn-params entry: drop/put any held connection reference
 * and unlink it from both the action and the main list.
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
3588 static void hci_conn_params_free(struct hci_conn_params *params)
3591 hci_conn_drop(params->conn);
3592 hci_conn_put(params->conn);
3595 list_del(¶ms->action);
3596 list_del(¶ms->list);
3600 /* This function requires the caller holds hdev->lock */
/* Delete the conn-params entry for @addr/@addr_type and refresh
 * background scanning.
 */
3601 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3603 struct hci_conn_params *params;
3605 params = hci_conn_params_lookup(hdev, addr, addr_type);
3609 hci_conn_params_free(params);
3611 hci_update_background_scan(hdev);
3613 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3616 /* This function requires the caller holds hdev->lock */
/* Drop all conn-params entries whose auto-connect is disabled, except
 * entries with an explicit one-shot connect pending, which are converted
 * to HCI_AUTO_CONN_EXPLICIT instead of being removed.
 * NOTE(review): "¶ms" is mojibake for "&params".
 */
3617 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3619 struct hci_conn_params *params, *tmp;
3621 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3622 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3625 /* If trying to establish one time connection to disabled
3626 * device, leave the params, but mark them as just once.
3628 if (params->explicit_connect) {
3629 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3633 list_del(¶ms->list);
3637 BT_DBG("All LE disabled connection parameters were removed");
3640 /* This function requires the caller holds hdev->lock */
/* Free every LE connection-parameters entry. */
3641 static void hci_conn_params_clear_all(struct hci_dev *hdev)
3643 struct hci_conn_params *params, *tmp;
3645 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3646 hci_conn_params_free(params);
3648 BT_DBG("All LE connection parameters were removed");
3651 /* Copy the Identity Address of the controller.
3653 * If the controller has a public BD_ADDR, then by default use that one.
3654 * If this is a LE only controller without a public address, default to
3655 * the static random address.
3657 * For debugging purposes it is possible to force controllers with a
3658 * public address to use the static random address instead.
3660 * In case BR/EDR has been disabled on a dual-mode controller and
3661 * userspace has configured a static address, then that address
3662 * becomes the identity address instead of the public BR/EDR address.
 */
3664 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
/* Out-params: @bdaddr receives the identity address, @bdaddr_type its
 * ADDR_LE_DEV_* type.
 */
3667 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3668 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3669 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3670 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3671 bacpy(bdaddr, &hdev->static_addr);
3672 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3674 bacpy(bdaddr, &hdev->bdaddr);
3675 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
/* Clear every pending suspend-task bit and wake any waiter so suspend
 * handling cannot block on a device that is going away.
 */
3679 static void hci_suspend_clear_tasks(struct hci_dev *hdev)
3683 for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
3684 clear_bit(i, hdev->suspend_tasks);
3686 wake_up(&hdev->suspend_wait_q);
/* Wait (with SUSPEND_NOTIFIER_TIMEOUT) until every suspend-task bit is
 * clear; on timeout, log each still-set bit and clear it so the state
 * does not stay stuck.
 */
3689 static int hci_suspend_wait_event(struct hci_dev *hdev)
3692 (find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
3693 __SUSPEND_NUM_TASKS)
3696 int ret = wait_event_timeout(hdev->suspend_wait_q,
3697 WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
3700 bt_dev_err(hdev, "Timed out waiting for suspend events");
3701 for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
3702 if (test_bit(i, hdev->suspend_tasks))
3703 bt_dev_err(hdev, "Suspend timeout bit: %d", i);
3704 clear_bit(i, hdev->suspend_tasks);
/* Workqueue callback: run the queued suspend-state transition under
 * hdev->lock.
 */
3715 static void hci_prepare_suspend(struct work_struct *work)
3717 struct hci_dev *hdev =
3718 container_of(work, struct hci_dev, suspend_prepare);
3721 hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
3722 hci_dev_unlock(hdev);
/* Queue a transition to suspend state @next on the request workqueue and
 * block until the prepare work signals completion (or times out).
 */
3725 static int hci_change_suspend_state(struct hci_dev *hdev,
3726 enum suspended_state next)
3728 hdev->suspend_state_next = next;
3729 set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
3730 queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
3731 return hci_suspend_wait_event(hdev);
/* Reset the recorded wake reason/address before entering suspend. */
3734 static void hci_clear_wake_reason(struct hci_dev *hdev)
3738 hdev->wake_reason = 0;
3739 bacpy(&hdev->wake_addr, BDADDR_ANY);
3740 hdev->wake_addr_type = 0;
3742 hci_dev_unlock(hdev);
/* PM notifier: drive the controller through the Bluetooth suspend state
 * machine on PM_SUSPEND_PREPARE (disconnect, then configure wake filters
 * unless the driver's prevent_wake hook objects) and back to BT_RUNNING
 * on PM_POST_SUSPEND. Suspend is always allowed even if preparation
 * failed; recovery is attempted on resume.
 */
3745 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
3748 struct hci_dev *hdev =
3749 container_of(nb, struct hci_dev, suspend_notifier);
3751 u8 state = BT_RUNNING;
3753 /* If powering down, wait for completion. */
3754 if (mgmt_powering_down(hdev)) {
3755 set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
3756 ret = hci_suspend_wait_event(hdev);
3761 /* Suspend notifier should only act on events when powered. */
3762 if (!hdev_is_powered(hdev) ||
3763 hci_dev_test_flag(hdev, HCI_UNREGISTER))
3766 if (action == PM_SUSPEND_PREPARE) {
3767 /* Suspend consists of two actions:
3768 * - First, disconnect everything and make the controller not
3769 * connectable (disabling scanning)
3770 * - Second, program event filter/accept list and enable scan
3772 ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
3774 state = BT_SUSPEND_DISCONNECT;
3776 /* Only configure accept list if disconnect succeeded and wake
3777 * isn't being prevented.
3779 if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
3780 ret = hci_change_suspend_state(hdev,
3781 BT_SUSPEND_CONFIGURE_WAKE);
3783 state = BT_SUSPEND_CONFIGURE_WAKE;
3786 hci_clear_wake_reason(hdev);
3787 mgmt_suspending(hdev, state);
3789 } else if (action == PM_POST_SUSPEND) {
3790 ret = hci_change_suspend_state(hdev, BT_RUNNING);
3792 mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
3793 hdev->wake_addr_type);
3797 /* We always allow suspend even if suspend preparation failed and
3798 * attempt to recover in resume.
3801 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
3807 /* Alloc HCI device */
/* Allocate and initialize an hci_dev with @sizeof_priv bytes of driver
 * private data appended. Fills in protocol defaults (packet types, LE
 * scan/connection/advertising parameters, timeouts), initializes every
 * list, lock, workqueue item and timer the core uses, and sets up sysfs
 * and discovery state. The return on allocation failure and the final
 * return of hdev are elided in this listing.
 */
3808 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
3810 struct hci_dev *hdev;
3811 unsigned int alloc_size;
3813 alloc_size = sizeof(*hdev);
3815 /* Fixme: May need ALIGN-ment? */
3816 alloc_size += sizeof_priv;
3819 hdev = kzalloc(alloc_size, GFP_KERNEL);
3823 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3824 hdev->esco_type = (ESCO_HV1);
3825 hdev->link_mode = (HCI_LM_ACCEPT);
3826 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3827 hdev->io_capability = 0x03; /* No Input No Output */
3828 hdev->manufacturer = 0xffff; /* Default to internal use */
3829 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3830 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3831 hdev->adv_instance_cnt = 0;
3832 hdev->cur_adv_instance = 0x00;
3833 hdev->adv_instance_timeout = 0;
3835 hdev->advmon_allowlist_duration = 300;
3836 hdev->advmon_no_filter_duration = 500;
3837 hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */
3839 hdev->sniff_max_interval = 800;
3840 hdev->sniff_min_interval = 80;
3842 hdev->le_adv_channel_map = 0x07;
3843 hdev->le_adv_min_interval = 0x0800;
3844 hdev->le_adv_max_interval = 0x0800;
3846 hdev->adv_filter_policy = 0x00;
3847 hdev->adv_type = 0x00;
3849 hdev->le_scan_interval = 0x0060;
3850 hdev->le_scan_window = 0x0030;
3851 hdev->le_scan_int_suspend = 0x0400;
3852 hdev->le_scan_window_suspend = 0x0012;
3853 hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
3854 hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
3855 hdev->le_scan_int_adv_monitor = 0x0060;
3856 hdev->le_scan_window_adv_monitor = 0x0030;
3857 hdev->le_scan_int_connect = 0x0060;
3858 hdev->le_scan_window_connect = 0x0060;
3859 hdev->le_conn_min_interval = 0x0018;
3860 hdev->le_conn_max_interval = 0x0028;
3861 hdev->le_conn_latency = 0x0000;
3862 hdev->le_supv_timeout = 0x002a;
3863 hdev->le_def_tx_len = 0x001b;
3864 hdev->le_def_tx_time = 0x0148;
3865 hdev->le_max_tx_len = 0x001b;
3866 hdev->le_max_tx_time = 0x0148;
3867 hdev->le_max_rx_len = 0x001b;
3868 hdev->le_max_rx_time = 0x0148;
3869 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3870 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3871 hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
3872 hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
3873 hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
3874 hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
3875 hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
3876 hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
3877 hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
3879 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3880 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3881 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3882 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3883 hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
3884 hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
3886 /* default 1.28 sec page scan */
3887 hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
3888 hdev->def_page_scan_int = 0x0800;
3889 hdev->def_page_scan_window = 0x0012;
3891 mutex_init(&hdev->lock);
3892 mutex_init(&hdev->req_lock);
3894 INIT_LIST_HEAD(&hdev->mgmt_pending);
3895 INIT_LIST_HEAD(&hdev->reject_list);
3896 INIT_LIST_HEAD(&hdev->accept_list);
3897 INIT_LIST_HEAD(&hdev->uuids);
3898 INIT_LIST_HEAD(&hdev->link_keys);
3899 INIT_LIST_HEAD(&hdev->long_term_keys);
3900 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3901 INIT_LIST_HEAD(&hdev->remote_oob_data);
3902 INIT_LIST_HEAD(&hdev->le_accept_list);
3903 INIT_LIST_HEAD(&hdev->le_resolv_list);
3904 INIT_LIST_HEAD(&hdev->le_conn_params);
3905 INIT_LIST_HEAD(&hdev->pend_le_conns);
3906 INIT_LIST_HEAD(&hdev->pend_le_reports);
3907 INIT_LIST_HEAD(&hdev->conn_hash.list);
3908 INIT_LIST_HEAD(&hdev->adv_instances);
3909 INIT_LIST_HEAD(&hdev->blocked_keys);
3911 INIT_WORK(&hdev->rx_work, hci_rx_work);
3912 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3913 INIT_WORK(&hdev->tx_work, hci_tx_work);
3914 INIT_WORK(&hdev->power_on, hci_power_on);
3915 INIT_WORK(&hdev->error_reset, hci_error_reset);
3916 INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
3918 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3920 skb_queue_head_init(&hdev->rx_q);
3921 skb_queue_head_init(&hdev->cmd_q);
3922 skb_queue_head_init(&hdev->raw_q);
3924 init_waitqueue_head(&hdev->req_wait_q);
3925 init_waitqueue_head(&hdev->suspend_wait_q);
3927 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3928 INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
3930 hci_request_setup(hdev);
3932 hci_init_sysfs(hdev);
3933 discovery_init(hdev);
3937 EXPORT_SYMBOL(hci_alloc_dev_priv);
3939 /* Free HCI device */
/* Drop the device reference; actual freeing happens in the device
 * release callback (see hci_release_dev).
 */
3940 void hci_free_dev(struct hci_dev *hdev)
3942 /* will free via device release */
3943 put_device(&hdev->dev);
3945 EXPORT_SYMBOL(hci_free_dev);
3947 /* Register HCI device */
/* Register a fully-allocated hci_dev with the core: validate the driver
 * callbacks, allocate an index (AMP devices never get index 0), create
 * the per-device workqueues, debugfs dir, sysfs device, rfkill switch,
 * add it to hci_dev_list, announce HCI_DEV_REG, hook the PM suspend
 * notifier (unless quirked off) and queue power-on. Error-unwind labels
 * are elided in this listing.
 */
3948 int hci_register_dev(struct hci_dev *hdev)
3952 if (!hdev->open || !hdev->close || !hdev->send)
3955 /* Do not allow HCI_AMP devices to register at index 0,
3956 * so the index can be used as the AMP controller ID.
3958 switch (hdev->dev_type) {
3960 id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
3963 id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
3972 snprintf(hdev->name, sizeof(hdev->name), "hci%d", id);
3975 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3977 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3978 if (!hdev->workqueue) {
3983 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3985 if (!hdev->req_workqueue) {
3986 destroy_workqueue(hdev->workqueue);
3991 if (!IS_ERR_OR_NULL(bt_debugfs))
3992 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3994 dev_set_name(&hdev->dev, "%s", hdev->name);
3996 error = device_add(&hdev->dev);
4000 hci_leds_init(hdev);
4002 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4003 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4006 if (rfkill_register(hdev->rfkill) < 0) {
4007 rfkill_destroy(hdev->rfkill);
4008 hdev->rfkill = NULL;
4012 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4013 hci_dev_set_flag(hdev, HCI_RFKILLED);
4015 hci_dev_set_flag(hdev, HCI_SETUP);
4016 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
4018 if (hdev->dev_type == HCI_PRIMARY) {
4019 /* Assume BR/EDR support until proven otherwise (such as
4020 * through reading supported features during init.
4022 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4025 write_lock(&hci_dev_list_lock);
4026 list_add(&hdev->list, &hci_dev_list);
4027 write_unlock(&hci_dev_list_lock);
4029 /* Devices that are marked for raw-only usage are unconfigured
4030 * and should not be included in normal operation.
4032 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4033 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4035 hci_sock_dev_event(hdev, HCI_DEV_REG);
4038 if (!hdev->suspend_notifier.notifier_call &&
4039 !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4040 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
4041 error = register_pm_notifier(&hdev->suspend_notifier);
4046 queue_work(hdev->req_workqueue, &hdev->power_on);
4048 idr_init(&hdev->adv_monitors_idr);
/* Error unwind: release debugfs, workqueues and the index. */
4053 debugfs_remove_recursive(hdev->debugfs);
4054 destroy_workqueue(hdev->workqueue);
4055 destroy_workqueue(hdev->req_workqueue);
4057 ida_simple_remove(&hci_index_ida, hdev->id);
4061 EXPORT_SYMBOL(hci_register_dev);
4063 /* Unregister HCI device */
/* Tear down a registered device: mark HCI_UNREGISTER, remove it from the
 * global list, flush power-on and suspend work, close the device, tell
 * mgmt the index is gone (unless still in INIT/SETUP/CONFIG), announce
 * HCI_DEV_UNREG, drop rfkill and delete the sysfs device. Final memory
 * cleanup is deferred to hci_release_dev().
 */
4064 void hci_unregister_dev(struct hci_dev *hdev)
4066 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4068 hci_dev_set_flag(hdev, HCI_UNREGISTER);
4070 write_lock(&hci_dev_list_lock);
4071 list_del(&hdev->list);
4072 write_unlock(&hci_dev_list_lock);
4074 cancel_work_sync(&hdev->power_on);
4076 if (!test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
4077 hci_suspend_clear_tasks(hdev);
4078 unregister_pm_notifier(&hdev->suspend_notifier);
4079 cancel_work_sync(&hdev->suspend_prepare);
4082 hci_dev_do_close(hdev);
4084 if (!test_bit(HCI_INIT, &hdev->flags) &&
4085 !hci_dev_test_flag(hdev, HCI_SETUP) &&
4086 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4088 mgmt_index_removed(hdev);
4089 hci_dev_unlock(hdev);
4092 /* mgmt_index_removed should take care of emptying the
4094 BUG_ON(!list_empty(&hdev->mgmt_pending));
4096 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
4099 rfkill_unregister(hdev->rfkill);
4100 rfkill_destroy(hdev->rfkill);
4103 device_del(&hdev->dev);
4104 /* Actual cleanup is deferred until hci_release_dev(). */
4107 EXPORT_SYMBOL(hci_unregister_dev);
4109 /* Release HCI device */
/* Final cleanup, called from the device release path: free debugfs,
 * info strings and workqueues, clear every key/list/params store under
 * hdev->lock, return the index to the IDA and free the pending command
 * skb. (The final kfree of hdev itself is elided in this listing.)
 */
4110 void hci_release_dev(struct hci_dev *hdev)
4112 debugfs_remove_recursive(hdev->debugfs);
4113 kfree_const(hdev->hw_info);
4114 kfree_const(hdev->fw_info);
4116 destroy_workqueue(hdev->workqueue);
4117 destroy_workqueue(hdev->req_workqueue);
4120 hci_bdaddr_list_clear(&hdev->reject_list);
4121 hci_bdaddr_list_clear(&hdev->accept_list);
4122 hci_uuids_clear(hdev);
4123 hci_link_keys_clear(hdev);
4124 hci_smp_ltks_clear(hdev);
4125 hci_smp_irks_clear(hdev);
4126 hci_remote_oob_data_clear(hdev);
4127 hci_adv_instances_clear(hdev);
4128 hci_adv_monitors_clear(hdev);
4129 hci_bdaddr_list_clear(&hdev->le_accept_list);
4130 hci_bdaddr_list_clear(&hdev->le_resolv_list);
4131 hci_conn_params_clear_all(hdev);
4132 hci_discovery_filter_clear(hdev);
4133 hci_blocked_keys_clear(hdev);
4134 hci_dev_unlock(hdev);
4136 ida_simple_remove(&hci_index_ida, hdev->id);
4137 kfree_skb(hdev->sent_cmd);
4140 EXPORT_SYMBOL(hci_release_dev);
4142 /* Suspend HCI device */
/* Notify HCI sockets that the device is suspending. */
4143 int hci_suspend_dev(struct hci_dev *hdev)
4145 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
4148 EXPORT_SYMBOL(hci_suspend_dev);
4150 /* Resume HCI device */
/* Notify HCI sockets that the device is resuming. */
4151 int hci_resume_dev(struct hci_dev *hdev)
4153 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
4156 EXPORT_SYMBOL(hci_resume_dev);
4158 /* Reset HCI device */
/* Inject a synthetic HCI Hardware Error event into the RX path so the
 * stack tears the controller down and re-initializes it, exactly as if
 * the hardware itself had reported the error. Used by drivers that
 * detect an unrecoverable controller state. */
4159 int hci_reset_dev(struct hci_dev *hdev)
4161 static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4162 struct sk_buff *skb;
/* GFP_ATOMIC: callers may be in non-sleepable (e.g. IRQ) context. */
4164 skb = bt_skb_alloc(3, GFP_ATOMIC);
4168 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
4169 skb_put_data(skb, hw_err, 3);
4171 bt_dev_err(hdev, "Injecting HCI hardware error event");
4173 /* Send Hardware Error to upper stack */
4174 return hci_recv_frame(hdev, skb);
4176 EXPORT_SYMBOL(hci_reset_dev);
4178 /* Receive frame from HCI drivers */
/* Driver-facing RX entry point: validate device state and packet type,
 * timestamp the skb and queue it for hci_rx_work(). Frames are only
 * accepted while the device is UP or still in INIT (setup traffic). */
4179 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4181 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4182 && !test_bit(HCI_INIT, &hdev->flags))) {
/* Only the four defined H:4 packet types are allowed in; anything
 * else is a driver bug and the frame is rejected. */
4187 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
4188 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
4189 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT &&
4190 hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) {
/* Mark direction for the monitor/socket copy logic. */
4196 bt_cb(skb)->incoming = 1;
4199 __net_timestamp(skb);
/* Defer all processing to the RX work item on the device workqueue. */
4201 skb_queue_tail(&hdev->rx_q, skb);
4202 queue_work(hdev->workqueue, &hdev->rx_work);
4206 EXPORT_SYMBOL(hci_recv_frame);
4208 /* Receive diagnostic message from HCI drivers */
/* Like hci_recv_frame() but for vendor diagnostic traffic: tag the skb
 * as HCI_DIAG_PKT and feed it through the same RX queue/work path. */
4209 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
4211 /* Mark as diagnostic packet */
4212 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
4215 __net_timestamp(skb);
4217 skb_queue_tail(&hdev->rx_q, skb);
4218 queue_work(hdev->workqueue, &hdev->rx_work);
4222 EXPORT_SYMBOL(hci_recv_diag);
/* Record a printf-style hardware-revision string for the controller,
 * replacing any previous value. kvasprintf_const() may return the
 * format itself for plain "%s"-free strings, hence kfree_const(). */
4224 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
4228 va_start(vargs, fmt);
4229 kfree_const(hdev->hw_info);
4230 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4233 EXPORT_SYMBOL(hci_set_hw_info);
/* Record a printf-style firmware-revision string for the controller,
 * mirroring hci_set_hw_info(). */
4235 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
4239 va_start(vargs, fmt);
4240 kfree_const(hdev->fw_info);
4241 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
4244 EXPORT_SYMBOL(hci_set_fw_info);
4246 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback set (e.g. L2CAP, SCO) on the
 * global hci_cb_list, serialized by hci_cb_list_lock. */
4248 int hci_register_cb(struct hci_cb *cb)
4250 BT_DBG("%p name %s", cb, cb->name);
4252 mutex_lock(&hci_cb_list_lock);
4253 list_add_tail(&cb->list, &hci_cb_list);
4254 mutex_unlock(&hci_cb_list_lock);
4258 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback set. */
4260 int hci_unregister_cb(struct hci_cb *cb)
4262 BT_DBG("%p name %s", cb, cb->name);
4264 mutex_lock(&hci_cb_list_lock);
4265 list_del(&cb->list);
4266 mutex_unlock(&hci_cb_list_lock);
4270 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one fully formed frame to the driver. Timestamps the skb,
 * mirrors it to the monitor (and to raw sockets when in promiscuous
 * mode) before calling the driver's ->send() hook. Consumes skb. */
4272 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4276 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
4280 __net_timestamp(skb);
4282 /* Send copy to monitor */
4283 hci_send_to_monitor(hdev, skb);
4285 if (atomic_read(&hdev->promisc)) {
4286 /* Send copy to the sockets */
4287 hci_send_to_sock(hdev, skb);
4290 /* Get rid of skb owner, prior to sending to the driver. */
/* Drop the frame if the transport is no longer running. */
4293 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
4298 err = hdev->send(hdev, skb);
4300 bt_dev_err(hdev, "sending frame failed (%d)", err);
4305 /* Send HCI command */
/* Queue a stand-alone HCI command: build the skb, mark it as the
 * start of a single-command request and kick the command worker,
 * which serializes commands against the controller's credit count. */
4306 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4309 struct sk_buff *skb;
4311 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4313 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4315 bt_dev_err(hdev, "no memory for command");
4319 /* Stand-alone HCI commands must be flagged as
4320 * single-command requests.
4322 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
4324 skb_queue_tail(&hdev->cmd_q, skb);
4325 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Send a command without waiting for any completion event. Restricted
 * to vendor-specific commands (OGF 0x3f): only non-compliant vendor
 * commands may legitimately produce no response, so everything else
 * must go through hci_send_cmd()/hci_cmd_sync(). Bypasses cmd_q and
 * writes straight to the driver via hci_send_frame(). */
4330 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
4333 struct sk_buff *skb;
4335 if (hci_opcode_ogf(opcode) != 0x3f) {
4336 /* A controller receiving a command shall respond with either
4337 * a Command Status Event or a Command Complete Event.
4338 * Therefore, all standard HCI commands must be sent via the
4339 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
4340 * Some vendors do not comply with this rule for vendor-specific
4341 * commands and do not return any event. We want to support
4342 * unresponded commands for such cases only.
4344 bt_dev_err(hdev, "unresponded command not supported");
4348 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4350 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
4355 hci_send_frame(hdev, skb);
4359 EXPORT_SYMBOL(__hci_cmd_send);
4361 /* Get data from the previously sent command */
/* Return a pointer to the parameter payload of the last command sent
 * to the controller, or NULL when nothing was sent or the opcode does
 * not match. Pointer aliases hdev->sent_cmd — do not free. */
4362 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4364 struct hci_command_hdr *hdr;
4366 if (!hdev->sent_cmd)
4369 hdr = (void *) hdev->sent_cmd->data;
4371 if (hdr->opcode != cpu_to_le16(opcode))
4374 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4376 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4379 /* Send HCI command and wait for command complete event */
/* Synchronous command API for external callers: requires the device
 * to be UP (-ENETDOWN otherwise) and serializes with other request
 * users via the req_sync lock around __hci_cmd_sync(). Returns the
 * completion event skb or an ERR_PTR. */
4380 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
4381 const void *param, u32 timeout)
4383 struct sk_buff *skb;
4385 if (!test_bit(HCI_UP, &hdev->flags))
4386 return ERR_PTR(-ENETDOWN);
4388 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
4390 hci_req_sync_lock(hdev);
4391 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
4392 hci_req_sync_unlock(hdev);
4396 EXPORT_SYMBOL(hci_cmd_sync);
/* Prepend an ACL data header (handle+flags, dlen) to skb.
 * NOTE(review): 'len' is presumably captured from skb->len before the
 * skb_push() in an elided declaration — confirm in the full source. */
4399 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4401 struct hci_acl_hdr *hdr;
4404 skb_push(skb, HCI_ACL_HDR_SIZE);
4405 skb_reset_transport_header(skb);
4406 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4407 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4408 hdr->dlen = cpu_to_le16(len);
/* Add ACL headers to an (optionally fragmented) skb and append all
 * fragments to the given queue. The head fragment keeps the caller's
 * flags; continuation fragments have ACL_START cleared. */
4411 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4412 struct sk_buff *skb, __u16 flags)
4414 struct hci_conn *conn = chan->conn;
4415 struct hci_dev *hdev = conn->hdev;
4416 struct sk_buff *list;
4418 skb->len = skb_headlen(skb);
4421 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
/* Primary controllers address by connection handle, AMP controllers
 * by logical-channel handle (case labels elided in this excerpt). */
4423 switch (hdev->dev_type) {
4425 hci_add_acl_hdr(skb, conn->handle, flags);
4428 hci_add_acl_hdr(skb, chan->handle, flags);
4431 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4435 list = skb_shinfo(skb)->frag_list;
4437 /* Non fragmented */
4438 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4440 skb_queue_tail(queue, skb);
4443 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; each fragment is queued individually. */
4445 skb_shinfo(skb)->frag_list = NULL;
4447 /* Queue all fragments atomically. We need to use spin_lock_bh
4448 * here because of 6LoWPAN links, as there this function is
4449 * called from softirq and using normal spin lock could cause
4452 spin_lock_bh(&queue->lock);
4454 __skb_queue_tail(queue, skb);
4456 flags &= ~ACL_START;
4459 skb = list; list = list->next;
4461 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
4462 hci_add_acl_hdr(skb, conn->handle, flags);
4464 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4466 __skb_queue_tail(queue, skb);
4469 spin_unlock_bh(&queue->lock);
/* Queue outgoing ACL data on the channel and schedule the TX worker. */
4473 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4475 struct hci_dev *hdev = chan->conn->hdev;
4477 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4479 hci_queue_acl(chan, &chan->data_q, skb, flags);
4481 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle, dlen) and queue the packet on the
 * connection's data queue for the TX worker. */
4485 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4487 struct hci_dev *hdev = conn->hdev;
4488 struct hci_sco_hdr hdr;
4490 BT_DBG("%s len %d", hdev->name, skb->len);
4492 hdr.handle = cpu_to_le16(conn->handle);
4493 hdr.dlen = skb->len;
4495 skb_push(skb, HCI_SCO_HDR_SIZE);
4496 skb_reset_transport_header(skb);
4497 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4499 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
4501 skb_queue_tail(&conn->data_q, skb);
4502 queue_work(hdev->workqueue, &hdev->tx_work);
4505 /* ---- HCI TX task (outgoing data) ---- */
4507 /* HCI Connection scheduler */
/* Pick the connection of the given link type that has data queued and
 * the fewest in-flight packets (fair scheduling), and compute a send
 * quote from the matching controller buffer credit count. Returns the
 * chosen connection, with *quote set in elided lines. */
4508 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4511 struct hci_conn_hash *h = &hdev->conn_hash;
4512 struct hci_conn *conn = NULL, *c;
4513 unsigned int num = 0, min = ~0;
4515 /* We don't have to lock device here. Connections are always
4516 * added and removed with TX task disabled. */
4520 list_for_each_entry_rcu(c, &h->list, list) {
4521 if (c->type != type || skb_queue_empty(&c->data_q))
4524 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the least outstanding packets. */
4529 if (c->sent < min) {
/* Stop early once every connection of this type was inspected. */
4534 if (hci_conn_num(hdev, type) == num)
4543 switch (conn->type) {
4545 cnt = hdev->acl_cnt;
4549 cnt = hdev->sco_cnt;
/* LE shares ACL credits when the controller has no dedicated LE
 * buffers (le_mtu == 0). */
4552 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4556 bt_dev_err(hdev, "unknown link type %d", conn->type);
4564 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: disconnect every connection of the given type
 * that still has unacknowledged packets, freeing stuck credits. */
4568 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4570 struct hci_conn_hash *h = &hdev->conn_hash;
4573 bt_dev_err(hdev, "link tx timeout");
4577 /* Kill stalled connections */
4578 list_for_each_entry_rcu(c, &h->list, list) {
4579 if (c->type == type && c->sent) {
4580 bt_dev_err(hdev, "killing stalled connection %pMR",
4582 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among connections of the given type, pick
 * the channel whose head skb has the highest priority; ties are broken
 * by the fewest in-flight packets on the owning connection. *quote is
 * derived from the relevant credit count (assignment elided here). */
4589 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4592 struct hci_conn_hash *h = &hdev->conn_hash;
4593 struct hci_chan *chan = NULL;
4594 unsigned int num = 0, min = ~0, cur_prio = 0;
4595 struct hci_conn *conn;
4596 int cnt, q, conn_num = 0;
4598 BT_DBG("%s", hdev->name);
4602 list_for_each_entry_rcu(conn, &h->list, list) {
4603 struct hci_chan *tmp;
4605 if (conn->type != type)
4608 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4613 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4614 struct sk_buff *skb;
4616 if (skb_queue_empty(&tmp->data_q))
4619 skb = skb_peek(&tmp->data_q);
/* Skip channels below the best priority seen so far. */
4620 if (skb->priority < cur_prio)
/* Higher priority found: restart the fairness comparison. */
4623 if (skb->priority > cur_prio) {
4626 cur_prio = skb->priority;
4631 if (conn->sent < min) {
4637 if (hci_conn_num(hdev, type) == conn_num)
4646 switch (chan->conn->type) {
4648 cnt = hdev->acl_cnt;
4651 cnt = hdev->block_cnt;
4655 cnt = hdev->sco_cnt;
4658 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4662 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
4667 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a scheduling round, bump the priority of
 * every still-queued head skb toward HCI_PRIO_MAX - 1 so low-priority
 * channels eventually win against a steady high-priority stream. */
4671 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4673 struct hci_conn_hash *h = &hdev->conn_hash;
4674 struct hci_conn *conn;
4677 BT_DBG("%s", hdev->name);
4681 list_for_each_entry_rcu(conn, &h->list, list) {
4682 struct hci_chan *chan;
4684 if (conn->type != type)
4687 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4692 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4693 struct sk_buff *skb;
4700 if (skb_queue_empty(&chan->data_q))
4703 skb = skb_peek(&chan->data_q);
/* Already at (or above) the promotion ceiling — leave it. */
4704 if (skb->priority >= HCI_PRIO_MAX - 1)
4707 skb->priority = HCI_PRIO_MAX - 1;
4709 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4713 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks this ACL packet consumes under
 * block-based flow control (payload only, header excluded). */
4721 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4723 /* Calculate count of blocks used by this packet */
4724 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If all credits for this link type are consumed and nothing has been
 * acknowledged within HCI_ACL_TX_TIMEOUT, treat the link as stalled
 * and invoke hci_link_tx_to(). Skipped for unconfigured controllers. */
4727 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
4729 unsigned long last_tx;
4731 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4736 last_tx = hdev->le_last_tx;
4739 last_tx = hdev->acl_last_tx;
4743 /* tx timeout must be longer than maximum link supervision timeout
4746 if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
4747 hci_link_tx_to(hdev, type);
/* Drain queued SCO packets, up to the per-connection quote chosen by
 * hci_low_sent(), while SCO credits remain. */
4751 static void hci_sched_sco(struct hci_dev *hdev)
4753 struct hci_conn *conn;
4754 struct sk_buff *skb;
4757 BT_DBG("%s", hdev->name);
4759 if (!hci_conn_num(hdev, SCO_LINK))
4762 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4763 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4764 BT_DBG("skb %p len %d", skb, skb->len);
4765 hci_send_frame(hdev, skb);
/* NOTE(review): ~0 looks like a sentinel reset for conn->sent;
 * the action taken here is elided — confirm in the full source. */
4768 if (conn->sent == ~0)
/* Same as hci_sched_sco() but for eSCO links; eSCO shares the SCO
 * credit counter (sco_cnt). */
4774 static void hci_sched_esco(struct hci_dev *hdev)
4776 struct hci_conn *conn;
4777 struct sk_buff *skb;
4780 BT_DBG("%s", hdev->name);
4782 if (!hci_conn_num(hdev, ESCO_LINK))
4785 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4787 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4788 BT_DBG("skb %p len %d", skb, skb->len);
4789 hci_send_frame(hdev, skb);
4792 if (conn->sent == ~0)
/* ACL scheduler for packet-based flow control: per selected channel,
 * send up to 'quote' packets of non-decreasing-priority, interleaving
 * SCO/eSCO so audio is never starved, then rebalance priorities. */
4798 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4800 unsigned int cnt = hdev->acl_cnt;
4801 struct hci_chan *chan;
4802 struct sk_buff *skb;
4805 __check_timeout(hdev, cnt, ACL_LINK);
4807 while (hdev->acl_cnt &&
4808 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4809 u32 priority = (skb_peek(&chan->data_q))->priority;
4810 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4811 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4812 skb->len, skb->priority);
4814 /* Stop if priority has changed */
4815 if (skb->priority < priority)
4818 skb = skb_dequeue(&chan->data_q);
4820 hci_conn_enter_active_mode(chan->conn,
4821 bt_cb(skb)->force_active);
4823 hci_send_frame(hdev, skb);
4824 hdev->acl_last_tx = jiffies;
4830 /* Send pending SCO packets right away */
4831 hci_sched_sco(hdev);
4832 hci_sched_esco(hdev);
/* Something was sent — recompute channel priorities for fairness. */
4836 if (cnt != hdev->acl_cnt)
4837 hci_prio_recalculate(hdev, ACL_LINK);
/* ACL scheduler for block-based flow control (mostly AMP): credits are
 * data blocks rather than packets, so each skb may consume several
 * credits as computed by __get_blocks(). */
4840 static void hci_sched_acl_blk(struct hci_dev *hdev)
4842 unsigned int cnt = hdev->block_cnt;
4843 struct hci_chan *chan;
4844 struct sk_buff *skb;
4848 BT_DBG("%s", hdev->name);
/* NOTE(review): the link 'type' variable is set around this AMP
 * check in elided lines (AMP_LINK vs ACL_LINK) — confirm. */
4850 if (hdev->dev_type == HCI_AMP)
4855 __check_timeout(hdev, cnt, type);
4857 while (hdev->block_cnt > 0 &&
4858 (chan = hci_chan_sent(hdev, type, &quote))) {
4859 u32 priority = (skb_peek(&chan->data_q))->priority;
4860 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4863 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4864 skb->len, skb->priority);
4866 /* Stop if priority has changed */
4867 if (skb->priority < priority)
4870 skb = skb_dequeue(&chan->data_q);
4872 blocks = __get_blocks(hdev, skb);
/* Packet larger than remaining credits — try again later. */
4873 if (blocks > hdev->block_cnt)
4876 hci_conn_enter_active_mode(chan->conn,
4877 bt_cb(skb)->force_active);
4879 hci_send_frame(hdev, skb);
4880 hdev->acl_last_tx = jiffies;
4882 hdev->block_cnt -= blocks;
4885 chan->sent += blocks;
4886 chan->conn->sent += blocks;
4890 if (cnt != hdev->block_cnt)
4891 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode. */
4894 static void hci_sched_acl(struct hci_dev *hdev)
4896 BT_DBG("%s", hdev->name);
4898 /* No ACL link over BR/EDR controller */
4899 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
4902 /* No AMP link over AMP controller */
4903 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4906 switch (hdev->flow_ctl_mode) {
4907 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4908 hci_sched_acl_pkt(hdev);
4911 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4912 hci_sched_acl_blk(hdev);
/* LE data scheduler: identical shape to the ACL packet scheduler but
 * draws credits from le_cnt when the controller has dedicated LE
 * buffers (le_pkts != 0), otherwise from the shared ACL pool. */
4917 static void hci_sched_le(struct hci_dev *hdev)
4919 struct hci_chan *chan;
4920 struct sk_buff *skb;
4921 int quote, cnt, tmp;
4923 BT_DBG("%s", hdev->name);
4925 if (!hci_conn_num(hdev, LE_LINK))
4928 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4930 __check_timeout(hdev, cnt, LE_LINK);
4933 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4934 u32 priority = (skb_peek(&chan->data_q))->priority;
4935 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4936 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4937 skb->len, skb->priority);
4939 /* Stop if priority has changed */
4940 if (skb->priority < priority)
4943 skb = skb_dequeue(&chan->data_q);
4945 hci_send_frame(hdev, skb);
4946 hdev->le_last_tx = jiffies;
4952 /* Send pending SCO packets right away */
4953 hci_sched_sco(hdev);
4954 hci_sched_esco(hdev);
/* NOTE(review): writing the remaining count back to acl_cnt matches
 * the shared-buffer case; the le_pkts branch is elided — confirm. */
4961 hdev->acl_cnt = cnt;
4964 hci_prio_recalculate(hdev, LE_LINK);
/* TX work item: run the per-link-type schedulers (skipped entirely in
 * HCI_USER_CHANNEL mode, where userspace owns the device) and then
 * flush any raw packets queued by sockets straight to the driver. */
4967 static void hci_tx_work(struct work_struct *work)
4969 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4970 struct sk_buff *skb;
4972 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4973 hdev->sco_cnt, hdev->le_cnt);
4975 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4976 /* Schedule queues and send stuff to HCI driver */
4977 hci_sched_sco(hdev);
4978 hci_sched_esco(hdev);
4979 hci_sched_acl(hdev);
4983 /* Send next queued raw (unknown type) packet */
4984 while ((skb = skb_dequeue(&hdev->raw_q)))
4985 hci_send_frame(hdev, skb);
4988 /* ----- HCI RX task (incoming data processing) ----- */
4990 /* ACL data packet */
/* Parse the ACL header, resolve the connection handle and pass the
 * payload to L2CAP; unknown handles are logged and the skb dropped
 * (free elided in this excerpt). */
4991 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4993 struct hci_acl_hdr *hdr = (void *) skb->data;
4994 struct hci_conn *conn;
4995 __u16 handle, flags;
4997 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* The 16-bit field packs a 12-bit handle plus PB/BC flag bits. */
4999 handle = __le16_to_cpu(hdr->handle);
5000 flags = hci_flags(handle);
5001 handle = hci_handle(handle);
5003 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5006 hdev->stat.acl_rx++;
5009 conn = hci_conn_hash_lookup_handle(hdev, handle);
5010 hci_dev_unlock(hdev);
5013 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5015 /* Send to upper protocol */
5016 l2cap_recv_acldata(conn, skb, flags);
5019 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
5026 /* SCO data packet */
/* Parse the SCO header, resolve the connection and hand the payload
 * to the SCO layer; the flag bits carry the packet status (erroneous
 * data reporting) which is stashed in the cb for sco_recv_scodata(). */
5027 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5029 struct hci_sco_hdr *hdr = (void *) skb->data;
5030 struct hci_conn *conn;
5031 __u16 handle, flags;
5033 skb_pull(skb, HCI_SCO_HDR_SIZE);
5035 handle = __le16_to_cpu(hdr->handle);
5036 flags = hci_flags(handle);
5037 handle = hci_handle(handle);
5039 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5042 hdev->stat.sco_rx++;
5045 conn = hci_conn_hash_lookup_handle(hdev, handle);
5046 hci_dev_unlock(hdev);
5049 /* Send to upper protocol */
5050 bt_cb(skb)->sco.pkt_status = flags & 0x03;
5051 sco_recv_scodata(conn, skb);
5054 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
/* A request is complete when the next queued command (if any) is
 * flagged as the start of a new request. */
5061 static bool hci_req_is_complete(struct hci_dev *hdev)
5063 struct sk_buff *skb;
5065 skb = skb_peek(&hdev->cmd_q);
5069 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
/* Workaround for CSR controllers that emit a spontaneous reset-
 * complete during init: re-queue a clone of the last sent command
 * (unless it actually was HCI_OP_RESET) so it isn't lost. */
5072 static void hci_resend_last(struct hci_dev *hdev)
5074 struct hci_command_hdr *sent;
5075 struct sk_buff *skb;
5078 if (!hdev->sent_cmd)
5081 sent = (void *) hdev->sent_cmd->data;
5082 opcode = __le16_to_cpu(sent->opcode);
5083 if (opcode == HCI_OP_RESET)
5086 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
/* Re-queue at the head so it is sent before anything else. */
5090 skb_queue_head(&hdev->cmd_q, skb);
5091 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called from event processing when a command completes: decide
 * whether the enclosing request is finished and, if so, hand back its
 * completion callback via *req_complete / *req_complete_skb and drain
 * any remaining queued commands belonging to the same request. */
5094 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
5095 hci_req_complete_t *req_complete,
5096 hci_req_complete_skb_t *req_complete_skb)
5098 struct sk_buff *skb;
5099 unsigned long flags;
5101 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5103 /* If the completed command doesn't match the last one that was
5104 * sent we need to do special handling of it.
5106 if (!hci_sent_cmd_data(hdev, opcode)) {
5107 /* Some CSR based controllers generate a spontaneous
5108 * reset complete event during init and any pending
5109 * command will never be completed. In such a case we
5110 * need to resend whatever was the last sent
5113 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5114 hci_resend_last(hdev);
5119 /* If we reach this point this event matches the last command sent */
5120 hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
5122 /* If the command succeeded and there's still more commands in
5123 * this request the request is not yet complete.
5125 if (!status && !hci_req_is_complete(hdev))
5128 /* If this was the last command in a request the complete
5129 * callback would be found in hdev->sent_cmd instead of the
5130 * command queue (hdev->cmd_q).
5132 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
5133 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
5137 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
5138 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
5142 /* Remove all pending commands belonging to this request */
/* irqsave: cmd_q may be touched from contexts with IRQs disabled. */
5143 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5144 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* The start of the next request marks the end of ours; put the
 * command back and stop draining. */
5145 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
5146 __skb_queue_head(&hdev->cmd_q, skb);
5150 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
5151 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
5153 *req_complete = bt_cb(skb)->hci.req_complete;
5154 dev_kfree_skb_irq(skb);
5156 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
/* RX work item: drain rx_q, mirror every frame to the monitor (and to
 * raw sockets in promiscuous mode), enforce user-channel exclusivity
 * and init-phase filtering, then dispatch by packet type to the event,
 * ACL or SCO handlers. */
5159 static void hci_rx_work(struct work_struct *work)
5161 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5162 struct sk_buff *skb;
5164 BT_DBG("%s", hdev->name);
5166 while ((skb = skb_dequeue(&hdev->rx_q))) {
5167 /* Send copy to monitor */
5168 hci_send_to_monitor(hdev, skb);
5170 if (atomic_read(&hdev->promisc)) {
5171 /* Send copy to the sockets */
5172 hci_send_to_sock(hdev, skb);
5175 /* If the device has been opened in HCI_USER_CHANNEL,
5176 * the userspace has exclusive access to device.
5177 * When device is HCI_INIT, we still need to process
5178 * the data packets to the driver in order
5179 * to complete its setup().
5181 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5182 !test_bit(HCI_INIT, &hdev->flags)) {
/* During init only events are processed; data packets are
 * discarded (the drop is in elided lines after the switch). */
5187 if (test_bit(HCI_INIT, &hdev->flags)) {
5188 /* Don't process data packets in this states. */
5189 switch (hci_skb_pkt_type(skb)) {
5190 case HCI_ACLDATA_PKT:
5191 case HCI_SCODATA_PKT:
5192 case HCI_ISODATA_PKT:
5199 switch (hci_skb_pkt_type(skb)) {
5201 BT_DBG("%s Event packet", hdev->name);
5202 hci_event_packet(hdev, skb);
5205 case HCI_ACLDATA_PKT:
5206 BT_DBG("%s ACL data packet", hdev->name);
5207 hci_acldata_packet(hdev, skb);
5210 case HCI_SCODATA_PKT:
5211 BT_DBG("%s SCO data packet", hdev->name);
5212 hci_scodata_packet(hdev, skb);
5222 static void hci_cmd_work(struct work_struct *work)
5224 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5225 struct sk_buff *skb;
5227 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5228 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5230 /* Send queued commands */
5231 if (atomic_read(&hdev->cmd_cnt)) {
5232 skb = skb_dequeue(&hdev->cmd_q);
5236 kfree_skb(hdev->sent_cmd);
5238 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5239 if (hdev->sent_cmd) {
5240 if (hci_req_status_pend(hdev))
5241 hci_dev_set_flag(hdev, HCI_CMD_PENDING);
5242 atomic_dec(&hdev->cmd_cnt);
5243 hci_send_frame(hdev, skb);
5244 if (test_bit(HCI_RESET, &hdev->flags))
5245 cancel_delayed_work(&hdev->cmd_timer);
5247 schedule_delayed_work(&hdev->cmd_timer,
5250 skb_queue_head(&hdev->cmd_q, skb);
5251 queue_work(hdev->workqueue, &hdev->cmd_work);