2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
/* An all-zero 16-byte link key, used to detect blank/invalid keys. */
40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Convert whole seconds to jiffies (goes through msecs_to_jiffies). */
43 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45 /* Handle HCI Event packets */
/* Command Complete: HCI_Inquiry_Cancel.  Clears the HCI_INQUIRY flag,
 * wakes up anyone waiting on it, moves discovery to STOPPED (unless an
 * LE active scan is still running) and resumes pending connections.
 * NOTE(review): several body lines (braces, early returns) appear
 * elided in this excerpt.
 */
47 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%2.2x", hdev->name, status);
54 /* It is possible that we receive Inquiry Complete event right
55 * before we receive Inquiry Cancel Command Complete event, in
56 * which case the latter event should have status of Command
57 * Disallowed (0x0c). This should not be treated as error, since
58 * we actually achieve what Inquiry Cancel wants to achieve,
59 * which is to end the last Inquiry session.
61 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
62 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
73 wake_up_bit(&hdev->flags, HCI_INQUIRY);
76 /* Set discovery state to stopped if we're not doing LE active
79 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
80 hdev->le_scan_type != LE_SCAN_ACTIVE)
81 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
84 hci_conn_check_pending(hdev);
/* Command Complete: HCI_Periodic_Inquiry_Mode — mark periodic inquiry
 * as active on success. */
87 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 __u8 status = *((__u8 *) skb->data);
91 BT_DBG("%s status 0x%2.2x", hdev->name, status);
96 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete: HCI_Exit_Periodic_Inquiry_Mode — clear the
 * periodic-inquiry flag and resume any pending connection attempts. */
99 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 __u8 status = *((__u8 *) skb->data);
103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
108 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 hci_conn_check_pending(hdev);
/* Command Complete: HCI_Remote_Name_Request_Cancel — debug log only,
 * no state to update here. */
113 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
116 BT_DBG("%s", hdev->name);
/* Command Complete: HCI_Role_Discovery — record the reported role
 * (central/peripheral) on the matching connection, under hdev lock. */
119 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 struct hci_rp_role_discovery *rp = (void *) skb->data;
122 struct hci_conn *conn;
124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 conn->role = rp->role;
135 hci_dev_unlock(hdev);
/* Command Complete: HCI_Read_Link_Policy_Settings — cache the policy
 * on the connection identified by the returned handle. */
138 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 struct hci_rp_read_link_policy *rp = (void *) skb->data;
141 struct hci_conn *conn;
143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = __le16_to_cpu(rp->policy);
154 hci_dev_unlock(hdev);
/* Command Complete: HCI_Write_Link_Policy_Settings — the response does
 * not echo the policy, so read it back from the sent command parameters
 * (policy is the LE16 at offset 2, after the handle). */
157 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_write_link_policy *rp = (void *) skb->data;
160 struct hci_conn *conn;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
174 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
176 conn->link_policy = get_unaligned_le16(sent + 2);
178 hci_dev_unlock(hdev);
/* Command Complete: HCI_Read_Default_Link_Policy_Settings — cache the
 * adapter-wide default link policy. */
181 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
184 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
186 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
191 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete: HCI_Write_Default_Link_Policy_Settings — mirror the
 * value we sent into hdev->link_policy (response carries status only). */
194 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
197 __u8 status = *((__u8 *) skb->data);
200 BT_DBG("%s status 0x%2.2x", hdev->name, status);
205 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
209 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete: HCI_Reset — return the adapter to a clean state:
 * clear HCI_RESET and all volatile flags, stop discovery, invalidate
 * cached TX power, wipe advertising/scan-response data and flush the
 * LE accept and resolving lists. */
212 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
214 __u8 status = *((__u8 *) skb->data);
216 BT_DBG("%s status 0x%2.2x", hdev->name, status);
218 clear_bit(HCI_RESET, &hdev->flags);
223 /* Reset all non-persistent flags */
224 hci_dev_clear_volatile_flags(hdev);
226 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
228 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
229 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
231 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
232 hdev->adv_data_len = 0;
234 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
235 hdev->scan_rsp_data_len = 0;
237 hdev->le_scan_type = LE_SCAN_PASSIVE;
239 hdev->ssp_debug_mode = 0;
241 hci_bdaddr_list_clear(&hdev->le_accept_list);
242 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete: HCI_Read_Stored_Link_Key — record max/current key
 * counts, but only when the command asked for all keys (read_all). */
245 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
249 struct hci_cp_read_stored_link_key *sent;
251 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
253 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
257 if (!rp->status && sent->read_all == 0x01) {
258 hdev->stored_max_keys = rp->max_keys;
259 hdev->stored_num_keys = rp->num_keys;
/* Command Complete: HCI_Delete_Stored_Link_Key — decrement the cached
 * stored-key counter, clamping at zero to avoid underflow. */
263 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
266 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
268 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
273 if (rp->num_keys <= hdev->stored_num_keys)
274 hdev->stored_num_keys -= rp->num_keys;
276 hdev->stored_num_keys = 0;
/* Command Complete: HCI_Write_Local_Name — notify mgmt of the result
 * and, on success, cache the name that was sent. */
279 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
281 __u8 status = *((__u8 *) skb->data);
284 BT_DBG("%s status 0x%2.2x", hdev->name, status);
286 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
292 if (hci_dev_test_flag(hdev, HCI_MGMT))
293 mgmt_set_local_name_complete(hdev, sent, status);
295 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
297 hci_dev_unlock(hdev);
/* Command Complete: HCI_Read_Local_Name — cache the controller name,
 * but only while still in SETUP/CONFIG (userspace owns it afterwards). */
300 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
302 struct hci_rp_read_local_name *rp = (void *) skb->data;
304 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
309 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
310 hci_dev_test_flag(hdev, HCI_CONFIG))
311 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete: HCI_Write_Authentication_Enable — sync the HCI_AUTH
 * flag with the parameter we sent and inform mgmt of the outcome. */
314 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
316 __u8 status = *((__u8 *) skb->data);
319 BT_DBG("%s status 0x%2.2x", hdev->name, status);
321 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
328 __u8 param = *((__u8 *) sent);
330 if (param == AUTH_ENABLED)
331 set_bit(HCI_AUTH, &hdev->flags);
333 clear_bit(HCI_AUTH, &hdev->flags);
336 if (hci_dev_test_flag(hdev, HCI_MGMT))
337 mgmt_auth_enable_complete(hdev, status);
339 hci_dev_unlock(hdev);
/* Command Complete: HCI_Write_Encryption_Mode — mirror the sent
 * parameter into the HCI_ENCRYPT device flag. */
342 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
344 __u8 status = *((__u8 *) skb->data);
348 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
357 param = *((__u8 *) sent);
360 set_bit(HCI_ENCRYPT, &hdev->flags);
362 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete: HCI_Write_Scan_Enable — update HCI_ISCAN/HCI_PSCAN
 * from the sent bitmask; also resets the discoverable timeout
 * (visible at original line 382). */
365 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
367 __u8 status = *((__u8 *) skb->data);
371 BT_DBG("%s status 0x%2.2x", hdev->name, status);
373 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
377 param = *((__u8 *) sent);
382 hdev->discov_timeout = 0;
386 if (param & SCAN_INQUIRY)
387 set_bit(HCI_ISCAN, &hdev->flags);
389 clear_bit(HCI_ISCAN, &hdev->flags);
391 if (param & SCAN_PAGE)
392 set_bit(HCI_PSCAN, &hdev->flags);
394 clear_bit(HCI_PSCAN, &hdev->flags);
397 hci_dev_unlock(hdev);
/* Command Complete: HCI_Set_Event_Filter — track whether a non-default
 * event filter is configured via HCI_EVENT_FILTER_CONFIGURED. */
400 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
402 __u8 status = *((__u8 *)skb->data);
403 struct hci_cp_set_event_filter *cp;
406 BT_DBG("%s status 0x%2.2x", hdev->name, status);
411 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
415 cp = (struct hci_cp_set_event_filter *)sent;
417 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
418 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
420 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete: HCI_Read_Class_of_Device — cache the 3-byte CoD. */
423 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
425 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
427 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
432 memcpy(hdev->dev_class, rp->dev_class, 3);
434 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
435 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete: HCI_Write_Class_of_Device — cache the CoD we sent
 * and report completion to mgmt. */
438 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
440 __u8 status = *((__u8 *) skb->data);
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
452 memcpy(hdev->dev_class, sent, 3);
454 if (hci_dev_test_flag(hdev, HCI_MGMT))
455 mgmt_set_class_of_dev_complete(hdev, sent, status);
457 hci_dev_unlock(hdev);
/* Command Complete: HCI_Read_Voice_Setting — cache the value and notify
 * the driver only when it actually changed. */
460 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
462 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
465 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
470 setting = __le16_to_cpu(rp->voice_setting);
472 if (hdev->voice_setting == setting)
475 hdev->voice_setting = setting;
477 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
480 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete: HCI_Write_Voice_Setting — mirror the value we sent
 * into the cache and notify the driver on change. */
483 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
486 __u8 status = *((__u8 *) skb->data);
490 BT_DBG("%s status 0x%2.2x", hdev->name, status);
495 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
499 setting = get_unaligned_le16(sent);
501 if (hdev->voice_setting == setting)
504 hdev->voice_setting = setting;
506 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
509 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete: HCI_Read_Number_Of_Supported_IAC — cache the count
 * of supported Inquiry Access Codes. */
512 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
515 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
522 hdev->num_iac = rp->num_iac;
524 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete: HCI_Write_Simple_Pairing_Mode — keep the host SSP
 * feature bit and the HCI_SSP_ENABLED flag in sync with the sent mode,
 * and report the result to mgmt. */
527 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
529 __u8 status = *((__u8 *) skb->data);
530 struct hci_cp_write_ssp_mode *sent;
532 BT_DBG("%s status 0x%2.2x", hdev->name, status);
534 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
542 hdev->features[1][0] |= LMP_HOST_SSP;
544 hdev->features[1][0] &= ~LMP_HOST_SSP;
547 if (hci_dev_test_flag(hdev, HCI_MGMT))
548 mgmt_ssp_enable_complete(hdev, sent->mode, status);
551 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
553 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
556 hci_dev_unlock(hdev);
/* Command Complete: HCI_Write_Secure_Connections_Host_Support — sync
 * the host SC feature bit; the HCI_SC_ENABLED flag is only updated here
 * when mgmt is not in control (and the command succeeded). */
559 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
561 u8 status = *((u8 *) skb->data);
562 struct hci_cp_write_sc_support *sent;
564 BT_DBG("%s status 0x%2.2x", hdev->name, status);
566 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
574 hdev->features[1][0] |= LMP_HOST_SC;
576 hdev->features[1][0] &= ~LMP_HOST_SC;
579 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
581 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
583 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
586 hci_dev_unlock(hdev);
/* Command Complete: HCI_Read_Local_Version_Information — cache HCI/LMP
 * versions and manufacturer, only while in SETUP/CONFIG. */
589 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 struct hci_rp_read_local_version *rp = (void *) skb->data;
593 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
598 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
599 hci_dev_test_flag(hdev, HCI_CONFIG)) {
600 hdev->hci_ver = rp->hci_ver;
601 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
602 hdev->lmp_ver = rp->lmp_ver;
603 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
604 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete: HCI_Read_Local_Supported_Commands — cache the
 * supported-commands bitmap, only while in SETUP/CONFIG. */
608 static void hci_cc_read_local_commands(struct hci_dev *hdev,
611 struct hci_rp_read_local_commands *rp = (void *) skb->data;
613 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
618 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
619 hci_dev_test_flag(hdev, HCI_CONFIG))
620 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete: HCI_Read_Authenticated_Payload_Timeout — store the
 * timeout on the connection matching the returned handle. */
623 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
626 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
627 struct hci_conn *conn;
629 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
638 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
640 hci_dev_unlock(hdev);
/* Command Complete: HCI_Write_Authenticated_Payload_Timeout — response
 * lacks the value, so read it back from the sent parameters (LE16 at
 * offset 2, after the handle). */
643 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
646 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
647 struct hci_conn *conn;
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
661 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
663 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
665 hci_dev_unlock(hdev);
/* Command Complete: HCI_Read_Local_Supported_Features — cache the LMP
 * feature page 0 and derive the supported ACL packet types and
 * (e)SCO link types from the individual feature bits. */
668 static void hci_cc_read_local_features(struct hci_dev *hdev,
671 struct hci_rp_read_local_features *rp = (void *) skb->data;
673 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
678 memcpy(hdev->features, rp->features, 8);
680 /* Adjust default settings according to features
681 * supported by device. */
683 if (hdev->features[0][0] & LMP_3SLOT)
684 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
686 if (hdev->features[0][0] & LMP_5SLOT)
687 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
689 if (hdev->features[0][1] & LMP_HV2) {
690 hdev->pkt_type |= (HCI_HV2);
691 hdev->esco_type |= (ESCO_HV2);
694 if (hdev->features[0][1] & LMP_HV3) {
695 hdev->pkt_type |= (HCI_HV3);
696 hdev->esco_type |= (ESCO_HV3);
699 if (lmp_esco_capable(hdev))
700 hdev->esco_type |= (ESCO_EV3);
702 if (hdev->features[0][4] & LMP_EV4)
703 hdev->esco_type |= (ESCO_EV4);
705 if (hdev->features[0][4] & LMP_EV5)
706 hdev->esco_type |= (ESCO_EV5);
708 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
709 hdev->esco_type |= (ESCO_2EV3);
711 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
712 hdev->esco_type |= (ESCO_3EV3);
714 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
715 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete: HCI_Read_Local_Extended_Features — track the
 * highest feature page and cache the returned page (bounds-checked
 * against HCI_MAX_PAGES). */
718 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
721 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
723 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
728 if (hdev->max_page < rp->max_page)
729 hdev->max_page = rp->max_page;
731 if (rp->page < HCI_MAX_PAGES)
732 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete: HCI_Read_Flow_Control_Mode — cache the mode. */
735 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
738 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
740 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
745 hdev->flow_ctl_mode = rp->mode;
/* Command Complete: HCI_Read_Buffer_Size — cache ACL/SCO MTUs and
 * packet counts and initialise the credit counters; the
 * HCI_QUIRK_FIXUP_BUFFER_SIZE branch (body elided in this excerpt)
 * adjusts values for broken controllers. */
748 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
750 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
752 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
757 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
758 hdev->sco_mtu = rp->sco_mtu;
759 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
760 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
762 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
767 hdev->acl_cnt = hdev->acl_pkts;
768 hdev->sco_cnt = hdev->sco_pkts;
770 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
771 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete: HCI_Read_BD_ADDR — record the controller address
 * during init, and additionally in setup_addr during SETUP. */
774 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
776 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
778 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
783 if (test_bit(HCI_INIT, &hdev->flags))
784 bacpy(&hdev->bdaddr, &rp->bdaddr);
786 if (hci_dev_test_flag(hdev, HCI_SETUP))
787 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete: HCI_Read_Local_Pairing_Options — cache pairing
 * options and maximum encryption key size while in SETUP/CONFIG. */
790 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
793 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
795 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
800 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
801 hci_dev_test_flag(hdev, HCI_CONFIG)) {
802 hdev->pairing_opts = rp->pairing_opts;
803 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete: HCI_Read_Page_Scan_Activity — cache interval and
 * window, only during controller init. */
807 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
810 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
812 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
817 if (test_bit(HCI_INIT, &hdev->flags)) {
818 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
819 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete: HCI_Write_Page_Scan_Activity — mirror the sent
 * interval/window into the cached values. */
823 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
826 u8 status = *((u8 *) skb->data);
827 struct hci_cp_write_page_scan_activity *sent;
829 BT_DBG("%s status 0x%2.2x", hdev->name, status);
834 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
838 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
839 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete: HCI_Read_Page_Scan_Type — cache the type during
 * controller init only. */
842 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
845 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
847 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
852 if (test_bit(HCI_INIT, &hdev->flags))
853 hdev->page_scan_type = rp->type;
/* Command Complete: HCI_Write_Page_Scan_Type — mirror the single-byte
 * type parameter we sent into the cache. */
856 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
859 u8 status = *((u8 *) skb->data);
862 BT_DBG("%s status 0x%2.2x", hdev->name, status);
867 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
869 hdev->page_scan_type = *type;
/* Command Complete: HCI_Read_Data_Block_Size — cache block-based flow
 * control parameters and initialise the block credit counter. */
872 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
875 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
877 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
882 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
883 hdev->block_len = __le16_to_cpu(rp->block_len);
884 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
886 hdev->block_cnt = hdev->num_blocks;
888 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
889 hdev->block_cnt, hdev->block_len);
/* Command Complete: HCI_Read_Clock — validates the response length,
 * then stores either the local clock (which == 0x00) on hdev or the
 * piconet clock + accuracy on the matching connection. */
892 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
894 struct hci_rp_read_clock *rp = (void *) skb->data;
895 struct hci_cp_read_clock *cp;
896 struct hci_conn *conn;
898 BT_DBG("%s", hdev->name);
900 if (skb->len < sizeof(*rp))
908 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
912 if (cp->which == 0x00) {
913 hdev->clock = le32_to_cpu(rp->clock);
917 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
919 conn->clock = le32_to_cpu(rp->clock);
920 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
924 hci_dev_unlock(hdev);
/* Command Complete: HCI_Read_Local_AMP_Info — cache all AMP controller
 * capability fields (bandwidth, latency, PDU size, flush timeouts). */
927 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
930 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
932 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
937 hdev->amp_status = rp->amp_status;
938 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
939 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
940 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
941 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
942 hdev->amp_type = rp->amp_type;
943 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
944 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
945 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
946 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete: HCI_Read_Inquiry_Response_TX_Power_Level — cache
 * the inquiry TX power. */
949 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
952 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
954 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
959 hdev->inq_tx_power = rp->tx_power;
/* Command Complete: HCI_Read_Default_Erroneous_Data_Reporting — cache
 * the current setting. */
962 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
965 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
967 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
972 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete: HCI_Write_Default_Erroneous_Data_Reporting —
 * mirror the value we sent into the cached setting. */
975 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
978 __u8 status = *((__u8 *)skb->data);
979 struct hci_cp_write_def_err_data_reporting *cp;
981 BT_DBG("%s status 0x%2.2x", hdev->name, status);
986 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
990 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete: HCI_PIN_Code_Request_Reply — forward the result to
 * mgmt and remember the PIN length on the ACL connection. */
993 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
995 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
996 struct hci_cp_pin_code_reply *cp;
997 struct hci_conn *conn;
999 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1003 if (hci_dev_test_flag(hdev, HCI_MGMT))
1004 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1009 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1015 conn->pin_length = cp->pin_len;
1018 hci_dev_unlock(hdev);
/* Command Complete: HCI_PIN_Code_Request_Negative_Reply — report the
 * result to mgmt under the device lock. */
1021 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1023 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1025 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1029 if (hci_dev_test_flag(hdev, HCI_MGMT))
1030 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1033 hci_dev_unlock(hdev);
/* Command Complete: HCI_LE_Read_Buffer_Size — cache LE ACL MTU and
 * packet count and initialise the LE credit counter. */
1036 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1037 struct sk_buff *skb)
1039 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1041 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1047 hdev->le_pkts = rp->le_max_pkt;
1049 hdev->le_cnt = hdev->le_pkts;
1051 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete: HCI_LE_Read_Local_Supported_Features — cache the
 * 8-byte LE feature mask. */
1054 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1055 struct sk_buff *skb)
1057 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1059 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1064 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete: HCI_LE_Read_Advertising_Channel_TX_Power — cache
 * the advertising TX power. */
1067 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1070 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1072 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1077 hdev->adv_tx_power = rp->tx_power;
/* Command Complete: HCI_User_Confirmation_Request_Reply — report the
 * outcome to mgmt for the BR/EDR (ACL) link. */
1080 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1082 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1084 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1088 if (hci_dev_test_flag(hdev, HCI_MGMT))
1089 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1092 hci_dev_unlock(hdev);
/* Command Complete: HCI_User_Confirmation_Request_Negative_Reply —
 * report the outcome to mgmt for the BR/EDR (ACL) link. */
1095 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1096 struct sk_buff *skb)
1098 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1100 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1104 if (hci_dev_test_flag(hdev, HCI_MGMT))
1105 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1106 ACL_LINK, 0, rp->status);
1108 hci_dev_unlock(hdev);
/* Command Complete: HCI_User_Passkey_Request_Reply — report the outcome
 * to mgmt (reply struct is shared with user-confirm). */
1111 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1113 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1119 if (hci_dev_test_flag(hdev, HCI_MGMT))
1120 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1123 hci_dev_unlock(hdev);
/* Command Complete: HCI_User_Passkey_Request_Negative_Reply — report
 * the outcome to mgmt (reply struct is shared with user-confirm). */
1126 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1127 struct sk_buff *skb)
1129 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1131 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1135 if (hci_dev_test_flag(hdev, HCI_MGMT))
1136 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1137 ACL_LINK, 0, rp->status);
1139 hci_dev_unlock(hdev);
/* Command Complete: HCI_Read_Local_OOB_Data — debug log only here;
 * the OOB payload is consumed elsewhere (body elided in this excerpt). */
1142 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1143 struct sk_buff *skb)
1145 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1147 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete: HCI_Read_Local_OOB_Extended_Data — debug log only
 * here; the payload is consumed elsewhere (body elided in this excerpt). */
1150 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1151 struct sk_buff *skb)
1153 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1155 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete: HCI_LE_Set_Random_Address — cache the new random
 * address; if it matches the current RPA, clear HCI_RPA_EXPIRED and
 * re-arm the RPA rotation timer. */
1158 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1160 __u8 status = *((__u8 *) skb->data);
1163 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1168 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1174 bacpy(&hdev->random_addr, sent);
1176 if (!bacmp(&hdev->rpa, sent)) {
1177 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1178 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1179 secs_to_jiffies(hdev->rpa_timeout));
1182 hci_dev_unlock(hdev);
/* Command Complete: HCI_LE_Set_Default_PHY — mirror the TX/RX PHY
 * preferences we sent into the cached defaults. */
1185 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1187 __u8 status = *((__u8 *) skb->data);
1188 struct hci_cp_le_set_default_phy *cp;
1190 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1195 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1201 hdev->le_tx_def_phys = cp->tx_phys;
1202 hdev->le_rx_def_phys = cp->rx_phys;
1204 hci_dev_unlock(hdev);
/* Command Complete: HCI_LE_Set_Advertising_Set_Random_Address — store
 * the random address on the matching adv instance; if it is the current
 * RPA, mark it valid and re-arm the per-instance RPA expiry timer. */
1207 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1208 struct sk_buff *skb)
1210 __u8 status = *((__u8 *) skb->data);
1211 struct hci_cp_le_set_adv_set_rand_addr *cp;
1212 struct adv_info *adv;
1217 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1218 /* Update only in case of an adv instance, since handle 0x00 shall be
1219 * using HCI_OP_LE_SET_RANDOM_ADDR, which allows both extended and
1220 * non-extended advertising.
1222 if (!cp || !cp->handle)
1227 adv = hci_find_adv_instance(hdev, cp->handle)
1229 bacpy(&adv->random_addr, &cp->bdaddr);
1230 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1231 adv->rpa_expired = false;
1232 queue_delayed_work(hdev->workqueue,
1233 &adv->rpa_expired_cb,
1234 secs_to_jiffies(hdev->rpa_timeout));
1238 hci_dev_unlock(hdev);
/* Command Complete: HCI_LE_Read_Transmit_Power — cache the supported
 * minimum and maximum LE TX power levels. */
1241 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1242 struct sk_buff *skb)
1244 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1246 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1251 hdev->min_le_tx_power = rp->min_le_tx_power;
1252 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete: HCI_LE_Set_Advertising_Enable — toggle HCI_LE_ADV;
 * when enabling while a peripheral-role connection attempt is pending,
 * arm its connection timeout in case the central never connects. */
1255 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1257 __u8 *sent, status = *((__u8 *) skb->data);
1259 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1264 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1270 /* If we're doing connection initiation as peripheral. Set a
1271 * timeout in case something goes wrong.
1274 struct hci_conn *conn;
1276 hci_dev_set_flag(hdev, HCI_LE_ADV);
1278 conn = hci_lookup_le_connect(hdev);
1280 queue_delayed_work(hdev->workqueue,
1281 &conn->le_conn_timeout,
1282 conn->conn_timeout);
1284 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1287 hci_dev_unlock(hdev);
/* Command Complete: HCI_LE_Set_Extended_Advertising_Enable — same role
 * as the legacy handler but per advertising set: track each instance's
 * enabled state, and only clear HCI_LE_ADV once no instance remains
 * enabled.  When enabling during a pending peripheral connection, arm
 * the connection timeout. */
1290 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1291 struct sk_buff *skb)
1293 struct hci_cp_le_set_ext_adv_enable *cp;
1294 struct hci_cp_ext_adv_set *set;
1295 __u8 status = *((__u8 *) skb->data);
1296 struct adv_info *adv = NULL, *n;
1298 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1303 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1307 set = (void *)cp->data;
1311 if (cp->num_of_sets)
1312 adv = hci_find_adv_instance(hdev, set->handle);
1315 struct hci_conn *conn;
1317 hci_dev_set_flag(hdev, HCI_LE_ADV);
1320 adv->enabled = true;
1322 conn = hci_lookup_le_connect(hdev);
1324 queue_delayed_work(hdev->workqueue,
1325 &conn->le_conn_timeout,
1326 conn->conn_timeout);
1329 adv->enabled = false;
1330 /* If just one instance was disabled check if there are
1331 * any other instance enabled before clearing HCI_LE_ADV
1333 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1339 /* All instances shall be considered disabled */
1340 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1342 adv->enabled = false;
1345 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1349 hci_dev_unlock(hdev);
/* Command Complete: HCI_LE_Set_Scan_Parameters — remember the scan type
 * (active/passive) we configured. */
1352 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1354 struct hci_cp_le_set_scan_param *cp;
1355 __u8 status = *((__u8 *) skb->data);
1357 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1362 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1368 hdev->le_scan_type = cp->type;
1370 hci_dev_unlock(hdev);
/* Command Complete: HCI_LE_Set_Extended_Scan_Parameters — extended
 * variant: the scan type lives in the first per-PHY parameter block. */
1373 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1374 struct sk_buff *skb)
1376 struct hci_cp_le_set_ext_scan_params *cp;
1377 __u8 status = *((__u8 *) skb->data);
1378 struct hci_cp_le_scan_phy_params *phy_param;
1380 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1385 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1389 phy_param = (void *)cp->data;
1393 hdev->le_scan_type = phy_param->type;
1395 hci_dev_unlock(hdev);
/* Whether a partial advertising report (non-ANY last_adv_addr) is
 * buffered awaiting its scan response. */
1398 static bool has_pending_adv_report(struct hci_dev *hdev)
1400 struct discovery_state *d = &hdev->discovery;
1402 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report (reset address to ANY and the
 * stored data length to zero). */
1405 static void clear_pending_adv_report(struct hci_dev *hdev)
1407 struct discovery_state *d = &hdev->discovery;
1409 bacpy(&d->last_adv_addr, BDADDR_ANY);
1410 d->last_adv_data_len = 0;
/* Buffer an advertising report in discovery state until the matching
 * scan response arrives; oversized data (> HCI_MAX_AD_LENGTH) is
 * rejected before any field is written. */
1413 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1414 u8 bdaddr_type, s8 rssi, u32 flags,
1417 struct discovery_state *d = &hdev->discovery;
1419 if (len > HCI_MAX_AD_LENGTH)
1422 bacpy(&d->last_adv_addr, bdaddr);
1423 d->last_adv_addr_type = bdaddr_type;
1424 d->last_adv_rssi = rssi;
1425 d->last_adv_flags = flags;
1426 memcpy(d->last_adv_data, data, len);
1427 d->last_adv_data_len = len;
/* Common completion logic for (extended) LE scan enable/disable.
 * Enable: set HCI_LE_SCAN and, for active scans, drop any stale
 * buffered report.  Disable: flush a pending report to mgmt, cancel the
 * scan-disable timer, clear HCI_LE_SCAN, then either finish discovery
 * (if scanning was interrupted by a connect) or re-enable advertising.
 * NOTE: this Tizen tree routes the STOPPED transition through
 * hci_le_discovery_set_state() and marks the upstream call a bug. */
1430 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1435 case LE_SCAN_ENABLE:
1436 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1437 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1438 clear_pending_adv_report(hdev);
1441 case LE_SCAN_DISABLE:
1442 /* We do this here instead of when setting DISCOVERY_STOPPED
1443 * since the latter would potentially require waiting for
1444 * inquiry to stop too.
1446 if (has_pending_adv_report(hdev)) {
1447 struct discovery_state *d = &hdev->discovery;
1449 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1450 d->last_adv_addr_type, NULL,
1451 d->last_adv_rssi, d->last_adv_flags,
1453 d->last_adv_data_len, NULL, 0);
1456 /* Cancel this timer so that we don't try to disable scanning
1457 * when it's already disabled.
1459 cancel_delayed_work(&hdev->le_scan_disable);
1461 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1463 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1464 * interrupted scanning due to a connect request. Mark
1465 * therefore discovery as stopped. If this was not
1466 * because of a connect request advertising might have
1467 * been disabled because of active scanning, so
1468 * re-enable it again if necessary.
1470 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1471 #ifndef TIZEN_BT /* The below line is kernel bug. */
1472 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1474 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1476 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1477 hdev->discovery.state == DISCOVERY_FINDING)
1478 hci_req_reenable_advertising(hdev);
1483 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1488 hci_dev_unlock(hdev);
/* Command Complete: HCI_LE_Set_Scan_Enable — delegate to the shared
 * enable/disable completion logic with the parameter we sent. */
1491 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1492 struct sk_buff *skb)
1494 struct hci_cp_le_set_scan_enable *cp;
1495 __u8 status = *((__u8 *) skb->data);
1497 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1502 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1506 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete: HCI_LE_Set_Extended_Scan_Enable — same shared
 * completion path as the legacy scan-enable handler. */
1509 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1510 struct sk_buff *skb)
1512 struct hci_cp_le_set_ext_scan_enable *cp;
1513 __u8 status = *((__u8 *) skb->data);
1515 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1520 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1524 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for LE Read Number of Supported Advertising Sets:
 * caches the controller-reported count in hdev->le_num_of_adv_sets.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1527 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1528 struct sk_buff *skb)
1530 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1532 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1538 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete handler for LE Read Accept List Size: caches the
 * controller's accept-list capacity in hdev->le_accept_list_size.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1541 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1542 struct sk_buff *skb)
1544 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1546 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1551 hdev->le_accept_list_size = rp->size;
/* Command Complete handler for LE Clear Accept List: drops the host-side
 * mirror (hdev->le_accept_list) so it stays in sync with the controller.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1554 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1555 struct sk_buff *skb)
1557 __u8 status = *((__u8 *) skb->data);
1559 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1564 hci_bdaddr_list_clear(&hdev->le_accept_list);
/* Command Complete handler for LE Add Device To Accept List: mirrors the
 * address from the sent command into the host-side hdev->le_accept_list.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract;
 * the hci_bdaddr_list_add() call is visibly missing its trailing arguments.
 */
1567 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1568 struct sk_buff *skb)
1570 struct hci_cp_le_add_to_accept_list *sent;
1571 __u8 status = *((__u8 *) skb->data);
1573 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1578 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1582 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
/* Command Complete handler for LE Remove Device From Accept List: removes
 * the address named in the sent command from the host-side mirror list.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract;
 * the hci_bdaddr_list_del() call is visibly missing its trailing arguments.
 */
1586 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1587 struct sk_buff *skb)
1589 struct hci_cp_le_del_from_accept_list *sent;
1590 __u8 status = *((__u8 *) skb->data);
1592 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1597 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1601 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
/* Command Complete handler for LE Read Supported States: copies the 8-byte
 * LE state bitmask from the response into hdev->le_states.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1605 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1606 struct sk_buff *skb)
1608 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1610 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1615 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete handler for LE Read Suggested Default Data Length:
 * caches the default TX octet/time values (little-endian on the wire).
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1618 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1619 struct sk_buff *skb)
1621 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1623 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1628 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1629 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete handler for LE Write Suggested Default Data Length:
 * on completion, mirrors the values we sent back into hdev so the cached
 * defaults match what the controller was told.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1632 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1633 struct sk_buff *skb)
1635 struct hci_cp_le_write_def_data_len *sent;
1636 __u8 status = *((__u8 *) skb->data);
1638 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1643 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1647 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1648 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete handler for LE Add Device To Resolving List: mirrors the
 * entry (address, type, peer IRK, and presumably local IRK — the call's
 * final argument line is missing from this extract) into the host-side
 * hdev->le_resolv_list.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1651 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1652 struct sk_buff *skb)
1654 struct hci_cp_le_add_to_resolv_list *sent;
1655 __u8 status = *((__u8 *) skb->data);
1657 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1662 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1666 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1667 sent->bdaddr_type, sent->peer_irk,
/* Command Complete handler for LE Remove Device From Resolving List:
 * removes the address named in the sent command from the host-side mirror.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract;
 * the hci_bdaddr_list_del_with_irk() call is missing its trailing argument.
 */
1671 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1672 struct sk_buff *skb)
1674 struct hci_cp_le_del_from_resolv_list *sent;
1675 __u8 status = *((__u8 *) skb->data);
1677 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1682 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1686 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
/* Command Complete handler for LE Clear Resolving List: drops the host-side
 * mirror (hdev->le_resolv_list) to stay in sync with the controller.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1690 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1691 struct sk_buff *skb)
1693 __u8 status = *((__u8 *) skb->data);
1695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1700 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete handler for LE Read Resolving List Size: caches the
 * controller's resolving-list capacity in hdev->le_resolv_list_size.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1703 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1704 struct sk_buff *skb)
1706 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1708 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1713 hdev->le_resolv_list_size = rp->size;
/* Command Complete handler for LE Set Address Resolution Enable: sets or
 * clears HCI_LL_RPA_RESOLUTION to track whether controller-based RPA
 * resolution is active (the branch on *sent is elided in this extract).
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here;
 * the matching hci_dev_lock() presumably sits in one of the missing lines.
 */
1716 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1717 struct sk_buff *skb)
1719 __u8 *sent, status = *((__u8 *) skb->data);
1721 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1726 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1733 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1735 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1737 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Maximum Data Length: caches all four
 * controller maxima (TX/RX octets and times) in hdev.
 * NOTE(review): numbering gaps show guard/brace lines elided in this extract.
 */
1740 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1741 struct sk_buff *skb)
1743 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1745 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1750 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1751 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1752 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1753 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete handler for Write LE Host Supported: updates the cached
 * host feature bits (features[1][0]) and the HCI_LE_ENABLED/HCI_ADVERTISING
 * flags to match the LE and simultaneous-LE/BR-EDR values we sent. The
 * conditions selecting each branch (sent->le / sent->simul) are among the
 * lines elided from this extract.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
1756 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1757 struct sk_buff *skb)
1759 struct hci_cp_write_le_host_supported *sent;
1760 __u8 status = *((__u8 *) skb->data);
1762 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1767 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1774 hdev->features[1][0] |= LMP_HOST_LE;
1775 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1777 hdev->features[1][0] &= ~LMP_HOST_LE;
1778 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1779 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1783 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1785 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1787 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Parameters: remembers the
 * own-address type the controller will advertise with (hdev->adv_addr_type).
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
1790 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1792 struct hci_cp_le_set_adv_param *cp;
1793 u8 status = *((u8 *) skb->data);
1795 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1800 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1805 hdev->adv_addr_type = cp->own_address_type;
1806 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Advertising Parameters:
 * records the own-address type and the controller-selected TX power (both
 * in hdev for instance 0 and in the matching adv_info instance), then
 * refreshes the advertising data now that TX power is known.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here;
 * presumably the adv_instance assignment is guarded by a NULL check in a
 * missing line — confirm upstream.
 */
1809 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1811 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1812 struct hci_cp_le_set_ext_adv_params *cp;
1813 struct adv_info *adv_instance;
1815 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1820 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1825 hdev->adv_addr_type = cp->own_addr_type;
1827 /* Store in hdev for instance 0 */
1828 hdev->adv_tx_power = rp->tx_power;
1830 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1832 adv_instance->tx_power = rp->tx_power;
1834 /* Update adv data as tx power is known now */
1835 hci_req_update_adv_data(hdev, cp->handle);
1837 hci_dev_unlock(hdev);
/* Vendor (Tizen) Command Complete handler: forwards the enable-RSSI
 * response to the management interface via mgmt_enable_rssi_cc().
 * NOTE(review): numbering gaps show brace lines elided in this extract.
 */
1841 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1842 struct sk_buff *skb)
1844 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1846 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1847 hdev->name, rp->status, rp->le_ext_opcode);
1849 mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Vendor (Tizen) Command Complete handler: forwards the raw-RSSI reading
 * (connection handle + dBm value) to mgmt via mgmt_raw_rssi_response().
 * NOTE(review): numbering gaps show brace lines elided in this extract.
 */
1852 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1853 struct sk_buff *skb)
1855 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1857 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1858 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1860 mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Command Complete handler for Read RSSI: looks up the connection by handle
 * and stores the reported RSSI on it.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here;
 * presumably the conn assignment is NULL-checked in a missing line.
 */
1864 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1866 struct hci_rp_read_rssi *rp = (void *) skb->data;
1867 struct hci_conn *conn;
1869 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1876 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1878 conn->rssi = rp->rssi;
1880 hci_dev_unlock(hdev);
/* Command Complete handler for Read Transmit Power Level: stores the value
 * on the connection. sent->type selects the destination field — per the HCI
 * spec presumably 0x00 = current level (conn->tx_power) and 0x01 = maximum
 * level (conn->max_tx_power); the case labels are elided from this extract,
 * so confirm against upstream.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
1883 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1885 struct hci_cp_read_tx_power *sent;
1886 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1887 struct hci_conn *conn;
1889 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1894 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1900 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1904 switch (sent->type) {
1906 conn->tx_power = rp->tx_power;
1909 conn->max_tx_power = rp->tx_power;
1914 hci_dev_unlock(hdev);
/* Command Complete handler for Write SSP Debug Mode: caches the mode byte
 * we sent in hdev->ssp_debug_mode. The declaration of 'mode' is among the
 * lines elided from this extract.
 * NOTE(review): numbering gaps show guard/brace lines elided here.
 */
1917 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1919 u8 status = *((u8 *) skb->data);
1922 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1927 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1929 hdev->ssp_debug_mode = *mode;
/* Command Status handler for Inquiry: on failure re-checks pending
 * connections, on success marks inquiry as active via HCI_INQUIRY. The
 * status branch separating the two actions is elided from this extract.
 * NOTE(review): numbering gaps show guard/brace lines elided here.
 */
1932 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1934 BT_DBG("%s status 0x%2.2x", hdev->name, status)
1937 hci_conn_check_pending(hdev);
1941 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for Create Connection. On failure for an existing
 * BT_CONNECT conn: close and notify — unless status is 0x0c (Command
 * Disallowed) with attempts remaining, in which case it is parked in
 * BT_CONNECT2 for retry. On success with no conn object yet, one is added
 * for the outgoing ACL link.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here;
 * the hci_conn_add() call is visibly missing its trailing arguments.
 */
1944 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1946 struct hci_cp_create_conn *cp;
1947 struct hci_conn *conn;
1949 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1951 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1957 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1959 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1962 if (conn && conn->state == BT_CONNECT) {
1963 if (status != 0x0c || conn->attempt > 2) {
1964 conn->state = BT_CLOSED;
1965 hci_connect_cfm(conn, status);
1968 conn->state = BT_CONNECT2;
1972 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1975 bt_dev_err(hdev, "no memory for new connection");
1979 hci_dev_unlock(hdev);
/* Command Status handler for Add SCO Connection: on failure, locates the
 * ACL by the handle from the sent command and closes/notifies the pending
 * SCO link attached to it. The 'handle' declaration and the acl->link
 * lookup of 'sco' are among the lines elided from this extract.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
1982 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1984 struct hci_cp_add_sco *cp;
1985 struct hci_conn *acl, *sco;
1988 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1993 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1997 handle = __le16_to_cpu(cp->handle);
1999 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2003 acl = hci_conn_hash_lookup_handle(hdev, handle);
2007 sco->state = BT_CLOSED;
2009 hci_connect_cfm(sco, status);
2014 hci_dev_unlock(hdev);
/* Command Status handler for Authentication Requested: on failure, if the
 * connection is still in BT_CONFIG, completes the connect attempt with the
 * error and drops the reference taken for the request.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2017 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2019 struct hci_cp_auth_requested *cp;
2020 struct hci_conn *conn;
2022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2027 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2033 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2035 if (conn->state == BT_CONFIG) {
2036 hci_connect_cfm(conn, status);
2037 hci_conn_drop(conn);
2041 hci_dev_unlock(hdev);
/* Command Status handler for Set Connection Encryption: mirror of
 * hci_cs_auth_requested() — on failure for a BT_CONFIG connection, the
 * connect attempt is completed with the error and the reference dropped.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2044 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2046 struct hci_cp_set_conn_encrypt *cp;
2047 struct hci_conn *conn;
2049 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2054 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2060 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2062 if (conn->state == BT_CONFIG) {
2063 hci_connect_cfm(conn, status);
2064 hci_conn_drop(conn);
2068 hci_dev_unlock(hdev);
/* Predicate: does this outgoing connection still need authentication?
 * Only BT_CONFIG outgoing links past the SDP security level qualify; for
 * non-SSP links without the MITM bit (auth_type & 0x01) authentication is
 * requested only for MEDIUM/HIGH/FIPS pending security levels. The return
 * statements for each branch are elided from this extract.
 * NOTE(review): numbering gaps show return/brace lines elided here.
 */
2071 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2072 struct hci_conn *conn)
2074 if (conn->state != BT_CONFIG || !conn->out)
2077 if (conn->pending_sec_level == BT_SECURITY_SDP)
2080 /* Only request authentication for SSP connections or non-SSP
2081 * devices with sec_level MEDIUM or HIGH or if MITM protection
2084 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2085 conn->pending_sec_level != BT_SECURITY_FIPS &&
2086 conn->pending_sec_level != BT_SECURITY_HIGH &&
2087 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issues an HCI Remote Name Request for an inquiry-cache entry, copying the
 * page-scan parameters and clock offset gathered during inquiry so the
 * controller can page the device efficiently. Returns hci_send_cmd()'s
 * result (0 on queued success).
 */
2093 static int hci_resolve_name(struct hci_dev *hdev,
2094 struct inquiry_entry *e)
2096 struct hci_cp_remote_name_req cp;
2098 memset(&cp, 0, sizeof(cp));
2100 bacpy(&cp.bdaddr, &e->data.bdaddr);
2101 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2102 cp.pscan_mode = e->data.pscan_mode;
2103 cp.clock_offset = e->data.clock_offset;
2105 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kicks off name resolution for the next NAME_NEEDED entry in the discovery
 * resolve list; on a successful request the entry moves to NAME_PENDING.
 * The explicit true/false returns are elided from this extract.
 * NOTE(review): numbering gaps show return/brace lines elided here.
 */
2108 static bool hci_resolve_next_name(struct hci_dev *hdev)
2110 struct discovery_state *discov = &hdev->discovery;
2111 struct inquiry_entry *e;
2113 if (list_empty(&discov->resolve))
2116 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2120 if (hci_resolve_name(hdev, e) == 0) {
2121 e->name_state = NAME_PENDING;
/* Processes a completed (or failed) remote-name lookup: pushes the mgmt
 * "device connected" / "name updated" events as appropriate, then drives
 * the discovery state machine — marking the inquiry-cache entry KNOWN or
 * NOT_KNOWN, chaining to the next pending name, and finally moving
 * discovery to STOPPED when nothing is left to resolve.
 * NOTE(review): numbering gaps show guard/label/brace lines elided here
 * (e.g. the discov_complete label and several NULL checks); the two
 * mgmt_device_connected() call shapes at 2143 and 2151 differ — presumably
 * alternate #ifdef branches — confirm upstream.
 */
2128 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2129 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2131 struct discovery_state *discov = &hdev->discovery;
2132 struct inquiry_entry *e;
2135 /* Update the mgmt connected state if necessary. Be careful with
2136 * conn objects that exist but are not (yet) connected however.
2137 * Only those in BT_CONFIG or BT_CONNECTED states can be
2138 * considered connected.
2141 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2142 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2143 mgmt_device_connected(hdev, conn, 0, name, name_len);
2145 mgmt_device_name_update(hdev, bdaddr, name, name_len);
2149 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2150 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2151 mgmt_device_connected(hdev, conn, name, name_len);
2154 if (discov->state == DISCOVERY_STOPPED)
2157 if (discov->state == DISCOVERY_STOPPING)
2158 goto discov_complete;
2160 if (discov->state != DISCOVERY_RESOLVING)
2163 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2164 /* If the device was not found in a list of found devices names of which
2165 * are pending. there is no need to continue resolving a next name as it
2166 * will be done upon receiving another Remote Name Request Complete
2173 e->name_state = NAME_KNOWN;
2174 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2175 e->data.rssi, name, name_len);
2177 e->name_state = NAME_NOT_KNOWN;
2180 if (hci_resolve_next_name(hdev))
2184 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for Remote Name Request. On success the handler
 * defers to the later Remote Name Request Complete event; on failure it
 * reports an empty name to hci_check_pending_name() (mgmt case) and, if the
 * outgoing link still needs authentication, fires Authentication Requested
 * itself, marking this side as the auth initiator.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2187 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2189 struct hci_cp_remote_name_req *cp;
2190 struct hci_conn *conn;
2192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2194 /* If successful wait for the name req complete event before
2195 * checking for the need to do authentication */
2199 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2205 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2207 if (hci_dev_test_flag(hdev, HCI_MGMT))
2208 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2213 if (!hci_outgoing_auth_needed(hdev, conn))
2216 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2217 struct hci_cp_auth_requested auth_cp;
2219 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2221 auth_cp.handle = __cpu_to_le16(conn->handle);
2222 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2223 sizeof(auth_cp), &auth_cp);
2227 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Supported Features: on failure
 * for a BT_CONFIG connection, completes the connect attempt with the error
 * and drops the reference.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2230 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2232 struct hci_cp_read_remote_features *cp;
2233 struct hci_conn *conn;
2235 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2240 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2246 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2248 if (conn->state == BT_CONFIG) {
2249 hci_connect_cfm(conn, status);
2250 hci_conn_drop(conn);
2254 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Extended Features: identical
 * failure handling to hci_cs_read_remote_features().
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2257 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2259 struct hci_cp_read_remote_ext_features *cp;
2260 struct hci_conn *conn;
2262 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2267 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2273 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2275 if (conn->state == BT_CONFIG) {
2276 hci_connect_cfm(conn, status);
2277 hci_conn_drop(conn);
2281 hci_dev_unlock(hdev);
/* Command Status handler for Setup Synchronous Connection: on failure,
 * resolves the ACL by the handle we sent and closes/notifies the pending
 * SCO/eSCO link riding on it — same shape as hci_cs_add_sco(). The
 * 'handle' declaration and the lookup of 'sco' from the ACL are among the
 * lines elided from this extract.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2284 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2286 struct hci_cp_setup_sync_conn *cp;
2287 struct hci_conn *acl, *sco;
2290 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2295 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2299 handle = __le16_to_cpu(cp->handle);
2301 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2305 acl = hci_conn_hash_lookup_handle(hdev, handle);
2309 sco->state = BT_CLOSED;
2311 hci_connect_cfm(sco, status);
2316 hci_dev_unlock(hdev);
/* Command Status handler for Sniff Mode: on failure clears the mode-change
 * pending flag and, if a SCO setup was waiting on the mode change, runs it
 * now with the error status.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2319 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2321 struct hci_cp_sniff_mode *cp;
2322 struct hci_conn *conn;
2324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2329 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2335 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2337 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2339 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2340 hci_sco_setup(conn, status);
2343 hci_dev_unlock(hdev);
/* Command Status handler for Exit Sniff Mode: identical failure handling
 * to hci_cs_sniff_mode() — clear the pending flag and release any waiting
 * SCO setup with the error.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2346 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2348 struct hci_cp_exit_sniff_mode *cp;
2349 struct hci_conn *conn;
2351 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2356 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2362 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2364 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2366 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2367 hci_sco_setup(conn, status);
2370 hci_dev_unlock(hdev);
/* Command Status handler for Disconnect: runs only on failure (upper layers
 * do not retry a failed disconnect, per the comment below). Reports the
 * failure to mgmt and, for LE links, restores advertising on the instance
 * that the connection had claimed.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2373 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2375 struct hci_cp_disconnect *cp;
2376 struct hci_conn *conn;
2381 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2387 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2389 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2390 conn->dst_type, status);
2392 if (conn->type == LE_LINK) {
2393 hdev->cur_adv_instance = conn->adv_instance;
2394 hci_req_reenable_advertising(hdev);
2397 /* If the disconnection failed for any reason, the upper layer
2398 * does not retry to disconnect in current implementation.
2399 * Hence, we need to do some basic cleanup here and re-enable
2400 * advertising if necessary.
2405 hci_dev_unlock(hdev);
/* Common Command Status bookkeeping for LE (ext) Create Connection: finds
 * the matching pending LE conn, normalizes the 0x02/0x03 "resolved" own-
 * address types back to public/random when controller-based RPA resolution
 * is on, records initiator/responder addresses for SMP, and arms the LE
 * connection timeout when not connecting via the accept list (LE has no
 * page-timeout equivalent). A trailing 'u8 filter_policy' parameter is
 * referenced below but its declaration line is elided from this extract.
 * NOTE(review): numbering gaps show guard/break/brace lines elided here.
 */
2408 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2409 u8 peer_addr_type, u8 own_address_type,
2412 struct hci_conn *conn;
2414 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2419 /* When using controller based address resolution, then the new
2420 * address types 0x02 and 0x03 are used. These types need to be
2421 * converted back into either public address or random address type
2423 if (use_ll_privacy(hdev) &&
2424 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2425 switch (own_address_type) {
2426 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2427 own_address_type = ADDR_LE_DEV_PUBLIC;
2429 case ADDR_LE_DEV_RANDOM_RESOLVED:
2430 own_address_type = ADDR_LE_DEV_RANDOM;
2435 /* Store the initiator and responder address information which
2436 * is needed for SMP. These values will not change during the
2437 * lifetime of the connection.
2439 conn->init_addr_type = own_address_type;
2440 if (own_address_type == ADDR_LE_DEV_RANDOM)
2441 bacpy(&conn->init_addr, &hdev->random_addr);
2443 bacpy(&conn->init_addr, &hdev->bdaddr);
2445 conn->resp_addr_type = peer_addr_type;
2446 bacpy(&conn->resp_addr, peer_addr);
2448 /* We don't want the connection attempt to stick around
2449 * indefinitely since LE doesn't have a page timeout concept
2450 * like BR/EDR. Set a timer for any connection that doesn't use
2451 * the accept list for connecting.
2453 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2454 queue_delayed_work(conn->hdev->workqueue,
2455 &conn->le_conn_timeout,
2456 conn->conn_timeout)
/* Command Status handler for LE Create Connection: failures are handled by
 * hci_le_conn_failed() via the request completion callbacks, so this only
 * forwards the sent parameters to cs_le_create_conn() for bookkeeping.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2459 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2461 struct hci_cp_le_create_conn *cp;
2463 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2465 /* All connection failure handling is taken care of by the
2466 * hci_le_conn_failed function which is triggered by the HCI
2467 * request completion callbacks used for connecting.
2472 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2478 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2479 cp->own_address_type, cp->filter_policy);
2481 hci_dev_unlock(hdev);
/* Command Status handler for LE Extended Create Connection: extended twin
 * of hci_cs_le_create_conn(); note the field is own_addr_type here versus
 * own_address_type in the legacy command structure.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2484 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2486 struct hci_cp_le_ext_create_conn *cp;
2488 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2490 /* All connection failure handling is taken care of by the
2491 * hci_le_conn_failed function which is triggered by the HCI
2492 * request completion callbacks used for connecting.
2497 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2503 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2504 cp->own_addr_type, cp->filter_policy);
2506 hci_dev_unlock(hdev);
/* Command Status handler for LE Read Remote Features: on failure for a
 * BT_CONFIG connection, completes the connect attempt with the error and
 * drops the reference — LE counterpart of hci_cs_read_remote_features().
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2509 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2511 struct hci_cp_le_read_remote_features *cp;
2512 struct hci_conn *conn;
2514 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2519 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2525 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2527 if (conn->state == BT_CONFIG) {
2528 hci_connect_cfm(conn, status);
2529 hci_conn_drop(conn);
2533 hci_dev_unlock(hdev);
/* Command Status handler for LE Start Encryption: on failure, if the link
 * is still BT_CONNECTED, tears it down with an authentication-failure
 * reason, since encryption could not be started.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2536 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2538 struct hci_cp_le_start_enc *cp;
2539 struct hci_conn *conn;
2541 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2548 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2552 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2556 if (conn->state != BT_CONNECTED)
2559 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2560 hci_conn_drop(conn);
2563 hci_dev_unlock(hdev);
/* Command Status handler for Switch Role: on failure clears the pending
 * role-switch flag on the affected ACL connection.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2566 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2568 struct hci_cp_switch_role *cp;
2569 struct hci_conn *conn;
2571 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2576 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2582 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2584 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2586 hci_dev_unlock(hdev);
/* Inquiry Complete event handler: clears HCI_INQUIRY (waking any waiters
 * on that bit), then — under mgmt — either starts name resolution for
 * NAME_NEEDED entries (moving discovery to DISCOVERY_RESOLVING) or marks
 * discovery STOPPED, deferring to a still-running LE scan when the
 * simultaneous-discovery quirk is set.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here.
 */
2589 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2591 __u8 status = *((__u8 *) skb->data);
2592 struct discovery_state *discov = &hdev->discovery;
2593 struct inquiry_entry *e;
2595 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2597 hci_conn_check_pending(hdev);
2599 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2602 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2603 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2605 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2610 if (discov->state != DISCOVERY_FINDING)
2613 if (list_empty(&discov->resolve)) {
2614 /* When BR/EDR inquiry is active and no LE scanning is in
2615 * progress, then change discovery state to indicate completion.
2617 * When running LE scanning and BR/EDR inquiry simultaneously
2618 * and the LE scan already finished, then change the discovery
2619 * state to indicate completion.
2621 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2622 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2623 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2627 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2628 if (e && hci_resolve_name(hdev, e) == 0) {
2629 e->name_state = NAME_PENDING;
2630 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2632 /* When BR/EDR inquiry is active and no LE scanning is in
2633 * progress, then change discovery state to indicate completion.
2635 * When running LE scanning and BR/EDR inquiry simultaneously
2636 * and the LE scan already finished, then change the discovery
2637 * state to indicate completion.
2639 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2640 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2641 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2645 hci_dev_unlock(hdev);
/* Inquiry Result event handler: validates that the skb actually carries
 * num_rsp entries, skips processing during periodic inquiry, then feeds
 * each response into the inquiry cache and reports it to mgmt with an
 * invalid RSSI (standard inquiry results carry no RSSI).
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here
 * (including the 'u32 flags' declaration used at 2676).
 */
2648 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2650 struct inquiry_data data;
2651 struct inquiry_info *info = (void *) (skb->data + 1);
2652 int num_rsp = *((__u8 *) skb->data);
2654 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2656 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2659 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2664 for (; num_rsp; num_rsp--, info++) {
2667 bacpy(&data.bdaddr, &info->bdaddr);
2668 data.pscan_rep_mode = info->pscan_rep_mode;
2669 data.pscan_period_mode = info->pscan_period_mode;
2670 data.pscan_mode = info->pscan_mode;
2671 memcpy(data.dev_class, info->dev_class, 3);
2672 data.clock_offset = info->clock_offset;
2673 data.rssi = HCI_RSSI_INVALID;
2674 data.ssp_mode = 0x00;
2676 flags = hci_inquiry_cache_update(hdev, &data, false);
2678 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2679 info->dev_class, HCI_RSSI_INVALID,
2680 flags, NULL, 0, NULL, 0);
2683 hci_dev_unlock(hdev);
/* Connection Complete event handler. Resolves (or, for allow-listed
 * auto-connects and pending eSCO, creates/repurposes) the conn object,
 * then on success: records the handle, moves ACL links to BT_CONFIG with a
 * pairing-aware disconnect timeout, registers debugfs/sysfs, propagates
 * the adapter AUTH/ENCRYPT bits, kicks off Read Remote Features, refreshes
 * the scan mode, and fixes the packet type for pre-2.0 incoming links. On
 * failure it closes the conn and tells mgmt. Finally SCO setup and connect
 * confirmation are dispatched and pending connects re-checked.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here;
 * the hci_conn_add() argument list at 2710 is visibly truncated.
 */
2686 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2688 struct hci_ev_conn_complete *ev = (void *) skb->data;
2689 struct hci_conn *conn;
2691 BT_DBG("%s", hdev->name);
2695 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2697 /* Connection may not exist if auto-connected. Check the bredr
2698 * allowlist to see if this device is allowed to auto connect.
2699 * If link is an ACL type, create a connection class
2702 * Auto-connect will only occur if the event filter is
2703 * programmed with a given address. Right now, event filter is
2704 * only used during suspend.
2706 if (ev->link_type == ACL_LINK &&
2707 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2710 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2713 bt_dev_err(hdev, "no memory for new conn");
2717 if (ev->link_type != SCO_LINK)
2720 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2725 conn->type = SCO_LINK;
2730 conn->handle = __le16_to_cpu(ev->handle);
2732 if (conn->type == ACL_LINK) {
2733 conn->state = BT_CONFIG;
2734 hci_conn_hold(conn);
2736 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2737 !hci_find_link_key(hdev, &ev->bdaddr))
2738 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2740 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2742 conn->state = BT_CONNECTED;
2744 hci_debugfs_create_conn(conn);
2745 hci_conn_add_sysfs(conn);
2747 if (test_bit(HCI_AUTH, &hdev->flags))
2748 set_bit(HCI_CONN_AUTH, &conn->flags);
2750 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2751 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2753 /* Get remote features */
2754 if (conn->type == ACL_LINK) {
2755 struct hci_cp_read_remote_features cp;
2756 cp.handle = ev->handle;
2757 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2760 hci_req_update_scan(hdev);
2763 /* Set packet type for incoming connection */
2764 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2765 struct hci_cp_change_conn_ptype cp;
2766 cp.handle = ev->handle;
2767 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2768 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2772 conn->state = BT_CLOSED;
2773 if (conn->type == ACL_LINK)
2774 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2775 conn->dst_type, ev->status);
2778 if (conn->type == ACL_LINK)
2779 hci_sco_setup(conn, ev->status);
2782 hci_connect_cfm(conn, ev->status);
2784 } else if (ev->link_type == SCO_LINK) {
2785 switch (conn->setting & SCO_AIRMODE_MASK) {
2786 case SCO_AIRMODE_CVSD:
2788 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2792 hci_connect_cfm(conn, ev->status);
2796 hci_dev_unlock(hdev);
2798 hci_conn_check_pending(hdev);
/* Rejects an incoming connection request from bdaddr with reason
 * HCI_ERROR_REJ_BAD_ADDR by sending Reject Connection Request.
 */
2801 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2803 struct hci_cp_reject_conn_req cp;
2805 bacpy(&cp.bdaddr, bdaddr);
2806 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2807 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event handler. Rejects the request when the protocol
 * layer does not accept it, when the peer is on the reject list, or (under
 * mgmt) when the adapter is not connectable and the peer is not on the
 * accept list. Otherwise it refreshes the inquiry-cache device class,
 * finds or creates the conn object, and either accepts immediately — ACL
 * via Accept Connection Request with a role chosen by role-switch support,
 * SCO via Accept Synchronous Connection (fixed 8 kHz bandwidth, maximum
 * latency, no retransmission-effort preference) — or, when the protocol
 * deferred, parks the conn in BT_CONNECT2 and confirms with status 0.
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here
 * (including the 'flags' declaration used at 2872/2884).
 */
2810 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2812 struct hci_ev_conn_request *ev = (void *) skb->data;
2813 int mask = hdev->link_mode;
2814 struct inquiry_entry *ie;
2815 struct hci_conn *conn;
2818 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2821 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2824 if (!(mask & HCI_LM_ACCEPT)) {
2825 hci_reject_conn(hdev, &ev->bdaddr);
2829 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2831 hci_reject_conn(hdev, &ev->bdaddr);
2835 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2836 * connection. These features are only touched through mgmt so
2837 * only do the checks if HCI_MGMT is set.
2839 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2840 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2841 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2843 hci_reject_conn(hdev, &ev->bdaddr);
2847 /* Connection accepted */
2851 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2853 memcpy(ie->data.dev_class, ev->dev_class, 3);
2855 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2858 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2861 bt_dev_err(hdev, "no memory for new connection");
2862 hci_dev_unlock(hdev);
2867 memcpy(conn->dev_class, ev->dev_class, 3);
2869 hci_dev_unlock(hdev);
2871 if (ev->link_type == ACL_LINK ||
2872 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2873 struct hci_cp_accept_conn_req cp;
2874 conn->state = BT_CONNECT;
2876 bacpy(&cp.bdaddr, &ev->bdaddr);
2878 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2879 cp.role = 0x00; /* Become central */
2881 cp.role = 0x01; /* Remain peripheral */
2883 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2884 } else if (!(flags & HCI_PROTO_DEFER)) {
2885 struct hci_cp_accept_sync_conn_req cp;
2886 conn->state = BT_CONNECT;
2888 bacpy(&cp.bdaddr, &ev->bdaddr);
2889 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2891 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2892 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2893 cp.max_latency = cpu_to_le16(0xffff);
2894 cp.content_format = cpu_to_le16(hdev->voice_setting);
2895 cp.retrans_effort = 0xff;
2897 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2900 conn->state = BT_CONNECT2;
2901 hci_connect_cfm(conn, 0);
/* Maps an HCI disconnect reason code onto the coarser mgmt disconnect
 * reason space; anything unrecognized becomes MGMT_DEV_DISCONN_UNKNOWN.
 * NOTE(review): the switch(err) line and return lines are elided from this
 * extract (numbering gaps).
 */
2905 static u8 hci_to_mgmt_reason(u8 err)
2908 case HCI_ERROR_CONNECTION_TIMEOUT:
2909 return MGMT_DEV_DISCONN_TIMEOUT;
2910 case HCI_ERROR_REMOTE_USER_TERM:
2911 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2912 case HCI_ERROR_REMOTE_POWER_OFF:
2913 return MGMT_DEV_DISCONN_REMOTE;
2914 case HCI_ERROR_LOCAL_HOST_TERM:
2915 return MGMT_DEV_DISCONN_LOCAL_HOST;
2917 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event handler. On event failure the mgmt layer is
 * told the disconnect failed; otherwise the conn is closed, mgmt notified
 * with an auth-failure-aware reason, and for ACL links the link key is
 * flushed if flagged and the scan mode refreshed. LE auto-connect params
 * are re-armed (link-loss only re-queues on a genuine timeout; DIRECT and
 * ALWAYS always re-queue into pend_le_conns for background scanning). The
 * suspend notifier is woken once the last connection is gone, and LE
 * advertising is re-enabled since the Controller stops advertising when a
 * connection is created (Core spec v4.0, LE_Set_Advertise_Enable text
 * quoted below).
 * NOTE(review): numbering gaps show guard/lock/brace lines elided here
 * (including the 'u8 reason' declaration used at 2948/2950).
 */
2921 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2923 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2925 struct hci_conn_params *params;
2926 struct hci_conn *conn;
2927 bool mgmt_connected;
2929 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2933 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2938 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2939 conn->dst_type, ev->status);
2943 conn->state = BT_CLOSED;
2945 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2947 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2948 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2950 reason = hci_to_mgmt_reason(ev->reason);
2952 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2953 reason, mgmt_connected);
2955 if (conn->type == ACL_LINK) {
2956 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2957 hci_remove_link_key(hdev, &conn->dst);
2959 hci_req_update_scan(hdev);
2962 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2964 switch (params->auto_connect) {
2965 case HCI_AUTO_CONN_LINK_LOSS:
2966 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2970 case HCI_AUTO_CONN_DIRECT:
2971 case HCI_AUTO_CONN_ALWAYS:
2972 list_del_init(&params->action);
2973 list_add(&params->action, &hdev->pend_le_conns);
2974 hci_update_background_scan(hdev);
2982 hci_disconn_cfm(conn, ev->reason);
2984 /* The suspend notifier is waiting for all devices to disconnect so
2985 * clear the bit from pending tasks and inform the wait queue.
2987 if (list_empty(&hdev->conn_hash.list) &&
2988 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
2989 wake_up(&hdev->suspend_wait_q);
2992 /* Re-enable advertising if necessary, since it might
2993 * have been disabled by the connection. From the
2994 * HCI_LE_Set_Advertise_Enable command description in
2995 * the core specification (v4.0):
2996 * "The Controller shall continue advertising until the Host
2997 * issues an LE_Set_Advertise_Enable command with
2998 * Advertising_Enable set to 0x00 (Advertising is disabled)
2999 * or until a connection is created or until the Advertising
3000 * is timed out due to Directed Advertising."
3002 if (conn->type == LE_LINK) {
3003 hdev->cur_adv_instance = conn->adv_instance;
3004 hci_req_reenable_advertising(hdev);
3010 hci_dev_unlock(hdev);
/* Handle the HCI Authentication Complete event: update the connection's
 * auth/security flags based on ev->status and, while in BT_CONFIG, drive
 * the follow-up encryption setup.
 *
 * NOTE(review): truncated extraction -- else-branches, braces and exit
 * labels are missing between the numbered statements.
 */
3013 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3015 struct hci_ev_auth_complete *ev = (void *) skb->data;
3016 struct hci_conn *conn;
3018 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3022 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Success path: clear any stale auth-failure marker. */
3027 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
/* Legacy (non-SSP) devices cannot be re-authenticated; just log it. */
3029 if (!hci_conn_ssp_enabled(conn) &&
3030 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3031 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3033 set_bit(HCI_CONN_AUTH, &conn->flags);
3034 conn->sec_level = conn->pending_sec_level;
/* Failure path: latch PIN/key-missing as an auth failure and tell mgmt. */
3037 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3038 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3040 mgmt_auth_failed(conn, ev->status);
3043 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3044 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
/* While configuring: successful SSP auth triggers encryption setup,
 * otherwise the connection is promoted to BT_CONNECTED.
 */
3046 if (conn->state == BT_CONFIG) {
3047 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3048 struct hci_cp_set_conn_encrypt cp;
3049 cp.handle = ev->handle;
3051 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3054 conn->state = BT_CONNECTED;
3055 hci_connect_cfm(conn, ev->status);
3056 hci_conn_drop(conn);
3059 hci_auth_cfm(conn, ev->status);
/* Keep the connection alive briefly via the disconnect timeout. */
3061 hci_conn_hold(conn);
3062 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3063 hci_conn_drop(conn);
/* A pending encryption request is issued (or confirmed failed) now
 * that authentication has completed.
 */
3066 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3068 struct hci_cp_set_conn_encrypt cp;
3069 cp.handle = ev->handle;
3071 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3074 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3075 hci_encrypt_cfm(conn, ev->status);
3080 hci_dev_unlock(hdev);
/* Handle the HCI Remote Name Request Complete event: resolve any pending
 * name lookup through mgmt and, if needed, kick off outgoing
 * authentication for the connection.
 *
 * NOTE(review): truncated extraction -- locking calls and exit labels are
 * missing between the numbered statements.
 */
3083 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3085 struct hci_ev_remote_name *ev = (void *) skb->data;
3086 struct hci_conn *conn;
3088 BT_DBG("%s", hdev->name);
3090 hci_conn_check_pending(hdev);
3094 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Without the mgmt interface there is nobody to deliver the name to. */
3096 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* Success delivers the (bounded) name; failure delivers a NULL name. */
3099 if (ev->status == 0)
3100 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3101 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3103 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3109 if (!hci_outgoing_auth_needed(hdev, conn))
/* Start authentication exactly once; AUTH_PEND guards against repeats. */
3112 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3113 struct hci_cp_auth_requested cp;
3115 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3117 cp.handle = __cpu_to_le16(conn->handle);
3118 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3122 hci_dev_unlock(hdev);
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE: record the negotiated
 * encryption key size on the connection (0 on failure, forcing the link to
 * be treated as unencrypted) and confirm encryption to upper layers.
 *
 * NOTE(review): truncated extraction -- the status check and exit labels
 * are missing between the numbered statements.
 */
3125 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3126 u16 opcode, struct sk_buff *skb)
3128 const struct hci_rp_read_enc_key_size *rp;
3129 struct hci_conn *conn;
3132 BT_DBG("%s status 0x%02x", hdev->name, status);
/* Guard against a missing or short response before touching rp. */
3134 if (!skb || skb->len < sizeof(*rp)) {
3135 bt_dev_err(hdev, "invalid read key size response");
3139 rp = (void *)skb->data;
3140 handle = le16_to_cpu(rp->handle);
3144 conn = hci_conn_hash_lookup_handle(hdev, handle);
3148 /* While unexpected, the read_enc_key_size command may fail. The most
3149 * secure approach is to then assume the key size is 0 to force a
3153 bt_dev_err(hdev, "failed to read key size for handle %u",
3155 conn->enc_key_size = 0;
3157 conn->enc_key_size = rp->key_size;
3160 hci_encrypt_cfm(conn, 0);
3163 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Change event: update the connection's
 * encryption/auth/FIPS/AES-CCM flags, enforce link security requirements,
 * optionally read the encryption key size (ACL), and set the default
 * authenticated payload timeout where LMP/LE ping is supported.
 *
 * NOTE(review): truncated extraction -- else-branches, braces and goto
 * labels are missing between the numbered statements.
 */
3166 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3168 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3169 struct hci_conn *conn;
3171 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3175 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3181 /* Encryption implies authentication */
3182 set_bit(HCI_CONN_AUTH, &conn->flags);
3183 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3184 conn->sec_level = conn->pending_sec_level;
3186 /* P-256 authentication key implies FIPS */
3187 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3188 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 on ACL means AES-CCM; LE links always use AES-CCM. */
3190 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3191 conn->type == LE_LINK)
3192 set_bit(HCI_CONN_AES_CCM, &conn->flags);
/* Encryption turned off: drop the related flags. */
3194 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3195 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3199 /* We should disregard the current RPA and generate a new one
3200 * whenever the encryption procedure fails.
3202 if (ev->status && conn->type == LE_LINK) {
3203 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3204 hci_adv_instances_set_rpa_expired(hdev, true);
3207 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3209 /* Check link security requirements are met */
3210 if (!hci_conn_check_link_mode(conn))
3211 ev->status = HCI_ERROR_AUTH_FAILURE;
/* An encryption failure on an established link forces a disconnect. */
3213 if (ev->status && conn->state == BT_CONNECTED) {
3214 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3215 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3217 /* Notify upper layers so they can cleanup before
3220 hci_encrypt_cfm(conn, ev->status);
3221 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3222 hci_conn_drop(conn);
3226 /* Try reading the encryption key size for encrypted ACL links */
3227 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3228 struct hci_cp_read_enc_key_size cp;
3229 struct hci_request req;
3231 /* Only send HCI_Read_Encryption_Key_Size if the
3232 * controller really supports it. If it doesn't, assume
3233 * the default size (16).
3235 if (!(hdev->commands[20] & 0x10)) {
3236 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3240 hci_req_init(&req, hdev);
3242 cp.handle = cpu_to_le16(conn->handle);
3243 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
/* If the request cannot even be queued, fall back to the default size. */
3245 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3246 bt_dev_err(hdev, "sending read key size failed");
3247 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3254 /* Set the default Authenticated Payload Timeout after
3255 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3256 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3257 * sent when the link is active and Encryption is enabled, the conn
3258 * type can be either LE or ACL and controller must support LMP Ping.
3259 * Ensure for AES-CCM encryption as well.
3261 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3262 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3263 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3264 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3265 struct hci_cp_write_auth_payload_to cp;
3267 cp.handle = cpu_to_le16(conn->handle);
3268 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3269 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3274 hci_encrypt_cfm(conn, ev->status);
3277 hci_dev_unlock(hdev);
/* Handle the HCI Change Connection Link Key Complete event: mark the
 * connection secure, clear the pending-auth flag and notify upper layers
 * of the key change.
 *
 * NOTE(review): truncated extraction -- the status check and braces are
 * missing between the numbered statements.
 */
3280 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3281 struct sk_buff *skb)
3283 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3284 struct hci_conn *conn;
3286 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3290 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3293 set_bit(HCI_CONN_SECURE, &conn->flags);
3295 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3297 hci_key_change_cfm(conn, ev->status);
3300 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Supported Features Complete event: cache
 * page 0 of the remote features and continue connection setup (extended
 * features read, remote name request, or promotion to BT_CONNECTED).
 *
 * NOTE(review): truncated extraction -- goto targets and braces are
 * missing between the numbered statements.
 */
3303 static void hci_remote_features_evt(struct hci_dev *hdev,
3304 struct sk_buff *skb)
3306 struct hci_ev_remote_features *ev = (void *) skb->data;
3307 struct hci_conn *conn;
3309 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3313 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache feature page 0 from the event. */
3318 memcpy(conn->features[0], ev->features, 8);
3320 if (conn->state != BT_CONFIG)
/* When both sides support extended features, fetch page 1 next. */
3323 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3324 lmp_ext_feat_capable(conn)) {
3325 struct hci_cp_read_remote_ext_features cp;
3326 cp.handle = ev->handle;
3328 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Otherwise request the remote name, or report the connection to mgmt
 * if that has not happened yet.
 */
3333 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3334 struct hci_cp_remote_name_req cp;
3335 memset(&cp, 0, sizeof(cp));
3336 bacpy(&cp.bdaddr, &conn->dst);
3337 cp.pscan_rep_mode = 0x02;
3338 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3339 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3340 mgmt_device_connected(hdev, conn, NULL, 0);
3342 if (!hci_outgoing_auth_needed(hdev, conn)) {
3343 conn->state = BT_CONNECTED;
3344 hci_connect_cfm(conn, ev->status);
3345 hci_conn_drop(conn);
3349 hci_dev_unlock(hdev);
/* Common bookkeeping for Command Complete/Status events: cancel the
 * command timeout and, outside of a reset, refresh the ncmd timer and the
 * available-command-slot counter.
 *
 * NOTE(review): truncated extraction -- the ncmd branch and the
 * schedule_delayed_work() argument list are cut off mid-call.
 */
3352 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3354 cancel_delayed_work(&hdev->cmd_timer);
3356 if (!test_bit(HCI_RESET, &hdev->flags)) {
3358 cancel_delayed_work(&hdev->ncmd_timer);
3359 atomic_set(&hdev->cmd_cnt, 1);
3361 schedule_delayed_work(&hdev->ncmd_timer,
/* Handle the HCI Command Complete event: extract opcode and status,
 * dispatch to the matching hci_cc_* handler, update the command
 * counter/timers, complete any pending request, and kick the command
 * work queue if more commands are waiting.
 *
 * NOTE(review): truncated extraction -- the switch's "break;" lines,
 * the "switch (*opcode)" header and the default case body are missing
 * between the numbered statements.
 */
3367 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3368 u16 *opcode, u8 *status,
3369 hci_req_complete_t *req_complete,
3370 hci_req_complete_skb_t *req_complete_skb)
3372 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* The status byte immediately follows the fixed event header. */
3374 *opcode = __le16_to_cpu(ev->opcode);
3375 *status = skb->data[sizeof(*ev)];
3377 skb_pull(skb, sizeof(*ev));
/* Per-opcode dispatch to the Command Complete handlers. */
3380 case HCI_OP_INQUIRY_CANCEL:
3381 hci_cc_inquiry_cancel(hdev, skb, status);
3384 case HCI_OP_PERIODIC_INQ:
3385 hci_cc_periodic_inq(hdev, skb);
3388 case HCI_OP_EXIT_PERIODIC_INQ:
3389 hci_cc_exit_periodic_inq(hdev, skb);
3392 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3393 hci_cc_remote_name_req_cancel(hdev, skb);
3396 case HCI_OP_ROLE_DISCOVERY:
3397 hci_cc_role_discovery(hdev, skb);
3400 case HCI_OP_READ_LINK_POLICY:
3401 hci_cc_read_link_policy(hdev, skb);
3404 case HCI_OP_WRITE_LINK_POLICY:
3405 hci_cc_write_link_policy(hdev, skb);
3408 case HCI_OP_READ_DEF_LINK_POLICY:
3409 hci_cc_read_def_link_policy(hdev, skb);
3412 case HCI_OP_WRITE_DEF_LINK_POLICY:
3413 hci_cc_write_def_link_policy(hdev, skb);
3417 hci_cc_reset(hdev, skb);
3420 case HCI_OP_READ_STORED_LINK_KEY:
3421 hci_cc_read_stored_link_key(hdev, skb);
3424 case HCI_OP_DELETE_STORED_LINK_KEY:
3425 hci_cc_delete_stored_link_key(hdev, skb);
3428 case HCI_OP_WRITE_LOCAL_NAME:
3429 hci_cc_write_local_name(hdev, skb);
3432 case HCI_OP_READ_LOCAL_NAME:
3433 hci_cc_read_local_name(hdev, skb);
3436 case HCI_OP_WRITE_AUTH_ENABLE:
3437 hci_cc_write_auth_enable(hdev, skb);
3440 case HCI_OP_WRITE_ENCRYPT_MODE:
3441 hci_cc_write_encrypt_mode(hdev, skb);
3444 case HCI_OP_WRITE_SCAN_ENABLE:
3445 hci_cc_write_scan_enable(hdev, skb);
3448 case HCI_OP_SET_EVENT_FLT:
3449 hci_cc_set_event_filter(hdev, skb);
3452 case HCI_OP_READ_CLASS_OF_DEV:
3453 hci_cc_read_class_of_dev(hdev, skb);
3456 case HCI_OP_WRITE_CLASS_OF_DEV:
3457 hci_cc_write_class_of_dev(hdev, skb);
3460 case HCI_OP_READ_VOICE_SETTING:
3461 hci_cc_read_voice_setting(hdev, skb);
3464 case HCI_OP_WRITE_VOICE_SETTING:
3465 hci_cc_write_voice_setting(hdev, skb);
3468 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3469 hci_cc_read_num_supported_iac(hdev, skb);
3472 case HCI_OP_WRITE_SSP_MODE:
3473 hci_cc_write_ssp_mode(hdev, skb);
3476 case HCI_OP_WRITE_SC_SUPPORT:
3477 hci_cc_write_sc_support(hdev, skb);
3480 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3481 hci_cc_read_auth_payload_timeout(hdev, skb);
3484 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3485 hci_cc_write_auth_payload_timeout(hdev, skb);
3488 case HCI_OP_READ_LOCAL_VERSION:
3489 hci_cc_read_local_version(hdev, skb);
3492 case HCI_OP_READ_LOCAL_COMMANDS:
3493 hci_cc_read_local_commands(hdev, skb);
3496 case HCI_OP_READ_LOCAL_FEATURES:
3497 hci_cc_read_local_features(hdev, skb);
3500 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3501 hci_cc_read_local_ext_features(hdev, skb);
3504 case HCI_OP_READ_BUFFER_SIZE:
3505 hci_cc_read_buffer_size(hdev, skb);
3508 case HCI_OP_READ_BD_ADDR:
3509 hci_cc_read_bd_addr(hdev, skb);
3512 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3513 hci_cc_read_local_pairing_opts(hdev, skb);
3516 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3517 hci_cc_read_page_scan_activity(hdev, skb);
3520 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3521 hci_cc_write_page_scan_activity(hdev, skb);
3524 case HCI_OP_READ_PAGE_SCAN_TYPE:
3525 hci_cc_read_page_scan_type(hdev, skb);
3528 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3529 hci_cc_write_page_scan_type(hdev, skb);
3532 case HCI_OP_READ_DATA_BLOCK_SIZE:
3533 hci_cc_read_data_block_size(hdev, skb);
3536 case HCI_OP_READ_FLOW_CONTROL_MODE:
3537 hci_cc_read_flow_control_mode(hdev, skb);
3540 case HCI_OP_READ_LOCAL_AMP_INFO:
3541 hci_cc_read_local_amp_info(hdev, skb);
3544 case HCI_OP_READ_CLOCK:
3545 hci_cc_read_clock(hdev, skb);
3548 case HCI_OP_READ_INQ_RSP_TX_POWER:
3549 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3552 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3553 hci_cc_read_def_err_data_reporting(hdev, skb);
3556 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3557 hci_cc_write_def_err_data_reporting(hdev, skb);
3560 case HCI_OP_PIN_CODE_REPLY:
3561 hci_cc_pin_code_reply(hdev, skb);
3564 case HCI_OP_PIN_CODE_NEG_REPLY:
3565 hci_cc_pin_code_neg_reply(hdev, skb);
3568 case HCI_OP_READ_LOCAL_OOB_DATA:
3569 hci_cc_read_local_oob_data(hdev, skb);
3572 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3573 hci_cc_read_local_oob_ext_data(hdev, skb);
3576 case HCI_OP_LE_READ_BUFFER_SIZE:
3577 hci_cc_le_read_buffer_size(hdev, skb);
3580 case HCI_OP_LE_READ_LOCAL_FEATURES:
3581 hci_cc_le_read_local_features(hdev, skb);
3584 case HCI_OP_LE_READ_ADV_TX_POWER:
3585 hci_cc_le_read_adv_tx_power(hdev, skb);
3588 case HCI_OP_USER_CONFIRM_REPLY:
3589 hci_cc_user_confirm_reply(hdev, skb);
3592 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3593 hci_cc_user_confirm_neg_reply(hdev, skb);
3596 case HCI_OP_USER_PASSKEY_REPLY:
3597 hci_cc_user_passkey_reply(hdev, skb);
3600 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3601 hci_cc_user_passkey_neg_reply(hdev, skb);
3604 case HCI_OP_LE_SET_RANDOM_ADDR:
3605 hci_cc_le_set_random_addr(hdev, skb);
3608 case HCI_OP_LE_SET_ADV_ENABLE:
3609 hci_cc_le_set_adv_enable(hdev, skb);
3612 case HCI_OP_LE_SET_SCAN_PARAM:
3613 hci_cc_le_set_scan_param(hdev, skb);
3616 case HCI_OP_LE_SET_SCAN_ENABLE:
3617 hci_cc_le_set_scan_enable(hdev, skb);
3620 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3621 hci_cc_le_read_accept_list_size(hdev, skb);
3624 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3625 hci_cc_le_clear_accept_list(hdev, skb);
3628 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3629 hci_cc_le_add_to_accept_list(hdev, skb);
3632 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3633 hci_cc_le_del_from_accept_list(hdev, skb);
3636 case HCI_OP_LE_READ_SUPPORTED_STATES:
3637 hci_cc_le_read_supported_states(hdev, skb);
3640 case HCI_OP_LE_READ_DEF_DATA_LEN:
3641 hci_cc_le_read_def_data_len(hdev, skb);
3644 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3645 hci_cc_le_write_def_data_len(hdev, skb);
3648 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3649 hci_cc_le_add_to_resolv_list(hdev, skb);
3652 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3653 hci_cc_le_del_from_resolv_list(hdev, skb);
3656 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3657 hci_cc_le_clear_resolv_list(hdev, skb);
3660 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3661 hci_cc_le_read_resolv_list_size(hdev, skb);
3664 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3665 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3668 case HCI_OP_LE_READ_MAX_DATA_LEN:
3669 hci_cc_le_read_max_data_len(hdev, skb);
3672 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3673 hci_cc_write_le_host_supported(hdev, skb);
3676 case HCI_OP_LE_SET_ADV_PARAM:
3677 hci_cc_set_adv_param(hdev, skb);
3680 case HCI_OP_READ_RSSI:
3681 hci_cc_read_rssi(hdev, skb);
3684 case HCI_OP_READ_TX_POWER:
3685 hci_cc_read_tx_power(hdev, skb);
3688 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3689 hci_cc_write_ssp_debug_mode(hdev, skb);
3692 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3693 hci_cc_le_set_ext_scan_param(hdev, skb);
3696 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3697 hci_cc_le_set_ext_scan_enable(hdev, skb);
3700 case HCI_OP_LE_SET_DEFAULT_PHY:
3701 hci_cc_le_set_default_phy(hdev, skb);
3704 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3705 hci_cc_le_read_num_adv_sets(hdev, skb);
3708 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3709 hci_cc_set_ext_adv_param(hdev, skb);
3712 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3713 hci_cc_le_set_ext_adv_enable(hdev, skb);
3716 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3717 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3720 case HCI_OP_LE_READ_TRANSMIT_POWER:
3721 hci_cc_le_read_transmit_power(hdev, skb);
3724 case HCI_OP_ENABLE_RSSI:
3725 hci_cc_enable_rssi(hdev, skb);
3728 case HCI_OP_GET_RAW_RSSI:
3729 hci_cc_get_raw_rssi(hdev, skb);
/* Unhandled opcodes are just logged. */
3733 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3737 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
/* Resolve the pending request that issued this command, if any. */
3739 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3742 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3744 "unexpected event for opcode 0x%4.4x", *opcode);
/* More queued commands and free slots: keep the command pump running. */
3748 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3749 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event: extract opcode and status, dispatch
 * to the matching hci_cs_* handler, update the command counter/timers,
 * complete the pending request on failure (or when no special event is
 * awaited), and kick the command work queue.
 *
 * NOTE(review): truncated extraction -- the "switch (*opcode)" header,
 * "break;" lines and the condition on line 3846 are missing between the
 * numbered statements.
 */
3752 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3753 u16 *opcode, u8 *status,
3754 hci_req_complete_t *req_complete,
3755 hci_req_complete_skb_t *req_complete_skb)
3757 struct hci_ev_cmd_status *ev = (void *) skb->data;
3759 skb_pull(skb, sizeof(*ev));
3761 *opcode = __le16_to_cpu(ev->opcode);
3762 *status = ev->status;
/* Per-opcode dispatch to the Command Status handlers. */
3765 case HCI_OP_INQUIRY:
3766 hci_cs_inquiry(hdev, ev->status);
3769 case HCI_OP_CREATE_CONN:
3770 hci_cs_create_conn(hdev, ev->status);
3773 case HCI_OP_DISCONNECT:
3774 hci_cs_disconnect(hdev, ev->status);
3777 case HCI_OP_ADD_SCO:
3778 hci_cs_add_sco(hdev, ev->status);
3781 case HCI_OP_AUTH_REQUESTED:
3782 hci_cs_auth_requested(hdev, ev->status);
3785 case HCI_OP_SET_CONN_ENCRYPT:
3786 hci_cs_set_conn_encrypt(hdev, ev->status);
3789 case HCI_OP_REMOTE_NAME_REQ:
3790 hci_cs_remote_name_req(hdev, ev->status);
3793 case HCI_OP_READ_REMOTE_FEATURES:
3794 hci_cs_read_remote_features(hdev, ev->status);
3797 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3798 hci_cs_read_remote_ext_features(hdev, ev->status);
3801 case HCI_OP_SETUP_SYNC_CONN:
3802 hci_cs_setup_sync_conn(hdev, ev->status);
3805 case HCI_OP_SNIFF_MODE:
3806 hci_cs_sniff_mode(hdev, ev->status);
3809 case HCI_OP_EXIT_SNIFF_MODE:
3810 hci_cs_exit_sniff_mode(hdev, ev->status);
3813 case HCI_OP_SWITCH_ROLE:
3814 hci_cs_switch_role(hdev, ev->status);
3817 case HCI_OP_LE_CREATE_CONN:
3818 hci_cs_le_create_conn(hdev, ev->status);
3821 case HCI_OP_LE_READ_REMOTE_FEATURES:
3822 hci_cs_le_read_remote_features(hdev, ev->status);
3825 case HCI_OP_LE_START_ENC:
3826 hci_cs_le_start_enc(hdev, ev->status);
3829 case HCI_OP_LE_EXT_CREATE_CONN:
3830 hci_cs_le_ext_create_conn(hdev, ev->status);
/* Unhandled opcodes are just logged. */
3834 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3838 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3840 /* Indicate request completion if the command failed. Also, if
3841 * we're not waiting for a special event and we get a success
3842 * command status we should try to flag the request as completed
3843 * (since for this kind of commands there will not be a command
3847 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3848 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3851 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3853 "unexpected event for opcode 0x%4.4x", *opcode);
/* More queued commands and free slots: keep the command pump running. */
3857 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3858 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: record the controller's error code
 * and schedule the error-reset work to recover the device.
 */
3861 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3863 struct hci_ev_hardware_error *ev = (void *) skb->data;
3865 hdev->hw_error_code = ev->code;
3867 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: update the connection's role on
 * success, clear the pending role-switch flag, and notify upper layers.
 *
 * NOTE(review): truncated extraction -- the status checks and braces are
 * missing between the numbered statements.
 */
3870 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3872 struct hci_ev_role_change *ev = (void *) skb->data;
3873 struct hci_conn *conn;
3875 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3879 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3882 conn->role = ev->role;
3884 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3886 hci_role_switch_cfm(conn, ev->status, ev->role);
3889 hci_dev_unlock(hdev);
/* Handle the HCI Number Of Completed Packets event (packet-based flow
 * control): for each handle entry, credit the completed packets back to
 * the per-type transmit quota (clamped to the controller limits) and kick
 * the TX work queue.
 *
 * NOTE(review): truncated extraction -- case labels (ACL/LE/SCO), braces
 * and "break;" lines are missing between the numbered statements.
 */
3892 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3894 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow-control mode. */
3897 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3898 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the event length against the declared handle count. */
3902 if (skb->len < sizeof(*ev) ||
3903 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3904 BT_DBG("%s bad parameters", hdev->name);
3908 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3910 for (i = 0; i < ev->num_hndl; i++) {
3911 struct hci_comp_pkts_info *info = &ev->handles[i];
3912 struct hci_conn *conn;
3913 __u16 handle, count;
3915 handle = __le16_to_cpu(info->handle);
3916 count = __le16_to_cpu(info->count);
3918 conn = hci_conn_hash_lookup_handle(hdev, handle);
3922 conn->sent -= count;
3924 switch (conn->type) {
3926 hdev->acl_cnt += count;
3927 if (hdev->acl_cnt > hdev->acl_pkts)
3928 hdev->acl_cnt = hdev->acl_pkts;
/* LE links use the LE quota when the controller has separate LE
 * buffers, otherwise they share the ACL quota.
 */
3932 if (hdev->le_pkts) {
3933 hdev->le_cnt += count;
3934 if (hdev->le_cnt > hdev->le_pkts)
3935 hdev->le_cnt = hdev->le_pkts;
3937 hdev->acl_cnt += count;
3938 if (hdev->acl_cnt > hdev->acl_pkts)
3939 hdev->acl_cnt = hdev->acl_pkts;
3944 hdev->sco_cnt += count;
3945 if (hdev->sco_cnt > hdev->sco_pkts)
3946 hdev->sco_cnt = hdev->sco_pkts;
3950 bt_dev_err(hdev, "unknown type %d conn %p",
3956 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a connection object depending on the device type:
 * primary controllers look up hci_conn directly; AMP controllers look up
 * the hci_chan and (presumably) return its owning connection -- the
 * return statement for that path is outside this truncated view.
 *
 * NOTE(review): truncated extraction -- case labels, the chan->conn
 * return and braces are missing between the numbered statements.
 */
3959 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3962 struct hci_chan *chan;
3964 switch (hdev->dev_type) {
3966 return hci_conn_hash_lookup_handle(hdev, handle);
3968 chan = hci_chan_lookup_handle(hdev, handle);
3973 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle the HCI Number Of Completed Data Blocks event (block-based flow
 * control): for each handle entry, credit the completed blocks back to the
 * shared block quota (clamped to num_blocks) and kick the TX work queue.
 *
 * NOTE(review): truncated extraction -- case labels, "break;" lines and
 * braces are missing between the numbered statements.
 */
3980 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3982 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only valid in block-based flow-control mode. */
3985 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3986 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the event length against the declared handle count. */
3990 if (skb->len < sizeof(*ev) ||
3991 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3992 BT_DBG("%s bad parameters", hdev->name);
3996 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3999 for (i = 0; i < ev->num_hndl; i++) {
4000 struct hci_comp_blocks_info *info = &ev->handles[i];
4001 struct hci_conn *conn = NULL;
4002 __u16 handle, block_count;
4004 handle = __le16_to_cpu(info->handle);
4005 block_count = __le16_to_cpu(info->blocks);
4007 conn = __hci_conn_lookup_handle(hdev, handle);
4011 conn->sent -= block_count;
4013 switch (conn->type) {
4016 hdev->block_cnt += block_count;
4017 if (hdev->block_cnt > hdev->num_blocks)
4018 hdev->block_cnt = hdev->num_blocks;
4022 bt_dev_err(hdev, "unknown type %d conn %p",
4028 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event: record the new power mode on the
 * connection, toggle the power-save flag when the change was not
 * host-initiated, and continue a pending SCO setup.
 *
 * NOTE(review): truncated extraction -- status checks and braces are
 * missing between the numbered statements.
 */
4031 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4033 struct hci_ev_mode_change *ev = (void *) skb->data;
4034 struct hci_conn *conn;
4036 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4040 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4042 conn->mode = ev->mode;
/* Only a mode change that the host did NOT request updates the
 * power-save flag.
 */
4044 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4046 if (conn->mode == HCI_CM_ACTIVE)
4047 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4049 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4052 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4053 hci_sco_setup(conn, ev->status);
4056 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event: extend the disconnect timeout
 * during pairing, reject the request when the device is not bondable and
 * pairing was not initiated locally, otherwise forward the request to
 * mgmt (flagging whether a secure 16-digit PIN is required).
 *
 * NOTE(review): truncated extraction -- the secure-PIN assignment and
 * braces are missing between the numbered statements.
 */
4059 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4061 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4062 struct hci_conn *conn;
4064 BT_DBG("%s", hdev->name);
4068 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the link alive long enough for the user to enter a PIN. */
4072 if (conn->state == BT_CONNECTED) {
4073 hci_conn_hold(conn);
4074 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4075 hci_conn_drop(conn);
/* Non-bondable and not locally initiated: reject the PIN request. */
4078 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4079 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4080 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4081 sizeof(ev->bdaddr), &ev->bdaddr);
4082 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4085 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4090 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4094 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the resulting pending security level from the key type (unit/debug and
 * unauthenticated keys give MEDIUM, authenticated P-192 gives HIGH,
 * authenticated P-256 gives FIPS; plain combination keys give HIGH only
 * with a long PIN -- the length condition is outside this truncated view).
 *
 * NOTE(review): truncated extraction -- the early return for
 * HCI_LK_CHANGED_COMBINATION, the "switch" header, "break;" lines and the
 * combination-key condition are missing between the numbered statements.
 */
4097 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4099 if (key_type == HCI_LK_CHANGED_COMBINATION)
4102 conn->pin_length = pin_len;
4103 conn->key_type = key_type;
4106 case HCI_LK_LOCAL_UNIT:
4107 case HCI_LK_REMOTE_UNIT:
4108 case HCI_LK_DEBUG_COMBINATION:
4110 case HCI_LK_COMBINATION:
4112 conn->pending_sec_level = BT_SECURITY_HIGH;
4114 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4116 case HCI_LK_UNAUTH_COMBINATION_P192:
4117 case HCI_LK_UNAUTH_COMBINATION_P256:
4118 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4120 case HCI_LK_AUTH_COMBINATION_P192:
4121 conn->pending_sec_level = BT_SECURITY_HIGH;
4123 case HCI_LK_AUTH_COMBINATION_P256:
4124 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event: look up a stored link key for
 * the peer, refuse it when it is too weak for the connection's required
 * security level, otherwise reply with the key; with no usable key, send
 * a negative reply.
 *
 * NOTE(review): truncated extraction -- goto targets ("not_found") and
 * braces are missing between the numbered statements.
 */
4129 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4131 struct hci_ev_link_key_req *ev = (void *) skb->data;
4132 struct hci_cp_link_key_reply cp;
4133 struct hci_conn *conn;
4134 struct link_key *key;
4136 BT_DBG("%s", hdev->name);
4138 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4143 key = hci_find_link_key(hdev, &ev->bdaddr);
4145 BT_DBG("%s link key not found for %pMR", hdev->name,
4150 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4153 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4155 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Reject an unauthenticated key when authentication was requested. */
4157 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4158 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4159 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4160 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* A short-PIN combination key is not good enough for HIGH/FIPS. */
4164 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4165 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4166 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4167 BT_DBG("%s ignoring key unauthenticated for high security",
4172 conn_set_key(conn, key->type, key->pin_len);
/* Positive reply carrying the stored key. */
4175 bacpy(&cp.bdaddr, &ev->bdaddr);
4176 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4178 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4180 hci_dev_unlock(hdev);
/* No usable key: negative reply with just the address. */
4185 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4186 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event: record the new key on the
 * connection, store it in the kernel key list, notify mgmt, and drop
 * debug keys unless keeping them is explicitly enabled.
 *
 * NOTE(review): truncated extraction -- the pin_len/persistent
 * declarations, goto targets and braces are missing between the numbered
 * statements.
 */
4189 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4191 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4192 struct hci_conn *conn;
4193 struct link_key *key;
4197 BT_DBG("%s", hdev->name);
4201 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Refresh the disconnect timeout while the key exchange completes. */
4205 hci_conn_hold(conn);
4206 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4207 hci_conn_drop(conn);
4209 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4210 conn_set_key(conn, ev->key_type, conn->pin_length);
4212 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4215 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4216 ev->key_type, pin_len, &persistent);
4220 /* Update connection information since adding the key will have
4221 * fixed up the type in the case of changed combination keys.
4223 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4224 conn_set_key(conn, key->type, key->pin_len);
4226 mgmt_new_link_key(hdev, key, persistent);
4228 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4229 * is set. If it's not set simply remove the key from the kernel
4230 * list (we've still notified user space about it but with
4231 * store_hint being 0).
4233 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4234 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4235 list_del_rcu(&key->list);
4236 kfree_rcu(key, rcu);
/* Track whether this key should be flushed on disconnect. */
4241 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4243 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4246 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: on success, refresh
 * the peer's inquiry-cache entry with the reported clock offset.
 */
4249 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4251 struct hci_ev_clock_offset *ev = (void *) skb->data;
4252 struct hci_conn *conn;
4254 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4258 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4259 if (conn && !ev->status) {
4260 struct inquiry_entry *ie;
4262 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4264 ie->data.clock_offset = ev->clock_offset;
4265 ie->timestamp = jiffies;
4269 hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: on success, record
 * the new packet-type bitmask on the connection.
 */
4272 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4274 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4275 struct hci_conn *conn;
4277 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4281 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4282 if (conn && !ev->status)
4283 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4285 hci_dev_unlock(hdev);
/* Handle the HCI Page Scan Repetition Mode Change event: refresh the
 * peer's inquiry-cache entry with the new page-scan repetition mode.
 */
4288 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4290 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4291 struct inquiry_entry *ie;
4293 BT_DBG("%s", hdev->name);
4297 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4299 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4300 ie->timestamp = jiffies;
4303 hci_dev_unlock(hdev);
/* Handle the HCI Inquiry Result with RSSI event: parse either of the two
 * wire formats (with or without the extra pscan_mode byte, distinguished
 * by the per-response record size), update the inquiry cache, and report
 * each found device to mgmt.
 *
 * NOTE(review): truncated extraction -- locking calls, goto targets and
 * braces are missing between the numbered statements.
 */
4306 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4307 struct sk_buff *skb)
4309 struct inquiry_data data;
4310 int num_rsp = *((__u8 *) skb->data);
4312 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Ignore results while a periodic inquiry is running. */
4317 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Record size decides which of the two event layouts was sent. */
4322 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4323 struct inquiry_info_with_rssi_and_pscan_mode *info;
4324 info = (void *) (skb->data + 1);
/* Bounds check before walking the response array. */
4326 if (skb->len < num_rsp * sizeof(*info) + 1)
4329 for (; num_rsp; num_rsp--, info++) {
4332 bacpy(&data.bdaddr, &info->bdaddr);
4333 data.pscan_rep_mode = info->pscan_rep_mode;
4334 data.pscan_period_mode = info->pscan_period_mode;
4335 data.pscan_mode = info->pscan_mode;
4336 memcpy(data.dev_class, info->dev_class, 3);
4337 data.clock_offset = info->clock_offset;
4338 data.rssi = info->rssi;
4339 data.ssp_mode = 0x00;
4341 flags = hci_inquiry_cache_update(hdev, &data, false);
4343 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4344 info->dev_class, info->rssi,
4345 flags, NULL, 0, NULL, 0);
/* Layout without the pscan_mode byte. */
4348 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4350 if (skb->len < num_rsp * sizeof(*info) + 1)
4353 for (; num_rsp; num_rsp--, info++) {
4356 bacpy(&data.bdaddr, &info->bdaddr);
4357 data.pscan_rep_mode = info->pscan_rep_mode;
4358 data.pscan_period_mode = info->pscan_period_mode;
4359 data.pscan_mode = 0x00;
4360 memcpy(data.dev_class, info->dev_class, 3);
4361 data.clock_offset = info->clock_offset;
4362 data.rssi = info->rssi;
4363 data.ssp_mode = 0x00;
4365 flags = hci_inquiry_cache_update(hdev, &data, false);
4367 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4368 info->dev_class, info->rssi,
4369 flags, NULL, 0, NULL, 0);
4374 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Extended Features Complete event: cache the
 * reported feature page, derive SSP/SC support from host-feature page 1,
 * and continue connection setup (remote name request or promotion to
 * BT_CONNECTED).
 *
 * NOTE(review): truncated extraction -- goto targets, else-branches and
 * braces are missing between the numbered statements.
 */
4377 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4378 struct sk_buff *skb)
4380 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4381 struct hci_conn *conn;
4383 BT_DBG("%s", hdev->name);
4387 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache the reported page only if it fits in the features array. */
4391 if (ev->page < HCI_MAX_PAGES)
4392 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries host features; mirror SSP/SC bits onto the conn. */
4394 if (!ev->status && ev->page == 0x01) {
4395 struct inquiry_entry *ie;
4397 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4399 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4401 if (ev->features[0] & LMP_HOST_SSP) {
4402 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4404 /* It is mandatory by the Bluetooth specification that
4405 * Extended Inquiry Results are only used when Secure
4406 * Simple Pairing is enabled, but some devices violate
4409 * To make these devices work, the internal SSP
4410 * enabled flag needs to be cleared if the remote host
4411 * features do not indicate SSP support */
4412 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4415 if (ev->features[0] & LMP_HOST_SC)
4416 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4419 if (conn->state != BT_CONFIG)
/* Request the remote name, or report the connection to mgmt if that
 * has not happened yet.
 */
4422 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4423 struct hci_cp_remote_name_req cp;
4424 memset(&cp, 0, sizeof(cp));
4425 bacpy(&cp.bdaddr, &conn->dst);
4426 cp.pscan_rep_mode = 0x02;
4427 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4428 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4429 mgmt_device_connected(hdev, conn, NULL, 0);
4431 if (!hci_outgoing_auth_needed(hdev, conn)) {
4432 conn->state = BT_CONNECTED;
4433 hci_connect_cfm(conn, ev->status);
4434 hci_conn_drop(conn);
4438 hci_dev_unlock(hdev);
/* Synchronous (SCO/eSCO) Connection Complete event: register the new
 * audio connection, retry SCO setup on a set of known retryable error
 * codes, and notify the driver which air mode (CVSD/transparent) the
 * controller negotiated.
 * NOTE(review): this extract omits some original lines (locking, braces,
 * break/goto statements) present in the full file.
 */
4441 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4442 struct sk_buff *skb)
4444 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4445 struct hci_conn *conn;
4447 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4451 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4453 if (ev->link_type == ESCO_LINK)
4456 /* When the link type in the event indicates SCO connection
4457 * and lookup of the connection object fails, then check
4458 * if an eSCO connection object exists.
4460 * The core limits the synchronous connections to either
4461 * SCO or eSCO. The eSCO connection is preferred and tried
4462 * to be setup first and until successfully established,
4463 * the link type will be hinted as eSCO.
4465 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4470 switch (ev->status) {
4472 /* The synchronous connection complete event should only be
4473 * sent once per new connection. Receiving a successful
4474 * complete event when the connection status is already
4475 * BT_CONNECTED means that the device is misbehaving and sent
4476 * multiple complete event packets for the same new connection.
4478 * Registering the device more than once can corrupt kernel
4479 * memory, hence upon detecting this invalid event, we report
4480 * an error and ignore the packet.
4482 if (conn->state == BT_CONNECTED) {
4483 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4487 conn->handle = __le16_to_cpu(ev->handle);
4488 conn->state = BT_CONNECTED;
4489 conn->type = ev->link_type;
4491 hci_debugfs_create_conn(conn);
4492 hci_conn_add_sysfs(conn);
/* Errors below are considered retryable: fall back and try SCO setup
 * again with a restricted packet type mask.
 */
4495 case 0x10: /* Connection Accept Timeout */
4496 case 0x0d: /* Connection Rejected due to Limited Resources */
4497 case 0x11: /* Unsupported Feature or Parameter Value */
4498 case 0x1c: /* SCO interval rejected */
4499 case 0x1a: /* Unsupported Remote Feature */
4500 case 0x1e: /* Invalid LMP Parameters */
4501 case 0x1f: /* Unspecified error */
4502 case 0x20: /* Unsupported LMP Parameter value */
4504 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4505 (hdev->esco_type & EDR_ESCO_MASK);
4506 if (hci_setup_sync(conn, conn->link->handle))
4512 conn->state = BT_CLOSED;
4516 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Tell the driver which codec path to enable for this SCO link */
4518 switch (ev->air_mode) {
4521 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4525 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4529 hci_connect_cfm(conn, ev->status);
4534 hci_dev_unlock(hdev);
4537 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4541 while (parsed < eir_len) {
4542 u8 field_len = eir[0];
4547 parsed += field_len + 1;
4548 eir += field_len + 1;
/* Extended Inquiry Result event: validate the response count against
 * the packet length, update the inquiry cache for each response and
 * forward the device (with its EIR data) to mgmt. ssp_mode is forced
 * to 0x01 since EIR implies Secure Simple Pairing support.
 * NOTE(review): this extract omits some original lines (locking,
 * braces, per-loop declarations) present in the full file.
 */
4554 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4555 struct sk_buff *skb)
4557 struct inquiry_data data;
4558 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4559 int num_rsp = *((__u8 *) skb->data);
4562 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject truncated packets: need num_rsp full records plus count byte */
4564 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4567 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4572 for (; num_rsp; num_rsp--, info++) {
4576 bacpy(&data.bdaddr, &info->bdaddr);
4577 data.pscan_rep_mode = info->pscan_rep_mode;
4578 data.pscan_period_mode = info->pscan_period_mode;
4579 data.pscan_mode = 0x00;
4580 memcpy(data.dev_class, info->dev_class, 3);
4581 data.clock_offset = info->clock_offset;
4582 data.rssi = info->rssi;
4583 data.ssp_mode = 0x01;
/* If the EIR data already contains a complete name, no separate
 * remote name request is needed.
 */
4585 if (hci_dev_test_flag(hdev, HCI_MGMT))
4586 name_known = eir_get_data(info->data,
4588 EIR_NAME_COMPLETE, NULL);
4592 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4594 eir_len = eir_get_length(info->data, sizeof(info->data));
4596 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4597 info->dev_class, info->rssi,
4598 flags, info->data, eir_len, NULL, 0);
4601 hci_dev_unlock(hdev);
/* Encryption Key Refresh Complete event: for LE links, promote the
 * pending security level and complete the connect/auth handshake;
 * a failed refresh on an established link triggers a disconnect.
 * BR/EDR links are handled via the auth_complete event instead.
 * NOTE(review): this extract omits some original lines (locking,
 * braces, goto unlock statements) present in the full file.
 */
4604 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4605 struct sk_buff *skb)
4607 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4608 struct hci_conn *conn;
4610 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4611 __le16_to_cpu(ev->handle));
4615 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4619 /* For BR/EDR the necessary steps are taken through the
4620 * auth_complete event.
4622 if (conn->type != LE_LINK)
4626 conn->sec_level = conn->pending_sec_level;
4628 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Key refresh failed on a live link: tear the connection down */
4630 if (ev->status && conn->state == BT_CONNECTED) {
4631 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4632 hci_conn_drop(conn);
4636 if (conn->state == BT_CONFIG) {
4638 conn->state = BT_CONNECTED;
4640 hci_connect_cfm(conn, ev->status);
4641 hci_conn_drop(conn);
4643 hci_auth_cfm(conn, ev->status);
4645 hci_conn_hold(conn);
4646 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4647 hci_conn_drop(conn);
4651 hci_dev_unlock(hdev);
/* Compute the authentication requirements to send in an IO Capability
 * Reply, combining the remote requirement with the local auth type.
 * Bit 0 of the auth value is the MITM-protection bit; it is only kept
 * when both sides have IO capabilities that can support MITM.
 */
4654 static u8 hci_get_auth_req(struct hci_conn *conn)
4656 /* If remote requests no-bonding follow that lead */
4657 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4658 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4659 return conn->remote_auth | (conn->auth_type & 0x01);
4661 /* If both remote and local have enough IO capabilities, require
4664 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4665 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4666 return conn->remote_auth | 0x01;
4668 /* No MITM protection possible so ignore remote requirement */
4669 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB-data-present value for an IO Capability Reply by
 * checking which stored remote OOB values (P-192 / P-256) are usable
 * under the current Secure Connections configuration.
 * NOTE(review): this extract omits some original lines (return
 * statements, braces) present in the full file.
 */
4672 static u8 bredr_oob_data_present(struct hci_conn *conn)
4674 struct hci_dev *hdev = conn->hdev;
4675 struct oob_data *data;
4677 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4681 if (bredr_sc_enabled(hdev)) {
4682 /* When Secure Connections is enabled, then just
4683 * return the present value stored with the OOB
4684 * data. The stored value contains the right present
4685 * information. However it can only be trusted when
4686 * not in Secure Connection Only mode.
4688 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4689 return data->present;
4691 /* When Secure Connections Only mode is enabled, then
4692 * the P-256 values are required. If they are not
4693 * available, then do not declare that OOB data is
4696 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4697 !memcmp(data->hash256, ZERO_KEY, 16))
4703 /* When Secure Connections is not enabled or actually
4704 * not supported by the hardware, then check that if
4705 * P-192 data values are present.
4707 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4708 !memcmp(data->hash192, ZERO_KEY, 16))
/* IO Capability Request event: answer the controller with our IO
 * capability, authentication requirements and OOB data presence, or
 * send a negative reply when pairing is not currently allowed.
 * NOTE(review): this extract omits some original lines (locking,
 * braces, goto unlock statements) present in the full file.
 */
4714 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4716 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4717 struct hci_conn *conn;
4719 BT_DBG("%s", hdev->name);
4723 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4727 hci_conn_hold(conn);
4729 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4732 /* Allow pairing if we're pairable, the initiators of the
4733 * pairing or if the remote is not requesting bonding.
4735 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4736 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4737 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4738 struct hci_cp_io_capability_reply cp;
4740 bacpy(&cp.bdaddr, &ev->bdaddr);
4741 /* Change the IO capability from KeyboardDisplay
4742 * to DisplayYesNo as it is not supported by BT spec. */
4743 cp.capability = (conn->io_capability == 0x04) ?
4744 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4746 /* If we are initiators, there is no remote information yet */
4747 if (conn->remote_auth == 0xff) {
4748 /* Request MITM protection if our IO caps allow it
4749 * except for the no-bonding case.
4751 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4752 conn->auth_type != HCI_AT_NO_BONDING)
4753 conn->auth_type |= 0x01;
4755 conn->auth_type = hci_get_auth_req(conn);
4758 /* If we're not bondable, force one of the non-bondable
4759 * authentication requirement values.
4761 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4762 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4764 cp.authentication = conn->auth_type;
4765 cp.oob_data = bredr_oob_data_present(conn);
4767 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
/* Pairing not allowed: reject the request */
4770 struct hci_cp_io_capability_neg_reply cp;
4772 bacpy(&cp.bdaddr, &ev->bdaddr);
4773 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4775 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4780 hci_dev_unlock(hdev);
/* IO Capability Response event: record the remote device's IO
 * capability and authentication requirements on the connection for
 * use during the subsequent pairing steps.
 */
4783 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4785 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4786 struct hci_conn *conn;
4788 BT_DBG("%s", hdev->name);
4792 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4796 conn->remote_cap = ev->capability;
4797 conn->remote_auth = ev->authentication;
4800 hci_dev_unlock(hdev);
/* User Confirmation Request event: decide between rejecting, silently
 * auto-accepting (possibly after a configurable delay), or forwarding
 * the numeric-comparison request to user space via mgmt, based on both
 * sides' MITM requirements, IO capabilities and existing link keys.
 * NOTE(review): this extract omits some original lines (locking,
 * braces, goto unlock statements) present in the full file.
 */
4803 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4804 struct sk_buff *skb)
4806 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4807 int loc_mitm, rem_mitm, confirm_hint = 0;
4808 struct hci_conn *conn;
4810 BT_DBG("%s", hdev->name);
4814 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4817 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM-protection bit */
4821 loc_mitm = (conn->auth_type & 0x01);
4822 rem_mitm = (conn->remote_auth & 0x01);
4824 /* If we require MITM but the remote device can't provide that
4825 * (it has NoInputNoOutput) then reject the confirmation
4826 * request. We check the security level here since it doesn't
4827 * necessarily match conn->auth_type.
4829 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4830 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4831 BT_DBG("Rejecting request: remote device can't provide MITM");
4832 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4833 sizeof(ev->bdaddr), &ev->bdaddr);
4837 /* If no side requires MITM protection; auto-accept */
4838 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4839 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4841 /* If we're not the initiators request authorization to
4842 * proceed from user space (mgmt_user_confirm with
4843 * confirm_hint set to 1). The exception is if neither
4844 * side had MITM or if the local IO capability is
4845 * NoInputNoOutput, in which case we do auto-accept
4847 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4848 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4849 (loc_mitm || rem_mitm)) {
4850 BT_DBG("Confirming auto-accept as acceptor");
4855 /* If there already exists link key in local host, leave the
4856 * decision to user space since the remote device could be
4857 * legitimate or malicious.
4859 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4860 bt_dev_dbg(hdev, "Local host already has link key");
4865 BT_DBG("Auto-accept of user confirmation with %ums delay",
4866 hdev->auto_accept_delay);
/* Deferred auto-accept via delayed work when a delay is configured */
4868 if (hdev->auto_accept_delay > 0) {
4869 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4870 queue_delayed_work(conn->hdev->workqueue,
4871 &conn->auto_accept_work, delay);
4875 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4876 sizeof(ev->bdaddr), &ev->bdaddr);
/* Hand the decision to user space */
4881 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4882 le32_to_cpu(ev->passkey), confirm_hint);
4885 hci_dev_unlock(hdev);
/* User Passkey Request event: forward the passkey entry request to
 * user space via mgmt when the mgmt interface is active.
 */
4888 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4889 struct sk_buff *skb)
4891 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4893 BT_DBG("%s", hdev->name);
4895 if (hci_dev_test_flag(hdev, HCI_MGMT))
4896 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* User Passkey Notification event: store the passkey that must be
 * displayed locally, reset the entered-digit counter and notify user
 * space via mgmt.
 */
4899 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4900 struct sk_buff *skb)
4902 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4903 struct hci_conn *conn;
4905 BT_DBG("%s", hdev->name);
4907 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4911 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4912 conn->passkey_entered = 0;
4914 if (hci_dev_test_flag(hdev, HCI_MGMT))
4915 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4916 conn->dst_type, conn->passkey_notify,
4917 conn->passkey_entered);
/* Keypress Notification event: track how many passkey digits the
 * remote device has entered so far and report progress to user space.
 * NOTE(review): this extract omits some original lines (switch header,
 * break statements) present in the full file.
 */
4920 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4922 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4923 struct hci_conn *conn;
4925 BT_DBG("%s", hdev->name);
4927 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4932 case HCI_KEYPRESS_STARTED:
4933 conn->passkey_entered = 0;
4936 case HCI_KEYPRESS_ENTERED:
4937 conn->passkey_entered++;
4940 case HCI_KEYPRESS_ERASED:
4941 conn->passkey_entered--;
4944 case HCI_KEYPRESS_CLEARED:
4945 conn->passkey_entered = 0;
4948 case HCI_KEYPRESS_COMPLETED:
4952 if (hci_dev_test_flag(hdev, HCI_MGMT))
4953 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4954 conn->dst_type, conn->passkey_notify,
4955 conn->passkey_entered);
/* Simple Pairing Complete event: reset the cached remote auth
 * requirement and report a pairing failure to mgmt when we were not
 * the authentication initiator (the initiator path reports failures
 * through auth_complete instead).
 */
4958 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4959 struct sk_buff *skb)
4961 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4962 struct hci_conn *conn;
4964 BT_DBG("%s", hdev->name);
4968 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4972 /* Reset the authentication requirement to unknown */
4973 conn->remote_auth = 0xff;
4975 /* To avoid duplicate auth_failed events to user space we check
4976 * the HCI_CONN_AUTH_PEND flag which will be set if we
4977 * initiated the authentication. A traditional auth_complete
4978 * event gets always produced as initiator and is also mapped to
4979 * the mgmt_auth_failed event */
4980 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4981 mgmt_auth_failed(conn, ev->status);
4983 hci_conn_drop(conn);
4986 hci_dev_unlock(hdev);
/* Remote Host Supported Features Notification event: cache the remote
 * host feature page on any existing connection and mirror the SSP bit
 * into the inquiry cache entry for this address.
 */
4989 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4990 struct sk_buff *skb)
4992 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4993 struct inquiry_entry *ie;
4994 struct hci_conn *conn;
4996 BT_DBG("%s", hdev->name);
5000 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5002 memcpy(conn->features[1], ev->features, 8);
5004 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5006 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5008 hci_dev_unlock(hdev);
/* Remote OOB Data Request event: reply with the stored out-of-band
 * hash/randomizer values for this address (extended P-192+P-256 form
 * when Secure Connections is enabled), or send a negative reply when
 * no OOB data is known.
 * NOTE(review): this extract omits some original lines (locking,
 * braces, goto unlock statements) present in the full file.
 */
5011 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5012 struct sk_buff *skb)
5014 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5015 struct oob_data *data;
5017 BT_DBG("%s", hdev->name);
5021 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5024 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5026 struct hci_cp_remote_oob_data_neg_reply cp;
5028 bacpy(&cp.bdaddr, &ev->bdaddr);
5029 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5034 if (bredr_sc_enabled(hdev)) {
5035 struct hci_cp_remote_oob_ext_data_reply cp;
5037 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-only mode the legacy P-192 values must not be used */
5038 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5039 memset(cp.hash192, 0, sizeof(cp.hash192));
5040 memset(cp.rand192, 0, sizeof(cp.rand192));
5042 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5043 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5045 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5046 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5048 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5051 struct hci_cp_remote_oob_data_reply cp;
5053 bacpy(&cp.bdaddr, &ev->bdaddr);
5054 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5055 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5057 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5062 hci_dev_unlock(hdev);
5065 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (High Speed support only): look up the
 * physical link by handle and continue AMP association processing.
 */
5066 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5068 struct hci_ev_channel_selected *ev = (void *)skb->data;
5069 struct hci_conn *hcon;
5071 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5073 skb_pull(skb, sizeof(*ev));
5075 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5079 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: mark the AMP connection as
 * connected, copy the peer address from the underlying BR/EDR link,
 * register debugfs/sysfs entries and confirm the physical link to the
 * AMP manager.
 * NOTE(review): this extract omits some original lines (locking,
 * error-path checks) present in the full file.
 */
5082 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5083 struct sk_buff *skb)
5085 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5086 struct hci_conn *hcon, *bredr_hcon;
5088 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5093 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5105 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5107 hcon->state = BT_CONNECTED;
5108 bacpy(&hcon->dst, &bredr_hcon->dst);
5110 hci_conn_hold(hcon);
5111 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5112 hci_conn_drop(hcon);
5114 hci_debugfs_create_conn(hcon);
5115 hci_conn_add_sysfs(hcon);
5117 amp_physical_cfm(bredr_hcon, hcon);
5120 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create an hci_chan for the new
 * logical link and, if a BR/EDR L2CAP channel is waiting on it,
 * confirm the logical link so L2CAP can move traffic to the AMP
 * controller.
 * NOTE(review): this extract omits some original lines (locking,
 * NULL checks) present in the full file.
 */
5123 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5125 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5126 struct hci_conn *hcon;
5127 struct hci_chan *hchan;
5128 struct amp_mgr *mgr;
5130 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5131 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5134 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5138 /* Create AMP hchan */
5139 hchan = hci_chan_create(hcon);
5143 hchan->handle = le16_to_cpu(ev->handle);
5146 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5148 mgr = hcon->amp_mgr;
5149 if (mgr && mgr->bredr_chan) {
5150 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5152 l2cap_chan_lock(bredr_chan);
/* Switch the L2CAP connection MTU to the AMP controller's block MTU */
5154 bredr_chan->conn->mtu = hdev->block_mtu;
5155 l2cap_logical_cfm(bredr_chan, hchan, 0);
5156 hci_conn_hold(hcon);
5158 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event: look up the AMP
 * channel by logical-link handle and tear it down.
 */
5162 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5163 struct sk_buff *skb)
5165 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5166 struct hci_chan *hchan;
5168 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5169 le16_to_cpu(ev->handle), ev->status);
/* Only AMP channels are subject to logical-link teardown */
5176 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5177 if (!hchan || !hchan->amp)
5180 amp_destroy_logical_link(hchan, ev->reason);
5183 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: mark the physical
 * link's connection object as closed.
 */
5186 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5187 struct sk_buff *skb)
5189 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5190 struct hci_conn *hcon;
5192 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5199 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5201 hcon->state = BT_CLOSED;
5205 hci_dev_unlock(hdev);
/* Fill in the initiator and responder addresses on an LE connection
 * from the Connection Complete parameters. The branch taken depends on
 * the local role (which side initiated): one path treats the peer as
 * responder, the other treats the local adapter as responder. When the
 * controller reported a Local RPA it takes precedence over hdev->rpa
 * and hdev->random_addr.
 * NOTE(review): this extract omits some original lines (role check,
 * braces) present in the full file.
 */
5209 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5210 u8 bdaddr_type, bdaddr_t *local_rpa)
5213 conn->dst_type = bdaddr_type;
5214 conn->resp_addr_type = bdaddr_type;
5215 bacpy(&conn->resp_addr, bdaddr);
5217 /* Check if the controller has set a Local RPA then it must be
5218 * used instead or hdev->rpa.
5220 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5221 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5222 bacpy(&conn->init_addr, local_rpa);
5223 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5224 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5225 bacpy(&conn->init_addr, &conn->hdev->rpa);
5227 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5228 &conn->init_addr_type);
/* Local adapter was the responder (advertiser) for this connection */
5231 conn->resp_addr_type = conn->hdev->adv_addr_type;
5232 /* Check if the controller has set a Local RPA then it must be
5233 * used instead or hdev->rpa.
5235 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5236 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5237 bacpy(&conn->resp_addr, local_rpa);
5238 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5239 /* In case of ext adv, resp_addr will be updated in
5240 * Adv Terminated event.
5242 if (!ext_adv_capable(conn->hdev))
5243 bacpy(&conn->resp_addr,
5244 &conn->hdev->random_addr);
5246 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5249 conn->init_addr_type = bdaddr_type;
5250 bacpy(&conn->init_addr, bdaddr);
5252 /* For incoming connections, set the default minimum
5253 * and maximum connection interval. They will be used
5254 * to check if the parameters are in range and if not
5255 * trigger the connection update procedure.
5257 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5258 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common LE Connection Complete handling, shared by the legacy and the
 * enhanced connection complete events. Resolves/creates the hci_conn,
 * fixes up initiator/responder addresses, maps RPAs back to identity
 * addresses, registers the connection, optionally requests remote
 * features, and cleans up any pending connection parameters.
 * NOTE(review): this extract omits some original lines (locking, role
 * checks, braces, goto unlock statements) present in the full file.
 */
5262 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5263 bdaddr_t *bdaddr, u8 bdaddr_type,
5264 bdaddr_t *local_rpa, u8 role, u16 handle,
5265 u16 interval, u16 latency,
5266 u16 supervision_timeout)
5268 struct hci_conn_params *params;
5269 struct hci_conn *conn;
5270 struct smp_irk *irk;
5275 /* All controllers implicitly stop advertising in the event of a
5276 * connection, so ensure that the state bit is cleared.
5278 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5280 conn = hci_lookup_le_connect(hdev);
/* No pending connect: create a fresh connection object */
5282 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5284 bt_dev_err(hdev, "no memory for new connection");
5288 conn->dst_type = bdaddr_type;
5290 /* If we didn't have a hci_conn object previously
5291 * but we're in central role this must be something
5292 * initiated using an accept list. Since accept list based
5293 * connections are not "first class citizens" we don't
5294 * have full tracking of them. Therefore, we go ahead
5295 * with a "best effort" approach of determining the
5296 * initiator address based on the HCI_PRIVACY flag.
5299 conn->resp_addr_type = bdaddr_type;
5300 bacpy(&conn->resp_addr, bdaddr);
5301 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5302 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5303 bacpy(&conn->init_addr, &hdev->rpa);
5305 hci_copy_identity_address(hdev,
5307 &conn->init_addr_type);
/* Connection established: the connect timeout no longer applies */
5311 cancel_delayed_work(&conn->le_conn_timeout);
5314 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5316 /* Lookup the identity address from the stored connection
5317 * address and address type.
5319 * When establishing connections to an identity address, the
5320 * connection procedure will store the resolvable random
5321 * address first. Now if it can be converted back into the
5322 * identity address, start using the identity address from
5325 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5327 bacpy(&conn->dst, &irk->bdaddr);
5328 conn->dst_type = irk->addr_type;
5331 /* When using controller based address resolution, then the new
5332 * address types 0x02 and 0x03 are used. These types need to be
5333 * converted back into either public address or random address type
5335 if (use_ll_privacy(hdev) &&
5336 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5337 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5338 switch (conn->dst_type) {
5339 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5340 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5342 case ADDR_LE_DEV_RANDOM_RESOLVED:
5343 conn->dst_type = ADDR_LE_DEV_RANDOM;
/* Non-zero status: report the failed connection attempt */
5349 hci_le_conn_failed(conn, status);
5353 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5354 addr_type = BDADDR_LE_PUBLIC;
5356 addr_type = BDADDR_LE_RANDOM;
5358 /* Drop the connection if the device is blocked */
5359 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5360 hci_conn_drop(conn);
5364 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5365 mgmt_device_connected(hdev, conn, NULL, 0);
5367 conn->sec_level = BT_SECURITY_LOW;
5368 conn->handle = handle;
5369 conn->state = BT_CONFIG;
5371 /* Store current advertising instance as connection advertising instance
5372 * when sotfware rotation is in use so it can be re-enabled when
5375 if (!ext_adv_capable(hdev))
5376 conn->adv_instance = hdev->cur_adv_instance;
5378 conn->le_conn_interval = interval;
5379 conn->le_conn_latency = latency;
5380 conn->le_supv_timeout = supervision_timeout;
5382 hci_debugfs_create_conn(conn);
5383 hci_conn_add_sysfs(conn);
5385 /* The remote features procedure is defined for central
5386 * role only. So only in case of an initiated connection
5387 * request the remote features.
5389 * If the local controller supports peripheral-initiated features
5390 * exchange, then requesting the remote features in peripheral
5391 * role is possible. Otherwise just transition into the
5392 * connected state without requesting the remote features.
5395 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5396 struct hci_cp_le_read_remote_features cp;
5398 cp.handle = __cpu_to_le16(conn->handle);
5400 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5403 hci_conn_hold(conn);
/* No feature exchange: go straight to the connected state */
5405 conn->state = BT_CONNECTED;
5406 hci_connect_cfm(conn, status);
/* Remove the satisfied pending-connect entry and release the
 * reference it held on this connection.
 */
5409 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5412 list_del_init(&params->action);
5414 hci_conn_drop(params->conn);
5415 hci_conn_put(params->conn);
5416 params->conn = NULL;
5421 hci_update_background_scan(hdev);
5422 hci_dev_unlock(hdev);
/* LE Connection Complete event: unpack the little-endian parameters
 * and delegate to the common handler (no Local RPA in this event,
 * hence the NULL).
 */
5425 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5427 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5429 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5431 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5432 NULL, ev->role, le16_to_cpu(ev->handle),
5433 le16_to_cpu(ev->interval),
5434 le16_to_cpu(ev->latency),
5435 le16_to_cpu(ev->supervision_timeout));
/* LE Enhanced Connection Complete event: like the legacy event but
 * carries the controller-generated Local RPA, which is forwarded to
 * the common handler. When LL privacy is active, address resolution
 * is disabled afterwards.
 */
5438 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5439 struct sk_buff *skb)
5441 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5443 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5445 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5446 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5447 le16_to_cpu(ev->interval),
5448 le16_to_cpu(ev->latency),
5449 le16_to_cpu(ev->supervision_timeout));
5451 if (use_ll_privacy(hdev) &&
5452 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5453 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5454 hci_req_disable_address_resolution(hdev);
/* LE Advertising Set Terminated event: on error, remove the
 * advertising instance; on success, mark it disabled, remember it on
 * the connection that terminated it, and fix up the responder address
 * for extended advertising using a random address.
 * NOTE(review): this extract omits some original lines (NULL checks,
 * braces, return statements) present in the full file.
 */
5457 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5459 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5460 struct hci_conn *conn;
5461 struct adv_info *adv;
5463 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5465 adv = hci_find_adv_instance(hdev, ev->handle);
5471 /* Remove advertising as it has been terminated */
5472 hci_remove_adv_instance(hdev, ev->handle);
5473 mgmt_advertising_removed(NULL, hdev, ev->handle);
5479 adv->enabled = false;
5481 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5483 /* Store handle in the connection so the correct advertising
5484 * instance can be re-enabled when disconnected.
5486 conn->adv_instance = ev->handle;
/* resp_addr only needs fixing for a still-unset random address */
5488 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5489 bacmp(&conn->resp_addr, BDADDR_ANY))
5493 bacpy(&conn->resp_addr, &hdev->random_addr);
5498 bacpy(&conn->resp_addr, &adv->random_addr);
/* LE Connection Update Complete event: refresh the cached connection
 * interval, latency and supervision timeout on the affected
 * connection.
 */
5502 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5503 struct sk_buff *skb)
5505 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5506 struct hci_conn *conn;
5508 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5515 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5517 conn->le_conn_interval = le16_to_cpu(ev->interval);
5518 conn->le_conn_latency = le16_to_cpu(ev->latency);
5519 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5522 hci_dev_unlock(hdev);
5525 /* This function requires the caller holds hdev->lock */
/* Given an incoming advertisement, decide whether a pending LE
 * connection should be initiated to the advertiser, and if so start
 * it. Returns the new hci_conn (behavior per visible code; full return
 * paths are omitted from this extract).
 */
5526 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5528 u8 addr_type, u8 adv_type,
5529 bdaddr_t *direct_rpa)
5531 struct hci_conn *conn;
5532 struct hci_conn_params *params;
5534 /* If the event is not connectable don't proceed further */
5535 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5538 /* Ignore if the device is blocked */
5539 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5542 /* Most controller will fail if we try to create new connections
5543 * while we have an existing one in peripheral role.
5545 if (hdev->conn_hash.le_num_peripheral > 0 &&
5546 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5547 !(hdev->le_states[3] & 0x10)))
5550 /* If we're not connectable only connect devices that we have in
5551 * our pend_le_conns list.
5553 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
/* Explicit connects bypass the auto-connect policy checks below */
5558 if (!params->explicit_connect) {
5559 switch (params->auto_connect) {
5560 case HCI_AUTO_CONN_DIRECT:
5561 /* Only devices advertising with ADV_DIRECT_IND are
5562 * triggering a connection attempt. This is allowing
5563 * incoming connections from peripheral devices.
5565 if (adv_type != LE_ADV_DIRECT_IND)
5568 case HCI_AUTO_CONN_ALWAYS:
5569 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5570 * are triggering a connection attempt. This means
5571 * that incoming connections from peripheral device are
5572 * accepted and also outgoing connections to peripheral
5573 * devices are established when found.
5581 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5582 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5584 if (!IS_ERR(conn)) {
5585 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5586 * by higher layer that tried to connect, if no then
5587 * store the pointer since we don't really have any
5588 * other owner of the object besides the params that
5589 * triggered it. This way we can abort the connection if
5590 * the parameters get removed and keep the reference
5591 * count consistent once the connection is established.
5594 if (!params->explicit_connect)
5595 params->conn = hci_conn_get(conn);
5600 switch (PTR_ERR(conn)) {
5602 /* If hci_connect() returns -EBUSY it means there is already
5603 * an LE connection attempt going on. Since controllers don't
5604 * support more than one connection attempt at the time, we
5605 * don't consider this an error case.
5609 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single LE advertising report (legacy or extended, as
 * flagged by the trailing ext_adv argument).
 *
 * Responsibilities visible in this body:
 *  - reject unknown PDU types and over-long legacy reports (> 31 bytes);
 *  - trim the report to its actual AD-structure length;
 *  - for directed reports, accept only RPAs that resolve with the local
 *    IRK while HCI_PRIVACY is set;
 *  - resolve the peer RPA to an identity address via hci_get_irk();
 *  - kick a pending LE connection via check_pending_le_conn() and cache
 *    the adv data on the resulting connection for mgmt_device_connected;
 *  - emit mgmt device-found events, merging an ADV_IND/ADV_SCAN_IND
 *    with its following SCAN_RSP through the pending-report cache in
 *    hdev->discovery.
 *
 * NOTE(review): this extract is missing interior lines (switch header,
 * gotos, closing braces) — verify against the full file before relying
 * on exact control flow.
 */
5616 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5617 u8 bdaddr_type, bdaddr_t *direct_addr,
5618 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5621 struct discovery_state *d = &hdev->discovery;
5622 struct smp_irk *irk;
5623 struct hci_conn *conn;
/* Only the PDU types below are processed; anything else is logged
 * (rate-limited) and dropped.
 */
5630 case LE_ADV_DIRECT_IND:
5631 case LE_ADV_SCAN_IND:
5632 case LE_ADV_NONCONN_IND:
5633 case LE_ADV_SCAN_RSP:
5636 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5637 "type: 0x%02x", type);
/* Legacy advertising PDUs carry at most HCI_MAX_AD_LENGTH (31) bytes. */
5641 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5642 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5646 /* Find the end of the data in case the report contains padded zero
5647 * bytes at the end causing an invalid length value.
5649 * When data is NULL, len is 0 so there is no need for extra ptr
5650 * check as 'ptr < data + 0' is already false in such case.
5652 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5653 if (ptr + 1 + *ptr > data + len)
5657 /* Adjust for actual length. This handles the case when remote
5658 * device is advertising with incorrect data length.
5662 /* If the direct address is present, then this report is from
5663 * a LE Direct Advertising Report event. In that case it is
5664 * important to see if the address is matching the local
5665 * controller address.
5668 /* Only resolvable random addresses are valid for these
5669 * kind of reports and others can be ignored.
5671 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5674 /* If the controller is not using resolvable random
5675 * addresses, then this report can be ignored.
5677 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5680 /* If the local IRK of the controller does not match
5681 * with the resolvable random address provided, then
5682 * this report can be ignored.
5684 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5688 /* Check if we need to convert to identity address */
5689 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5691 bdaddr = &irk->bdaddr;
5692 bdaddr_type = irk->addr_type;
5695 /* Check if we have been requested to connect to this device.
5697 * direct_addr is set only for directed advertising reports (it is NULL
5698 * for advertising reports) and is already verified to be RPA above.
5700 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5702 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5703 /* Store report for later inclusion by
5704 * mgmt_device_connected
5706 memcpy(conn->le_adv_data, data, len);
5707 conn->le_adv_data_len = len;
5710 /* Passive scanning shouldn't trigger any device found events,
5711 * except for devices marked as CONN_REPORT for which we do send
5712 * device found events, or advertisement monitoring requested.
5714 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5715 if (type == LE_ADV_DIRECT_IND)
5718 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5719 bdaddr, bdaddr_type) &&
5720 idr_is_empty(&hdev->adv_monitors_idr))
5723 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5724 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5727 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5728 rssi, flags, data, len, NULL, 0);
5732 /* When receiving non-connectable or scannable undirected
5733 * advertising reports, this means that the remote device is
5734 * not connectable and then clearly indicate this in the
5735 * device found event.
5737 * When receiving a scan response, then there is no way to
5738 * know if the remote device is connectable or not. However
5739 * since scan responses are merged with a previously seen
5740 * advertising report, the flags field from that report
5743 * In the really unlikely case that a controller get confused
5744 * and just sends a scan response event, then it is marked as
5745 * not connectable as well.
5747 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5748 type == LE_ADV_SCAN_RSP)
5749 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5753 /* If there's nothing pending either store the data from this
5754 * event or send an immediate device found event if the data
5755 * should not be stored for later.
5757 if (!ext_adv && !has_pending_adv_report(hdev)) {
5758 /* If the report will trigger a SCAN_REQ store it for
5761 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5762 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5763 rssi, flags, data, len);
5767 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5768 rssi, flags, data, len, NULL, 0);
5772 /* Check if the pending report is for the same device as the new one */
5773 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5774 bdaddr_type == d->last_adv_addr_type);
5776 /* If the pending data doesn't match this report or this isn't a
5777 * scan response (e.g. we got a duplicate ADV_IND) then force
5778 * sending of the pending data.
5780 if (type != LE_ADV_SCAN_RSP || !match) {
5781 /* Send out whatever is in the cache, but skip duplicates */
5783 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5784 d->last_adv_addr_type, NULL,
5785 d->last_adv_rssi, d->last_adv_flags,
5787 d->last_adv_data_len, NULL, 0);
5789 /* If the new report will trigger a SCAN_REQ store it for
5792 if (!ext_adv && (type == LE_ADV_IND ||
5793 type == LE_ADV_SCAN_IND)) {
5794 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5795 rssi, flags, data, len);
5799 /* The advertising reports cannot be merged, so clear
5800 * the pending report and send out a device found event.
5802 clear_pending_adv_report(hdev);
5803 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5804 rssi, flags, data, len, NULL, 0);
5808 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5809 * the new event is a SCAN_RSP. We can therefore proceed with
5810 * sending a merged device found event.
5812 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5813 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5814 d->last_adv_data, d->last_adv_data_len, data, len);
5815 clear_pending_adv_report(hdev);
/* HCI LE Advertising Report event handler (legacy scanning).
 *
 * The event carries a count byte followed by a packed list of
 * hci_ev_le_advertising_info reports; each report's RSSI is the single
 * byte that trails its AD data, hence the read at ev->data[ev->length].
 * Each valid report is forwarded to process_adv_report() with
 * ext_adv == false.
 *
 * NOTE(review): num_reports and ev->length come straight from the
 * controller and are not validated against skb->len in this body —
 * confirm whether upstream hardening of this walk applies here.
 */
5818 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5820 u8 num_reports = skb->data[0];
5821 void *ptr = &skb->data[1];
5825 while (num_reports--) {
5826 struct hci_ev_le_advertising_info *ev = ptr;
/* Legacy reports can carry at most HCI_MAX_AD_LENGTH bytes of data. */
5829 if (ev->length <= HCI_MAX_AD_LENGTH) {
5830 rssi = ev->data[ev->length];
5831 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5832 ev->bdaddr_type, NULL, 0, rssi,
5833 ev->data, ev->length, false);
5835 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past the fixed header, the AD data and the trailing RSSI byte. */
5838 ptr += sizeof(*ev) + ev->length + 1;
5841 hci_dev_unlock(hdev);
/* Map an extended-advertising event type bitfield (LE Extended
 * Advertising Report) onto the legacy LE_ADV_* PDU types used by
 * process_adv_report().
 *
 * If the LE_EXT_ADV_LEGACY_PDU flag is set the controller already
 * reports a legacy PDU value and a direct table lookup applies;
 * otherwise the type is derived from the CONN/SCAN/DIRECT flag
 * combination.  Unknown combinations are logged (rate-limited) and
 * reported as LE_ADV_INVALID so the caller can drop the report.
 */
5844 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
5846 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5848 case LE_LEGACY_ADV_IND:
5850 case LE_LEGACY_ADV_DIRECT_IND:
5851 return LE_ADV_DIRECT_IND;
5852 case LE_LEGACY_ADV_SCAN_IND:
5853 return LE_ADV_SCAN_IND;
5854 case LE_LEGACY_NONCONN_IND:
5855 return LE_ADV_NONCONN_IND;
5856 case LE_LEGACY_SCAN_RSP_ADV:
5857 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5858 return LE_ADV_SCAN_RSP;
/* Non-legacy PDU: derive the closest legacy type from the flags. */
5864 if (evt_type & LE_EXT_ADV_CONN_IND) {
5865 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5866 return LE_ADV_DIRECT_IND;
5871 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5872 return LE_ADV_SCAN_RSP;
5874 if (evt_type & LE_EXT_ADV_SCAN_IND)
5875 return LE_ADV_SCAN_IND;
5877 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5878 evt_type & LE_EXT_ADV_DIRECT_IND)
5879 return LE_ADV_NONCONN_IND;
5882 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5885 return LE_ADV_INVALID;
/* HCI LE Extended Advertising Report event handler.
 *
 * Walks the packed report list, translates each extended event type to
 * a legacy LE_ADV_* value via ext_evt_type_to_legacy(), and forwards
 * valid reports to process_adv_report().  ext_adv is passed as true
 * only for genuinely extended PDUs (LE_EXT_ADV_LEGACY_PDU clear).
 * Unlike the legacy handler there is no trailing RSSI byte: RSSI is a
 * fixed field of hci_ev_le_ext_adv_report.
 */
5888 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5890 u8 num_reports = skb->data[0];
5891 void *ptr = &skb->data[1];
5895 while (num_reports--) {
5896 struct hci_ev_le_ext_adv_report *ev = ptr;
5900 evt_type = __le16_to_cpu(ev->evt_type);
5901 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
/* Reports with an unmappable type are silently skipped. */
5902 if (legacy_evt_type != LE_ADV_INVALID) {
5903 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5904 ev->bdaddr_type, NULL, 0, ev->rssi,
5905 ev->data, ev->length,
5906 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
5909 ptr += sizeof(*ev) + ev->length;
5912 hci_dev_unlock(hdev);
/* HCI LE Read Remote Features Complete event handler.
 *
 * Copies the remote LE feature mask into conn->features[0] and, for a
 * connection still in BT_CONFIG, completes connection setup: the
 * connection transitions to BT_CONNECTED, hci_connect_cfm() notifies
 * upper layers and the reference taken for the feature read is dropped.
 */
5915 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5916 struct sk_buff *skb)
5918 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5919 struct hci_conn *conn;
5921 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5925 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5928 memcpy(conn->features[0], ev->features, 8);
5930 if (conn->state == BT_CONFIG) {
5933 /* If the local controller supports peripheral-initiated
5934 * features exchange, but the remote controller does
5935 * not, then it is possible that the error code 0x1a
5936 * for unsupported remote feature gets returned.
5938 * In this specific case, allow the connection to
5939 * transition into connected state and mark it as
5942 if (!conn->out && ev->status == 0x1a &&
5943 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
5946 status = ev->status;
5948 conn->state = BT_CONNECTED;
5949 hci_connect_cfm(conn, status);
5950 hci_conn_drop(conn);
5954 hci_dev_unlock(hdev);
5957 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5959 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5960 struct hci_cp_le_ltk_reply cp;
5961 struct hci_cp_le_ltk_neg_reply neg;
5962 struct hci_conn *conn;
5963 struct smp_ltk *ltk;
5965 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5969 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5973 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5977 if (smp_ltk_is_sc(ltk)) {
5978 /* With SC both EDiv and Rand are set to zero */
5979 if (ev->ediv || ev->rand)
5982 /* For non-SC keys check that EDiv and Rand match */
5983 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5987 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5988 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5989 cp.handle = cpu_to_le16(conn->handle);
5991 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5993 conn->enc_key_size = ltk->enc_size;
5995 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5997 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5998 * temporary key used to encrypt a connection following
5999 * pairing. It is used during the Encrypted Session Setup to
6000 * distribute the keys. Later, security can be re-established
6001 * using a distributed LTK.
6003 if (ltk->type == SMP_STK) {
6004 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6005 list_del_rcu(<k->list);
6006 kfree_rcu(ltk, rcu);
6008 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6011 hci_dev_unlock(hdev);
6016 neg.handle = ev->handle;
6017 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6018 hci_dev_unlock(hdev);
/* Helper: send HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY for the given
 * connection handle, rejecting a remote connection-parameter request.
 *
 * NOTE(review): the second parameter (the rejection reason, copied
 * into cp) is on a line missing from this extract — confirm against
 * the full file.
 */
6021 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6024 struct hci_cp_le_conn_param_req_neg_reply cp;
6026 cp.handle = cpu_to_le16(handle);
6029 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* HCI LE Remote Connection Parameter Request event handler.
 *
 * Rejects the request with a negative reply when the handle is unknown
 * / not connected (HCI_ERROR_UNKNOWN_CONN_ID) or when the proposed
 * parameters fail hci_check_conn_params()
 * (HCI_ERROR_INVALID_LL_PARAMS).  Otherwise, when we are central, the
 * accepted parameters are cached on the matching hci_conn_params entry
 * and reported to userspace via mgmt_new_conn_param(), and the request
 * is accepted by echoing the peer's values back in
 * HCI_OP_LE_CONN_PARAM_REQ_REPLY.
 */
6033 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
6034 struct sk_buff *skb)
6036 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
6037 struct hci_cp_le_conn_param_req_reply cp;
6038 struct hci_conn *hcon;
6039 u16 handle, min, max, latency, timeout;
6041 handle = le16_to_cpu(ev->handle);
6042 min = le16_to_cpu(ev->interval_min);
6043 max = le16_to_cpu(ev->interval_max);
6044 latency = le16_to_cpu(ev->latency);
6045 timeout = le16_to_cpu(ev->timeout);
6047 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6048 if (!hcon || hcon->state != BT_CONNECTED)
6049 return send_conn_param_neg_reply(hdev, handle,
6050 HCI_ERROR_UNKNOWN_CONN_ID);
6052 if (hci_check_conn_params(min, max, latency, timeout))
6053 return send_conn_param_neg_reply(hdev, handle,
6054 HCI_ERROR_INVALID_LL_PARAMS);
6056 if (hcon->role == HCI_ROLE_MASTER) {
6057 struct hci_conn_params *params;
6062 params = hci_conn_params_lookup(hdev, &hcon->dst,
/* Remember the negotiated parameters for future reconnections. */
6065 params->conn_min_interval = min;
6066 params->conn_max_interval = max;
6067 params->conn_latency = latency;
6068 params->supervision_timeout = timeout;
6074 hci_dev_unlock(hdev);
6076 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6077 store_hint, min, max, latency, timeout);
/* Accept: echo the requested values back to the controller as-is
 * (fields are already little-endian in the event).
 */
6080 cp.handle = ev->handle;
6081 cp.interval_min = ev->interval_min;
6082 cp.interval_max = ev->interval_max;
6083 cp.latency = ev->latency;
6084 cp.timeout = ev->timeout;
6088 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* HCI LE Direct Advertising Report event handler.
 *
 * Direct reports carry no AD payload (data/len passed as NULL/0) but
 * do carry the direct (target) address, which process_adv_report()
 * verifies resolves to this controller.  Unlike the legacy adv
 * handler, the report count here is validated against skb->len before
 * the walk.
 */
6091 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
6092 struct sk_buff *skb)
6094 u8 num_reports = skb->data[0];
6095 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
6097 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6102 for (; num_reports; num_reports--, ev++)
6103 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6104 ev->bdaddr_type, &ev->direct_addr,
6105 ev->direct_addr_type, ev->rssi, NULL, 0,
6108 hci_dev_unlock(hdev);
/* HCI LE PHY Update Complete event handler.
 *
 * Records the newly negotiated TX/RX PHYs on the connection object so
 * they can be reported (e.g. via debugfs/mgmt) later.
 */
6111 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6113 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6114 struct hci_conn *conn;
6116 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6123 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6127 conn->le_tx_phy = ev->tx_phy;
6128 conn->le_rx_phy = ev->rx_phy;
6131 hci_dev_unlock(hdev);
/* HCI LE Meta event demultiplexer.
 *
 * Strips the one-byte subevent header and dispatches the remaining
 * parameters to the matching LE subevent handler.  Unhandled
 * subevents fall through the switch and are ignored.
 */
6134 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
6136 struct hci_ev_le_meta *le_ev = (void *) skb->data;
6138 skb_pull(skb, sizeof(*le_ev));
6140 switch (le_ev->subevent) {
6141 case HCI_EV_LE_CONN_COMPLETE:
6142 hci_le_conn_complete_evt(hdev, skb);
6145 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6146 hci_le_conn_update_complete_evt(hdev, skb);
6149 case HCI_EV_LE_ADVERTISING_REPORT:
6150 hci_le_adv_report_evt(hdev, skb);
6153 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6154 hci_le_remote_feat_complete_evt(hdev, skb);
6157 case HCI_EV_LE_LTK_REQ:
6158 hci_le_ltk_request_evt(hdev, skb);
6161 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6162 hci_le_remote_conn_param_req_evt(hdev, skb);
6165 case HCI_EV_LE_DIRECT_ADV_REPORT:
6166 hci_le_direct_adv_report_evt(hdev, skb);
6169 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6170 hci_le_phy_update_evt(hdev, skb);
6173 case HCI_EV_LE_EXT_ADV_REPORT:
6174 hci_le_ext_adv_report_evt(hdev, skb);
6177 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6178 hci_le_enh_conn_complete_evt(hdev, skb);
6181 case HCI_EV_LE_EXT_ADV_SET_TERM:
6182 hci_le_ext_adv_term_evt(hdev, skb);
/* Validate that skb holds the Command Complete event for the given
 * opcode and strip its headers so skb->data points at the return
 * parameters.
 *
 * Returns false (with an error log) for malformed/truncated events.
 * A Command Status event is treated as success-with-no-parameters,
 * since no extra return parameters exist in that case.  Used by
 * hci_event_packet() before invoking a req_complete_skb callback.
 */
6190 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6191 u8 event, struct sk_buff *skb)
6193 struct hci_ev_cmd_complete *ev;
6194 struct hci_event_hdr *hdr;
6199 if (skb->len < sizeof(*hdr)) {
6200 bt_dev_err(hdev, "too short HCI event");
6204 hdr = (void *) skb->data;
6205 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* If a specific event was awaited, any matching event completes it. */
6208 if (hdr->evt != event)
6213 /* Check if request ended in Command Status - no way to retrieve
6214 * any extra parameters in this case.
6216 if (hdr->evt == HCI_EV_CMD_STATUS)
6219 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6220 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6225 if (skb->len < sizeof(*ev)) {
6226 bt_dev_err(hdev, "too short cmd_complete event");
6230 ev = (void *) skb->data;
6231 skb_pull(skb, sizeof(*ev));
6233 if (opcode != __le16_to_cpu(ev->opcode)) {
6234 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6235 __le16_to_cpu(ev->opcode));
6242 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6243 struct sk_buff *skb)
6245 struct hci_ev_le_advertising_info *adv;
6246 struct hci_ev_le_direct_adv_info *direct_adv;
6247 struct hci_ev_le_ext_adv_report *ext_adv;
6248 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6249 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6253 /* If we are currently suspended and this is the first BT event seen,
6254 * save the wake reason associated with the event.
6256 if (!hdev->suspended || hdev->wake_reason)
6259 /* Default to remote wake. Values for wake_reason are documented in the
6260 * Bluez mgmt api docs.
6262 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6264 /* Once configured for remote wakeup, we should only wake up for
6265 * reconnections. It's useful to see which device is waking us up so
6266 * keep track of the bdaddr of the connection event that woke us up.
6268 if (event == HCI_EV_CONN_REQUEST) {
6269 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6270 hdev->wake_addr_type = BDADDR_BREDR;
6271 } else if (event == HCI_EV_CONN_COMPLETE) {
6272 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6273 hdev->wake_addr_type = BDADDR_BREDR;
6274 } else if (event == HCI_EV_LE_META) {
6275 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6276 u8 subevent = le_ev->subevent;
6277 u8 *ptr = &skb->data[sizeof(*le_ev)];
6278 u8 num_reports = *ptr;
6280 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6281 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6282 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6284 adv = (void *)(ptr + 1);
6285 direct_adv = (void *)(ptr + 1);
6286 ext_adv = (void *)(ptr + 1);
6289 case HCI_EV_LE_ADVERTISING_REPORT:
6290 bacpy(&hdev->wake_addr, &adv->bdaddr);
6291 hdev->wake_addr_type = adv->bdaddr_type;
6293 case HCI_EV_LE_DIRECT_ADV_REPORT:
6294 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6295 hdev->wake_addr_type = direct_adv->bdaddr_type;
6297 case HCI_EV_LE_EXT_ADV_REPORT:
6298 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6299 hdev->wake_addr_type = ext_adv->bdaddr_type;
6304 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6308 hci_dev_unlock(hdev);
6311 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6313 struct hci_event_hdr *hdr = (void *) skb->data;
6314 hci_req_complete_t req_complete = NULL;
6315 hci_req_complete_skb_t req_complete_skb = NULL;
6316 struct sk_buff *orig_skb = NULL;
6317 u8 status = 0, event = hdr->evt, req_evt = 0;
6318 u16 opcode = HCI_OP_NOP;
6321 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6325 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6326 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6327 opcode = __le16_to_cpu(cmd_hdr->opcode);
6328 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6333 /* If it looks like we might end up having to call
6334 * req_complete_skb, store a pristine copy of the skb since the
6335 * various handlers may modify the original one through
6336 * skb_pull() calls, etc.
6338 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6339 event == HCI_EV_CMD_COMPLETE)
6340 orig_skb = skb_clone(skb, GFP_KERNEL);
6342 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6344 /* Store wake reason if we're suspended */
6345 hci_store_wake_reason(hdev, event, skb);
6348 case HCI_EV_INQUIRY_COMPLETE:
6349 hci_inquiry_complete_evt(hdev, skb);
6352 case HCI_EV_INQUIRY_RESULT:
6353 hci_inquiry_result_evt(hdev, skb);
6356 case HCI_EV_CONN_COMPLETE:
6357 hci_conn_complete_evt(hdev, skb);
6360 case HCI_EV_CONN_REQUEST:
6361 hci_conn_request_evt(hdev, skb);
6364 case HCI_EV_DISCONN_COMPLETE:
6365 hci_disconn_complete_evt(hdev, skb);
6368 case HCI_EV_AUTH_COMPLETE:
6369 hci_auth_complete_evt(hdev, skb);
6372 case HCI_EV_REMOTE_NAME:
6373 hci_remote_name_evt(hdev, skb);
6376 case HCI_EV_ENCRYPT_CHANGE:
6377 hci_encrypt_change_evt(hdev, skb);
6380 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6381 hci_change_link_key_complete_evt(hdev, skb);
6384 case HCI_EV_REMOTE_FEATURES:
6385 hci_remote_features_evt(hdev, skb);
6388 case HCI_EV_CMD_COMPLETE:
6389 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6390 &req_complete, &req_complete_skb);
6393 case HCI_EV_CMD_STATUS:
6394 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6398 case HCI_EV_HARDWARE_ERROR:
6399 hci_hardware_error_evt(hdev, skb);
6402 case HCI_EV_ROLE_CHANGE:
6403 hci_role_change_evt(hdev, skb);
6406 case HCI_EV_NUM_COMP_PKTS:
6407 hci_num_comp_pkts_evt(hdev, skb);
6410 case HCI_EV_MODE_CHANGE:
6411 hci_mode_change_evt(hdev, skb);
6414 case HCI_EV_PIN_CODE_REQ:
6415 hci_pin_code_request_evt(hdev, skb);
6418 case HCI_EV_LINK_KEY_REQ:
6419 hci_link_key_request_evt(hdev, skb);
6422 case HCI_EV_LINK_KEY_NOTIFY:
6423 hci_link_key_notify_evt(hdev, skb);
6426 case HCI_EV_CLOCK_OFFSET:
6427 hci_clock_offset_evt(hdev, skb);
6430 case HCI_EV_PKT_TYPE_CHANGE:
6431 hci_pkt_type_change_evt(hdev, skb);
6434 case HCI_EV_PSCAN_REP_MODE:
6435 hci_pscan_rep_mode_evt(hdev, skb);
6438 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6439 hci_inquiry_result_with_rssi_evt(hdev, skb);
6442 case HCI_EV_REMOTE_EXT_FEATURES:
6443 hci_remote_ext_features_evt(hdev, skb);
6446 case HCI_EV_SYNC_CONN_COMPLETE:
6447 hci_sync_conn_complete_evt(hdev, skb);
6450 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6451 hci_extended_inquiry_result_evt(hdev, skb);
6454 case HCI_EV_KEY_REFRESH_COMPLETE:
6455 hci_key_refresh_complete_evt(hdev, skb);
6458 case HCI_EV_IO_CAPA_REQUEST:
6459 hci_io_capa_request_evt(hdev, skb);
6462 case HCI_EV_IO_CAPA_REPLY:
6463 hci_io_capa_reply_evt(hdev, skb);
6466 case HCI_EV_USER_CONFIRM_REQUEST:
6467 hci_user_confirm_request_evt(hdev, skb);
6470 case HCI_EV_USER_PASSKEY_REQUEST:
6471 hci_user_passkey_request_evt(hdev, skb);
6474 case HCI_EV_USER_PASSKEY_NOTIFY:
6475 hci_user_passkey_notify_evt(hdev, skb);
6478 case HCI_EV_KEYPRESS_NOTIFY:
6479 hci_keypress_notify_evt(hdev, skb);
6482 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6483 hci_simple_pair_complete_evt(hdev, skb);
6486 case HCI_EV_REMOTE_HOST_FEATURES:
6487 hci_remote_host_features_evt(hdev, skb);
6490 case HCI_EV_LE_META:
6491 hci_le_meta_evt(hdev, skb);
6494 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6495 hci_remote_oob_data_request_evt(hdev, skb);
6498 #if IS_ENABLED(CONFIG_BT_HS)
6499 case HCI_EV_CHANNEL_SELECTED:
6500 hci_chan_selected_evt(hdev, skb);
6503 case HCI_EV_PHY_LINK_COMPLETE:
6504 hci_phy_link_complete_evt(hdev, skb);
6507 case HCI_EV_LOGICAL_LINK_COMPLETE:
6508 hci_loglink_complete_evt(hdev, skb);
6511 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6512 hci_disconn_loglink_complete_evt(hdev, skb);
6515 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6516 hci_disconn_phylink_complete_evt(hdev, skb);
6520 case HCI_EV_NUM_COMP_BLOCKS:
6521 hci_num_comp_blocks_evt(hdev, skb);
6525 msft_vendor_evt(hdev, skb);
6529 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6534 req_complete(hdev, status, opcode);
6535 } else if (req_complete_skb) {
6536 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6537 kfree_skb(orig_skb);
6540 req_complete_skb(hdev, status, opcode, orig_skb);
6544 kfree_skb(orig_skb);
6546 hdev->stat.evt_rx++;