2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 "\x00\x00\x00\x00\x00\x00\x00\x00"
43 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45 /* Handle HCI Event packets */
/* NOTE(review): this is a numbered dump with elided lines (gaps in the
 * embedded numbering). Braces, early-return status checks and lock calls
 * are missing from view; code is kept byte-identical, comments added only.
 */
/* Command Complete handler for HCI_Inquiry_Cancel: clears the HCI_INQUIRY
 * flag, wakes any waiters on it, and (unless an LE active scan is running)
 * moves discovery to DISCOVERY_STOPPED. Status 0x0c (Command Disallowed) is
 * tolerated when inquiry already ended — see the comment block below.
 */
47 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%2.2x", hdev->name, status);
54 /* It is possible that we receive Inquiry Complete event right
55 * before we receive Inquiry Cancel Command Complete event, in
56 * which case the latter event should have status of Command
57 * Disallowed (0x0c). This should not be treated as error, since
58 * we actually achieve what Inquiry Cancel wants to achieve,
59 * which is to end the last Inquiry session.
61 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
62 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
/* Inquiry is over: clear the flag and wake up anyone sleeping on it.
 * The barrier pairs with the waiter-side check in wait_on_bit().
 */
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
73 wake_up_bit(&hdev->flags, HCI_INQUIRY);
76 /* Set discovery state to stopped if we're not doing LE active
79 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
80 hdev->le_scan_type != LE_SCAN_ACTIVE)
81 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Resume any connection attempts that were deferred during inquiry. */
84 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Periodic_Inquiry_Mode: marks periodic inquiry
 * as active. NOTE(review): the status early-return is elided in this dump.
 */
87 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 __u8 status = *((__u8 *) skb->data);
91 BT_DBG("%s status 0x%2.2x", hdev->name, status);
96 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete for HCI_Exit_Periodic_Inquiry_Mode: clears the periodic
 * inquiry flag and re-checks pending connections.
 */
99 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 __u8 status = *((__u8 *) skb->data);
103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
108 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Remote_Name_Request_Cancel: debug-log only in
 * the visible code. NOTE(review): dump elides lines; body may be larger.
 */
113 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
116 BT_DBG("%s", hdev->name);
/* Command Complete for HCI_Role_Discovery: records the reported role
 * (central/peripheral) on the matching connection object, found by handle.
 */
119 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 struct hci_rp_role_discovery *rp = (void *) skb->data;
122 struct hci_conn *conn;
124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 conn->role = rp->role;
135 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Link_Policy_Settings: caches the policy
 * bitmask on the connection identified by the returned handle.
 */
138 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 struct hci_rp_read_link_policy *rp = (void *) skb->data;
141 struct hci_conn *conn;
143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = __le16_to_cpu(rp->policy);
154 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Link_Policy_Settings: the written value is
 * not echoed in the response, so it is recovered from the sent command
 * payload (policy field sits 2 bytes past the handle — hence "sent + 2").
 */
157 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_write_link_policy *rp = (void *) skb->data;
160 struct hci_conn *conn;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
174 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
176 conn->link_policy = get_unaligned_le16(sent + 2);
178 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Default_Link_Policy_Settings: caches the
 * controller-wide default policy on hdev.
 */
181 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
184 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
186 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
191 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for HCI_Write_Default_Link_Policy_Settings: mirrors the
 * value from the originating command buffer into hdev->link_policy.
 */
194 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
197 __u8 status = *((__u8 *) skb->data);
200 BT_DBG("%s status 0x%2.2x", hdev->name, status);
205 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
209 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: drops the in-flight reset bit, wipes all
 * volatile device state (flags, discovery, adv/scan-response data, scan
 * type, SSP debug mode) and clears the LE accept and resolving lists, so
 * the host view matches the freshly reset controller.
 */
212 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
214 __u8 status = *((__u8 *) skb->data);
216 BT_DBG("%s status 0x%2.2x", hdev->name, status);
218 clear_bit(HCI_RESET, &hdev->flags);
223 /* Reset all non-persistent flags */
224 hci_dev_clear_volatile_flags(hdev);
226 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* TX power values are unknown again until re-read from the controller. */
228 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
229 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
231 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
232 hdev->adv_data_len = 0;
234 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
235 hdev->scan_rsp_data_len = 0;
237 hdev->le_scan_type = LE_SCAN_PASSIVE;
239 hdev->ssp_debug_mode = 0;
241 hci_bdaddr_list_clear(&hdev->le_accept_list);
242 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for HCI_Read_Stored_Link_Key: only when the original
 * request asked for all keys (read_all == 0x01) and succeeded does the
 * handler cache the controller's max/current stored-key counts.
 */
245 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
249 struct hci_cp_read_stored_link_key *sent;
251 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
253 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
257 if (!rp->status && sent->read_all == 0x01) {
258 hdev->stored_max_keys = rp->max_keys;
259 hdev->stored_num_keys = rp->num_keys;
/* Command Complete for HCI_Delete_Stored_Link_Key: decrements the cached
 * stored-key count, clamping at zero to avoid underflow.
 */
263 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
266 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
268 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
273 if (rp->num_keys <= hdev->stored_num_keys)
274 hdev->stored_num_keys -= rp->num_keys;
276 hdev->stored_num_keys = 0;
/* Command Complete for HCI_Write_Local_Name: notifies mgmt when the
 * management interface is active and mirrors the sent name into
 * hdev->dev_name. NOTE(review): dump elides lines, including the condition
 * guarding the memcpy — presumably a !status check; verify upstream.
 */
279 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
281 __u8 status = *((__u8 *) skb->data);
284 BT_DBG("%s status 0x%2.2x", hdev->name, status);
286 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
292 if (hci_dev_test_flag(hdev, HCI_MGMT))
293 mgmt_set_local_name_complete(hdev, sent, status);
295 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
297 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Name: caches the controller name,
 * but only while the device is still in SETUP or CONFIG.
 */
300 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
302 struct hci_rp_read_local_name *rp = (void *) skb->data;
304 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
309 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
310 hci_dev_test_flag(hdev, HCI_CONFIG))
311 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for HCI_Write_Authentication_Enable: updates the
 * HCI_AUTH flag from the parameter that was sent and informs mgmt.
 */
314 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
316 __u8 status = *((__u8 *) skb->data);
319 BT_DBG("%s status 0x%2.2x", hdev->name, status);
321 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
328 __u8 param = *((__u8 *) sent);
330 if (param == AUTH_ENABLED)
331 set_bit(HCI_AUTH, &hdev->flags);
333 clear_bit(HCI_AUTH, &hdev->flags);
336 if (hci_dev_test_flag(hdev, HCI_MGMT))
337 mgmt_auth_enable_complete(hdev, status);
339 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Encryption_Mode: sets or clears the
 * HCI_ENCRYPT flag from the sent parameter. NOTE(review): the branch
 * condition on param is elided in this dump.
 */
342 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
344 __u8 status = *((__u8 *) skb->data);
348 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
357 param = *((__u8 *) sent);
360 set_bit(HCI_ENCRYPT, &hdev->flags);
362 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete for HCI_Write_Scan_Enable: translates the sent scan
 * bitmask into the HCI_ISCAN (inquiry scan) and HCI_PSCAN (page scan)
 * flags; also zeroes discov_timeout on the visible path. NOTE(review):
 * the conditions around discov_timeout are elided in this dump.
 */
365 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
367 __u8 status = *((__u8 *) skb->data);
371 BT_DBG("%s status 0x%2.2x", hdev->name, status);
373 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
377 param = *((__u8 *) sent);
382 hdev->discov_timeout = 0;
386 if (param & SCAN_INQUIRY)
387 set_bit(HCI_ISCAN, &hdev->flags);
389 clear_bit(HCI_ISCAN, &hdev->flags);
391 if (param & SCAN_PAGE)
392 set_bit(HCI_PSCAN, &hdev->flags);
394 clear_bit(HCI_PSCAN, &hdev->flags);
397 hci_dev_unlock(hdev);
/* Command Complete for HCI_Set_Event_Filter: tracks whether any event
 * filter is currently configured via HCI_EVENT_FILTER_CONFIGURED, cleared
 * when the sent filter type was CLEAR_ALL.
 */
400 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
402 __u8 status = *((__u8 *)skb->data);
403 struct hci_cp_set_event_filter *cp;
406 BT_DBG("%s status 0x%2.2x", hdev->name, status);
411 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
415 cp = (struct hci_cp_set_event_filter *)sent;
417 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
418 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
420 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete for HCI_Read_Class_of_Device: caches the 3-byte class
 * of device (stored little-endian; logged most-significant byte first).
 */
423 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
425 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
427 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
432 memcpy(hdev->dev_class, rp->dev_class, 3);
434 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
435 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for HCI_Write_Class_of_Device: mirrors the sent class
 * into hdev->dev_class and notifies mgmt when active.
 */
438 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
440 __u8 status = *((__u8 *) skb->data);
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
452 memcpy(hdev->dev_class, sent, 3);
454 if (hci_dev_test_flag(hdev, HCI_MGMT))
455 mgmt_set_class_of_dev_complete(hdev, sent, status);
457 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Voice_Setting: caches the 16-bit voice
 * setting and notifies the driver via hdev->notify if it changed.
 * NOTE(review): dump elides the early return when the value is unchanged
 * and the NULL check on hdev->notify — verify against upstream.
 */
460 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
462 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
465 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
470 setting = __le16_to_cpu(rp->voice_setting);
472 if (hdev->voice_setting == setting)
475 hdev->voice_setting = setting;
477 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
480 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Write_Voice_Setting: same caching/notify logic,
 * but the new value comes from the originating command buffer.
 */
483 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
486 __u8 status = *((__u8 *) skb->data);
490 BT_DBG("%s status 0x%2.2x", hdev->name, status);
495 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
499 setting = get_unaligned_le16(sent);
501 if (hdev->voice_setting == setting)
504 hdev->voice_setting = setting;
506 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
509 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Read_Number_Of_Supported_IAC: caches how many
 * inquiry access codes the controller supports simultaneously.
 */
512 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
515 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
522 hdev->num_iac = rp->num_iac;
524 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete for HCI_Write_Simple_Pairing_Mode: keeps the host SSP
 * feature bit and the HCI_SSP_ENABLED flag in sync with the sent mode,
 * and reports the result to mgmt when active.
 */
527 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
529 __u8 status = *((__u8 *) skb->data);
530 struct hci_cp_write_ssp_mode *sent;
532 BT_DBG("%s status 0x%2.2x", hdev->name, status);
534 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
542 hdev->features[1][0] |= LMP_HOST_SSP;
544 hdev->features[1][0] &= ~LMP_HOST_SSP;
547 if (hci_dev_test_flag(hdev, HCI_MGMT))
548 mgmt_ssp_enable_complete(hdev, sent->mode, status);
551 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
553 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
556 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Secure_Connections_Host_Support: updates
 * the host SC feature bit; HCI_SC_ENABLED is only toggled directly here
 * when mgmt is NOT active (mgmt owns the flag otherwise) and the command
 * succeeded.
 */
559 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
561 u8 status = *((u8 *) skb->data);
562 struct hci_cp_write_sc_support *sent;
564 BT_DBG("%s status 0x%2.2x", hdev->name, status);
566 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
574 hdev->features[1][0] |= LMP_HOST_SC;
576 hdev->features[1][0] &= ~LMP_HOST_SC;
579 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
581 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
583 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
586 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Version_Information: caches HCI/LMP
 * versions and manufacturer, but only during SETUP/CONFIG so a runtime
 * re-read cannot silently change identity fields.
 */
589 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 struct hci_rp_read_local_version *rp = (void *) skb->data;
593 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
598 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
599 hci_dev_test_flag(hdev, HCI_CONFIG)) {
600 hdev->hci_ver = rp->hci_ver;
601 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
602 hdev->lmp_ver = rp->lmp_ver;
603 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
604 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for HCI_Read_Local_Supported_Commands: caches the
 * supported-commands bitmap, again only during SETUP/CONFIG.
 */
608 static void hci_cc_read_local_commands(struct hci_dev *hdev,
611 struct hci_rp_read_local_commands *rp = (void *) skb->data;
613 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
618 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
619 hci_dev_test_flag(hdev, HCI_CONFIG))
620 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for HCI_Read_Authenticated_Payload_Timeout: stores the
 * reported timeout on the connection identified by the handle.
 */
623 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
626 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
627 struct hci_conn *conn;
629 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
638 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
640 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Authenticated_Payload_Timeout: value is
 * taken from the sent command (timeout field follows the 2-byte handle,
 * hence "sent + 2") since the response does not echo it.
 */
643 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
646 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
647 struct hci_conn *conn;
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
661 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
663 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
665 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Supported_Features: caches page 0 of
 * the LMP feature mask and derives the supported ACL packet types and
 * (e)SCO link types from individual feature bits.
 */
668 static void hci_cc_read_local_features(struct hci_dev *hdev,
671 struct hci_rp_read_local_features *rp = (void *) skb->data;
673 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
678 memcpy(hdev->features, rp->features, 8);
680 /* Adjust default settings according to features
681 * supported by device. */
683 if (hdev->features[0][0] & LMP_3SLOT)
684 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
686 if (hdev->features[0][0] & LMP_5SLOT)
687 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
689 if (hdev->features[0][1] & LMP_HV2) {
690 hdev->pkt_type |= (HCI_HV2);
691 hdev->esco_type |= (ESCO_HV2);
694 if (hdev->features[0][1] & LMP_HV3) {
695 hdev->pkt_type |= (HCI_HV3);
696 hdev->esco_type |= (ESCO_HV3);
/* eSCO and EDR eSCO packet types each require their own feature bits. */
699 if (lmp_esco_capable(hdev))
700 hdev->esco_type |= (ESCO_EV3);
702 if (hdev->features[0][4] & LMP_EV4)
703 hdev->esco_type |= (ESCO_EV4);
705 if (hdev->features[0][4] & LMP_EV5)
706 hdev->esco_type |= (ESCO_EV5);
708 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
709 hdev->esco_type |= (ESCO_2EV3);
711 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
712 hdev->esco_type |= (ESCO_3EV3);
714 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
715 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for HCI_Read_Local_Extended_Features: tracks the
 * highest feature page seen and caches the returned page (bounded by
 * HCI_MAX_PAGES to avoid writing past the features array).
 */
718 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
721 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
723 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
728 if (hdev->max_page < rp->max_page)
729 hdev->max_page = rp->max_page;
731 if (rp->page < HCI_MAX_PAGES)
732 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for HCI_Read_Flow_Control_Mode: caches the ACL flow
 * control mode (packet-based vs block-based).
 */
735 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
738 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
740 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
745 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for HCI_Read_Buffer_Size: caches ACL/SCO MTUs and
 * packet counts and initializes the free-packet counters. NOTE(review):
 * the HCI_QUIRK_FIXUP_BUFFER_SIZE adjustment body is elided in this dump.
 */
748 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
750 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
752 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
757 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
758 hdev->sco_mtu = rp->sco_mtu;
759 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
760 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
762 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
767 hdev->acl_cnt = hdev->acl_pkts;
768 hdev->sco_cnt = hdev->sco_pkts;
770 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
771 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for HCI_Read_BD_ADDR: records the controller address
 * during init, and additionally remembers it as the original setup address
 * while in the SETUP stage.
 */
774 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
776 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
778 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
783 if (test_bit(HCI_INIT, &hdev->flags))
784 bacpy(&hdev->bdaddr, &rp->bdaddr);
786 if (hci_dev_test_flag(hdev, HCI_SETUP))
787 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for HCI_Read_Local_Pairing_Options: caches pairing
 * options and max encryption key size during SETUP/CONFIG only.
 */
790 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
793 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
795 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
800 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
801 hci_dev_test_flag(hdev, HCI_CONFIG)) {
802 hdev->pairing_opts = rp->pairing_opts;
803 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete for HCI_Read_Page_Scan_Activity: caches interval and
 * window, only while the device is initializing.
 */
807 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
810 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
812 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
817 if (test_bit(HCI_INIT, &hdev->flags)) {
818 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
819 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for HCI_Write_Page_Scan_Activity: mirrors the sent
 * interval/window into hdev.
 */
823 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
826 u8 status = *((u8 *) skb->data);
827 struct hci_cp_write_page_scan_activity *sent;
829 BT_DBG("%s status 0x%2.2x", hdev->name, status);
834 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
838 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
839 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for HCI_Read_Page_Scan_Type: caches the scan type
 * during init only.
 */
842 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
845 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
847 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
852 if (test_bit(HCI_INIT, &hdev->flags))
853 hdev->page_scan_type = rp->type;
/* Command Complete for HCI_Write_Page_Scan_Type: mirrors the sent type
 * back into hdev (the single-byte parameter is the type itself).
 */
856 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
859 u8 status = *((u8 *) skb->data);
862 BT_DBG("%s status 0x%2.2x", hdev->name, status);
867 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
869 hdev->page_scan_type = *type;
/* Command Complete for HCI_Read_Data_Block_Size (block-based flow
 * control): caches block MTU/length/count and seeds the free-block count.
 */
872 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
875 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
877 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
882 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
883 hdev->block_len = __le16_to_cpu(rp->block_len);
884 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
886 hdev->block_cnt = hdev->num_blocks;
888 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
889 hdev->block_cnt, hdev->block_len);
/* Command Complete for HCI_Read_Clock: explicitly validates the response
 * length, then stores either the local clock (which == 0x00) on hdev or
 * the piconet clock and accuracy on the matching connection.
 */
892 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
894 struct hci_rp_read_clock *rp = (void *) skb->data;
895 struct hci_cp_read_clock *cp;
896 struct hci_conn *conn;
898 BT_DBG("%s", hdev->name);
900 if (skb->len < sizeof(*rp))
908 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
912 if (cp->which == 0x00) {
913 hdev->clock = le32_to_cpu(rp->clock);
917 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
919 conn->clock = le32_to_cpu(rp->clock);
920 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
924 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_AMP_Info: caches the full set of
 * AMP controller capabilities (status, bandwidth, latency, PDU sizes,
 * flush timeouts) on hdev.
 */
927 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
930 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
932 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
937 hdev->amp_status = rp->amp_status;
938 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
939 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
940 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
941 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
942 hdev->amp_type = rp->amp_type;
943 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
944 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
945 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
946 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete for HCI_Read_Inquiry_Response_TX_Power_Level. */
949 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
952 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
954 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
959 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for HCI_Read_Default_Erroneous_Data_Reporting. */
962 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
965 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
967 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
972 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete for HCI_Write_Default_Erroneous_Data_Reporting:
 * mirrors the value from the sent command into hdev.
 */
975 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
978 __u8 status = *((__u8 *)skb->data);
979 struct hci_cp_write_def_err_data_reporting *cp;
981 BT_DBG("%s status 0x%2.2x", hdev->name, status);
986 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
990 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete for HCI_PIN_Code_Request_Reply: forwards the result to
 * mgmt and caches the PIN length on the connection so it can later be
 * attached to the generated link key.
 */
993 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
995 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
996 struct hci_cp_pin_code_reply *cp;
997 struct hci_conn *conn;
999 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1003 if (hci_dev_test_flag(hdev, HCI_MGMT))
1004 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1009 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1015 conn->pin_length = cp->pin_len;
1018 hci_dev_unlock(hdev);
/* Command Complete for HCI_PIN_Code_Request_Negative_Reply: mgmt
 * notification only.
 */
1021 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1023 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1025 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1029 if (hci_dev_test_flag(hdev, HCI_MGMT))
1030 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1033 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Buffer_Size: caches the LE ACL MTU and
 * packet count and seeds the LE free-packet counter.
 */
1036 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1037 struct sk_buff *skb)
1039 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1041 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1047 hdev->le_pkts = rp->le_max_pkt;
1049 hdev->le_cnt = hdev->le_pkts;
1051 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for HCI_LE_Read_Local_Supported_Features. */
1054 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1055 struct sk_buff *skb)
1057 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1059 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1064 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for HCI_LE_Read_Advertising_Channel_TX_Power. */
1067 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1070 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1072 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1077 hdev->adv_tx_power = rp->tx_power;
/* Command Complete for HCI_User_Confirmation_Request_Reply: forwards the
 * outcome to mgmt. The four handlers below are structurally identical,
 * differing only in which mgmt completion they invoke; note they all reuse
 * struct hci_rp_user_confirm_reply since the response layouts match.
 */
1080 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1082 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1084 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1088 if (hci_dev_test_flag(hdev, HCI_MGMT))
1089 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1092 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Confirmation_Request_Negative_Reply. */
1095 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1096 struct sk_buff *skb)
1098 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1100 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1104 if (hci_dev_test_flag(hdev, HCI_MGMT))
1105 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1106 ACL_LINK, 0, rp->status);
1108 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Reply. */
1111 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1113 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1119 if (hci_dev_test_flag(hdev, HCI_MGMT))
1120 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1123 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Negative_Reply. */
1126 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1127 struct sk_buff *skb)
1129 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1131 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1135 if (hci_dev_test_flag(hdev, HCI_MGMT))
1136 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1137 ACL_LINK, 0, rp->status);
1139 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_OOB_Data: only a debug log is
 * visible here. NOTE(review): dump elides lines — the mgmt hand-off of the
 * OOB hash/randomizer is presumably among them; verify upstream.
 */
1142 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1143 struct sk_buff *skb)
1145 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1147 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_Read_Local_OOB_Extended_Data: same caveat as
 * the non-extended variant above.
 */
1150 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1151 struct sk_buff *skb)
1153 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1155 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_LE_Set_Random_Address: records the now-active
 * random address; if it equals the stored RPA, the RPA is no longer
 * expired and its expiry timer is (re)armed for rpa_timeout seconds.
 */
1158 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1160 __u8 status = *((__u8 *) skb->data);
1163 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1168 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1174 bacpy(&hdev->random_addr, sent);
1176 if (!bacmp(&hdev->rpa, sent)) {
1177 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1178 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1179 secs_to_jiffies(hdev->rpa_timeout));
1182 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Default_PHY: mirrors the sent TX/RX PHY
 * preferences into hdev.
 */
1185 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1187 __u8 status = *((__u8 *) skb->data);
1188 struct hci_cp_le_set_default_phy *cp;
1190 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1195 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1201 hdev->le_tx_def_phys = cp->tx_phys;
1202 hdev->le_rx_def_phys = cp->rx_phys;
1204 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertising_Set_Random_Address: updates
 * the per-instance random address. Handle 0x00 is deliberately skipped —
 * see the comment below — so only secondary adv instances are touched.
 */
1207 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1208 struct sk_buff *skb)
1210 __u8 status = *((__u8 *) skb->data);
1211 struct hci_cp_le_set_adv_set_rand_addr *cp;
1212 struct adv_info *adv;
1217 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1218 /* Update only in case the adv instance since handle 0x00 shall be using
1219 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1220 * non-extended adverting.
1222 if (!cp || !cp->handle)
1227 adv = hci_find_adv_instance(hdev, cp->handle)
1229 bacpy(&adv->random_addr, &cp->bdaddr);
1230 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1231 adv->rpa_expired = false;
1232 queue_delayed_work(hdev->workqueue,
1233 &adv->rpa_expired_cb,
1234 secs_to_jiffies(hdev->rpa_timeout));
1238 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Transmit_Power: caches the supported
 * min/max LE TX power levels.
 */
1241 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1242 struct sk_buff *skb)
1244 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1246 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1251 hdev->min_le_tx_power = rp->min_le_tx_power;
1252 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete for HCI_LE_Set_Advertising_Enable: toggles HCI_LE_ADV
 * to match the sent parameter. When enabling while a peripheral-role LE
 * connection attempt is pending, a connection timeout is armed so a stuck
 * attempt cannot hang forever. NOTE(review): the enable/disable branch
 * structure is partially elided in this dump.
 */
1255 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1257 __u8 *sent, status = *((__u8 *) skb->data);
1259 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1264 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1270 /* If we're doing connection initiation as peripheral. Set a
1271 * timeout in case something goes wrong.
1274 struct hci_conn *conn;
1276 hci_dev_set_flag(hdev, HCI_LE_ADV);
1278 conn = hci_lookup_le_connect(hdev);
1280 queue_delayed_work(hdev->workqueue,
1281 &conn->le_conn_timeout,
1282 conn->conn_timeout);
1284 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1287 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Advertising_Enable: like the
 * legacy handler but per-instance. On enable it marks the targeted
 * instance enabled and arms the pending-connection timeout; on disable it
 * clears the instance's enabled bit (or all instances) and only drops
 * HCI_LE_ADV once no instance remains enabled.
 */
1290 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1291 struct sk_buff *skb)
1293 struct hci_cp_le_set_ext_adv_enable *cp;
1294 struct hci_cp_ext_adv_set *set;
1295 __u8 status = *((__u8 *) skb->data);
1296 struct adv_info *adv = NULL, *n;
1298 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1303 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1307 set = (void *)cp->data;
1311 if (cp->num_of_sets)
1312 adv = hci_find_adv_instance(hdev, set->handle);
1315 struct hci_conn *conn;
1317 hci_dev_set_flag(hdev, HCI_LE_ADV);
1320 adv->enabled = true;
1322 conn = hci_lookup_le_connect(hdev);
1324 queue_delayed_work(hdev->workqueue,
1325 &conn->le_conn_timeout,
1326 conn->conn_timeout);
1329 adv->enabled = false;
1330 /* If just one instance was disabled check if there are
1331 * any other instance enabled before clearing HCI_LE_ADV
1333 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1339 /* All instances shall be considered disabled */
1340 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1342 adv->enabled = false;
1345 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1349 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Parameters: remembers whether the
 * configured scan is active or passive (used by the scan-enable handler).
 */
1352 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1354 struct hci_cp_le_set_scan_param *cp;
1355 __u8 status = *((__u8 *) skb->data);
1357 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1362 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1368 hdev->le_scan_type = cp->type;
1370 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Scan_Parameters: extended
 * variant; the scan type is read from the first per-PHY parameter block
 * trailing the fixed header.
 */
1373 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1374 struct sk_buff *skb)
1376 struct hci_cp_le_set_ext_scan_params *cp;
1377 __u8 status = *((__u8 *) skb->data);
1378 struct hci_cp_le_scan_phy_params *phy_param;
1380 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1385 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1389 phy_param = (void *)cp->data;
1393 hdev->le_scan_type = phy_param->type;
1395 hci_dev_unlock(hdev);
/* True when a deferred advertising report is buffered in discovery state
 * (last_adv_addr != BDADDR_ANY).
 */
1398 static bool has_pending_adv_report(struct hci_dev *hdev)
1400 struct discovery_state *d = &hdev->discovery;
1402 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drops any buffered advertising report. */
1405 static void clear_pending_adv_report(struct hci_dev *hdev)
1407 struct discovery_state *d = &hdev->discovery;
1409 bacpy(&d->last_adv_addr, BDADDR_ANY);
1410 d->last_adv_data_len = 0;
/* Buffers one advertising report (address, type, RSSI, flags, AD data) in
 * discovery state so it can be merged with a matching scan response.
 * Oversized payloads (> HCI_MAX_AD_LENGTH) are rejected before copying.
 */
1413 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1414 u8 bdaddr_type, s8 rssi, u32 flags,
1417 struct discovery_state *d = &hdev->discovery;
1419 if (len > HCI_MAX_AD_LENGTH)
1422 bacpy(&d->last_adv_addr, bdaddr);
1423 d->last_adv_addr_type = bdaddr_type;
1424 d->last_adv_rssi = rssi;
1425 d->last_adv_flags = flags;
1426 memcpy(d->last_adv_data, data, len);
1427 d->last_adv_data_len = len;
/* Shared completion logic for both legacy and extended LE scan enable.
 * ENABLE: sets HCI_LE_SCAN and, for active scans, drops any stale buffered
 * advertising report. DISABLE: flushes a buffered report to mgmt, cancels
 * the auto-disable timer, clears HCI_LE_SCAN, and reconciles discovery
 * state / advertising depending on why scanning stopped. Any other value
 * is logged as use of a reserved parameter.
 */
1430 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1435 case LE_SCAN_ENABLE:
1436 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1437 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1438 clear_pending_adv_report(hdev);
1441 case LE_SCAN_DISABLE:
1442 /* We do this here instead of when setting DISCOVERY_STOPPED
1443 * since the latter would potentially require waiting for
1444 * inquiry to stop too.
1446 if (has_pending_adv_report(hdev)) {
1447 struct discovery_state *d = &hdev->discovery;
1449 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1450 d->last_adv_addr_type, NULL,
1451 d->last_adv_rssi, d->last_adv_flags,
1453 d->last_adv_data_len, NULL, 0);
1456 /* Cancel this timer so that we don't try to disable scanning
1457 * when it's already disabled.
1459 cancel_delayed_work(&hdev->le_scan_disable);
1461 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1463 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1464 * interrupted scanning due to a connect request. Mark
1465 * therefore discovery as stopped. If this was not
1466 * because of a connect request advertising might have
1467 * been disabled because of active scanning, so
1468 * re-enable it again if necessary.
1470 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
/* Tizen kernels route this through an LE-specific discovery state
 * machine; the in-tree comment calls the upstream line a bug.
 */
1471 #ifndef TIZEN_BT /* The below line is kernel bug. */
1472 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1474 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1476 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1477 hdev->discovery.state == DISCOVERY_FINDING)
1478 hci_req_reenable_advertising(hdev);
1483 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1488 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Enable: thin wrapper delegating to
 * le_set_scan_enable_complete() with the enable byte that was sent.
 */
1491 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1492 struct sk_buff *skb)
1494 struct hci_cp_le_set_scan_enable *cp;
1495 __u8 status = *((__u8 *) skb->data);
1497 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1502 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1506 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for HCI_LE_Set_Extended_Scan_Enable: same delegation,
 * extended-command variant.
 */
1509 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1510 struct sk_buff *skb)
1512 struct hci_cp_le_set_ext_scan_enable *cp;
1513 __u8 status = *((__u8 *) skb->data);
1515 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1520 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1524 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for HCI_LE_Read_Number_of_Supported_Advertising_Sets. */
1527 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1528 struct sk_buff *skb)
1530 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1532 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1538 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete for HCI_LE_Read_Accept_List_Size (formerly
 * "white list"): caches the controller's accept-list capacity.
 */
1541 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1542 struct sk_buff *skb)
1544 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1546 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1551 hdev->le_accept_list_size = rp->size;
/* Command Complete for HCI_LE_Clear_Accept_List: empties the host-side
 * mirror of the controller accept list.
 */
1554 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1555 struct sk_buff *skb)
1557 __u8 status = *((__u8 *) skb->data);
1559 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1564 hci_bdaddr_list_clear(&hdev->le_accept_list);
/* Command Complete for HCI_LE_Add_Device_To_Accept_List: mirrors the
 * addition into the host-side list, using the address from the sent
 * command (the response carries only a status byte).
 */
1567 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1568 struct sk_buff *skb)
1570 struct hci_cp_le_add_to_accept_list *sent;
1571 __u8 status = *((__u8 *) skb->data);
1573 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1578 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1582 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
/* Command Complete for HCI_LE_Remove_Device_From_Accept_List: mirrors the
 * removal into the host-side list.
 */
1586 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1587 struct sk_buff *skb)
1589 struct hci_cp_le_del_from_accept_list *sent;
1590 __u8 status = *((__u8 *) skb->data);
1592 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1597 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1601 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1605 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1606 struct sk_buff *skb)
1608 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1610 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1615 memcpy(hdev->le_states, rp->le_states, 8);
1618 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1619 struct sk_buff *skb)
1621 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1623 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1628 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1629 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1632 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1633 struct sk_buff *skb)
1635 struct hci_cp_le_write_def_data_len *sent;
1636 __u8 status = *((__u8 *) skb->data);
1638 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1643 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1647 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1648 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1651 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1652 struct sk_buff *skb)
1654 struct hci_cp_le_add_to_resolv_list *sent;
1655 __u8 status = *((__u8 *) skb->data);
1657 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1662 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1666 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1667 sent->bdaddr_type, sent->peer_irk,
1671 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1672 struct sk_buff *skb)
1674 struct hci_cp_le_del_from_resolv_list *sent;
1675 __u8 status = *((__u8 *) skb->data);
1677 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1682 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1686 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1690 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1691 struct sk_buff *skb)
1693 __u8 status = *((__u8 *) skb->data);
1695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1700 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1703 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1704 struct sk_buff *skb)
1706 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1708 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1713 hdev->le_resolv_list_size = rp->size;
1716 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1717 struct sk_buff *skb)
1719 __u8 *sent, status = *((__u8 *) skb->data);
1721 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1726 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1733 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1735 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1737 hci_dev_unlock(hdev);
1740 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1741 struct sk_buff *skb)
1743 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1745 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1750 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1751 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1752 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1753 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1756 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1757 struct sk_buff *skb)
1759 struct hci_cp_write_le_host_supported *sent;
1760 __u8 status = *((__u8 *) skb->data);
1762 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1767 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1774 hdev->features[1][0] |= LMP_HOST_LE;
1775 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1777 hdev->features[1][0] &= ~LMP_HOST_LE;
1778 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1779 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1783 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1785 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1787 hci_dev_unlock(hdev);
1790 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1792 struct hci_cp_le_set_adv_param *cp;
1793 u8 status = *((u8 *) skb->data);
1795 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1800 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1805 hdev->adv_addr_type = cp->own_address_type;
1806 hci_dev_unlock(hdev);
1809 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1811 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1812 struct hci_cp_le_set_ext_adv_params *cp;
1813 struct adv_info *adv_instance;
1815 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1820 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1825 hdev->adv_addr_type = cp->own_addr_type;
1827 /* Store in hdev for instance 0 */
1828 hdev->adv_tx_power = rp->tx_power;
1830 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1832 adv_instance->tx_power = rp->tx_power;
1834 /* Update adv data as tx power is known now */
1835 hci_req_update_adv_data(hdev, cp->handle);
1837 hci_dev_unlock(hdev);
1841 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1842 struct sk_buff *skb)
1844 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1846 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1847 hdev->name, rp->status, rp->le_ext_opcode);
1849 mgmt_enable_rssi_cc(hdev, rp, rp->status);
1852 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1853 struct sk_buff *skb)
1855 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1857 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1858 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1860 mgmt_raw_rssi_response(hdev, rp, rp->status);
1863 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
1864 struct sk_buff *skb)
1866 struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
1867 __u8 event_le_ext_sub_code;
1869 BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
1870 LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
1872 skb_pull(skb, sizeof(*ev));
1873 event_le_ext_sub_code = ev->event_le_ext_sub_code;
1875 switch (event_le_ext_sub_code) {
1876 case LE_RSSI_LINK_ALERT:
1877 BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X",
1878 LE_RSSI_LINK_ALERT);
1879 mgmt_rssi_alert_evt(hdev, skb);
1887 static void hci_vendor_specific_evt(struct hci_dev *hdev, struct sk_buff *skb)
1889 struct hci_ev_vendor_specific *ev = (void *)skb->data;
1890 __u8 event_sub_code;
1892 BT_DBG("hci_vendor_specific_evt");
1894 skb_pull(skb, sizeof(*ev));
1895 event_sub_code = ev->event_sub_code;
1897 switch (event_sub_code) {
1898 case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
1899 hci_vendor_specific_group_ext_evt(hdev, skb);
1908 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1910 struct hci_rp_read_rssi *rp = (void *) skb->data;
1911 struct hci_conn *conn;
1913 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1920 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1922 conn->rssi = rp->rssi;
1924 hci_dev_unlock(hdev);
1927 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1929 struct hci_cp_read_tx_power *sent;
1930 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1931 struct hci_conn *conn;
1933 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1938 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1944 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1948 switch (sent->type) {
1950 conn->tx_power = rp->tx_power;
1953 conn->max_tx_power = rp->tx_power;
1958 hci_dev_unlock(hdev);
1961 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1963 u8 status = *((u8 *) skb->data);
1966 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1971 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1973 hdev->ssp_debug_mode = *mode;
1976 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1978 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1981 hci_conn_check_pending(hdev);
1985 set_bit(HCI_INQUIRY, &hdev->flags);
1988 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1990 struct hci_cp_create_conn *cp;
1991 struct hci_conn *conn;
1993 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1995 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2001 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2003 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
2006 if (conn && conn->state == BT_CONNECT) {
2007 if (status != 0x0c || conn->attempt > 2) {
2008 conn->state = BT_CLOSED;
2009 hci_connect_cfm(conn, status);
2012 conn->state = BT_CONNECT2;
2016 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2019 bt_dev_err(hdev, "no memory for new connection");
2023 hci_dev_unlock(hdev);
2026 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2028 struct hci_cp_add_sco *cp;
2029 struct hci_conn *acl, *sco;
2032 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2037 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2041 handle = __le16_to_cpu(cp->handle);
2043 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2047 acl = hci_conn_hash_lookup_handle(hdev, handle);
2051 sco->state = BT_CLOSED;
2053 hci_connect_cfm(sco, status);
2058 hci_dev_unlock(hdev);
2061 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2063 struct hci_cp_auth_requested *cp;
2064 struct hci_conn *conn;
2066 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2071 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2077 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2079 if (conn->state == BT_CONFIG) {
2080 hci_connect_cfm(conn, status);
2081 hci_conn_drop(conn);
2085 hci_dev_unlock(hdev);
2088 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2090 struct hci_cp_set_conn_encrypt *cp;
2091 struct hci_conn *conn;
2093 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2098 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2104 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2106 if (conn->state == BT_CONFIG) {
2107 hci_connect_cfm(conn, status);
2108 hci_conn_drop(conn);
2112 hci_dev_unlock(hdev);
2115 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2116 struct hci_conn *conn)
2118 if (conn->state != BT_CONFIG || !conn->out)
2121 if (conn->pending_sec_level == BT_SECURITY_SDP)
2124 /* Only request authentication for SSP connections or non-SSP
2125 * devices with sec_level MEDIUM or HIGH or if MITM protection
2128 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2129 conn->pending_sec_level != BT_SECURITY_FIPS &&
2130 conn->pending_sec_level != BT_SECURITY_HIGH &&
2131 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2137 static int hci_resolve_name(struct hci_dev *hdev,
2138 struct inquiry_entry *e)
2140 struct hci_cp_remote_name_req cp;
2142 memset(&cp, 0, sizeof(cp));
2144 bacpy(&cp.bdaddr, &e->data.bdaddr);
2145 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2146 cp.pscan_mode = e->data.pscan_mode;
2147 cp.clock_offset = e->data.clock_offset;
2149 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2152 static bool hci_resolve_next_name(struct hci_dev *hdev)
2154 struct discovery_state *discov = &hdev->discovery;
2155 struct inquiry_entry *e;
2157 if (list_empty(&discov->resolve))
2160 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2164 if (hci_resolve_name(hdev, e) == 0) {
2165 e->name_state = NAME_PENDING;
2172 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2173 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2175 struct discovery_state *discov = &hdev->discovery;
2176 struct inquiry_entry *e;
2179 /* Update the mgmt connected state if necessary. Be careful with
2180 * conn objects that exist but are not (yet) connected however.
2181 * Only those in BT_CONFIG or BT_CONNECTED states can be
2182 * considered connected.
2185 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2186 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2187 mgmt_device_connected(hdev, conn, 0, name, name_len);
2189 mgmt_device_name_update(hdev, bdaddr, name, name_len);
2193 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2194 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2195 mgmt_device_connected(hdev, conn, name, name_len);
2198 if (discov->state == DISCOVERY_STOPPED)
2201 if (discov->state == DISCOVERY_STOPPING)
2202 goto discov_complete;
2204 if (discov->state != DISCOVERY_RESOLVING)
2207 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2208 /* If the device was not found in a list of found devices names of which
2209 * are pending. there is no need to continue resolving a next name as it
2210 * will be done upon receiving another Remote Name Request Complete
2217 e->name_state = NAME_KNOWN;
2218 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2219 e->data.rssi, name, name_len);
2221 e->name_state = NAME_NOT_KNOWN;
2224 if (hci_resolve_next_name(hdev))
2228 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2231 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2233 struct hci_cp_remote_name_req *cp;
2234 struct hci_conn *conn;
2236 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2238 /* If successful wait for the name req complete event before
2239 * checking for the need to do authentication */
2243 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2249 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2251 if (hci_dev_test_flag(hdev, HCI_MGMT))
2252 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2257 if (!hci_outgoing_auth_needed(hdev, conn))
2260 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2261 struct hci_cp_auth_requested auth_cp;
2263 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2265 auth_cp.handle = __cpu_to_le16(conn->handle);
2266 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2267 sizeof(auth_cp), &auth_cp);
2271 hci_dev_unlock(hdev);
2274 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2276 struct hci_cp_read_remote_features *cp;
2277 struct hci_conn *conn;
2279 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2284 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2290 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2292 if (conn->state == BT_CONFIG) {
2293 hci_connect_cfm(conn, status);
2294 hci_conn_drop(conn);
2298 hci_dev_unlock(hdev);
2301 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2303 struct hci_cp_read_remote_ext_features *cp;
2304 struct hci_conn *conn;
2306 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2311 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2317 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2319 if (conn->state == BT_CONFIG) {
2320 hci_connect_cfm(conn, status);
2321 hci_conn_drop(conn);
2325 hci_dev_unlock(hdev);
2328 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2330 struct hci_cp_setup_sync_conn *cp;
2331 struct hci_conn *acl, *sco;
2334 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2339 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2343 handle = __le16_to_cpu(cp->handle);
2345 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2349 acl = hci_conn_hash_lookup_handle(hdev, handle);
2353 sco->state = BT_CLOSED;
2355 hci_connect_cfm(sco, status);
2360 hci_dev_unlock(hdev);
2363 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2365 struct hci_cp_sniff_mode *cp;
2366 struct hci_conn *conn;
2368 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2373 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2379 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2381 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2383 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2384 hci_sco_setup(conn, status);
2387 hci_dev_unlock(hdev);
2390 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2392 struct hci_cp_exit_sniff_mode *cp;
2393 struct hci_conn *conn;
2395 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2400 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2406 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2408 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2410 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2411 hci_sco_setup(conn, status);
2414 hci_dev_unlock(hdev);
2417 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2419 struct hci_cp_disconnect *cp;
2420 struct hci_conn *conn;
2425 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2431 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2433 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2434 conn->dst_type, status);
2436 if (conn->type == LE_LINK) {
2437 hdev->cur_adv_instance = conn->adv_instance;
2438 hci_req_reenable_advertising(hdev);
2441 /* If the disconnection failed for any reason, the upper layer
2442 * does not retry to disconnect in current implementation.
2443 * Hence, we need to do some basic cleanup here and re-enable
2444 * advertising if necessary.
2449 hci_dev_unlock(hdev);
2452 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2453 u8 peer_addr_type, u8 own_address_type,
2456 struct hci_conn *conn;
2458 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2463 /* When using controller based address resolution, then the new
2464 * address types 0x02 and 0x03 are used. These types need to be
2465 * converted back into either public address or random address type
2467 if (use_ll_privacy(hdev) &&
2468 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2469 switch (own_address_type) {
2470 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2471 own_address_type = ADDR_LE_DEV_PUBLIC;
2473 case ADDR_LE_DEV_RANDOM_RESOLVED:
2474 own_address_type = ADDR_LE_DEV_RANDOM;
2479 /* Store the initiator and responder address information which
2480 * is needed for SMP. These values will not change during the
2481 * lifetime of the connection.
2483 conn->init_addr_type = own_address_type;
2484 if (own_address_type == ADDR_LE_DEV_RANDOM)
2485 bacpy(&conn->init_addr, &hdev->random_addr);
2487 bacpy(&conn->init_addr, &hdev->bdaddr);
2489 conn->resp_addr_type = peer_addr_type;
2490 bacpy(&conn->resp_addr, peer_addr);
2492 /* We don't want the connection attempt to stick around
2493 * indefinitely since LE doesn't have a page timeout concept
2494 * like BR/EDR. Set a timer for any connection that doesn't use
2495 * the accept list for connecting.
2497 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2498 queue_delayed_work(conn->hdev->workqueue,
2499 &conn->le_conn_timeout,
2500 conn->conn_timeout);
2503 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2505 struct hci_cp_le_create_conn *cp;
2507 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2509 /* All connection failure handling is taken care of by the
2510 * hci_le_conn_failed function which is triggered by the HCI
2511 * request completion callbacks used for connecting.
2516 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2522 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2523 cp->own_address_type, cp->filter_policy);
2525 hci_dev_unlock(hdev);
2528 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2530 struct hci_cp_le_ext_create_conn *cp;
2532 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2534 /* All connection failure handling is taken care of by the
2535 * hci_le_conn_failed function which is triggered by the HCI
2536 * request completion callbacks used for connecting.
2541 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2547 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2548 cp->own_addr_type, cp->filter_policy);
2550 hci_dev_unlock(hdev);
2553 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2555 struct hci_cp_le_read_remote_features *cp;
2556 struct hci_conn *conn;
2558 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2563 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2569 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2571 if (conn->state == BT_CONFIG) {
2572 hci_connect_cfm(conn, status);
2573 hci_conn_drop(conn);
2577 hci_dev_unlock(hdev);
2580 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2582 struct hci_cp_le_start_enc *cp;
2583 struct hci_conn *conn;
2585 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2592 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2596 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2600 if (conn->state != BT_CONNECTED)
2603 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2604 hci_conn_drop(conn);
2607 hci_dev_unlock(hdev);
2610 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2612 struct hci_cp_switch_role *cp;
2613 struct hci_conn *conn;
2615 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2620 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2626 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2628 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2630 hci_dev_unlock(hdev);
2633 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2635 __u8 status = *((__u8 *) skb->data);
2636 struct discovery_state *discov = &hdev->discovery;
2637 struct inquiry_entry *e;
2639 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2641 hci_conn_check_pending(hdev);
2643 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2646 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2647 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2649 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2654 if (discov->state != DISCOVERY_FINDING)
2657 if (list_empty(&discov->resolve)) {
2658 /* When BR/EDR inquiry is active and no LE scanning is in
2659 * progress, then change discovery state to indicate completion.
2661 * When running LE scanning and BR/EDR inquiry simultaneously
2662 * and the LE scan already finished, then change the discovery
2663 * state to indicate completion.
2665 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2666 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2667 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2671 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2672 if (e && hci_resolve_name(hdev, e) == 0) {
2673 e->name_state = NAME_PENDING;
2674 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2676 /* When BR/EDR inquiry is active and no LE scanning is in
2677 * progress, then change discovery state to indicate completion.
2679 * When running LE scanning and BR/EDR inquiry simultaneously
2680 * and the LE scan already finished, then change the discovery
2681 * state to indicate completion.
2683 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2684 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2685 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2689 hci_dev_unlock(hdev);
2692 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2694 struct inquiry_data data;
2695 struct inquiry_info *info = (void *) (skb->data + 1);
2696 int num_rsp = *((__u8 *) skb->data);
2698 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2700 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2703 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2708 for (; num_rsp; num_rsp--, info++) {
2711 bacpy(&data.bdaddr, &info->bdaddr);
2712 data.pscan_rep_mode = info->pscan_rep_mode;
2713 data.pscan_period_mode = info->pscan_period_mode;
2714 data.pscan_mode = info->pscan_mode;
2715 memcpy(data.dev_class, info->dev_class, 3);
2716 data.clock_offset = info->clock_offset;
2717 data.rssi = HCI_RSSI_INVALID;
2718 data.ssp_mode = 0x00;
2720 flags = hci_inquiry_cache_update(hdev, &data, false);
2722 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2723 info->dev_class, HCI_RSSI_INVALID,
2724 flags, NULL, 0, NULL, 0);
2727 hci_dev_unlock(hdev);
2730 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2732 struct hci_ev_conn_complete *ev = (void *) skb->data;
2733 struct hci_conn *conn;
2735 BT_DBG("%s", hdev->name);
2739 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2741 /* Connection may not exist if auto-connected. Check the bredr
2742 * allowlist to see if this device is allowed to auto connect.
2743 * If link is an ACL type, create a connection class
2746 * Auto-connect will only occur if the event filter is
2747 * programmed with a given address. Right now, event filter is
2748 * only used during suspend.
2750 if (ev->link_type == ACL_LINK &&
2751 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2754 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2757 bt_dev_err(hdev, "no memory for new conn");
2761 if (ev->link_type != SCO_LINK)
2764 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2769 conn->type = SCO_LINK;
2774 conn->handle = __le16_to_cpu(ev->handle);
2776 if (conn->type == ACL_LINK) {
2777 conn->state = BT_CONFIG;
2778 hci_conn_hold(conn);
2780 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2781 !hci_find_link_key(hdev, &ev->bdaddr))
2782 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2784 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2786 conn->state = BT_CONNECTED;
2788 hci_debugfs_create_conn(conn);
2789 hci_conn_add_sysfs(conn);
2791 if (test_bit(HCI_AUTH, &hdev->flags))
2792 set_bit(HCI_CONN_AUTH, &conn->flags);
2794 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2795 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2797 /* Get remote features */
2798 if (conn->type == ACL_LINK) {
2799 struct hci_cp_read_remote_features cp;
2800 cp.handle = ev->handle;
2801 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2804 hci_req_update_scan(hdev);
2807 /* Set packet type for incoming connection */
2808 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2809 struct hci_cp_change_conn_ptype cp;
2810 cp.handle = ev->handle;
2811 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2812 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2816 conn->state = BT_CLOSED;
2817 if (conn->type == ACL_LINK)
2818 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2819 conn->dst_type, ev->status);
2822 if (conn->type == ACL_LINK)
2823 hci_sco_setup(conn, ev->status);
2826 hci_connect_cfm(conn, ev->status);
2828 } else if (ev->link_type == SCO_LINK) {
2829 switch (conn->setting & SCO_AIRMODE_MASK) {
2830 case SCO_AIRMODE_CVSD:
2832 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2836 hci_connect_cfm(conn, ev->status);
2840 hci_dev_unlock(hdev);
2842 hci_conn_check_pending(hdev);
2845 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2847 struct hci_cp_reject_conn_req cp;
2849 bacpy(&cp.bdaddr, bdaddr);
2850 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2851 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2854 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2856 struct hci_ev_conn_request *ev = (void *) skb->data;
2857 int mask = hdev->link_mode;
2858 struct inquiry_entry *ie;
2859 struct hci_conn *conn;
2862 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2865 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2868 if (!(mask & HCI_LM_ACCEPT)) {
2869 hci_reject_conn(hdev, &ev->bdaddr);
2873 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2875 hci_reject_conn(hdev, &ev->bdaddr);
2879 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2880 * connection. These features are only touched through mgmt so
2881 * only do the checks if HCI_MGMT is set.
2883 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2884 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2885 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2887 hci_reject_conn(hdev, &ev->bdaddr);
2891 /* Connection accepted */
2895 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2897 memcpy(ie->data.dev_class, ev->dev_class, 3);
2899 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2902 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2905 bt_dev_err(hdev, "no memory for new connection");
2906 hci_dev_unlock(hdev);
2911 memcpy(conn->dev_class, ev->dev_class, 3);
2913 hci_dev_unlock(hdev);
2915 if (ev->link_type == ACL_LINK ||
2916 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2917 struct hci_cp_accept_conn_req cp;
2918 conn->state = BT_CONNECT;
2920 bacpy(&cp.bdaddr, &ev->bdaddr);
2922 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2923 cp.role = 0x00; /* Become central */
2925 cp.role = 0x01; /* Remain peripheral */
2927 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2928 } else if (!(flags & HCI_PROTO_DEFER)) {
2929 struct hci_cp_accept_sync_conn_req cp;
2930 conn->state = BT_CONNECT;
2932 bacpy(&cp.bdaddr, &ev->bdaddr);
2933 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2935 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2936 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2937 cp.max_latency = cpu_to_le16(0xffff);
2938 cp.content_format = cpu_to_le16(hdev->voice_setting);
2939 cp.retrans_effort = 0xff;
2941 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2944 conn->state = BT_CONNECT2;
2945 hci_connect_cfm(conn, 0);
2949 static u8 hci_to_mgmt_reason(u8 err)
2952 case HCI_ERROR_CONNECTION_TIMEOUT:
2953 return MGMT_DEV_DISCONN_TIMEOUT;
2954 case HCI_ERROR_REMOTE_USER_TERM:
2955 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2956 case HCI_ERROR_REMOTE_POWER_OFF:
2957 return MGMT_DEV_DISCONN_REMOTE;
2958 case HCI_ERROR_LOCAL_HOST_TERM:
2959 return MGMT_DEV_DISCONN_LOCAL_HOST;
2961 return MGMT_DEV_DISCONN_UNKNOWN;
2965 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2967 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2969 struct hci_conn_params *params;
2970 struct hci_conn *conn;
2971 bool mgmt_connected;
2973 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2977 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2982 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2983 conn->dst_type, ev->status);
2987 conn->state = BT_CLOSED;
2989 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2991 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2992 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2994 reason = hci_to_mgmt_reason(ev->reason);
2996 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2997 reason, mgmt_connected);
2999 if (conn->type == ACL_LINK) {
3000 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3001 hci_remove_link_key(hdev, &conn->dst);
3003 hci_req_update_scan(hdev);
3006 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3008 switch (params->auto_connect) {
3009 case HCI_AUTO_CONN_LINK_LOSS:
3010 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3014 case HCI_AUTO_CONN_DIRECT:
3015 case HCI_AUTO_CONN_ALWAYS:
3016 list_del_init(¶ms->action);
3017 list_add(¶ms->action, &hdev->pend_le_conns);
3018 hci_update_background_scan(hdev);
3026 hci_disconn_cfm(conn, ev->reason);
3028 /* The suspend notifier is waiting for all devices to disconnect so
3029 * clear the bit from pending tasks and inform the wait queue.
3031 if (list_empty(&hdev->conn_hash.list) &&
3032 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
3033 wake_up(&hdev->suspend_wait_q);
3036 /* Re-enable advertising if necessary, since it might
3037 * have been disabled by the connection. From the
3038 * HCI_LE_Set_Advertise_Enable command description in
3039 * the core specification (v4.0):
3040 * "The Controller shall continue advertising until the Host
3041 * issues an LE_Set_Advertise_Enable command with
3042 * Advertising_Enable set to 0x00 (Advertising is disabled)
3043 * or until a connection is created or until the Advertising
3044 * is timed out due to Directed Advertising."
3046 if (conn->type == LE_LINK) {
3047 hdev->cur_adv_instance = conn->adv_instance;
3048 hci_req_reenable_advertising(hdev);
3054 hci_dev_unlock(hdev);
3057 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3059 struct hci_ev_auth_complete *ev = (void *) skb->data;
3060 struct hci_conn *conn;
3062 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3066 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3071 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3073 if (!hci_conn_ssp_enabled(conn) &&
3074 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3075 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3077 set_bit(HCI_CONN_AUTH, &conn->flags);
3078 conn->sec_level = conn->pending_sec_level;
3081 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3082 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3084 mgmt_auth_failed(conn, ev->status);
3087 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3088 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3090 if (conn->state == BT_CONFIG) {
3091 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3092 struct hci_cp_set_conn_encrypt cp;
3093 cp.handle = ev->handle;
3095 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3098 conn->state = BT_CONNECTED;
3099 hci_connect_cfm(conn, ev->status);
3100 hci_conn_drop(conn);
3103 hci_auth_cfm(conn, ev->status);
3105 hci_conn_hold(conn);
3106 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3107 hci_conn_drop(conn);
3110 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3112 struct hci_cp_set_conn_encrypt cp;
3113 cp.handle = ev->handle;
3115 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3118 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3119 hci_encrypt_cfm(conn, ev->status);
3124 hci_dev_unlock(hdev);
3127 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3129 struct hci_ev_remote_name *ev = (void *) skb->data;
3130 struct hci_conn *conn;
3132 BT_DBG("%s", hdev->name);
3134 hci_conn_check_pending(hdev);
3138 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3140 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3143 if (ev->status == 0)
3144 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3145 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3147 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3153 if (!hci_outgoing_auth_needed(hdev, conn))
3156 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3157 struct hci_cp_auth_requested cp;
3159 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3161 cp.handle = __cpu_to_le16(conn->handle);
3162 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3166 hci_dev_unlock(hdev);
3169 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3170 u16 opcode, struct sk_buff *skb)
3172 const struct hci_rp_read_enc_key_size *rp;
3173 struct hci_conn *conn;
3176 BT_DBG("%s status 0x%02x", hdev->name, status);
3178 if (!skb || skb->len < sizeof(*rp)) {
3179 bt_dev_err(hdev, "invalid read key size response");
3183 rp = (void *)skb->data;
3184 handle = le16_to_cpu(rp->handle);
3188 conn = hci_conn_hash_lookup_handle(hdev, handle);
3192 /* While unexpected, the read_enc_key_size command may fail. The most
3193 * secure approach is to then assume the key size is 0 to force a
3197 bt_dev_err(hdev, "failed to read key size for handle %u",
3199 conn->enc_key_size = 0;
3201 conn->enc_key_size = rp->key_size;
3204 hci_encrypt_cfm(conn, 0);
3207 hci_dev_unlock(hdev);
3210 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3212 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3213 struct hci_conn *conn;
3215 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3219 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3225 /* Encryption implies authentication */
3226 set_bit(HCI_CONN_AUTH, &conn->flags);
3227 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3228 conn->sec_level = conn->pending_sec_level;
3230 /* P-256 authentication key implies FIPS */
3231 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3232 set_bit(HCI_CONN_FIPS, &conn->flags);
3234 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3235 conn->type == LE_LINK)
3236 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3238 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3239 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3243 /* We should disregard the current RPA and generate a new one
3244 * whenever the encryption procedure fails.
3246 if (ev->status && conn->type == LE_LINK) {
3247 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3248 hci_adv_instances_set_rpa_expired(hdev, true);
3251 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3253 /* Check link security requirements are met */
3254 if (!hci_conn_check_link_mode(conn))
3255 ev->status = HCI_ERROR_AUTH_FAILURE;
3257 if (ev->status && conn->state == BT_CONNECTED) {
3258 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3259 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3261 /* Notify upper layers so they can cleanup before
3264 hci_encrypt_cfm(conn, ev->status);
3265 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3266 hci_conn_drop(conn);
3270 /* Try reading the encryption key size for encrypted ACL links */
3271 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3272 struct hci_cp_read_enc_key_size cp;
3273 struct hci_request req;
3275 /* Only send HCI_Read_Encryption_Key_Size if the
3276 * controller really supports it. If it doesn't, assume
3277 * the default size (16).
3279 if (!(hdev->commands[20] & 0x10)) {
3280 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3284 hci_req_init(&req, hdev);
3286 cp.handle = cpu_to_le16(conn->handle);
3287 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3289 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3290 bt_dev_err(hdev, "sending read key size failed");
3291 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3298 /* Set the default Authenticated Payload Timeout after
3299 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3300 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3301 * sent when the link is active and Encryption is enabled, the conn
3302 * type can be either LE or ACL and controller must support LMP Ping.
3303 * Ensure for AES-CCM encryption as well.
3305 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3306 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3307 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3308 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3309 struct hci_cp_write_auth_payload_to cp;
3311 cp.handle = cpu_to_le16(conn->handle);
3312 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3313 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3318 hci_encrypt_cfm(conn, ev->status);
3321 hci_dev_unlock(hdev);
3324 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3325 struct sk_buff *skb)
3327 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3328 struct hci_conn *conn;
3330 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3334 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3337 set_bit(HCI_CONN_SECURE, &conn->flags);
3339 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3341 hci_key_change_cfm(conn, ev->status);
3344 hci_dev_unlock(hdev);
3347 static void hci_remote_features_evt(struct hci_dev *hdev,
3348 struct sk_buff *skb)
3350 struct hci_ev_remote_features *ev = (void *) skb->data;
3351 struct hci_conn *conn;
3353 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3357 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3362 memcpy(conn->features[0], ev->features, 8);
3364 if (conn->state != BT_CONFIG)
3367 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3368 lmp_ext_feat_capable(conn)) {
3369 struct hci_cp_read_remote_ext_features cp;
3370 cp.handle = ev->handle;
3372 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3377 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3378 struct hci_cp_remote_name_req cp;
3379 memset(&cp, 0, sizeof(cp));
3380 bacpy(&cp.bdaddr, &conn->dst);
3381 cp.pscan_rep_mode = 0x02;
3382 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3383 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3384 mgmt_device_connected(hdev, conn, NULL, 0);
3386 if (!hci_outgoing_auth_needed(hdev, conn)) {
3387 conn->state = BT_CONNECTED;
3388 hci_connect_cfm(conn, ev->status);
3389 hci_conn_drop(conn);
3393 hci_dev_unlock(hdev);
3396 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3398 cancel_delayed_work(&hdev->cmd_timer);
3400 if (!test_bit(HCI_RESET, &hdev->flags)) {
3402 cancel_delayed_work(&hdev->ncmd_timer);
3403 atomic_set(&hdev->cmd_cnt, 1);
3405 schedule_delayed_work(&hdev->ncmd_timer,
3411 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3412 u16 *opcode, u8 *status,
3413 hci_req_complete_t *req_complete,
3414 hci_req_complete_skb_t *req_complete_skb)
3416 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3418 *opcode = __le16_to_cpu(ev->opcode);
3419 *status = skb->data[sizeof(*ev)];
3421 skb_pull(skb, sizeof(*ev));
3424 case HCI_OP_INQUIRY_CANCEL:
3425 hci_cc_inquiry_cancel(hdev, skb, status);
3428 case HCI_OP_PERIODIC_INQ:
3429 hci_cc_periodic_inq(hdev, skb);
3432 case HCI_OP_EXIT_PERIODIC_INQ:
3433 hci_cc_exit_periodic_inq(hdev, skb);
3436 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3437 hci_cc_remote_name_req_cancel(hdev, skb);
3440 case HCI_OP_ROLE_DISCOVERY:
3441 hci_cc_role_discovery(hdev, skb);
3444 case HCI_OP_READ_LINK_POLICY:
3445 hci_cc_read_link_policy(hdev, skb);
3448 case HCI_OP_WRITE_LINK_POLICY:
3449 hci_cc_write_link_policy(hdev, skb);
3452 case HCI_OP_READ_DEF_LINK_POLICY:
3453 hci_cc_read_def_link_policy(hdev, skb);
3456 case HCI_OP_WRITE_DEF_LINK_POLICY:
3457 hci_cc_write_def_link_policy(hdev, skb);
3461 hci_cc_reset(hdev, skb);
3464 case HCI_OP_READ_STORED_LINK_KEY:
3465 hci_cc_read_stored_link_key(hdev, skb);
3468 case HCI_OP_DELETE_STORED_LINK_KEY:
3469 hci_cc_delete_stored_link_key(hdev, skb);
3472 case HCI_OP_WRITE_LOCAL_NAME:
3473 hci_cc_write_local_name(hdev, skb);
3476 case HCI_OP_READ_LOCAL_NAME:
3477 hci_cc_read_local_name(hdev, skb);
3480 case HCI_OP_WRITE_AUTH_ENABLE:
3481 hci_cc_write_auth_enable(hdev, skb);
3484 case HCI_OP_WRITE_ENCRYPT_MODE:
3485 hci_cc_write_encrypt_mode(hdev, skb);
3488 case HCI_OP_WRITE_SCAN_ENABLE:
3489 hci_cc_write_scan_enable(hdev, skb);
3492 case HCI_OP_SET_EVENT_FLT:
3493 hci_cc_set_event_filter(hdev, skb);
3496 case HCI_OP_READ_CLASS_OF_DEV:
3497 hci_cc_read_class_of_dev(hdev, skb);
3500 case HCI_OP_WRITE_CLASS_OF_DEV:
3501 hci_cc_write_class_of_dev(hdev, skb);
3504 case HCI_OP_READ_VOICE_SETTING:
3505 hci_cc_read_voice_setting(hdev, skb);
3508 case HCI_OP_WRITE_VOICE_SETTING:
3509 hci_cc_write_voice_setting(hdev, skb);
3512 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3513 hci_cc_read_num_supported_iac(hdev, skb);
3516 case HCI_OP_WRITE_SSP_MODE:
3517 hci_cc_write_ssp_mode(hdev, skb);
3520 case HCI_OP_WRITE_SC_SUPPORT:
3521 hci_cc_write_sc_support(hdev, skb);
3524 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3525 hci_cc_read_auth_payload_timeout(hdev, skb);
3528 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3529 hci_cc_write_auth_payload_timeout(hdev, skb);
3532 case HCI_OP_READ_LOCAL_VERSION:
3533 hci_cc_read_local_version(hdev, skb);
3536 case HCI_OP_READ_LOCAL_COMMANDS:
3537 hci_cc_read_local_commands(hdev, skb);
3540 case HCI_OP_READ_LOCAL_FEATURES:
3541 hci_cc_read_local_features(hdev, skb);
3544 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3545 hci_cc_read_local_ext_features(hdev, skb);
3548 case HCI_OP_READ_BUFFER_SIZE:
3549 hci_cc_read_buffer_size(hdev, skb);
3552 case HCI_OP_READ_BD_ADDR:
3553 hci_cc_read_bd_addr(hdev, skb);
3556 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3557 hci_cc_read_local_pairing_opts(hdev, skb);
3560 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3561 hci_cc_read_page_scan_activity(hdev, skb);
3564 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3565 hci_cc_write_page_scan_activity(hdev, skb);
3568 case HCI_OP_READ_PAGE_SCAN_TYPE:
3569 hci_cc_read_page_scan_type(hdev, skb);
3572 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3573 hci_cc_write_page_scan_type(hdev, skb);
3576 case HCI_OP_READ_DATA_BLOCK_SIZE:
3577 hci_cc_read_data_block_size(hdev, skb);
3580 case HCI_OP_READ_FLOW_CONTROL_MODE:
3581 hci_cc_read_flow_control_mode(hdev, skb);
3584 case HCI_OP_READ_LOCAL_AMP_INFO:
3585 hci_cc_read_local_amp_info(hdev, skb);
3588 case HCI_OP_READ_CLOCK:
3589 hci_cc_read_clock(hdev, skb);
3592 case HCI_OP_READ_INQ_RSP_TX_POWER:
3593 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3596 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3597 hci_cc_read_def_err_data_reporting(hdev, skb);
3600 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3601 hci_cc_write_def_err_data_reporting(hdev, skb);
3604 case HCI_OP_PIN_CODE_REPLY:
3605 hci_cc_pin_code_reply(hdev, skb);
3608 case HCI_OP_PIN_CODE_NEG_REPLY:
3609 hci_cc_pin_code_neg_reply(hdev, skb);
3612 case HCI_OP_READ_LOCAL_OOB_DATA:
3613 hci_cc_read_local_oob_data(hdev, skb);
3616 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3617 hci_cc_read_local_oob_ext_data(hdev, skb);
3620 case HCI_OP_LE_READ_BUFFER_SIZE:
3621 hci_cc_le_read_buffer_size(hdev, skb);
3624 case HCI_OP_LE_READ_LOCAL_FEATURES:
3625 hci_cc_le_read_local_features(hdev, skb);
3628 case HCI_OP_LE_READ_ADV_TX_POWER:
3629 hci_cc_le_read_adv_tx_power(hdev, skb);
3632 case HCI_OP_USER_CONFIRM_REPLY:
3633 hci_cc_user_confirm_reply(hdev, skb);
3636 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3637 hci_cc_user_confirm_neg_reply(hdev, skb);
3640 case HCI_OP_USER_PASSKEY_REPLY:
3641 hci_cc_user_passkey_reply(hdev, skb);
3644 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3645 hci_cc_user_passkey_neg_reply(hdev, skb);
3648 case HCI_OP_LE_SET_RANDOM_ADDR:
3649 hci_cc_le_set_random_addr(hdev, skb);
3652 case HCI_OP_LE_SET_ADV_ENABLE:
3653 hci_cc_le_set_adv_enable(hdev, skb);
3656 case HCI_OP_LE_SET_SCAN_PARAM:
3657 hci_cc_le_set_scan_param(hdev, skb);
3660 case HCI_OP_LE_SET_SCAN_ENABLE:
3661 hci_cc_le_set_scan_enable(hdev, skb);
3664 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3665 hci_cc_le_read_accept_list_size(hdev, skb);
3668 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3669 hci_cc_le_clear_accept_list(hdev, skb);
3672 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3673 hci_cc_le_add_to_accept_list(hdev, skb);
3676 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3677 hci_cc_le_del_from_accept_list(hdev, skb);
3680 case HCI_OP_LE_READ_SUPPORTED_STATES:
3681 hci_cc_le_read_supported_states(hdev, skb);
3684 case HCI_OP_LE_READ_DEF_DATA_LEN:
3685 hci_cc_le_read_def_data_len(hdev, skb);
3688 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3689 hci_cc_le_write_def_data_len(hdev, skb);
3692 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3693 hci_cc_le_add_to_resolv_list(hdev, skb);
3696 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3697 hci_cc_le_del_from_resolv_list(hdev, skb);
3700 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3701 hci_cc_le_clear_resolv_list(hdev, skb);
3704 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3705 hci_cc_le_read_resolv_list_size(hdev, skb);
3708 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3709 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3712 case HCI_OP_LE_READ_MAX_DATA_LEN:
3713 hci_cc_le_read_max_data_len(hdev, skb);
3716 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3717 hci_cc_write_le_host_supported(hdev, skb);
3720 case HCI_OP_LE_SET_ADV_PARAM:
3721 hci_cc_set_adv_param(hdev, skb);
3724 case HCI_OP_READ_RSSI:
3725 hci_cc_read_rssi(hdev, skb);
3728 case HCI_OP_READ_TX_POWER:
3729 hci_cc_read_tx_power(hdev, skb);
3732 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3733 hci_cc_write_ssp_debug_mode(hdev, skb);
3736 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3737 hci_cc_le_set_ext_scan_param(hdev, skb);
3740 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3741 hci_cc_le_set_ext_scan_enable(hdev, skb);
3744 case HCI_OP_LE_SET_DEFAULT_PHY:
3745 hci_cc_le_set_default_phy(hdev, skb);
3748 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3749 hci_cc_le_read_num_adv_sets(hdev, skb);
3752 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3753 hci_cc_set_ext_adv_param(hdev, skb);
3756 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3757 hci_cc_le_set_ext_adv_enable(hdev, skb);
3760 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3761 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3764 case HCI_OP_LE_READ_TRANSMIT_POWER:
3765 hci_cc_le_read_transmit_power(hdev, skb);
3768 case HCI_OP_ENABLE_RSSI:
3769 hci_cc_enable_rssi(hdev, skb);
3772 case HCI_OP_GET_RAW_RSSI:
3773 hci_cc_get_raw_rssi(hdev, skb);
3777 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3781 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3783 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3786 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3788 "unexpected event for opcode 0x%4.4x", *opcode);
3792 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3793 queue_work(hdev->workqueue, &hdev->cmd_work);
3796 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3797 u16 *opcode, u8 *status,
3798 hci_req_complete_t *req_complete,
3799 hci_req_complete_skb_t *req_complete_skb)
3801 struct hci_ev_cmd_status *ev = (void *) skb->data;
3803 skb_pull(skb, sizeof(*ev));
3805 *opcode = __le16_to_cpu(ev->opcode);
3806 *status = ev->status;
3809 case HCI_OP_INQUIRY:
3810 hci_cs_inquiry(hdev, ev->status);
3813 case HCI_OP_CREATE_CONN:
3814 hci_cs_create_conn(hdev, ev->status);
3817 case HCI_OP_DISCONNECT:
3818 hci_cs_disconnect(hdev, ev->status);
3821 case HCI_OP_ADD_SCO:
3822 hci_cs_add_sco(hdev, ev->status);
3825 case HCI_OP_AUTH_REQUESTED:
3826 hci_cs_auth_requested(hdev, ev->status);
3829 case HCI_OP_SET_CONN_ENCRYPT:
3830 hci_cs_set_conn_encrypt(hdev, ev->status);
3833 case HCI_OP_REMOTE_NAME_REQ:
3834 hci_cs_remote_name_req(hdev, ev->status);
3837 case HCI_OP_READ_REMOTE_FEATURES:
3838 hci_cs_read_remote_features(hdev, ev->status);
3841 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3842 hci_cs_read_remote_ext_features(hdev, ev->status);
3845 case HCI_OP_SETUP_SYNC_CONN:
3846 hci_cs_setup_sync_conn(hdev, ev->status);
3849 case HCI_OP_SNIFF_MODE:
3850 hci_cs_sniff_mode(hdev, ev->status);
3853 case HCI_OP_EXIT_SNIFF_MODE:
3854 hci_cs_exit_sniff_mode(hdev, ev->status);
3857 case HCI_OP_SWITCH_ROLE:
3858 hci_cs_switch_role(hdev, ev->status);
3861 case HCI_OP_LE_CREATE_CONN:
3862 hci_cs_le_create_conn(hdev, ev->status);
3865 case HCI_OP_LE_READ_REMOTE_FEATURES:
3866 hci_cs_le_read_remote_features(hdev, ev->status);
3869 case HCI_OP_LE_START_ENC:
3870 hci_cs_le_start_enc(hdev, ev->status);
3873 case HCI_OP_LE_EXT_CREATE_CONN:
3874 hci_cs_le_ext_create_conn(hdev, ev->status);
3878 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3882 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3884 /* Indicate request completion if the command failed. Also, if
3885 * we're not waiting for a special event and we get a success
3886 * command status we should try to flag the request as completed
3887 * (since for this kind of commands there will not be a command
3891 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3892 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3895 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3897 "unexpected event for opcode 0x%4.4x", *opcode);
3901 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3902 queue_work(hdev->workqueue, &hdev->cmd_work);
3905 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3907 struct hci_ev_hardware_error *ev = (void *) skb->data;
3911 mgmt_hardware_error(hdev, ev->code);
3912 hci_dev_unlock(hdev);
3914 hdev->hw_error_code = ev->code;
3916 queue_work(hdev->req_workqueue, &hdev->error_reset);
3919 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3921 struct hci_ev_role_change *ev = (void *) skb->data;
3922 struct hci_conn *conn;
3924 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3928 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3931 conn->role = ev->role;
3933 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3935 hci_role_switch_cfm(conn, ev->status, ev->role);
3938 hci_dev_unlock(hdev);
3941 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3943 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3946 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3947 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3951 if (skb->len < sizeof(*ev) ||
3952 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3953 BT_DBG("%s bad parameters", hdev->name);
3957 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3959 for (i = 0; i < ev->num_hndl; i++) {
3960 struct hci_comp_pkts_info *info = &ev->handles[i];
3961 struct hci_conn *conn;
3962 __u16 handle, count;
3964 handle = __le16_to_cpu(info->handle);
3965 count = __le16_to_cpu(info->count);
3967 conn = hci_conn_hash_lookup_handle(hdev, handle);
3971 conn->sent -= count;
3973 switch (conn->type) {
3975 hdev->acl_cnt += count;
3976 if (hdev->acl_cnt > hdev->acl_pkts)
3977 hdev->acl_cnt = hdev->acl_pkts;
3981 if (hdev->le_pkts) {
3982 hdev->le_cnt += count;
3983 if (hdev->le_cnt > hdev->le_pkts)
3984 hdev->le_cnt = hdev->le_pkts;
3986 hdev->acl_cnt += count;
3987 if (hdev->acl_cnt > hdev->acl_pkts)
3988 hdev->acl_cnt = hdev->acl_pkts;
3993 hdev->sco_cnt += count;
3994 if (hdev->sco_cnt > hdev->sco_pkts)
3995 hdev->sco_cnt = hdev->sco_pkts;
3999 bt_dev_err(hdev, "unknown type %d conn %p",
4005 queue_work(hdev->workqueue, &hdev->tx_work);
4008 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4011 struct hci_chan *chan;
4013 switch (hdev->dev_type) {
4015 return hci_conn_hash_lookup_handle(hdev, handle);
4017 chan = hci_chan_lookup_handle(hdev, handle);
4022 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4029 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
4031 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
4034 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4035 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4039 if (skb->len < sizeof(*ev) ||
4040 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4041 BT_DBG("%s bad parameters", hdev->name);
4045 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
4048 for (i = 0; i < ev->num_hndl; i++) {
4049 struct hci_comp_blocks_info *info = &ev->handles[i];
4050 struct hci_conn *conn = NULL;
4051 __u16 handle, block_count;
4053 handle = __le16_to_cpu(info->handle);
4054 block_count = __le16_to_cpu(info->blocks);
4056 conn = __hci_conn_lookup_handle(hdev, handle);
4060 conn->sent -= block_count;
4062 switch (conn->type) {
4065 hdev->block_cnt += block_count;
4066 if (hdev->block_cnt > hdev->num_blocks)
4067 hdev->block_cnt = hdev->num_blocks;
4071 bt_dev_err(hdev, "unknown type %d conn %p",
4077 queue_work(hdev->workqueue, &hdev->tx_work);
4080 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4082 struct hci_ev_mode_change *ev = (void *) skb->data;
4083 struct hci_conn *conn;
4085 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4089 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4091 conn->mode = ev->mode;
4093 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4095 if (conn->mode == HCI_CM_ACTIVE)
4096 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4098 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4101 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4102 hci_sco_setup(conn, ev->status);
4105 hci_dev_unlock(hdev);
4108 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4110 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4111 struct hci_conn *conn;
4113 BT_DBG("%s", hdev->name);
4117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4121 if (conn->state == BT_CONNECTED) {
4122 hci_conn_hold(conn);
4123 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4124 hci_conn_drop(conn);
4127 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4128 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4129 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4130 sizeof(ev->bdaddr), &ev->bdaddr);
4131 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4134 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4139 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4143 hci_dev_unlock(hdev);
4146 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4148 if (key_type == HCI_LK_CHANGED_COMBINATION)
4151 conn->pin_length = pin_len;
4152 conn->key_type = key_type;
4155 case HCI_LK_LOCAL_UNIT:
4156 case HCI_LK_REMOTE_UNIT:
4157 case HCI_LK_DEBUG_COMBINATION:
4159 case HCI_LK_COMBINATION:
4161 conn->pending_sec_level = BT_SECURITY_HIGH;
4163 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4165 case HCI_LK_UNAUTH_COMBINATION_P192:
4166 case HCI_LK_UNAUTH_COMBINATION_P256:
4167 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4169 case HCI_LK_AUTH_COMBINATION_P192:
4170 conn->pending_sec_level = BT_SECURITY_HIGH;
4172 case HCI_LK_AUTH_COMBINATION_P256:
4173 conn->pending_sec_level = BT_SECURITY_FIPS;
4178 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4180 struct hci_ev_link_key_req *ev = (void *) skb->data;
4181 struct hci_cp_link_key_reply cp;
4182 struct hci_conn *conn;
4183 struct link_key *key;
4185 BT_DBG("%s", hdev->name);
4187 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4192 key = hci_find_link_key(hdev, &ev->bdaddr);
4194 BT_DBG("%s link key not found for %pMR", hdev->name,
4199 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4202 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4204 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4206 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4207 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4208 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4209 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4213 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4214 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4215 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4216 BT_DBG("%s ignoring key unauthenticated for high security",
4221 conn_set_key(conn, key->type, key->pin_len);
4224 bacpy(&cp.bdaddr, &ev->bdaddr);
4225 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4227 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4229 hci_dev_unlock(hdev);
4234 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4235 hci_dev_unlock(hdev);
4238 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4240 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4241 struct hci_conn *conn;
4242 struct link_key *key;
4246 BT_DBG("%s", hdev->name);
4250 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4254 hci_conn_hold(conn);
4255 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4256 hci_conn_drop(conn);
4258 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4259 conn_set_key(conn, ev->key_type, conn->pin_length);
4261 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4264 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4265 ev->key_type, pin_len, &persistent);
4269 /* Update connection information since adding the key will have
4270 * fixed up the type in the case of changed combination keys.
4272 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4273 conn_set_key(conn, key->type, key->pin_len);
4275 mgmt_new_link_key(hdev, key, persistent);
4277 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4278 * is set. If it's not set simply remove the key from the kernel
4279 * list (we've still notified user space about it but with
4280 * store_hint being 0).
4282 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4283 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4284 list_del_rcu(&key->list);
4285 kfree_rcu(key, rcu);
4290 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4292 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4295 hci_dev_unlock(hdev);
4298 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4300 struct hci_ev_clock_offset *ev = (void *) skb->data;
4301 struct hci_conn *conn;
4303 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4307 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4308 if (conn && !ev->status) {
4309 struct inquiry_entry *ie;
4311 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4313 ie->data.clock_offset = ev->clock_offset;
4314 ie->timestamp = jiffies;
4318 hci_dev_unlock(hdev);
4321 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4323 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4324 struct hci_conn *conn;
4326 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4330 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4331 if (conn && !ev->status)
4332 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4334 hci_dev_unlock(hdev);
4337 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4339 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4340 struct inquiry_entry *ie;
4342 BT_DBG("%s", hdev->name);
4346 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4348 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4349 ie->timestamp = jiffies;
4352 hci_dev_unlock(hdev);
4355 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4356 struct sk_buff *skb)
4358 struct inquiry_data data;
4359 int num_rsp = *((__u8 *) skb->data);
4361 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4366 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4371 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4372 struct inquiry_info_with_rssi_and_pscan_mode *info;
4373 info = (void *) (skb->data + 1);
4375 if (skb->len < num_rsp * sizeof(*info) + 1)
4378 for (; num_rsp; num_rsp--, info++) {
4381 bacpy(&data.bdaddr, &info->bdaddr);
4382 data.pscan_rep_mode = info->pscan_rep_mode;
4383 data.pscan_period_mode = info->pscan_period_mode;
4384 data.pscan_mode = info->pscan_mode;
4385 memcpy(data.dev_class, info->dev_class, 3);
4386 data.clock_offset = info->clock_offset;
4387 data.rssi = info->rssi;
4388 data.ssp_mode = 0x00;
4390 flags = hci_inquiry_cache_update(hdev, &data, false);
4392 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4393 info->dev_class, info->rssi,
4394 flags, NULL, 0, NULL, 0);
4397 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4399 if (skb->len < num_rsp * sizeof(*info) + 1)
4402 for (; num_rsp; num_rsp--, info++) {
4405 bacpy(&data.bdaddr, &info->bdaddr);
4406 data.pscan_rep_mode = info->pscan_rep_mode;
4407 data.pscan_period_mode = info->pscan_period_mode;
4408 data.pscan_mode = 0x00;
4409 memcpy(data.dev_class, info->dev_class, 3);
4410 data.clock_offset = info->clock_offset;
4411 data.rssi = info->rssi;
4412 data.ssp_mode = 0x00;
4414 flags = hci_inquiry_cache_update(hdev, &data, false);
4416 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4417 info->dev_class, info->rssi,
4418 flags, NULL, 0, NULL, 0);
4423 hci_dev_unlock(hdev);
/* HCI Read Remote Extended Features Complete event.
 *
 * Stores the reported feature page on the connection, derives SSP/SC
 * state from host feature page 1, and drives the BT_CONFIG state
 * machine forward (remote name request, mgmt connected notification,
 * or completion of the connection setup).
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * !conn bail-out, several goto/brace lines) -- verify against the full
 * source before relying on the exact flow.
 */
4426 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4427 struct sk_buff *skb)
4429 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4430 struct hci_conn *conn;
4432 BT_DBG("%s", hdev->name);
4436 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Only cache feature pages the connection object has room for */
4440 if (ev->page < HCI_MAX_PAGES)
4441 memcpy(conn->features[ev->page], ev->features, 8);
/* Host feature page 1 tells us about the remote host's SSP/SC support */
4443 if (!ev->status && ev->page == 0x01) {
4444 struct inquiry_entry *ie;
4446 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4448 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4450 if (ev->features[0] & LMP_HOST_SSP) {
4451 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4453 /* It is mandatory by the Bluetooth specification that
4454 * Extended Inquiry Results are only used when Secure
4455 * Simple Pairing is enabled, but some devices violate
4458 * To make these devices work, the internal SSP
4459 * enabled flag needs to be cleared if the remote host
4460 * features do not indicate SSP support */
4461 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4464 if (ev->features[0] & LMP_HOST_SC)
4465 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* The remainder of the setup sequence only applies while configuring */
4468 if (conn->state != BT_CONFIG)
4471 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4472 struct hci_cp_remote_name_req cp;
4473 memset(&cp, 0, sizeof(cp));
4474 bacpy(&cp.bdaddr, &conn->dst);
/* 0x02 = R2 page scan repetition mode (conservative default) */
4475 cp.pscan_rep_mode = 0x02;
4476 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4477 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4478 mgmt_device_connected(hdev, conn, NULL, 0);
/* If no outgoing authentication is pending, the connection is up */
4480 if (!hci_outgoing_auth_needed(hdev, conn)) {
4481 conn->state = BT_CONNECTED;
4482 hci_connect_cfm(conn, ev->status);
4483 hci_conn_drop(conn);
4487 hci_dev_unlock(hdev);
/* HCI Synchronous Connection Complete event: finalize an SCO/eSCO
 * connection attempt.
 *
 * On success the connection is registered (debugfs + sysfs); for a set
 * of known negotiation failures an eSCO->SCO parameter fallback retry
 * is attempted; any other failure closes the connection.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * !conn bail-outs, break statements, goto labels) -- verify against
 * the full source before relying on the exact flow.
 */
4490 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4491 struct sk_buff *skb)
4493 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4494 struct hci_conn *conn;
4496 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4500 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4502 if (ev->link_type == ESCO_LINK)
4505 /* When the link type in the event indicates SCO connection
4506 * and lookup of the connection object fails, then check
4507 * if an eSCO connection object exists.
4509 * The core limits the synchronous connections to either
4510 * SCO or eSCO. The eSCO connection is preferred and tried
4511 * to be setup first and until successfully established,
4512 * the link type will be hinted as eSCO.
4514 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4519 switch (ev->status) {
4521 /* The synchronous connection complete event should only be
4522 * sent once per new connection. Receiving a successful
4523 * complete event when the connection status is already
4524 * BT_CONNECTED means that the device is misbehaving and sent
4525 * multiple complete event packets for the same new connection.
4527 * Registering the device more than once can corrupt kernel
4528 * memory, hence upon detecting this invalid event, we report
4529 * an error and ignore the packet.
4531 if (conn->state == BT_CONNECTED) {
4532 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4536 conn->handle = __le16_to_cpu(ev->handle);
4537 conn->state = BT_CONNECTED;
4538 conn->type = ev->link_type;
4540 hci_debugfs_create_conn(conn);
4541 hci_conn_add_sysfs(conn);
/* Negotiation failures below trigger an eSCO->SCO fallback retry */
4544 case 0x10: /* Connection Accept Timeout */
4545 case 0x0d: /* Connection Rejected due to Limited Resources */
4546 case 0x11: /* Unsupported Feature or Parameter Value */
4547 case 0x1c: /* SCO interval rejected */
4548 case 0x1a: /* Unsupported Remote Feature */
4549 case 0x1e: /* Invalid LMP Parameters */
4550 case 0x1f: /* Unspecified error */
4551 case 0x20: /* Unsupported LMP Parameter value */
/* Retry with relaxed packet types on the next setup attempt */
4553 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4554 (hdev->esco_type & EDR_ESCO_MASK);
4555 if (hci_setup_sync(conn, conn->link->handle))
/* Any other status: the synchronous link failed for good */
4561 conn->state = BT_CLOSED;
4565 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Let the driver configure its audio path for the negotiated codec */
4567 switch (ev->air_mode) {
4570 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4574 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4578 hci_connect_cfm(conn, ev->status);
4583 hci_dev_unlock(hdev);
4586 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4590 while (parsed < eir_len) {
4591 u8 field_len = eir[0];
4596 parsed += field_len + 1;
4597 eir += field_len + 1;
/* HCI Extended Inquiry Result event: one or more inquiry responses that
 * carry EIR data.  Each record updates the inquiry cache and is reported
 * to mgmt together with its significant EIR payload.
 *
 * Note that unlike the RSSI-only variant, this handler does validate
 * num_rsp and the packet length up front.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), return
 * statements, variable declarations for flags/name_known/eir_len) --
 * verify against the full source.
 */
4603 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4604 struct sk_buff *skb)
4606 struct inquiry_data data;
4607 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4608 int num_rsp = *((__u8 *) skb->data);
4611 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject empty reports and truncated packets before parsing */
4613 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4616 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4621 for (; num_rsp; num_rsp--, info++) {
4625 bacpy(&data.bdaddr, &info->bdaddr);
4626 data.pscan_rep_mode = info->pscan_rep_mode;
4627 data.pscan_period_mode = info->pscan_period_mode;
4628 data.pscan_mode = 0x00;
4629 memcpy(data.dev_class, info->dev_class, 3);
4630 data.clock_offset = info->clock_offset;
4631 data.rssi = info->rssi;
/* EIR results imply the remote uses Secure Simple Pairing */
4632 data.ssp_mode = 0x01;
/* If mgmt is active, a complete name in the EIR means no separate
 * name resolution is needed for this device */
4634 if (hci_dev_test_flag(hdev, HCI_MGMT))
4635 name_known = eir_get_data(info->data,
4637 EIR_NAME_COMPLETE, NULL);
4641 flags = hci_inquiry_cache_update(hdev, &data, name_known);
/* Strip trailing zero padding before forwarding the EIR payload */
4643 eir_len = eir_get_length(info->data, sizeof(info->data));
4645 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4646 info->dev_class, info->rssi,
4647 flags, info->data, eir_len, NULL, 0);
4650 hci_dev_unlock(hdev);
/* HCI Encryption Key Refresh Complete event.
 *
 * For LE links this completes a pending security/encryption request:
 * on failure the link is disconnected with an authentication-failure
 * reason; on success the pending security level is committed and the
 * appropriate confirm callbacks are invoked.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * !conn check, goto unlock paths, brace/else lines) -- verify against
 * the full source.
 */
4653 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4654 struct sk_buff *skb)
4656 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4657 struct hci_conn *conn;
4659 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4660 __le16_to_cpu(ev->handle));
4664 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4668 /* For BR/EDR the necessary steps are taken through the
4669 * auth_complete event.
4671 if (conn->type != LE_LINK)
/* Commit the security level that was pending on this refresh */
4675 conn->sec_level = conn->pending_sec_level;
4677 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed refresh on an established link is treated as an
 * authentication failure and the link is torn down */
4679 if (ev->status && conn->state == BT_CONNECTED) {
4680 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4681 hci_conn_drop(conn);
4685 if (conn->state == BT_CONFIG) {
4687 conn->state = BT_CONNECTED;
4689 hci_connect_cfm(conn, ev->status);
4690 hci_conn_drop(conn);
4692 hci_auth_cfm(conn, ev->status);
4694 hci_conn_hold(conn);
4695 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4696 hci_conn_drop(conn);
4700 hci_dev_unlock(hdev);
4703 static u8 hci_get_auth_req(struct hci_conn *conn)
4705 /* If remote requests no-bonding follow that lead */
4706 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4707 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4708 return conn->remote_auth | (conn->auth_type & 0x01);
4710 /* If both remote and local have enough IO capabilities, require
4713 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4714 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4715 return conn->remote_auth | 0x01;
4717 /* No MITM protection possible so ignore remote requirement */
4718 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB data present value for an IO Capability Reply.
 *
 * Checks whether usable out-of-band pairing data is stored for the
 * peer, taking Secure Connections and SC-Only mode into account (in
 * SC-Only mode the P-256 values must be non-zero to count).
 *
 * NOTE(review): the extract has dropped the return statements (the
 * !data bail-out and the 0x00/0x01/0x02 result values) -- verify the
 * exact return values against the full source.
 */
4721 static u8 bredr_oob_data_present(struct hci_conn *conn)
4723 struct hci_dev *hdev = conn->hdev;
4724 struct oob_data *data;
4726 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4730 if (bredr_sc_enabled(hdev)) {
4731 /* When Secure Connections is enabled, then just
4732 * return the present value stored with the OOB
4733 * data. The stored value contains the right present
4734 * information. However it can only be trusted when
4735 * not in Secure Connection Only mode.
4737 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4738 return data->present;
4740 /* When Secure Connections Only mode is enabled, then
4741 * the P-256 values are required. If they are not
4742 * available, then do not declare that OOB data is
4745 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4746 !memcmp(data->hash256, ZERO_KEY, 16))
4752 /* When Secure Connections is not enabled or actually
4753 * not supported by the hardware, then check that if
4754 * P-192 data values are present.
4756 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4757 !memcmp(data->hash192, ZERO_KEY, 16))
/* HCI IO Capability Request event: answer the controller with either an
 * IO Capability Reply (pairing allowed) or a negative reply.
 *
 * Pairing is allowed when the device is bondable, when we initiated the
 * authentication, or when the remote is not requesting bonding.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * !conn check, goto unlock, else/brace lines) -- verify against the
 * full source.
 */
4763 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4765 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4766 struct hci_conn *conn;
4768 BT_DBG("%s", hdev->name);
4772 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive for the duration of the pairing exchange */
4776 hci_conn_hold(conn);
4778 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4781 /* Allow pairing if we're pairable, the initiators of the
4782 * pairing or if the remote is not requesting bonding.
4784 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4785 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4786 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4787 struct hci_cp_io_capability_reply cp;
4789 bacpy(&cp.bdaddr, &ev->bdaddr);
4790 /* Change the IO capability from KeyboardDisplay
4791 * to DisplayYesNo as it is not supported by BT spec. */
4792 cp.capability = (conn->io_capability == 0x04) ?
4793 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4795 /* If we are initiators, there is no remote information yet */
4796 if (conn->remote_auth == 0xff) {
4797 /* Request MITM protection if our IO caps allow it
4798 * except for the no-bonding case.
4800 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4801 conn->auth_type != HCI_AT_NO_BONDING)
4802 conn->auth_type |= 0x01;
4804 conn->auth_type = hci_get_auth_req(conn);
4807 /* If we're not bondable, force one of the non-bondable
4808 * authentication requirement values.
4810 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4811 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4813 cp.authentication = conn->auth_type;
4814 cp.oob_data = bredr_oob_data_present(conn);
4816 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
/* Pairing not allowed: reject the request explicitly */
4819 struct hci_cp_io_capability_neg_reply cp;
4821 bacpy(&cp.bdaddr, &ev->bdaddr);
4822 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4824 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4829 hci_dev_unlock(hdev);
/* HCI IO Capability Response event: cache the remote device's IO
 * capability and authentication requirement on the connection for use
 * by the subsequent pairing steps.
 *
 * NOTE(review): the extract has dropped the hci_dev_lock() call and
 * the !conn bail-out -- verify against the full source.
 */
4832 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4834 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4835 struct hci_conn *conn;
4837 BT_DBG("%s", hdev->name);
4841 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4845 conn->remote_cap = ev->capability;
4846 conn->remote_auth = ev->authentication;
4849 hci_dev_unlock(hdev);
/* HCI User Confirmation Request event (numeric comparison pairing).
 *
 * Decides between rejecting (MITM required but remote cannot provide
 * it), auto-accepting (no MITM needed on either side, optionally after
 * a configured delay), or deferring to user space via mgmt with an
 * appropriate confirm_hint.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * !conn check, goto unlock/confirm labels, brace lines) -- verify
 * against the full source.
 */
4852 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4853 struct sk_buff *skb)
4855 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4856 int loc_mitm, rem_mitm, confirm_hint = 0;
4857 struct hci_conn *conn;
4859 BT_DBG("%s", hdev->name);
4863 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4866 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM protection bit */
4870 loc_mitm = (conn->auth_type & 0x01);
4871 rem_mitm = (conn->remote_auth & 0x01);
4873 /* If we require MITM but the remote device can't provide that
4874 * (it has NoInputNoOutput) then reject the confirmation
4875 * request. We check the security level here since it doesn't
4876 * necessarily match conn->auth_type.
4878 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4879 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4880 BT_DBG("Rejecting request: remote device can't provide MITM");
4881 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4882 sizeof(ev->bdaddr), &ev->bdaddr);
4886 /* If no side requires MITM protection; auto-accept */
4887 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4888 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4890 /* If we're not the initiators request authorization to
4891 * proceed from user space (mgmt_user_confirm with
4892 * confirm_hint set to 1). The exception is if neither
4893 * side had MITM or if the local IO capability is
4894 * NoInputNoOutput, in which case we do auto-accept
4896 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4897 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4898 (loc_mitm || rem_mitm)) {
4899 BT_DBG("Confirming auto-accept as acceptor");
4904 /* If there already exists link key in local host, leave the
4905 * decision to user space since the remote device could be
4906 * legitimate or malicious.
4908 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4909 bt_dev_dbg(hdev, "Local host already has link key");
4914 BT_DBG("Auto-accept of user confirmation with %ums delay",
4915 hdev->auto_accept_delay);
/* Either accept immediately or schedule the delayed accept work */
4917 if (hdev->auto_accept_delay > 0) {
4918 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4919 queue_delayed_work(conn->hdev->workqueue,
4920 &conn->auto_accept_work, delay);
4924 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4925 sizeof(ev->bdaddr), &ev->bdaddr);
/* Hand the decision to user space through mgmt */
4930 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4931 le32_to_cpu(ev->passkey), confirm_hint);
4934 hci_dev_unlock(hdev);
/* HCI User Passkey Request event: forward the passkey entry request to
 * user space via mgmt when the management interface is active.
 */
4937 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4938 struct sk_buff *skb)
4940 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4942 BT_DBG("%s", hdev->name);
4944 if (hci_dev_test_flag(hdev, HCI_MGMT))
4945 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* HCI User Passkey Notification event: remember the passkey the local
 * controller is displaying, reset the entered-digit counter, and notify
 * user space via mgmt.
 *
 * NOTE(review): the extract has dropped the !conn bail-out -- verify
 * against the full source.
 */
4948 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4949 struct sk_buff *skb)
4951 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4952 struct hci_conn *conn;
4954 BT_DBG("%s", hdev->name);
4956 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4960 conn->passkey_notify = __le32_to_cpu(ev->passkey);
/* Remote has not typed anything yet */
4961 conn->passkey_entered = 0;
4963 if (hci_dev_test_flag(hdev, HCI_MGMT))
4964 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4965 conn->dst_type, conn->passkey_notify,
4966 conn->passkey_entered);
/* HCI Keypress Notification event: track the remote user's passkey
 * typing progress (digits entered/erased/cleared) and report it to
 * user space via mgmt.
 *
 * NOTE(review): the extract has dropped lines (the !conn check, the
 * `switch (ev->type)` line itself, break/return statements) -- verify
 * against the full source.
 */
4969 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4971 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4972 struct hci_conn *conn;
4974 BT_DBG("%s", hdev->name);
4976 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4981 case HCI_KEYPRESS_STARTED:
4982 conn->passkey_entered = 0;
4985 case HCI_KEYPRESS_ENTERED:
4986 conn->passkey_entered++;
4989 case HCI_KEYPRESS_ERASED:
4990 conn->passkey_entered--;
4993 case HCI_KEYPRESS_CLEARED:
4994 conn->passkey_entered = 0;
4997 case HCI_KEYPRESS_COMPLETED:
5001 if (hci_dev_test_flag(hdev, HCI_MGMT))
5002 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5003 conn->dst_type, conn->passkey_notify,
5004 conn->passkey_entered);
/* HCI Simple Pairing Complete event: reset the cached remote auth
 * requirement, report auth failures to mgmt when we were not the
 * initiator, and release the reference taken at IO capability request
 * time.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * !conn check) -- verify against the full source.
 */
5007 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
5008 struct sk_buff *skb)
5010 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
5011 struct hci_conn *conn;
5013 BT_DBG("%s", hdev->name);
5017 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5021 /* Reset the authentication requirement to unknown */
5022 conn->remote_auth = 0xff;
5024 /* To avoid duplicate auth_failed events to user space we check
5025 * the HCI_CONN_AUTH_PEND flag which will be set if we
5026 * initiated the authentication. A traditional auth_complete
5027 * event gets always produced as initiator and is also mapped to
5028 * the mgmt_auth_failed event */
5029 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5030 mgmt_auth_failed(conn, ev->status);
/* Drop the hold taken in hci_io_capa_request_evt() */
5032 hci_conn_drop(conn);
5035 hci_dev_unlock(hdev);
/* HCI Remote Host Supported Features Notification event: store the
 * remote host feature page on the connection (if any) and update the
 * cached SSP mode in the inquiry cache entry.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * if (conn)/if (ie) guards) -- verify against the full source.
 */
5038 static void hci_remote_host_features_evt(struct hci_dev *hdev,
5039 struct sk_buff *skb)
5041 struct hci_ev_remote_host_features *ev = (void *) skb->data;
5042 struct inquiry_entry *ie;
5043 struct hci_conn *conn;
5045 BT_DBG("%s", hdev->name);
5049 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Host features are stored as feature page 1 on the connection */
5051 memcpy(conn->features[1], ev->features, 8);
5053 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5055 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5057 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event: reply with locally stored OOB
 * pairing data for the peer, or a negative reply if none is stored.
 *
 * With Secure Connections enabled the extended reply carrying both
 * P-192 and P-256 values is used; in SC-Only mode the P-192 fields are
 * zeroed out so only the P-256 values are trusted.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * if (!data)/else branches, goto unlock, sizeof(cp) arguments) --
 * verify against the full source.
 */
5060 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5061 struct sk_buff *skb)
5063 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5064 struct oob_data *data;
5066 BT_DBG("%s", hdev->name);
5070 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5073 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data for this peer: send a negative reply */
5075 struct hci_cp_remote_oob_data_neg_reply cp;
5077 bacpy(&cp.bdaddr, &ev->bdaddr);
5078 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5083 if (bredr_sc_enabled(hdev)) {
5084 struct hci_cp_remote_oob_ext_data_reply cp;
5086 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the legacy P-192 values must not be used */
5087 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5088 memset(cp.hash192, 0, sizeof(cp.hash192));
5089 memset(cp.rand192, 0, sizeof(cp.rand192));
5091 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5092 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5094 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5095 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5097 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy (non-SC) reply carrying only the P-192 values */
5100 struct hci_cp_remote_oob_data_reply cp;
5102 bacpy(&cp.bdaddr, &ev->bdaddr);
5103 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5104 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5106 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5111 hci_dev_unlock(hdev);
5114 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (CONFIG_BT_HS): kick off reading the final
 * local AMP association data for the physical link identified by the
 * event's physical handle.
 *
 * NOTE(review): the extract has dropped the !hcon bail-out -- verify
 * against the full source.
 */
5115 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5117 struct hci_ev_channel_selected *ev = (void *)skb->data;
5118 struct hci_conn *hcon;
5120 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5122 skb_pull(skb, sizeof(*ev));
5124 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5128 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: mark the AMP physical link as
 * connected, copy the peer address from the underlying BR/EDR link,
 * register the connection, and confirm the physical link to the AMP
 * manager.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * !hcon / status / !amp_mgr checks) -- verify against the full source.
 */
5131 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5132 struct sk_buff *skb)
5134 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5135 struct hci_conn *hcon, *bredr_hcon;
5137 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5142 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* The AMP link inherits its peer address from the BR/EDR link that
 * the AMP manager is attached to */
5154 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5156 hcon->state = BT_CONNECTED;
5157 bacpy(&hcon->dst, &bredr_hcon->dst);
5159 hci_conn_hold(hcon);
5160 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5161 hci_conn_drop(hcon);
5163 hci_debugfs_create_conn(hcon);
5164 hci_conn_add_sysfs(hcon);
5166 amp_physical_cfm(bredr_hcon, hcon);
5169 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create the hci_chan for the new
 * logical link and, if an L2CAP bredr channel is waiting on the AMP
 * manager, confirm the logical link to L2CAP.
 *
 * NOTE(review): the extract has dropped lines (hci_dev_lock(), the
 * !hcon / !hchan checks, hchan->amp assignment, unlock) -- verify
 * against the full source.
 */
5172 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5174 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5175 struct hci_conn *hcon;
5176 struct hci_chan *hchan;
5177 struct amp_mgr *mgr;
5179 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5180 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5183 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5187 /* Create AMP hchan */
5188 hchan = hci_chan_create(hcon);
5192 hchan->handle = le16_to_cpu(ev->handle);
5195 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5197 mgr = hcon->amp_mgr;
5198 if (mgr && mgr->bredr_chan) {
5199 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5201 l2cap_chan_lock(bredr_chan);
/* The AMP controller's block MTU becomes the L2CAP conn MTU */
5203 bredr_chan->conn->mtu = hdev->block_mtu;
5204 l2cap_logical_cfm(bredr_chan, hchan, 0);
5205 hci_conn_hold(hcon);
5207 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event: tear down the AMP
 * hci_chan that corresponds to the disconnected logical link handle.
 *
 * NOTE(review): the extract has dropped lines (the status check,
 * hci_dev_lock(), goto unlock) -- verify against the full source.
 */
5211 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5212 struct sk_buff *skb)
5214 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5215 struct hci_chan *hchan;
5217 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5218 le16_to_cpu(ev->handle), ev->status);
5225 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are destroyed through this path */
5226 if (!hchan || !hchan->amp)
5229 amp_destroy_logical_link(hchan, ev->reason);
5232 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: mark the AMP
 * physical link's connection object as closed.
 *
 * NOTE(review): the extract has dropped lines (the status check,
 * hci_dev_lock(), the if (hcon) guard) -- verify against the full
 * source.
 */
5235 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5236 struct sk_buff *skb)
5238 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5239 struct hci_conn *hcon;
5241 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5248 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5250 hcon->state = BT_CLOSED;
5254 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address pair on a new LE connection.
 *
 * The assignments depend on our role: as central (initiator) the peer
 * is the responder; as peripheral (responder) the peer is the
 * initiator and our own advertising address is the responder address.
 * A controller-provided local RPA, when set, takes precedence over
 * hdev->rpa.
 *
 * NOTE(review): the extract has dropped the role branch lines
 * (if (conn->role == HCI_ROLE_MASTER) { ... } else { ... }) and
 * several brace/else lines -- verify against the full source.
 */
5258 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5259 u8 bdaddr_type, bdaddr_t *local_rpa)
/* Central role: the peer (bdaddr) is the responder */
5262 conn->dst_type = bdaddr_type;
5263 conn->resp_addr_type = bdaddr_type;
5264 bacpy(&conn->resp_addr, bdaddr);
5266 /* Check if the controller has set a Local RPA then it must be
5267 * used instead or hdev->rpa.
5269 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5270 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5271 bacpy(&conn->init_addr, local_rpa);
5272 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5273 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5274 bacpy(&conn->init_addr, &conn->hdev->rpa);
5276 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5277 &conn->init_addr_type);
/* Peripheral role: our advertising address is the responder */
5280 conn->resp_addr_type = conn->hdev->adv_addr_type;
5281 /* Check if the controller has set a Local RPA then it must be
5282 * used instead or hdev->rpa.
5284 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5285 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5286 bacpy(&conn->resp_addr, local_rpa);
5287 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5288 /* In case of ext adv, resp_addr will be updated in
5289 * Adv Terminated event.
5291 if (!ext_adv_capable(conn->hdev))
5292 bacpy(&conn->resp_addr,
5293 &conn->hdev->random_addr);
5295 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5298 conn->init_addr_type = bdaddr_type;
5299 bacpy(&conn->init_addr, bdaddr);
5301 /* For incoming connections, set the default minimum
5302 * and maximum connection interval. They will be used
5303 * to check if the parameters are in range and if not
5304 * trigger the connection update procedure.
5306 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5307 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events.
 *
 * Looks up (or creates) the hci_conn, resolves the peer's identity
 * address via stored IRKs, normalizes controller-resolved address
 * types, registers the connection, optionally issues an LE Read Remote
 * Features command, and clears any pending connection parameters.
 *
 * NOTE(review): the extract has dropped many lines (hci_dev_lock(),
 * status/role checks, if (!conn)/else branches, goto unlock paths) --
 * verify the exact flow against the full source.
 */
5311 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5312 bdaddr_t *bdaddr, u8 bdaddr_type,
5313 bdaddr_t *local_rpa, u8 role, u16 handle,
5314 u16 interval, u16 latency,
5315 u16 supervision_timeout)
5317 struct hci_conn_params *params;
5318 struct hci_conn *conn;
5319 struct smp_irk *irk;
5324 /* All controllers implicitly stop advertising in the event of a
5325 * connection, so ensure that the state bit is cleared.
5327 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5329 conn = hci_lookup_le_connect(hdev);
/* No pending connect object: create one on the fly */
5331 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5333 bt_dev_err(hdev, "no memory for new connection");
5337 conn->dst_type = bdaddr_type;
5339 /* If we didn't have a hci_conn object previously
5340 * but we're in central role this must be something
5341 * initiated using an accept list. Since accept list based
5342 * connections are not "first class citizens" we don't
5343 * have full tracking of them. Therefore, we go ahead
5344 * with a "best effort" approach of determining the
5345 * initiator address based on the HCI_PRIVACY flag.
5348 conn->resp_addr_type = bdaddr_type;
5349 bacpy(&conn->resp_addr, bdaddr);
5350 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5351 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5352 bacpy(&conn->init_addr, &hdev->rpa);
5354 hci_copy_identity_address(hdev,
5356 &conn->init_addr_type);
/* The connect attempt completed: stop the connection timeout */
5360 cancel_delayed_work(&conn->le_conn_timeout);
5363 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5365 /* Lookup the identity address from the stored connection
5366 * address and address type.
5368 * When establishing connections to an identity address, the
5369 * connection procedure will store the resolvable random
5370 * address first. Now if it can be converted back into the
5371 * identity address, start using the identity address from
5374 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5376 bacpy(&conn->dst, &irk->bdaddr);
5377 conn->dst_type = irk->addr_type;
5380 /* When using controller based address resolution, then the new
5381 * address types 0x02 and 0x03 are used. These types need to be
5382 * converted back into either public address or random address type
5384 if (use_ll_privacy(hdev) &&
5385 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5386 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5387 switch (conn->dst_type) {
5388 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5389 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5391 case ADDR_LE_DEV_RANDOM_RESOLVED:
5392 conn->dst_type = ADDR_LE_DEV_RANDOM;
/* Non-zero event status means the connection attempt failed */
5398 hci_le_conn_failed(conn, status);
5402 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5403 addr_type = BDADDR_LE_PUBLIC;
5405 addr_type = BDADDR_LE_RANDOM;
5407 /* Drop the connection if the device is blocked */
5408 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5409 hci_conn_drop(conn);
5413 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5414 mgmt_device_connected(hdev, conn, NULL, 0);
5416 conn->sec_level = BT_SECURITY_LOW;
5417 conn->handle = handle;
5418 conn->state = BT_CONFIG;
5420 /* Store current advertising instance as connection advertising instance
5421 * when sotfware rotation is in use so it can be re-enabled when
5424 if (!ext_adv_capable(hdev))
5425 conn->adv_instance = hdev->cur_adv_instance;
5427 conn->le_conn_interval = interval;
5428 conn->le_conn_latency = latency;
5429 conn->le_supv_timeout = supervision_timeout;
5431 hci_debugfs_create_conn(conn);
5432 hci_conn_add_sysfs(conn);
5434 /* The remote features procedure is defined for central
5435 * role only. So only in case of an initiated connection
5436 * request the remote features.
5438 * If the local controller supports peripheral-initiated features
5439 * exchange, then requesting the remote features in peripheral
5440 * role is possible. Otherwise just transition into the
5441 * connected state without requesting the remote features.
5444 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5445 struct hci_cp_le_read_remote_features cp;
5447 cp.handle = __cpu_to_le16(conn->handle);
5449 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5452 hci_conn_hold(conn);
/* No feature exchange: the connection is immediately usable */
5454 conn->state = BT_CONNECTED;
5455 hci_connect_cfm(conn, status);
/* Clear the pending-connection entry that triggered this connect */
5458 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5461 list_del_init(&params->action);
5463 hci_conn_drop(params->conn);
5464 hci_conn_put(params->conn);
5465 params->conn = NULL;
5470 hci_update_background_scan(hdev);
5471 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event: thin wrapper that unpacks the
 * little-endian event fields and delegates to le_conn_complete_evt()
 * (no local RPA is available in the legacy event, hence NULL).
 */
5474 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5476 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5478 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5480 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5481 NULL, ev->role, le16_to_cpu(ev->handle),
5482 le16_to_cpu(ev->interval),
5483 le16_to_cpu(ev->latency),
5484 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Enhanced Connection Complete event: like the legacy event but
 * also carries the controller-generated local RPA, which is passed on
 * to le_conn_complete_evt().  When controller-based address resolution
 * is active it is disabled afterwards so the host can modify the
 * resolving list again.
 */
5487 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5488 struct sk_buff *skb)
5490 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5492 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5494 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5495 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5496 le16_to_cpu(ev->interval),
5497 le16_to_cpu(ev->latency),
5498 le16_to_cpu(ev->supervision_timeout));
5500 if (use_ll_privacy(hdev) &&
5501 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5502 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5503 hci_req_disable_address_resolution(hdev);
/* HCI LE Advertising Set Terminated event.
 *
 * On error the advertising instance is removed; on success (set
 * terminated because a connection was created) the instance handle is
 * stored on the new connection and, for random advertising addresses,
 * conn->resp_addr is fixed up from the set's (or the global) random
 * address.
 *
 * NOTE(review): the extract has dropped lines (the if (!adv)/status
 * branches, the !conn check, the adv NULL fallback for resp_addr) --
 * verify against the full source.
 */
5506 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5508 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5509 struct hci_conn *conn;
5510 struct adv_info *adv;
5512 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5514 adv = hci_find_adv_instance(hdev, ev->handle);
5520 /* Remove advertising as it has been terminated */
5521 hci_remove_adv_instance(hdev, ev->handle);
5522 mgmt_advertising_removed(NULL, hdev, ev->handle);
5528 adv->enabled = false;
5530 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5532 /* Store handle in the connection so the correct advertising
5533 * instance can be re-enabled when disconnected.
5535 conn->adv_instance = ev->handle;
/* Nothing to fix up unless we advertise with a random address and
 * resp_addr has not been set yet */
5537 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5538 bacmp(&conn->resp_addr, BDADDR_ANY))
5542 bacpy(&conn->resp_addr, &hdev->random_addr);
5547 bacpy(&conn->resp_addr, &adv->random_addr);
/* HCI LE Connection Update Complete event: record the negotiated
 * connection parameters on the hci_conn and report the outcome.
 *
 * NOTE(review): the mgmt_le_conn_update_failed()/mgmt_le_conn_updated()
 * calls are not part of the upstream handler -- presumably vendor
 * (CAF) additions; confirm their contract against this tree's mgmt
 * code.  The extract has also dropped lines (hci_dev_lock(), the
 * !conn check, return statements, status branches).
 */
5551 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5552 struct sk_buff *skb)
5554 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5555 struct hci_conn *conn;
5557 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5564 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Failure path: report the failed update to user space */
5568 hci_dev_unlock(hdev);
5569 mgmt_le_conn_update_failed(hdev, &conn->dst,
5570 conn->type, conn->dst_type, ev->status);
/* Success path: commit the new parameters */
5574 conn->le_conn_interval = le16_to_cpu(ev->interval);
5575 conn->le_conn_latency = le16_to_cpu(ev->latency);
5576 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5579 hci_dev_unlock(hdev);
5582 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
5583 conn->dst_type, conn->le_conn_interval,
5584 conn->le_conn_latency, conn->le_supv_timeout);
5588 /* This function requires the caller holds hdev->lock */
/* Check whether an incoming advertising report should trigger an LE
 * connection attempt, and initiate it if so.  Caller must hold
 * hdev->lock.
 *
 * Returns the new hci_conn on success, otherwise NULL (implied by the
 * dropped return paths).
 *
 * NOTE(review): the extract has dropped lines (return NULL statements,
 * the HCI_AUTO_CONN_EXPLICIT direct_rpa handling in the
 * hci_connect_le() call, break/default lines) -- verify against the
 * full source.
 */
5589 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5591 u8 addr_type, u8 adv_type,
5592 bdaddr_t *direct_rpa)
5594 struct hci_conn *conn;
5595 struct hci_conn_params *params;
5597 /* If the event is not connectable don't proceed further */
5598 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5601 /* Ignore if the device is blocked */
5602 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5605 /* Most controller will fail if we try to create new connections
5606 * while we have an existing one in peripheral role.
5608 if (hdev->conn_hash.le_num_peripheral > 0 &&
5609 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5610 !(hdev->le_states[3] & 0x10)))
5613 /* If we're not connectable only connect devices that we have in
5614 * our pend_le_conns list.
5616 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5621 if (!params->explicit_connect) {
5622 switch (params->auto_connect) {
5623 case HCI_AUTO_CONN_DIRECT:
5624 /* Only devices advertising with ADV_DIRECT_IND are
5625 * triggering a connection attempt. This is allowing
5626 * incoming connections from peripheral devices.
5628 if (adv_type != LE_ADV_DIRECT_IND)
5631 case HCI_AUTO_CONN_ALWAYS:
5632 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5633 * are triggering a connection attempt. This means
5634 * that incoming connections from peripheral device are
5635 * accepted and also outgoing connections to peripheral
5636 * devices are established when found.
/* All checks passed: attempt the LE connection as central */
5644 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5645 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5647 if (!IS_ERR(conn)) {
5648 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5649 * by higher layer that tried to connect, if no then
5650 * store the pointer since we don't really have any
5651 * other owner of the object besides the params that
5652 * triggered it. This way we can abort the connection if
5653 * the parameters get removed and keep the reference
5654 * count consistent once the connection is established.
5657 if (!params->explicit_connect)
5658 params->conn = hci_conn_get(conn);
5663 switch (PTR_ERR(conn)) {
5665 /* If hci_connect() returns -EBUSY it means there is already
5666 * an LE connection attempt going on. Since controllers don't
5667 * support more than one connection attempt at the time, we
5668 * don't consider this an error case.
5672 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single advertising report (legacy or extended, direct or
 * undirected): validate the PDU type and length, resolve RPAs to identity
 * addresses, kick off any pending LE connection for the advertiser and
 * forward the report to mgmt as device-found events, merging a scan
 * response with a previously cached advertising report when possible.
 *
 * NOTE(review): direct_addr is non-NULL only for LE Direct Advertising
 * Report events; data may be NULL (with len == 0) for direct reports.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;

	/* Only the advertising PDU types below are valid; anything else is
	 * reported (rate limited) and the report is dropped.
	 */
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);

	/* Legacy (non-extended) advertising data is capped at 31 bytes */
	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))

		/* Non-connectable PDU types are flagged as such to mgmt */
		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * is used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging with the scan response.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging with the scan response.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
/* HCI LE Advertising Report event (legacy scanning).
 *
 * Payload layout: a one-byte report count followed by num_reports
 * variable-length hci_ev_le_advertising_info entries, each trailed by a
 * single extra byte holding the RSSI for that report.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
	u8 num_reports = skb->data[0];	/* first byte: number of reports */
	void *ptr = &skb->data[1];	/* cursor over the report entries */

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		/* Legacy advertising data is limited to 31 bytes */
		if (ev->length <= HCI_MAX_AD_LENGTH) {
			/* RSSI is the single byte right after the AD data */
			rssi = ev->data[ev->length];
			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, rssi,
					   ev->data, ev->length, false);
			bt_dev_err(hdev, "Dropping invalid advertising data");

		/* Advance past this entry plus its trailing RSSI byte */
		ptr += sizeof(*ev) + ev->length + 1;

	hci_dev_unlock(hdev);
/* Map an extended advertising report event type (16-bit bit field) onto
 * the corresponding legacy advertising PDU type so that both report
 * flavours can share process_adv_report().
 *
 * Returns LE_ADV_INVALID for combinations that have no legacy equivalent.
 */
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
	/* Legacy PDUs carry the exact legacy type in the event type field */
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
	case LE_LEGACY_ADV_IND:
	case LE_LEGACY_ADV_DIRECT_IND:
		return LE_ADV_DIRECT_IND;
	case LE_LEGACY_ADV_SCAN_IND:
		return LE_ADV_SCAN_IND;
	case LE_LEGACY_NONCONN_IND:
		return LE_ADV_NONCONN_IND;
	case LE_LEGACY_SCAN_RSP_ADV:
	case LE_LEGACY_SCAN_RSP_ADV_SCAN:
		return LE_ADV_SCAN_RSP;

	/* True extended PDUs: derive the closest legacy type from the
	 * connectable/scannable/directed/scan-response property bits.
	 */
	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",

	return LE_ADV_INVALID;
/* HCI LE Extended Advertising Report event.
 *
 * Each entry carries a 16-bit event type which is first translated to a
 * legacy PDU type; reports that cannot be represented in legacy terms
 * (LE_ADV_INVALID) are skipped. Unlike the legacy report event there is
 * no trailing RSSI byte — RSSI is a field of the entry itself.
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
	u8 num_reports = skb->data[0];	/* first byte: number of reports */
	void *ptr = &skb->data[1];	/* cursor over the report entries */

	while (num_reports--) {
		struct hci_ev_le_ext_adv_report *ev = ptr;

		evt_type = __le16_to_cpu(ev->evt_type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			/* ext_adv flag is true only for non-legacy PDUs */
			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, ev->rssi,
					   ev->data, ev->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));

		ptr += sizeof(*ev) + ev->length;

	hci_dev_unlock(hdev);
/* HCI LE Read Remote Features Complete event.
 *
 * Stores the remote LE feature page on the connection and, if the
 * connection was still in BT_CONFIG, completes connection setup by
 * moving it to BT_CONNECTED and notifying upper layers.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
		/* Cache the remote LE features (page 0, 8 bytes) */
		memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 * successful.
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* HCI LE Long Term Key Request event.
 *
 * Looks up the stored LTK for the peer, validates EDiv/Rand against the
 * key type (SC keys require both to be zero, legacy keys must match the
 * stored values) and replies with the key. When no suitable key exists a
 * negative reply is sent instead. One-time STKs are consumed (removed
 * from the key list) after use.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)

	/* Copy the key and zero-pad the reply to the full key size */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);

	hci_dev_unlock(hdev);

	/* No usable key: reject the encryption request */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
/* Reject a remote LE connection parameter request for the given handle
 * with the supplied HCI error reason.
 */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* HCI LE Remote Connection Parameter Request event.
 *
 * Validates the requested parameters: unknown/not-connected handles and
 * out-of-range parameters are rejected with a negative reply. When we
 * are central, the accepted parameters are also stored on the matching
 * conn_params entry and reported to mgmt. Otherwise the request is
 * accepted verbatim.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Range-check the proposed parameters before accepting them */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;

		/* Remember the new parameters for future reconnections */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);

	/* Accept the request, echoing the parameters back (still LE order) */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* HCI LE Direct Advertising Report event.
 *
 * Entries are fixed size (no AD data), so the whole payload can be
 * bounds-checked up front and iterated with plain pointer arithmetic.
 * Each report carries a direct (initiator) address which
 * process_adv_report() matches against our own RPA.
 */
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
	u8 num_reports = skb->data[0];	/* first byte: number of reports */
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	/* Reject empty or truncated events before touching the entries */
	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,

	hci_dev_unlock(hdev);
/* HCI LE PHY Update Complete event.
 *
 * On success, records the negotiated TX/RX PHYs on the connection so
 * they can be reported to userspace later.
 */
static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

	hci_dev_unlock(hdev);
/* HCI LE Meta event dispatcher.
 *
 * Strips the one-byte meta header and routes the skb to the handler for
 * the contained LE subevent.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	/* Consume the meta header so handlers see only the subevent data */
	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
/* Check whether the (cloned) skb holds the Command Complete event that
 * finished the request for @opcode, advancing the skb past the event
 * headers so the caller can hand the remaining parameters to the
 * request's completion callback.
 *
 * Returns false when the event does not match or is malformed; a
 * Command Status terminated request carries no extra parameters and is
 * reported accordingly.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Requests waiting on a specific event match on that event alone */
	if (hdr->evt != event)

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
6305 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6306 struct sk_buff *skb)
6308 struct hci_ev_le_advertising_info *adv;
6309 struct hci_ev_le_direct_adv_info *direct_adv;
6310 struct hci_ev_le_ext_adv_report *ext_adv;
6311 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6312 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6316 /* If we are currently suspended and this is the first BT event seen,
6317 * save the wake reason associated with the event.
6319 if (!hdev->suspended || hdev->wake_reason)
6322 /* Default to remote wake. Values for wake_reason are documented in the
6323 * Bluez mgmt api docs.
6325 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6327 /* Once configured for remote wakeup, we should only wake up for
6328 * reconnections. It's useful to see which device is waking us up so
6329 * keep track of the bdaddr of the connection event that woke us up.
6331 if (event == HCI_EV_CONN_REQUEST) {
6332 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6333 hdev->wake_addr_type = BDADDR_BREDR;
6334 } else if (event == HCI_EV_CONN_COMPLETE) {
6335 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6336 hdev->wake_addr_type = BDADDR_BREDR;
6337 } else if (event == HCI_EV_LE_META) {
6338 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6339 u8 subevent = le_ev->subevent;
6340 u8 *ptr = &skb->data[sizeof(*le_ev)];
6341 u8 num_reports = *ptr;
6343 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6344 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6345 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6347 adv = (void *)(ptr + 1);
6348 direct_adv = (void *)(ptr + 1);
6349 ext_adv = (void *)(ptr + 1);
6352 case HCI_EV_LE_ADVERTISING_REPORT:
6353 bacpy(&hdev->wake_addr, &adv->bdaddr);
6354 hdev->wake_addr_type = adv->bdaddr_type;
6356 case HCI_EV_LE_DIRECT_ADV_REPORT:
6357 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6358 hdev->wake_addr_type = direct_adv->bdaddr_type;
6360 case HCI_EV_LE_EXT_ADV_REPORT:
6361 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6362 hdev->wake_addr_type = ext_adv->bdaddr_type;
6367 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6371 hci_dev_unlock(hdev);
6374 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6376 struct hci_event_hdr *hdr = (void *) skb->data;
6377 hci_req_complete_t req_complete = NULL;
6378 hci_req_complete_skb_t req_complete_skb = NULL;
6379 struct sk_buff *orig_skb = NULL;
6380 u8 status = 0, event = hdr->evt, req_evt = 0;
6381 u16 opcode = HCI_OP_NOP;
6384 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6388 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6389 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6390 opcode = __le16_to_cpu(cmd_hdr->opcode);
6391 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6396 /* If it looks like we might end up having to call
6397 * req_complete_skb, store a pristine copy of the skb since the
6398 * various handlers may modify the original one through
6399 * skb_pull() calls, etc.
6401 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6402 event == HCI_EV_CMD_COMPLETE)
6403 orig_skb = skb_clone(skb, GFP_KERNEL);
6405 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6407 /* Store wake reason if we're suspended */
6408 hci_store_wake_reason(hdev, event, skb);
6411 case HCI_EV_INQUIRY_COMPLETE:
6412 hci_inquiry_complete_evt(hdev, skb);
6415 case HCI_EV_INQUIRY_RESULT:
6416 hci_inquiry_result_evt(hdev, skb);
6419 case HCI_EV_CONN_COMPLETE:
6420 hci_conn_complete_evt(hdev, skb);
6423 case HCI_EV_CONN_REQUEST:
6424 hci_conn_request_evt(hdev, skb);
6427 case HCI_EV_DISCONN_COMPLETE:
6428 hci_disconn_complete_evt(hdev, skb);
6431 case HCI_EV_AUTH_COMPLETE:
6432 hci_auth_complete_evt(hdev, skb);
6435 case HCI_EV_REMOTE_NAME:
6436 hci_remote_name_evt(hdev, skb);
6439 case HCI_EV_ENCRYPT_CHANGE:
6440 hci_encrypt_change_evt(hdev, skb);
6443 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6444 hci_change_link_key_complete_evt(hdev, skb);
6447 case HCI_EV_REMOTE_FEATURES:
6448 hci_remote_features_evt(hdev, skb);
6451 case HCI_EV_CMD_COMPLETE:
6452 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6453 &req_complete, &req_complete_skb);
6456 case HCI_EV_CMD_STATUS:
6457 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6461 case HCI_EV_HARDWARE_ERROR:
6462 hci_hardware_error_evt(hdev, skb);
6465 case HCI_EV_ROLE_CHANGE:
6466 hci_role_change_evt(hdev, skb);
6469 case HCI_EV_NUM_COMP_PKTS:
6470 hci_num_comp_pkts_evt(hdev, skb);
6473 case HCI_EV_MODE_CHANGE:
6474 hci_mode_change_evt(hdev, skb);
6477 case HCI_EV_PIN_CODE_REQ:
6478 hci_pin_code_request_evt(hdev, skb);
6481 case HCI_EV_LINK_KEY_REQ:
6482 hci_link_key_request_evt(hdev, skb);
6485 case HCI_EV_LINK_KEY_NOTIFY:
6486 hci_link_key_notify_evt(hdev, skb);
6489 case HCI_EV_CLOCK_OFFSET:
6490 hci_clock_offset_evt(hdev, skb);
6493 case HCI_EV_PKT_TYPE_CHANGE:
6494 hci_pkt_type_change_evt(hdev, skb);
6497 case HCI_EV_PSCAN_REP_MODE:
6498 hci_pscan_rep_mode_evt(hdev, skb);
6501 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6502 hci_inquiry_result_with_rssi_evt(hdev, skb);
6505 case HCI_EV_REMOTE_EXT_FEATURES:
6506 hci_remote_ext_features_evt(hdev, skb);
6509 case HCI_EV_SYNC_CONN_COMPLETE:
6510 hci_sync_conn_complete_evt(hdev, skb);
6513 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6514 hci_extended_inquiry_result_evt(hdev, skb);
6517 case HCI_EV_KEY_REFRESH_COMPLETE:
6518 hci_key_refresh_complete_evt(hdev, skb);
6521 case HCI_EV_IO_CAPA_REQUEST:
6522 hci_io_capa_request_evt(hdev, skb);
6525 case HCI_EV_IO_CAPA_REPLY:
6526 hci_io_capa_reply_evt(hdev, skb);
6529 case HCI_EV_USER_CONFIRM_REQUEST:
6530 hci_user_confirm_request_evt(hdev, skb);
6533 case HCI_EV_USER_PASSKEY_REQUEST:
6534 hci_user_passkey_request_evt(hdev, skb);
6537 case HCI_EV_USER_PASSKEY_NOTIFY:
6538 hci_user_passkey_notify_evt(hdev, skb);
6541 case HCI_EV_KEYPRESS_NOTIFY:
6542 hci_keypress_notify_evt(hdev, skb);
6545 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6546 hci_simple_pair_complete_evt(hdev, skb);
6549 case HCI_EV_REMOTE_HOST_FEATURES:
6550 hci_remote_host_features_evt(hdev, skb);
6553 case HCI_EV_LE_META:
6554 hci_le_meta_evt(hdev, skb);
6557 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6558 hci_remote_oob_data_request_evt(hdev, skb);
6561 #if IS_ENABLED(CONFIG_BT_HS)
6562 case HCI_EV_CHANNEL_SELECTED:
6563 hci_chan_selected_evt(hdev, skb);
6566 case HCI_EV_PHY_LINK_COMPLETE:
6567 hci_phy_link_complete_evt(hdev, skb);
6570 case HCI_EV_LOGICAL_LINK_COMPLETE:
6571 hci_loglink_complete_evt(hdev, skb);
6574 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6575 hci_disconn_loglink_complete_evt(hdev, skb);
6578 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6579 hci_disconn_phylink_complete_evt(hdev, skb);
6583 case HCI_EV_NUM_COMP_BLOCKS:
6584 hci_num_comp_blocks_evt(hdev, skb);
6588 case HCI_EV_VENDOR_SPECIFIC:
6589 hci_vendor_specific_evt(hdev, skb);
6593 msft_vendor_evt(hdev, skb);
6598 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6603 req_complete(hdev, status, opcode);
6604 } else if (req_complete_skb) {
6605 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6606 kfree_skb(orig_skb);
6609 req_complete_skb(hdev, status, opcode, orig_skb);
6613 kfree_skb(orig_skb);
6615 hdev->stat.evt_rx++;