/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
/* All-zero 16-byte key constant (two 8-byte halves joined by continuation). */
40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Convert whole seconds to jiffies by way of msecs_to_jiffies(). */
43 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45 /* Handle HCI Event packets */
/* Command Complete handler for HCI Inquiry Cancel: clears HCI_INQUIRY,
 * wakes any bit-waiters, and moves discovery to STOPPED unless an LE
 * active scan is still in progress; finally kicks pending connections.
 * NOTE(review): this excerpt skips original lines (e.g. 63-70, 74-75,
 * 82-83) -- the early return on error status and the hci_dev_lock/unlock
 * pairing are presumably among them; confirm against the full file.
 */
47 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%2.2x", hdev->name, status);
54 /* It is possible that we receive Inquiry Complete event right
55 * before we receive Inquiry Cancel Command Complete event, in
56 * which case the latter event should have status of Command
57 * Disallowed (0x0c). This should not be treated as error, since
58 * we actually achieve what Inquiry Cancel wants to achieve,
59 * which is to end the last Inquiry session.
61 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
62 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
73 wake_up_bit(&hdev->flags, HCI_INQUIRY);
76 /* Set discovery state to stopped if we're not doing LE active
79 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
80 hdev->le_scan_type != LE_SCAN_ACTIVE)
81 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
84 hci_conn_check_pending(hdev);
/* Command Complete for Periodic Inquiry Mode: flag periodic inquiry active. */
87 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 __u8 status = *((__u8 *) skb->data);
91 BT_DBG("%s status 0x%2.2x", hdev->name, status);
96 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); /* only on success, presumably */
/* Command Complete for Exit Periodic Inquiry Mode: clear the flag and
 * retry any connection attempts that were held off by the inquiry.
 */
99 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 __u8 status = *((__u8 *) skb->data);
103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
108 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 hci_conn_check_pending(hdev);
/* Command Complete for Remote Name Request Cancel: debug trace only. */
113 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
116 BT_DBG("%s", hdev->name);
/* Command Complete for Role Discovery: record the reported role on the
 * connection matching the returned handle (lookup under hdev lock).
 */
119 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 struct hci_rp_role_discovery *rp = (void *) skb->data;
122 struct hci_conn *conn;
124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 conn->role = rp->role; /* guarded by a conn NULL-check in the full file */
135 hci_dev_unlock(hdev);
/* Command Complete for Read Link Policy: cache the policy bits on the
 * connection identified by the returned handle.
 */
138 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 struct hci_rp_read_link_policy *rp = (void *) skb->data;
141 struct hci_conn *conn;
143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = __le16_to_cpu(rp->policy);
154 hci_dev_unlock(hdev);
/* Command Complete for Write Link Policy: the response carries only the
 * handle, so the new policy value is recovered from the sent command
 * parameters (policy sits 2 bytes in, after the handle).
 */
157 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_write_link_policy *rp = (void *) skb->data;
160 struct hci_conn *conn;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
174 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
176 conn->link_policy = get_unaligned_le16(sent + 2);
178 hci_dev_unlock(hdev);
/* Command Complete for Read Default Link Policy: cache it on hdev. */
181 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
184 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
186 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
191 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for Write Default Link Policy: on success, mirror the
 * value from the sent command parameters into hdev->link_policy.
 */
194 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
197 __u8 status = *((__u8 *) skb->data);
200 BT_DBG("%s status 0x%2.2x", hdev->name, status);
205 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
209 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI Reset: drop the in-flight reset marker and
 * return all volatile controller state (flags, discovery, adv/scan-rsp
 * data, TX power caches, LE accept/resolv lists) to its defaults.
 */
212 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
214 __u8 status = *((__u8 *) skb->data);
216 BT_DBG("%s status 0x%2.2x", hdev->name, status);
218 clear_bit(HCI_RESET, &hdev->flags);
223 /* Reset all non-persistent flags */
224 hci_dev_clear_volatile_flags(hdev);
226 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
228 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
229 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
231 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
232 hdev->adv_data_len = 0;
234 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
235 hdev->scan_rsp_data_len = 0;
237 hdev->le_scan_type = LE_SCAN_PASSIVE;
239 hdev->ssp_debug_mode = 0;
241 hci_bdaddr_list_clear(&hdev->le_accept_list);
242 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for Read Stored Link Key: when the original request
 * asked for all keys (read_all == 0x01), cache the controller's reported
 * capacity and current count.
 * NOTE(review): rp->max_keys/rp->num_keys are assigned without le16_to_cpu
 * here; upstream converts these __le16 fields -- verify the field types in
 * this tree's struct hci_rp_read_stored_link_key.
 */
245 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
249 struct hci_cp_read_stored_link_key *sent;
251 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
253 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
257 if (!rp->status && sent->read_all == 0x01) {
258 hdev->stored_max_keys = rp->max_keys;
259 hdev->stored_num_keys = rp->num_keys;
/* Command Complete for Delete Stored Link Key: decrement the cached key
 * count by the number deleted, clamping at zero.
 * NOTE(review): rp->num_keys is used without le16_to_cpu -- same endianness
 * concern as in hci_cc_read_stored_link_key; confirm the struct field type.
 */
263 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
266 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
268 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
273 if (rp->num_keys <= hdev->stored_num_keys)
274 hdev->stored_num_keys -= rp->num_keys;
276 hdev->stored_num_keys = 0; /* else branch: more deleted than tracked */
/* Command Complete for Write Local Name: notify mgmt (when managed) and,
 * on success, copy the name from the sent command into hdev->dev_name.
 */
279 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
281 __u8 status = *((__u8 *) skb->data);
284 BT_DBG("%s status 0x%2.2x", hdev->name, status);
286 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
292 if (hci_dev_test_flag(hdev, HCI_MGMT))
293 mgmt_set_local_name_complete(hdev, sent, status)
295 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
297 hci_dev_unlock(hdev);
/* Command Complete for Read Local Name: during setup/config, adopt the
 * controller-reported name as hdev->dev_name.
 */
300 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
302 struct hci_rp_read_local_name *rp = (void *) skb->data;
304 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
309 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
310 hci_dev_test_flag(hdev, HCI_CONFIG))
311 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for Write Authentication Enable: mirror the sent
 * parameter into the HCI_AUTH device flag and notify mgmt if managed.
 */
314 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
316 __u8 status = *((__u8 *) skb->data);
319 BT_DBG("%s status 0x%2.2x", hdev->name, status);
321 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
328 __u8 param = *((__u8 *) sent);
330 if (param == AUTH_ENABLED)
331 set_bit(HCI_AUTH, &hdev->flags);
333 clear_bit(HCI_AUTH, &hdev->flags); /* else branch */
336 if (hci_dev_test_flag(hdev, HCI_MGMT))
337 mgmt_auth_enable_complete(hdev, status);
339 hci_dev_unlock(hdev);
/* Command Complete for Write Encryption Mode: mirror the sent parameter
 * into the HCI_ENCRYPT device flag.
 */
342 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
344 __u8 status = *((__u8 *) skb->data);
348 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
357 param = *((__u8 *) sent);
360 set_bit(HCI_ENCRYPT, &hdev->flags); /* param non-zero */
362 clear_bit(HCI_ENCRYPT, &hdev->flags); /* else branch */
/* Command Complete for Write Scan Enable: translate the sent scan bits
 * into the HCI_ISCAN/HCI_PSCAN device flags; a failure path also resets
 * the discoverable timeout.
 */
365 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
367 __u8 status = *((__u8 *) skb->data);
371 BT_DBG("%s status 0x%2.2x", hdev->name, status);
373 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
377 param = *((__u8 *) sent);
382 hdev->discov_timeout = 0; /* on error status, per the elided guard */
386 if (param & SCAN_INQUIRY)
387 set_bit(HCI_ISCAN, &hdev->flags);
389 clear_bit(HCI_ISCAN, &hdev->flags); /* else branch */
391 if (param & SCAN_PAGE)
392 set_bit(HCI_PSCAN, &hdev->flags);
394 clear_bit(HCI_PSCAN, &hdev->flags); /* else branch */
397 hci_dev_unlock(hdev);
/* Command Complete for Set Event Filter: track whether any filter is
 * configured via the HCI_EVENT_FILTER_CONFIGURED flag (cleared when the
 * sent filter type was "clear all").
 */
400 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
402 __u8 status = *((__u8 *)skb->data);
403 struct hci_cp_set_event_filter *cp;
406 BT_DBG("%s status 0x%2.2x", hdev->name, status);
411 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
415 cp = (struct hci_cp_set_event_filter *)sent;
417 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
418 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
420 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); /* else branch */
/* Command Complete for Read Class of Device: cache the 3-byte CoD. */
423 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
425 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
427 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
432 memcpy(hdev->dev_class, rp->dev_class, 3);
434 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
435 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for Write Class of Device: on success, mirror the sent
 * 3-byte CoD into hdev->dev_class; always notify mgmt when managed.
 */
438 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
440 __u8 status = *((__u8 *) skb->data);
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
452 memcpy(hdev->dev_class, sent, 3);
454 if (hci_dev_test_flag(hdev, HCI_MGMT))
455 mgmt_set_class_of_dev_complete(hdev, sent, status);
457 hci_dev_unlock(hdev);
/* Command Complete for Read Voice Setting: cache the value and notify the
 * driver via hdev->notify when it changed (early-return on no change is
 * among the elided lines).
 */
460 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
462 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
465 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
470 setting = __le16_to_cpu(rp->voice_setting);
472 if (hdev->voice_setting == setting)
475 hdev->voice_setting = setting;
477 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
480 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); /* guarded by hdev->notify */
/* Command Complete for Write Voice Setting: same bookkeeping as the read
 * variant, but the new value comes from the sent command parameters.
 */
483 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
486 __u8 status = *((__u8 *) skb->data);
490 BT_DBG("%s status 0x%2.2x", hdev->name, status);
495 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
499 setting = get_unaligned_le16(sent);
501 if (hdev->voice_setting == setting)
504 hdev->voice_setting = setting;
506 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
509 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); /* guarded by hdev->notify */
/* Command Complete for Read Number of Supported IAC: cache the count. */
512 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
515 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
522 hdev->num_iac = rp->num_iac;
524 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete for Write Simple Pairing Mode: keep the host feature
 * bit (LMP_HOST_SSP) and the HCI_SSP_ENABLED flag in sync with the mode
 * that was sent, and notify mgmt when managed.
 */
527 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
529 __u8 status = *((__u8 *) skb->data);
530 struct hci_cp_write_ssp_mode *sent;
532 BT_DBG("%s status 0x%2.2x", hdev->name, status);
534 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
542 hdev->features[1][0] |= LMP_HOST_SSP; /* sent->mode enabled */
544 hdev->features[1][0] &= ~LMP_HOST_SSP; /* else branch */
547 if (hci_dev_test_flag(hdev, HCI_MGMT))
548 mgmt_ssp_enable_complete(hdev, sent->mode, status);
551 hci_dev_set_flag(hdev, HCI_SSP_ENABLED); /* sent->mode enabled */
553 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); /* else branch */
556 hci_dev_unlock(hdev);
/* Command Complete for Write Secure Connections Host Support: keep the
 * LMP_HOST_SC feature bit and HCI_SC_ENABLED flag in sync with the sent
 * support value (flag update skipped when mgmt owns the device).
 */
559 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
561 u8 status = *((u8 *) skb->data);
562 struct hci_cp_write_sc_support *sent;
564 BT_DBG("%s status 0x%2.2x", hdev->name, status);
566 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
574 hdev->features[1][0] |= LMP_HOST_SC; /* sent->support enabled */
576 hdev->features[1][0] &= ~LMP_HOST_SC; /* else branch */
579 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
581 hci_dev_set_flag(hdev, HCI_SC_ENABLED); /* sent->support enabled */
583 hci_dev_clear_flag(hdev, HCI_SC_ENABLED); /* else branch */
586 hci_dev_unlock(hdev);
/* Command Complete for Read Local Version Information: during setup or
 * config, cache HCI/LMP versions, revision, subversion and manufacturer.
 */
589 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 struct hci_rp_read_local_version *rp = (void *) skb->data;
593 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
598 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
599 hci_dev_test_flag(hdev, HCI_CONFIG)) {
600 hdev->hci_ver = rp->hci_ver;
601 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
602 hdev->lmp_ver = rp->lmp_ver;
603 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
604 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for Read Local Supported Commands: during setup or
 * config, cache the supported-commands bitmap.
 */
608 static void hci_cc_read_local_commands(struct hci_dev *hdev,
611 struct hci_rp_read_local_commands *rp = (void *) skb->data;
613 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
618 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
619 hci_dev_test_flag(hdev, HCI_CONFIG))
620 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for Read Authenticated Payload Timeout: store the
 * timeout on the connection matching the returned handle.
 */
623 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
626 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
627 struct hci_conn *conn;
629 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
638 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
640 hci_dev_unlock(hdev);
/* Command Complete for Write Authenticated Payload Timeout: the response
 * only echoes the handle, so the timeout is recovered from the sent
 * command parameters (2 bytes past the handle).
 */
643 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
646 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
647 struct hci_conn *conn;
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
661 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
663 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
665 hci_dev_unlock(hdev);
/* Command Complete for Read Local Supported Features: cache the LMP
 * feature page 0 and derive the permitted ACL packet types and (e)SCO
 * air-mode types from the individual feature bits.
 */
668 static void hci_cc_read_local_features(struct hci_dev *hdev,
671 struct hci_rp_read_local_features *rp = (void *) skb->data;
673 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
678 memcpy(hdev->features, rp->features, 8);
680 /* Adjust default settings according to features
681 * supported by device. */
683 if (hdev->features[0][0] & LMP_3SLOT)
684 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
686 if (hdev->features[0][0] & LMP_5SLOT)
687 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
689 if (hdev->features[0][1] & LMP_HV2) {
690 hdev->pkt_type |= (HCI_HV2);
691 hdev->esco_type |= (ESCO_HV2);
694 if (hdev->features[0][1] & LMP_HV3) {
695 hdev->pkt_type |= (HCI_HV3);
696 hdev->esco_type |= (ESCO_HV3);
699 if (lmp_esco_capable(hdev))
700 hdev->esco_type |= (ESCO_EV3);
702 if (hdev->features[0][4] & LMP_EV4)
703 hdev->esco_type |= (ESCO_EV4);
705 if (hdev->features[0][4] & LMP_EV5)
706 hdev->esco_type |= (ESCO_EV5);
708 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
709 hdev->esco_type |= (ESCO_2EV3);
711 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
712 hdev->esco_type |= (ESCO_3EV3);
714 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
715 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for Read Local Extended Features: track the highest
 * feature page and cache the returned page (bounds-checked).
 */
718 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
721 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
723 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
728 if (hdev->max_page < rp->max_page)
729 hdev->max_page = rp->max_page;
731 if (rp->page < HCI_MAX_PAGES)
732 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for Read Flow Control Mode: cache the mode. */
735 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
738 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
740 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
745 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for Read Buffer Size: record ACL/SCO MTUs and packet
 * counts and initialize the outstanding-packet counters from them (a
 * quirk branch for broken controllers sits among the elided lines).
 */
748 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
750 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
752 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
757 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
758 hdev->sco_mtu = rp->sco_mtu;
759 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
760 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
762 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
767 hdev->acl_cnt = hdev->acl_pkts;
768 hdev->sco_cnt = hdev->sco_pkts;
770 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
771 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for Read BD_ADDR: adopt the controller address during
 * init, and remember the original setup-time address while in setup.
 */
774 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
776 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
778 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
783 if (test_bit(HCI_INIT, &hdev->flags))
784 bacpy(&hdev->bdaddr, &rp->bdaddr);
786 if (hci_dev_test_flag(hdev, HCI_SETUP))
787 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for Read Local Pairing Options: during setup/config,
 * cache the pairing options and maximum encryption key size.
 */
790 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
793 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
795 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
800 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
801 hci_dev_test_flag(hdev, HCI_CONFIG)) {
802 hdev->pairing_opts = rp->pairing_opts;
803 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete for Read Page Scan Activity: during init, cache the
 * page-scan interval and window.
 */
807 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
810 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
812 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
817 if (test_bit(HCI_INIT, &hdev->flags)) {
818 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
819 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for Write Page Scan Activity: on success, mirror the
 * sent interval/window into the hdev cache.
 */
823 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
826 u8 status = *((u8 *) skb->data);
827 struct hci_cp_write_page_scan_activity *sent;
829 BT_DBG("%s status 0x%2.2x", hdev->name, status);
834 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
838 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
839 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for Read Page Scan Type: during init, cache the type. */
842 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
845 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
847 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
852 if (test_bit(HCI_INIT, &hdev->flags))
853 hdev->page_scan_type = rp->type;
/* Command Complete for Write Page Scan Type: mirror the sent type byte. */
856 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
859 u8 status = *((u8 *) skb->data);
862 BT_DBG("%s status 0x%2.2x", hdev->name, status);
867 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
869 hdev->page_scan_type = *type; /* guarded by a type NULL-check upstream */
/* Command Complete for Read Data Block Size (block-based flow control):
 * cache block MTU/length/count and seed the available-block counter.
 */
872 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
875 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
877 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
882 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
883 hdev->block_len = __le16_to_cpu(rp->block_len);
884 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
886 hdev->block_cnt = hdev->num_blocks;
888 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
889 hdev->block_cnt, hdev->block_len);
/* Command Complete for Read Clock: which == 0x00 means the local clock
 * (stored on hdev); otherwise store clock and accuracy on the connection
 * matching the returned handle. Length-checks the response first.
 */
892 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
894 struct hci_rp_read_clock *rp = (void *) skb->data;
895 struct hci_cp_read_clock *cp;
896 struct hci_conn *conn;
898 BT_DBG("%s", hdev->name);
900 if (skb->len < sizeof(*rp))
908 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
912 if (cp->which == 0x00) {
913 hdev->clock = le32_to_cpu(rp->clock);
917 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
919 conn->clock = le32_to_cpu(rp->clock);
920 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
924 hci_dev_unlock(hdev);
/* Command Complete for Read Local AMP Info: cache the AMP controller's
 * capabilities (bandwidth, latency, PDU size, flush timeouts, etc.).
 */
927 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
930 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
932 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
937 hdev->amp_status = rp->amp_status;
938 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
939 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
940 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
941 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
942 hdev->amp_type = rp->amp_type;
943 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
944 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
945 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
946 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete for Read Inquiry Response TX Power: cache the level. */
949 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
952 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
954 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
959 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for Read Default Erroneous Data Reporting: cache it. */
962 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
965 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
967 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
972 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete for Write Default Erroneous Data Reporting: mirror the
 * sent setting into the hdev cache.
 */
975 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
978 __u8 status = *((__u8 *)skb->data);
979 struct hci_cp_write_def_err_data_reporting *cp;
981 BT_DBG("%s status 0x%2.2x", hdev->name, status);
986 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
990 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete for PIN Code Request Reply: notify mgmt when managed
 * and, on success, remember the PIN length on the matching ACL connection
 * so the resulting link key can be classified later.
 */
993 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
995 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
996 struct hci_cp_pin_code_reply *cp;
997 struct hci_conn *conn;
999 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1003 if (hci_dev_test_flag(hdev, HCI_MGMT))
1004 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1009 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1015 conn->pin_length = cp->pin_len;
1018 hci_dev_unlock(hdev);
/* Command Complete for PIN Code Request Negative Reply: forward the
 * outcome to mgmt when the device is managed.
 */
1021 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1023 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1025 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1029 if (hci_dev_test_flag(hdev, HCI_MGMT))
1030 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1033 hci_dev_unlock(hdev);
/* Command Complete for LE Read Buffer Size: cache LE ACL MTU and packet
 * count and seed the available-packet counter.
 */
1036 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1037 struct sk_buff *skb)
1039 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1041 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1047 hdev->le_pkts = rp->le_max_pkt;
1049 hdev->le_cnt = hdev->le_pkts;
1051 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for LE Read Local Supported Features: cache the 8-byte
 * LE feature mask.
 */
1054 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1055 struct sk_buff *skb)
1057 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1059 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1064 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for LE Read Advertising TX Power: cache the level. */
1067 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1070 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1072 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1077 hdev->adv_tx_power = rp->tx_power;
/* Command Complete for User Confirmation Request Reply: report the result
 * to mgmt when the device is managed.
 */
1080 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1082 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1084 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1088 if (hci_dev_test_flag(hdev, HCI_MGMT))
1089 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1092 hci_dev_unlock(hdev);
/* Command Complete for User Confirmation Request Negative Reply: report
 * the result to mgmt when the device is managed.
 */
1095 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1096 struct sk_buff *skb)
1098 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1100 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1104 if (hci_dev_test_flag(hdev, HCI_MGMT))
1105 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1106 ACL_LINK, 0, rp->status);
1108 hci_dev_unlock(hdev);
/* Command Complete for User Passkey Request Reply: report the result to
 * mgmt when the device is managed.
 */
1111 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1113 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1119 if (hci_dev_test_flag(hdev, HCI_MGMT))
1120 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1123 hci_dev_unlock(hdev);
/* Command Complete for User Passkey Request Negative Reply: report the
 * result to mgmt when the device is managed.
 */
1126 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1127 struct sk_buff *skb)
1129 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1131 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1135 if (hci_dev_test_flag(hdev, HCI_MGMT))
1136 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1137 ACL_LINK, 0, rp->status);
1139 hci_dev_unlock(hdev);
/* Command Complete for Read Local OOB Data: debug trace only here. */
1142 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1143 struct sk_buff *skb)
1145 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1147 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for Read Local OOB Extended Data: debug trace only. */
1150 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1151 struct sk_buff *skb)
1153 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1155 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for LE Set Random Address: record the new random
 * address; when it equals the current RPA, clear the expired flag and
 * re-arm the RPA rotation timer.
 */
1158 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1160 __u8 status = *((__u8 *) skb->data);
1163 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1168 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1174 bacpy(&hdev->random_addr, sent);
1176 if (!bacmp(&hdev->rpa, sent)) {
1177 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1178 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1179 secs_to_jiffies(hdev->rpa_timeout));
1182 hci_dev_unlock(hdev);
/* Command Complete for LE Set Default PHY: mirror the sent TX/RX PHY
 * preferences into the hdev cache.
 */
1185 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1187 __u8 status = *((__u8 *) skb->data);
1188 struct hci_cp_le_set_default_phy *cp;
1190 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1195 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1201 hdev->le_tx_def_phys = cp->tx_phys;
1202 hdev->le_rx_def_phys = cp->rx_phys;
1204 hci_dev_unlock(hdev);
/* Command Complete for LE Set Advertising Set Random Address: store the
 * per-instance random address; instance 0x00 is skipped because it uses
 * the legacy LE Set Random Address path. If the address is the current
 * RPA, un-expire it and re-arm that instance's RPA timer.
 */
1207 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1208 struct sk_buff *skb)
1210 __u8 status = *((__u8 *) skb->data);
1211 struct hci_cp_le_set_adv_set_rand_addr *cp;
1212 struct adv_info *adv;
1217 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1218 /* Update only in case the adv instance since handle 0x00 shall be using
1219 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1220 * non-extended adverting.
1222 if (!cp || !cp->handle)
1227 adv = hci_find_adv_instance(hdev, cp->handle);
1229 bacpy(&adv->random_addr, &cp->bdaddr);
1230 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1231 adv->rpa_expired = false;
1232 queue_delayed_work(hdev->workqueue,
1233 &adv->rpa_expired_cb,
1234 secs_to_jiffies(hdev->rpa_timeout));
1238 hci_dev_unlock(hdev);
/* Command Complete for LE Read Transmit Power: cache the supported
 * minimum and maximum LE TX power levels.
 */
1241 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1242 struct sk_buff *skb)
1244 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1246 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1251 hdev->min_le_tx_power = rp->min_le_tx_power;
1252 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete for LE Set Advertising Enable: toggle the HCI_LE_ADV
 * flag per the sent value; when enabling while a peripheral-role LE
 * connection attempt is pending, arm its connection timeout.
 */
1255 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1257 __u8 *sent, status = *((__u8 *) skb->data);
1259 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1264 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1270 /* If we're doing connection initiation as peripheral. Set a
1271 * timeout in case something goes wrong.
1274 struct hci_conn *conn;
1276 hci_dev_set_flag(hdev, HCI_LE_ADV);
1278 conn = hci_lookup_le_connect(hdev);
1280 queue_delayed_work(hdev->workqueue,
1281 &conn->le_conn_timeout,
1282 conn->conn_timeout);
1284 hci_dev_clear_flag(hdev, HCI_LE_ADV); /* disable branch */
1287 hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Advertising Enable: update the
 * per-instance enabled state and the global HCI_LE_ADV flag. On enable,
 * a pending peripheral LE connection gets its timeout armed; on disable,
 * either one instance is disabled (keeping HCI_LE_ADV if others remain
 * enabled) or all instances are marked disabled.
 */
1290 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1291 struct sk_buff *skb)
1293 struct hci_cp_le_set_ext_adv_enable *cp;
1294 struct hci_cp_ext_adv_set *set;
1295 __u8 status = *((__u8 *) skb->data);
1296 struct adv_info *adv = NULL, *n;
1298 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1303 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1307 set = (void *)cp->data;
1311 if (cp->num_of_sets)
1312 adv = hci_find_adv_instance(hdev, set->handle);
1315 struct hci_conn *conn;
1317 hci_dev_set_flag(hdev, HCI_LE_ADV);
1320 adv->enabled = true; /* guarded by an adv NULL-check upstream */
1322 conn = hci_lookup_le_connect(hdev);
1324 queue_delayed_work(hdev->workqueue,
1325 &conn->le_conn_timeout,
1326 conn->conn_timeout);
1328 if (cp->num_of_sets) { /* disable path: specific instance(s) */
1330 adv->enabled = false;
1332 /* If just one instance was disabled check if there are
1333 * any other instance enabled before clearing HCI_LE_ADV
1335 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1341 /* All instances shall be considered disabled */
1342 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1344 adv->enabled = false;
1347 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1351 hci_dev_unlock(hdev);
/* Command Complete for LE Set Scan Parameters: remember the scan type
 * (active/passive) that was requested.
 */
1354 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1356 struct hci_cp_le_set_scan_param *cp;
1357 __u8 status = *((__u8 *) skb->data);
1359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1370 hdev->le_scan_type = cp->type;
1372 hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Scan Parameters: the scan type is
 * taken from the first per-PHY parameter block in the sent command.
 */
1375 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1376 struct sk_buff *skb)
1378 struct hci_cp_le_set_ext_scan_params *cp;
1379 __u8 status = *((__u8 *) skb->data);
1380 struct hci_cp_le_scan_phy_params *phy_param;
1382 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1387 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1391 phy_param = (void *)cp->data;
1395 hdev->le_scan_type = phy_param->type;
1397 hci_dev_unlock(hdev);
/* True when a deferred advertising report is buffered in the discovery
 * state (last_adv_addr differs from BDADDR_ANY).
 */
1400 static bool has_pending_adv_report(struct hci_dev *hdev)
1402 struct discovery_state *d = &hdev->discovery;
1404 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report: reset the stored address to
 * BDADDR_ANY and zero the cached data length.
 */
1407 static void clear_pending_adv_report(struct hci_dev *hdev)
1409 struct discovery_state *d = &hdev->discovery;
1411 bacpy(&d->last_adv_addr, BDADDR_ANY);
1412 d->last_adv_data_len = 0;
/* Buffer one advertising report (address, type, RSSI, flags, AD payload)
 * in the discovery state so it can be merged with a following scan
 * response; oversized payloads are rejected via the length guard.
 */
1415 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1416 u8 bdaddr_type, s8 rssi, u32 flags,
1419 struct discovery_state *d = &hdev->discovery;
1421 if (len > HCI_MAX_AD_LENGTH)
1424 bacpy(&d->last_adv_addr, bdaddr);
1425 d->last_adv_addr_type = bdaddr_type;
1426 d->last_adv_rssi = rssi;
1427 d->last_adv_flags = flags;
1428 memcpy(d->last_adv_data, data, len);
1429 d->last_adv_data_len = len;
/* Shared completion for (extended) LE scan enable/disable. On enable,
 * set HCI_LE_SCAN and discard stale buffered reports for active scans.
 * On disable, flush any pending report to mgmt, cancel the auto-disable
 * timer, clear HCI_LE_SCAN, and either finish discovery (if scanning was
 * interrupted by a connect) or re-enable advertising. Note the Tizen
 * fork diverges here: it calls hci_le_discovery_set_state() instead of
 * hci_discovery_set_state() (the #ifndef TIZEN_BT below).
 */
1432 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1437 case LE_SCAN_ENABLE:
1438 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1439 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1440 clear_pending_adv_report(hdev);
1443 case LE_SCAN_DISABLE:
1444 /* We do this here instead of when setting DISCOVERY_STOPPED
1445 * since the latter would potentially require waiting for
1446 * inquiry to stop too.
1448 if (has_pending_adv_report(hdev)) {
1449 struct discovery_state *d = &hdev->discovery;
1451 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1452 d->last_adv_addr_type, NULL,
1453 d->last_adv_rssi, d->last_adv_flags,
1455 d->last_adv_data_len, NULL, 0);
1458 /* Cancel this timer so that we don't try to disable scanning
1459 * when it's already disabled.
1461 cancel_delayed_work(&hdev->le_scan_disable);
1463 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1465 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1466 * interrupted scanning due to a connect request. Mark
1467 * therefore discovery as stopped. If this was not
1468 * because of a connect request advertising might have
1469 * been disabled because of active scanning, so
1470 * re-enable it again if necessary.
1472 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1473 #ifndef TIZEN_BT /* The below line is kernel bug. */
1474 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1476 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1478 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1479 hdev->discovery.state == DISCOVERY_FINDING)
1480 hci_req_reenable_advertising(hdev);
1485 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1490 hci_dev_unlock(hdev);
/* Command Complete for LE Set Scan Enable: delegate to the shared
 * le_set_scan_enable_complete() with the value that was sent.
 */
1493 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1494 struct sk_buff *skb)
1496 struct hci_cp_le_set_scan_enable *cp;
1497 __u8 status = *((__u8 *) skb->data);
1499 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1504 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1508 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for LE Set Extended Scan Enable: same delegation as
 * the non-extended variant.
 */
1511 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1512 struct sk_buff *skb)
1514 struct hci_cp_le_set_ext_scan_enable *cp;
1515 __u8 status = *((__u8 *) skb->data);
1517 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1522 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1526 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete: cache the controller's number of supported
 * advertising sets in hdev. */
1529 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1530 struct sk_buff *skb)
1532 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1534 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1540 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete: cache the LE accept (filter) list size reported by
 * the controller. */
1543 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1544 struct sk_buff *skb)
1546 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1548 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1553 hdev->le_accept_list_size = rp->size;
/* Command Complete: the controller cleared its accept list, so drop the
 * host-side mirror of it as well.
 * NOTE(review): status guard and hci_dev_lock() elided in this extract. */
1556 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1557 struct sk_buff *skb)
1559 __u8 status = *((__u8 *) skb->data);
1561 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1567 hci_bdaddr_list_clear(&hdev->le_accept_list);
1568 hci_dev_unlock(hdev);
/* Command Complete: mirror a successful controller accept-list addition
 * into the host-side le_accept_list, using the bdaddr/type we sent.
 * NOTE(review): extract elides guards, braces and the trailing argument
 * of hci_bdaddr_list_add() — confirm against the full file. */
1571 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1572 struct sk_buff *skb)
1574 struct hci_cp_le_add_to_accept_list *sent;
1575 __u8 status = *((__u8 *) skb->data);
1577 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1582 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1587 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1589 hci_dev_unlock(hdev);
/* Command Complete: mirror a successful accept-list removal into the
 * host-side list (counterpart of the add handler above). */
1592 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1593 struct sk_buff *skb)
1595 struct hci_cp_le_del_from_accept_list *sent;
1596 __u8 status = *((__u8 *) skb->data);
1598 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1603 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1608 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1610 hci_dev_unlock(hdev);
/* Command Complete: store the 8-byte LE supported-states bitmask. */
1613 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1614 struct sk_buff *skb)
1616 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1618 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1623 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete: cache the suggested default LE data length
 * (tx octets / tx time), converting from little-endian wire order. */
1626 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1627 struct sk_buff *skb)
1629 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1636 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1637 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete: the write succeeded, so adopt the values we sent as
 * the new cached defaults (taken from the sent command, not the event). */
1640 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1641 struct sk_buff *skb)
1643 struct hci_cp_le_write_def_data_len *sent;
1644 __u8 status = *((__u8 *) skb->data);
1646 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1651 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1655 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1656 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete: mirror a successful controller resolving-list add
 * (bdaddr, type and peer IRK we sent) into the host-side le_resolv_list.
 * NOTE(review): extract elides guards and the local_irk argument of
 * hci_bdaddr_list_add_with_irk() — confirm against the full file. */
1659 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1660 struct sk_buff *skb)
1662 struct hci_cp_le_add_to_resolv_list *sent;
1663 __u8 status = *((__u8 *) skb->data);
1665 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1670 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1675 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1676 sent->bdaddr_type, sent->peer_irk,
1678 hci_dev_unlock(hdev);
/* Command Complete: mirror a resolving-list removal into the host-side
 * list (counterpart of the add handler above). */
1681 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1682 struct sk_buff *skb)
1684 struct hci_cp_le_del_from_resolv_list *sent;
1685 __u8 status = *((__u8 *) skb->data);
1687 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1692 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1697 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1699 hci_dev_unlock(hdev);
/* Command Complete: the controller cleared its resolving list; drop the
 * host-side mirror too. */
1702 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1703 struct sk_buff *skb)
1705 __u8 status = *((__u8 *) skb->data);
1707 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1713 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1714 hci_dev_unlock(hdev);
/* Command Complete: cache the controller's resolving-list capacity. */
1717 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1718 struct sk_buff *skb)
1720 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1722 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1727 hdev->le_resolv_list_size = rp->size;
/* Command Complete: track whether controller-based (link layer) RPA
 * resolution is active via the HCI_LL_RPA_RESOLUTION flag, based on the
 * single enable byte we sent.
 * NOTE(review): the branch condition on *sent is elided in this extract;
 * presumably *sent nonzero -> set flag, zero -> clear — confirm. */
1730 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1731 struct sk_buff *skb)
1733 __u8 *sent, status = *((__u8 *) skb->data);
1735 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1740 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1747 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1749 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1751 hci_dev_unlock(hdev);
/* Command Complete: cache the controller's maximum LE data-length
 * parameters (tx/rx octets and times) in host byte order. */
1754 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1755 struct sk_buff *skb)
1757 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1759 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1764 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1765 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1766 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1767 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete for Write LE Host Supported: sync the cached host
 * feature bits (page 1) and the HCI_LE_ENABLED/HCI_ADVERTISING flags
 * with the le/simul values we sent.
 * NOTE(review): the if/else structure around these assignments is elided
 * in this extract; presumably keyed on sent->le and sent->simul. */
1770 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1771 struct sk_buff *skb)
1773 struct hci_cp_write_le_host_supported *sent;
1774 __u8 status = *((__u8 *) skb->data);
1776 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1781 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1788 hdev->features[1][0] |= LMP_HOST_LE;
1789 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1791 hdev->features[1][0] &= ~LMP_HOST_LE;
1792 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1793 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1797 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1799 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1801 hci_dev_unlock(hdev);
/* Command Complete for LE Set Advertising Parameters: remember which
 * own-address type advertising was configured with. */
1804 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1806 struct hci_cp_le_set_adv_param *cp;
1807 u8 status = *((u8 *) skb->data);
1809 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1814 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1819 hdev->adv_addr_type = cp->own_address_type;
1820 hci_dev_unlock(hdev);
/* Extended-advertising variant: additionally records the selected TX
 * power returned by the controller, both on hdev (instance 0) and on
 * the matching adv_info instance, then refreshes the advertising data
 * now that TX power is known.
 * NOTE(review): the NULL check on adv_instance appears elided here. */
1823 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1825 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1826 struct hci_cp_le_set_ext_adv_params *cp;
1827 struct adv_info *adv_instance;
1829 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1834 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1839 hdev->adv_addr_type = cp->own_addr_type;
1841 /* Store in hdev for instance 0 */
1842 hdev->adv_tx_power = rp->tx_power;
1844 adv_instance = hci_find_adv_instance(hdev, cp->handle)
1846 adv_instance->tx_power = rp->tx_power;
1848 /* Update adv data as tx power is known now */
1849 hci_req_update_adv_data(hdev, cp->handle);
1851 hci_dev_unlock(hdev);
/* Tizen vendor extension: Command Complete for the enable-RSSI vendor
 * command; forwards the response to the mgmt layer. */
1855 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1856 struct sk_buff *skb)
1858 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1860 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1861 hdev->name, rp->status, rp->le_ext_opcode);
1863 mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Tizen vendor extension: Command Complete for the get-raw-RSSI vendor
 * command; forwards handle/RSSI to the mgmt layer. */
1866 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1867 struct sk_buff *skb)
1869 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1871 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1872 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1874 mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Command Complete for Read RSSI: cache the reported RSSI on the
 * connection identified by the returned handle. */
1878 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1880 struct hci_rp_read_rssi *rp = (void *) skb->data;
1881 struct hci_conn *conn;
1883 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1890 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1892 conn->rssi = rp->rssi;
1894 hci_dev_unlock(hdev);
/* Command Complete for Read TX Power: the sent command's type selects
 * whether the value is the current (0x00) or maximum (0x01) TX power;
 * the corresponding field on the connection is updated.
 * NOTE(review): the case labels of the switch are elided here. */
1897 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1899 struct hci_cp_read_tx_power *sent;
1900 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1901 struct hci_conn *conn;
1903 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1908 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1914 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1918 switch (sent->type) {
1920 conn->tx_power = rp->tx_power;
1923 conn->max_tx_power = rp->tx_power;
1928 hci_dev_unlock(hdev);
/* Command Complete: record the SSP debug mode we just wrote. */
1931 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1933 u8 status = *((u8 *) skb->data);
1936 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1941 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1943 hdev->ssp_debug_mode = *mode;
/* Command Status for Inquiry: on failure re-check pending connection
 * attempts; on success mark HCI_INQUIRY active.
 * NOTE(review): the status branch between these calls is elided. */
1946 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1948 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1951 hci_conn_check_pending(hdev);
1955 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for Create Connection: on error either close the
 * pending ACL connection (notifying via hci_connect_cfm) or, for
 * "Command Disallowed" (0x0c) with attempts left, fall back to
 * BT_CONNECT2 for a retry; on success add a conn object if none was
 * tracked yet. */
1958 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1960 struct hci_cp_create_conn *cp;
1961 struct hci_conn *conn;
1963 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1965 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1971 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1973 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1976 if (conn && conn->state == BT_CONNECT) {
1977 if (status != 0x0c || conn->attempt > 2) {
1978 conn->state = BT_CLOSED;
1979 hci_connect_cfm(conn, status);
1982 conn->state = BT_CONNECT2;
1986 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1989 bt_dev_err(hdev, "no memory for new connection");
1993 hci_dev_unlock(hdev);
/* Command Status for Add SCO: on error, find the ACL by the handle we
 * sent, close its attached SCO link and notify via hci_connect_cfm.
 * NOTE(review): the acl->link dereference establishing 'sco' is elided. */
1996 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1998 struct hci_cp_add_sco *cp;
1999 struct hci_conn *acl, *sco;
2002 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2007 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2011 handle = __le16_to_cpu(cp->handle);
2013 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2017 acl = hci_conn_hash_lookup_handle(hdev, handle);
2021 sco->state = BT_CLOSED;
2023 hci_connect_cfm(sco, status);
2028 hci_dev_unlock(hdev);
/* Command Status for Authentication Requested: on error, if the target
 * connection is still in BT_CONFIG, deliver the failure to upper layers
 * and drop the reference taken for the request. */
2031 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2033 struct hci_cp_auth_requested *cp;
2034 struct hci_conn *conn;
2036 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2041 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2047 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2049 if (conn->state == BT_CONFIG) {
2050 hci_connect_cfm(conn, status);
2051 hci_conn_drop(conn);
2055 hci_dev_unlock(hdev);
/* Command Status for Set Connection Encryption: identical error-path
 * handling to hci_cs_auth_requested() above. */
2058 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2060 struct hci_cp_set_conn_encrypt *cp;
2061 struct hci_conn *conn;
2063 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2068 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2074 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2076 if (conn->state == BT_CONFIG) {
2077 hci_connect_cfm(conn, status);
2078 hci_conn_drop(conn);
2082 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication:
 * only outgoing BT_CONFIG connections qualify, SDP-level security never
 * does, and legacy (non-SSP) links without MITM only authenticate for
 * MEDIUM/HIGH/FIPS security levels.
 * NOTE(review): the return statements of each branch are elided in this
 * extract — verify return values against the full file. */
2085 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2086 struct hci_conn *conn)
2088 if (conn->state != BT_CONFIG || !conn->out)
2091 if (conn->pending_sec_level == BT_SECURITY_SDP)
2094 /* Only request authentication for SSP connections or non-SSP
2095 * devices with sec_level MEDIUM or HIGH or if MITM protection
2098 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2099 conn->pending_sec_level != BT_SECURITY_FIPS &&
2100 conn->pending_sec_level != BT_SECURITY_HIGH &&
2101 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Send a Remote Name Request for an inquiry-cache entry, copying the
 * page-scan parameters learned during inquiry to speed up paging. */
2107 static int hci_resolve_name(struct hci_dev *hdev,
2108 struct inquiry_entry *e)
2110 struct hci_cp_remote_name_req cp;
2112 memset(&cp, 0, sizeof(cp));
2114 bacpy(&cp.bdaddr, &e->data.bdaddr);
2115 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2116 cp.pscan_mode = e->data.pscan_mode;
2117 cp.clock_offset = e->data.clock_offset;
2119 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next NAME_NEEDED entry in the
 * discovery resolve list; marks it NAME_PENDING on success. */
2122 static bool hci_resolve_next_name(struct hci_dev *hdev)
2124 struct discovery_state *discov = &hdev->discovery;
2125 struct inquiry_entry *e;
2127 if (list_empty(&discov->resolve))
2130 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2134 if (hci_resolve_name(hdev, e) == 0) {
2135 e->name_state = NAME_PENDING;
/* Handle the result of a remote-name request: propagate the name to
 * mgmt (device-connected or name-update), then continue or finish the
 * discovery name-resolution state machine.
 * NOTE(review): two near-duplicate "mark connected and notify mgmt"
 * branches are visible below (lines 2155-2159 vs 2163-2165) with
 * different mgmt_device_connected() signatures — this looks like a
 * vendor-patch remnant; confirm which variant the tree actually builds. */
2142 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2143 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2145 struct discovery_state *discov = &hdev->discovery;
2146 struct inquiry_entry *e;
2149 /* Update the mgmt connected state if necessary. Be careful with
2150 * conn objects that exist but are not (yet) connected however.
2151 * Only those in BT_CONFIG or BT_CONNECTED states can be
2152 * considered connected.
2155 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2156 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2157 mgmt_device_connected(hdev, conn, 0, name, name_len);
2159 mgmt_device_name_update(hdev, bdaddr, name, name_len);
2163 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2164 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2165 mgmt_device_connected(hdev, conn, name, name_len);
2168 if (discov->state == DISCOVERY_STOPPED)
2171 if (discov->state == DISCOVERY_STOPPING)
2172 goto discov_complete;
2174 if (discov->state != DISCOVERY_RESOLVING)
2177 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING)
2178 /* If the device was not found in a list of found devices names of which
2179 * are pending. there is no need to continue resolving a next name as it
2180 * will be done upon receiving another Remote Name Request Complete
2187 e->name_state = NAME_KNOWN;
2188 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2189 e->data.rssi, name, name_len);
2191 e->name_state = NAME_NOT_KNOWN;
2194 if (hci_resolve_next_name(hdev))
2198 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for Remote Name Request: on failure, finish any
 * pending name-resolution bookkeeping (mgmt only) and, if the related
 * ACL connection still needs authentication, initiate it now rather
 * than waiting for a name-complete event that will never come. */
2201 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2203 struct hci_cp_remote_name_req *cp;
2204 struct hci_conn *conn;
2206 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2208 /* If successful wait for the name req complete event before
2209 * checking for the need to do authentication */
2213 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2219 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2221 if (hci_dev_test_flag(hdev, HCI_MGMT))
2222 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2227 if (!hci_outgoing_auth_needed(hdev, conn))
2230 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2231 struct hci_cp_auth_requested auth_cp;
2233 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2235 auth_cp.handle = __cpu_to_le16(conn->handle);
2236 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2237 sizeof(auth_cp), &auth_cp);
2241 hci_dev_unlock(hdev);
/* Command Status for Read Remote Features: on error, if the connection
 * is still configuring, report the failure upward and drop the ref. */
2244 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2246 struct hci_cp_read_remote_features *cp;
2247 struct hci_conn *conn;
2249 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2254 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2260 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2262 if (conn->state == BT_CONFIG) {
2263 hci_connect_cfm(conn, status);
2264 hci_conn_drop(conn);
2268 hci_dev_unlock(hdev);
/* Command Status for Read Remote Extended Features: same error-path
 * handling as the non-extended variant above. */
2271 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2273 struct hci_cp_read_remote_ext_features *cp;
2274 struct hci_conn *conn;
2276 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2281 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2287 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2289 if (conn->state == BT_CONFIG) {
2290 hci_connect_cfm(conn, status);
2291 hci_conn_drop(conn);
2295 hci_dev_unlock(hdev);
/* Command Status for Setup Synchronous Connection: on error, close the
 * SCO/eSCO link attached to the ACL identified by the sent handle and
 * notify upper layers.
 * NOTE(review): the acl->link dereference establishing 'sco' is elided. */
2298 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2300 struct hci_cp_setup_sync_conn *cp;
2301 struct hci_conn *acl, *sco;
2304 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2309 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2313 handle = __le16_to_cpu(cp->handle);
2315 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2319 acl = hci_conn_hash_lookup_handle(hdev, handle);
2323 sco->state = BT_CLOSED;
2325 hci_connect_cfm(sco, status);
2330 hci_dev_unlock(hdev);
/* Command Status for Sniff Mode: on error, clear the pending mode-change
 * flag and complete any deferred SCO setup for this connection. */
2333 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2335 struct hci_cp_sniff_mode *cp;
2336 struct hci_conn *conn;
2338 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2343 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2349 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2351 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2353 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2354 hci_sco_setup(conn, status);
2357 hci_dev_unlock(hdev);
/* Command Status for Exit Sniff Mode: mirror of hci_cs_sniff_mode(). */
2360 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2362 struct hci_cp_exit_sniff_mode *cp;
2363 struct hci_conn *conn;
2365 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2370 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2376 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2378 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2380 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2381 hci_sco_setup(conn, status);
2384 hci_dev_unlock(hdev);
/* Command Status for Disconnect: on failure, tell mgmt the disconnect
 * failed and, for LE links, re-enable advertising that the (still-live)
 * connection may have suppressed. */
2387 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2389 struct hci_cp_disconnect *cp;
2390 struct hci_conn *conn;
2395 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2401 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2403 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2404 conn->dst_type, status);
2406 if (conn->type == LE_LINK) {
2407 hdev->cur_adv_instance = conn->adv_instance;
2408 hci_req_reenable_advertising(hdev);
2411 /* If the disconnection failed for any reason, the upper layer
2412 * does not retry to disconnect in current implementation.
2413 * Hence, we need to do some basic cleanup here and re-enable
2414 * advertising if necessary.
2419 hci_dev_unlock(hdev);
/* Common Command Status bookkeeping for LE Create Connection (legacy and
 * extended): locate the pending hci_conn, normalize the own-address type
 * when controller-based RPA resolution is in use, record the SMP
 * initiator/responder addresses, and arm the LE connection timeout when
 * not connecting via the accept list. */
2422 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2423 u8 peer_addr_type, u8 own_address_type,
2426 struct hci_conn *conn;
2428 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2433 /* When using controller based address resolution, then the new
2434 * address types 0x02 and 0x03 are used. These types need to be
2435 * converted back into either public address or random address type
2437 if (use_ll_privacy(hdev) &&
2438 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2439 switch (own_address_type) {
2440 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2441 own_address_type = ADDR_LE_DEV_PUBLIC;
2443 case ADDR_LE_DEV_RANDOM_RESOLVED:
2444 own_address_type = ADDR_LE_DEV_RANDOM;
2449 /* Store the initiator and responder address information which
2450 * is needed for SMP. These values will not change during the
2451 * lifetime of the connection.
2453 conn->init_addr_type = own_address_type;
2454 if (own_address_type == ADDR_LE_DEV_RANDOM)
2455 bacpy(&conn->init_addr, &hdev->random_addr);
2457 bacpy(&conn->init_addr, &hdev->bdaddr);
2459 conn->resp_addr_type = peer_addr_type;
2460 bacpy(&conn->resp_addr, peer_addr);
2462 /* We don't want the connection attempt to stick around
2463 * indefinitely since LE doesn't have a page timeout concept
2464 * like BR/EDR. Set a timer for any connection that doesn't use
2465 * the accept list for connecting.
2467 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2468 queue_delayed_work(conn->hdev->workqueue,
2469 &conn->le_conn_timeout,
2470 conn->conn_timeout);
/* Command Status for (legacy) LE Create Connection: on success, forward
 * the sent parameters to the shared cs_le_create_conn() helper. */
2473 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2475 struct hci_cp_le_create_conn *cp;
2477 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2479 /* All connection failure handling is taken care of by the
2480 * hci_le_conn_failed function which is triggered by the HCI
2481 * request completion callbacks used for connecting.
2486 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2492 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2493 cp->own_address_type, cp->filter_policy);
2495 hci_dev_unlock(hdev);
/* Command Status for LE Extended Create Connection: same flow as the
 * legacy handler, with the extended command's field names. */
2498 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2500 struct hci_cp_le_ext_create_conn *cp;
2502 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2504 /* All connection failure handling is taken care of by the
2505 * hci_le_conn_failed function which is triggered by the HCI
2506 * request completion callbacks used for connecting.
2511 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2517 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2518 cp->own_addr_type, cp->filter_policy);
2520 hci_dev_unlock(hdev);
/* Command Status for LE Read Remote Features: on error, if the
 * connection is still configuring, report failure and drop the ref. */
2523 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2525 struct hci_cp_le_read_remote_features *cp;
2526 struct hci_conn *conn;
2528 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2533 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2539 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2541 if (conn->state == BT_CONFIG) {
2542 hci_connect_cfm(conn, status);
2543 hci_conn_drop(conn);
2547 hci_dev_unlock(hdev);
/* Command Status for LE Start Encryption: on error, a still-connected
 * link cannot stay half-encrypted, so disconnect it with an
 * authentication-failure reason. */
2550 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2552 struct hci_cp_le_start_enc *cp;
2553 struct hci_conn *conn;
2555 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2562 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2566 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2570 if (conn->state != BT_CONNECTED)
2573 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2574 hci_conn_drop(conn);
2577 hci_dev_unlock(hdev);
/* Command Status for Switch Role: on error, clear the pending
 * role-switch flag so future switches are not blocked. */
2580 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2582 struct hci_cp_switch_role *cp;
2583 struct hci_conn *conn;
2585 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2590 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2596 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2598 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2600 hci_dev_unlock(hdev);
/* Inquiry Complete event: clear HCI_INQUIRY (waking any waiters), then
 * for mgmt-managed discovery either move on to name resolution of
 * NAME_NEEDED entries or mark discovery stopped — taking care not to
 * stop early when a simultaneous LE scan is still running. */
2603 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2605 __u8 status = *((__u8 *) skb->data);
2606 struct discovery_state *discov = &hdev->discovery;
2607 struct inquiry_entry *e;
2609 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2611 hci_conn_check_pending(hdev);
2613 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2616 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2617 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2619 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2624 if (discov->state != DISCOVERY_FINDING)
2627 if (list_empty(&discov->resolve)) {
2628 /* When BR/EDR inquiry is active and no LE scanning is in
2629 * progress, then change discovery state to indicate completion.
2631 * When running LE scanning and BR/EDR inquiry simultaneously
2632 * and the LE scan already finished, then change the discovery
2633 * state to indicate completion.
2635 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2636 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2637 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2641 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2642 if (e && hci_resolve_name(hdev, e) == 0) {
2643 e->name_state = NAME_PENDING;
2644 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2646 /* When BR/EDR inquiry is active and no LE scanning is in
2647 * progress, then change discovery state to indicate completion.
2649 * When running LE scanning and BR/EDR inquiry simultaneously
2650 * and the LE scan already finished, then change the discovery
2651 * state to indicate completion.
2653 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2654 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2655 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2659 hci_dev_unlock(hdev);
/* Inquiry Result event: validate the response count against the skb
 * length, skip periodic-inquiry results, then for each response update
 * the inquiry cache and notify mgmt of the found device (RSSI is not
 * available in this event format, hence HCI_RSSI_INVALID). */
2662 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2664 struct inquiry_data data;
2665 struct inquiry_info *info = (void *) (skb->data + 1);
2666 int num_rsp = *((__u8 *) skb->data);
2668 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2670 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2673 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2678 for (; num_rsp; num_rsp--, info++) {
2681 bacpy(&data.bdaddr, &info->bdaddr);
2682 data.pscan_rep_mode = info->pscan_rep_mode;
2683 data.pscan_period_mode = info->pscan_period_mode;
2684 data.pscan_mode = info->pscan_mode;
2685 memcpy(data.dev_class, info->dev_class, 3);
2686 data.clock_offset = info->clock_offset;
2687 data.rssi = HCI_RSSI_INVALID;
2688 data.ssp_mode = 0x00;
2690 flags = hci_inquiry_cache_update(hdev, &data, false);
2692 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2693 info->dev_class, HCI_RSSI_INVALID,
2694 flags, NULL, 0, NULL, 0);
2697 hci_dev_unlock(hdev);
/* Connection Complete event: match (or create) the hci_conn for the
 * reported link, then on success take it to BT_CONFIG/BT_CONNECTED,
 * create sysfs/debugfs entries, kick off remote-feature reads, and for
 * pre-2.0 controllers fix up the packet type; on failure close the
 * connection and inform mgmt. Finally notify upper layers and re-check
 * pending connection attempts. */
2700 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2702 struct hci_ev_conn_complete *ev = (void *) skb->data;
2703 struct hci_conn *conn;
2705 BT_DBG("%s", hdev->name);
2709 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2711 /* Connection may not exist if auto-connected. Check the bredr
2712 * allowlist to see if this device is allowed to auto connect.
2713 * If link is an ACL type, create a connection class
2716 * Auto-connect will only occur if the event filter is
2717 * programmed with a given address. Right now, event filter is
2718 * only used during suspend.
2720 if (ev->link_type == ACL_LINK &&
2721 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2724 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2727 bt_dev_err(hdev, "no memory for new conn");
2731 if (ev->link_type != SCO_LINK)
/* SCO fallback: the controller may report SCO for a link we
 * tracked as eSCO; re-look-up as ESCO and retype it. */
2734 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2739 conn->type = SCO_LINK;
2744 conn->handle = __le16_to_cpu(ev->handle);
2746 if (conn->type == ACL_LINK) {
2747 conn->state = BT_CONFIG;
2748 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) links with no stored key get the
 * longer pairing timeout; everything else the normal one. */
2750 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2751 !hci_find_link_key(hdev, &ev->bdaddr))
2752 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2754 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2756 conn->state = BT_CONNECTED;
2758 hci_debugfs_create_conn(conn);
2759 hci_conn_add_sysfs(conn);
2761 if (test_bit(HCI_AUTH, &hdev->flags))
2762 set_bit(HCI_CONN_AUTH, &conn->flags);
2764 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2765 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2767 /* Get remote features */
2768 if (conn->type == ACL_LINK) {
2769 struct hci_cp_read_remote_features cp;
2770 cp.handle = ev->handle;
2771 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2774 hci_req_update_scan(hdev);
2777 /* Set packet type for incoming connection */
2778 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2779 struct hci_cp_change_conn_ptype cp;
2780 cp.handle = ev->handle;
2781 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2782 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close and report to mgmt (ACL only). */
2786 conn->state = BT_CLOSED;
2787 if (conn->type == ACL_LINK)
2788 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2789 conn->dst_type, ev->status);
2792 if (conn->type == ACL_LINK)
2793 hci_sco_setup(conn, ev->status);
2796 hci_connect_cfm(conn, ev->status);
2798 } else if (ev->link_type == SCO_LINK) {
2799 switch (conn->setting & SCO_AIRMODE_MASK) {
2800 case SCO_AIRMODE_CVSD:
2802 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2806 hci_connect_cfm(conn, ev->status);
2810 hci_dev_unlock(hdev);
2812 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from bdaddr with reason
 * "unacceptable BD_ADDR" (HCI_ERROR_REJ_BAD_ADDR). */
2815 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2817 struct hci_cp_reject_conn_req cp;
2819 bacpy(&cp.bdaddr, bdaddr);
2820 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2821 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event: consult L2CAP/upper layers and the reject
 * and accept lists to decide whether to accept. Accepted requests get a
 * conn object (created if needed), the peer's device class cached, and
 * either an immediate Accept (ACL, or SCO without eSCO support /
 * deferral) or an Accept Synchronous with default eSCO parameters;
 * deferred SCO requests are left in BT_CONNECT2 for the profile. */
2824 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2826 struct hci_ev_conn_request *ev = (void *) skb->data;
2827 int mask = hdev->link_mode;
2828 struct inquiry_entry *ie;
2829 struct hci_conn *conn;
2832 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2835 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2838 if (!(mask & HCI_LM_ACCEPT)) {
2839 hci_reject_conn(hdev, &ev->bdaddr);
/* Hard reject list always wins. */
2845 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2847 hci_reject_conn(hdev, &ev->bdaddr);
2851 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2852 * connection. These features are only touched through mgmt so
2853 * only do the checks if HCI_MGMT is set.
2855 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2856 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2857 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2859 hci_reject_conn(hdev, &ev->bdaddr);
2863 /* Connection accepted */
2865 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2867 memcpy(ie->data.dev_class, ev->dev_class, 3);
2869 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2872 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2875 bt_dev_err(hdev, "no memory for new connection");
2880 memcpy(conn->dev_class, ev->dev_class, 3);
2882 hci_dev_unlock(hdev);
2884 if (ev->link_type == ACL_LINK ||
2885 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2886 struct hci_cp_accept_conn_req cp;
2887 conn->state = BT_CONNECT;
2889 bacpy(&cp.bdaddr, &ev->bdaddr);
2891 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2892 cp.role = 0x00; /* Become central */
2894 cp.role = 0x01; /* Remain peripheral */
2896 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2897 } else if (!(flags & HCI_PROTO_DEFER)) {
2898 struct hci_cp_accept_sync_conn_req cp;
2899 conn->state = BT_CONNECT;
2901 bacpy(&cp.bdaddr, &ev->bdaddr);
2902 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Default eSCO acceptance parameters: 8 kHz bandwidth both
 * ways, no latency/retransmission constraints. */
2904 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2905 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2906 cp.max_latency = cpu_to_le16(0xffff);
2907 cp.content_format = cpu_to_le16(hdev->voice_setting);
2908 cp.retrans_effort = 0xff;
2910 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2913 conn->state = BT_CONNECT2;
2914 hci_connect_cfm(conn, 0);
2919 hci_dev_unlock(hdev);
/* Map an HCI disconnect error code to the corresponding mgmt
 * disconnect reason; unrecognized codes become "unknown".
 * NOTE(review): the switch(err) line itself is elided in this extract. */
2922 static u8 hci_to_mgmt_reason(u8 err)
2925 case HCI_ERROR_CONNECTION_TIMEOUT:
2926 return MGMT_DEV_DISCONN_TIMEOUT;
2927 case HCI_ERROR_REMOTE_USER_TERM:
2928 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2929 case HCI_ERROR_REMOTE_POWER_OFF:
2930 return MGMT_DEV_DISCONN_REMOTE;
2931 case HCI_ERROR_LOCAL_HOST_TERM:
2932 return MGMT_DEV_DISCONN_LOCAL_HOST;
2934 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event: close the connection, report the mapped
 * reason to mgmt, clean up link keys / scan state for ACL, re-arm
 * auto-connect params (link-loss vs always), notify upper layers, wake
 * the suspend wait queue when the last connection drops, and re-enable
 * advertising that an LE connection may have suppressed. */
2938 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2940 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2942 struct hci_conn_params *params;
2943 struct hci_conn *conn;
2944 bool mgmt_connected;
2946 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2950 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status: the disconnect itself failed. */
2955 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2956 conn->dst_type, ev->status);
2960 conn->state = BT_CLOSED;
2962 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2964 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2965 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2967 reason = hci_to_mgmt_reason(ev->reason);
2969 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2970 reason, mgmt_connected);
2972 if (conn->type == ACL_LINK) {
2973 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2974 hci_remove_link_key(hdev, &conn->dst);
2976 hci_req_update_scan(hdev);
2979 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2981 switch (params->auto_connect) {
2982 case HCI_AUTO_CONN_LINK_LOSS:
/* Only reconnect after a genuine link loss. */
2983 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2987 case HCI_AUTO_CONN_DIRECT:
2988 case HCI_AUTO_CONN_ALWAYS:
2989 list_del_init(&params->action);
2990 list_add(&params->action, &hdev->pend_le_conns);
2991 hci_update_background_scan(hdev);
2999 hci_disconn_cfm(conn, ev->reason);
3001 /* The suspend notifier is waiting for all devices to disconnect so
3002 * clear the bit from pending tasks and inform the wait queue.
3004 if (list_empty(&hdev->conn_hash.list) &&
3005 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
3006 wake_up(&hdev->suspend_wait_q);
3009 /* Re-enable advertising if necessary, since it might
3010 * have been disabled by the connection. From the
3011 * HCI_LE_Set_Advertise_Enable command description in
3012 * the core specification (v4.0):
3013 * "The Controller shall continue advertising until the Host
3014 * issues an LE_Set_Advertise_Enable command with
3015 * Advertising_Enable set to 0x00 (Advertising is disabled)
3016 * or until a connection is created or until the Advertising
3017 * is timed out due to Directed Advertising."
3019 if (conn->type == LE_LINK) {
3020 hdev->cur_adv_instance = conn->adv_instance;
3021 hci_req_reenable_advertising(hdev);
3027 hci_dev_unlock(hdev);
/* Handle the HCI Authentication Complete event.
 *
 * On success sets HCI_CONN_AUTH and promotes the pending security
 * level; on failure records an auth failure (distinguishing missing
 * PIN/key) and notifies mgmt. If the connection is still in BT_CONFIG
 * and SSP is enabled, encryption is started next; otherwise the
 * connect/auth confirmation callbacks are invoked. A pending
 * encryption request is also serviced here.
 *
 * NOTE(review): this excerpt is gapped (missing hci_dev_lock, !conn
 * guard, else branches and closing braces in the original numbering).
 */
3030 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3032 struct hci_ev_auth_complete *ev = (void *) skb->data;
3033 struct hci_conn *conn;
3035 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3039 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3044 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
/* Legacy (pre-SSP) devices cannot be re-authenticated on a live link. */
3046 if (!hci_conn_ssp_enabled(conn) &&
3047 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3048 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3050 set_bit(HCI_CONN_AUTH, &conn->flags);
3051 conn->sec_level = conn->pending_sec_level;
/* Missing PIN/key is remembered so the disconnect reason is accurate. */
3054 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3055 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3057 mgmt_auth_failed(conn, ev->status);
3060 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3061 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3063 if (conn->state == BT_CONFIG) {
/* SSP links continue straight into encryption setup. */
3064 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3065 struct hci_cp_set_conn_encrypt cp;
3066 cp.handle = ev->handle;
3068 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3071 conn->state = BT_CONNECTED;
3072 hci_connect_cfm(conn, ev->status);
3073 hci_conn_drop(conn);
3076 hci_auth_cfm(conn, ev->status);
3078 hci_conn_hold(conn);
3079 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3080 hci_conn_drop(conn);
/* Service an encryption request that was queued behind this auth. */
3083 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3085 struct hci_cp_set_conn_encrypt cp;
3086 cp.handle = ev->handle;
3088 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3091 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3092 hci_encrypt_cfm(conn, ev->status);
3097 hci_dev_unlock(hdev);
/* Handle the HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name (or a NULL name on failure) to the
 * pending-name mgmt machinery, then — if the connection still needs
 * outgoing authentication — issues HCI_OP_AUTH_REQUESTED.
 */
3100 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3102 struct hci_ev_remote_name *ev = (void *) skb->data;
3103 struct hci_conn *conn;
3105 BT_DBG("%s", hdev->name);
3107 hci_conn_check_pending(hdev);
3111 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Name delivery to userspace only matters when mgmt is in use. */
3113 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3116 if (ev->status == 0)
3117 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3118 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3120 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3126 if (!hci_outgoing_auth_needed(hdev, conn))
/* Kick off authentication exactly once per connection. */
3129 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3130 struct hci_cp_auth_requested cp;
3132 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3134 cp.handle = __cpu_to_le16(conn->handle);
3135 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3139 hci_dev_unlock(hdev);
/* Completion callback for HCI_OP_READ_ENC_KEY_SIZE.
 *
 * Validates the response, finds the connection by handle and stores
 * the reported encryption key size. On command failure the key size
 * is forced to 0 (treated as untrusted), then hci_encrypt_cfm() lets
 * upper layers react to the now-known key size.
 */
3142 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3143 u16 opcode, struct sk_buff *skb)
3145 const struct hci_rp_read_enc_key_size *rp;
3146 struct hci_conn *conn;
3149 BT_DBG("%s status 0x%02x", hdev->name, status);
/* Guard against a truncated or absent response buffer. */
3151 if (!skb || skb->len < sizeof(*rp)) {
3152 bt_dev_err(hdev, "invalid read key size response");
3156 rp = (void *)skb->data;
3157 handle = le16_to_cpu(rp->handle);
3161 conn = hci_conn_hash_lookup_handle(hdev, handle);
3165 /* While unexpected, the read_enc_key_size command may fail. The most
3166 * secure approach is to then assume the key size is 0 to force a
3170 bt_dev_err(hdev, "failed to read key size for handle %u",
3172 conn->enc_key_size = 0;
3174 conn->enc_key_size = rp->key_size;
3177 hci_encrypt_cfm(conn, 0);
3180 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Change event.
 *
 * Updates the connection's AUTH/ENCRYPT/FIPS/AES_CCM flag state from
 * the event, enforces link-security requirements (downgrading the
 * status to an auth failure and disconnecting when unmet), reads the
 * encryption key size on newly-encrypted ACL links, and programs the
 * authenticated payload timeout where LMP/LE Ping is supported.
 */
3183 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3185 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3186 struct hci_conn *conn;
3188 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3192 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3198 /* Encryption implies authentication */
3199 set_bit(HCI_CONN_AUTH, &conn->flags);
3200 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3201 conn->sec_level = conn->pending_sec_level;
3203 /* P-256 authentication key implies FIPS */
3204 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3205 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 on ACL means AES-CCM; LE links are always AES-CCM. */
3207 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3208 conn->type == LE_LINK)
3209 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3211 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3212 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3216 /* We should disregard the current RPA and generate a new one
3217 * whenever the encryption procedure fails.
3219 if (ev->status && conn->type == LE_LINK) {
3220 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3221 hci_adv_instances_set_rpa_expired(hdev, true);
3224 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3226 /* Check link security requirements are met */
3227 if (!hci_conn_check_link_mode(conn))
3228 ev->status = HCI_ERROR_AUTH_FAILURE;
3230 if (ev->status && conn->state == BT_CONNECTED) {
3231 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3232 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3234 /* Notify upper layers so they can cleanup before
3237 hci_encrypt_cfm(conn, ev->status);
3238 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3239 hci_conn_drop(conn);
3243 /* Try reading the encryption key size for encrypted ACL links */
3244 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3245 struct hci_cp_read_enc_key_size cp;
3246 struct hci_request req;
3248 /* Only send HCI_Read_Encryption_Key_Size if the
3249 * controller really supports it. If it doesn't, assume
3250 * the default size (16).
/* commands[20] bit 4 is the supported-commands bit for this opcode. */
3252 if (!(hdev->commands[20] & 0x10)) {
3253 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3257 hci_req_init(&req, hdev);
3259 cp.handle = cpu_to_le16(conn->handle);
3260 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
/* On submit failure fall back to the default key size immediately. */
3262 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3263 bt_dev_err(hdev, "sending read key size failed");
3264 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3271 /* Set the default Authenticated Payload Timeout after
3272 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3273 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3274 * sent when the link is active and Encryption is enabled, the conn
3275 * type can be either LE or ACL and controller must support LMP Ping.
3276 * Ensure for AES-CCM encryption as well.
3278 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3279 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3280 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3281 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3282 struct hci_cp_write_auth_payload_to cp;
3284 cp.handle = cpu_to_le16(conn->handle);
3285 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3286 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3291 hci_encrypt_cfm(conn, ev->status);
3294 hci_dev_unlock(hdev);
/* Handle the HCI Change Connection Link Key Complete event: mark the
 * connection secure (on success per the surrounding status handling),
 * clear the pending-auth flag and notify key-change listeners.
 */
3297 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3298 struct sk_buff *skb)
3300 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3301 struct hci_conn *conn;
3303 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3307 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3310 set_bit(HCI_CONN_SECURE, &conn->flags);
3312 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3314 hci_key_change_cfm(conn, ev->status);
3317 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores the remote's page-0 feature mask, then — while the connection
 * is still in BT_CONFIG — either chains into a remote extended
 * features read, or requests the remote name / reports the device as
 * connected to mgmt, finishing connection setup when no outgoing
 * authentication is required.
 */
3320 static void hci_remote_features_evt(struct hci_dev *hdev,
3321 struct sk_buff *skb)
3323 struct hci_ev_remote_features *ev = (void *) skb->data;
3324 struct hci_conn *conn;
3326 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3330 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3335 memcpy(conn->features[0], ev->features, 8);
3337 if (conn->state != BT_CONFIG)
/* If both sides do extended features, fetch page 1 before finishing. */
3340 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3341 lmp_ext_feat_capable(conn)) {
3342 struct hci_cp_read_remote_ext_features cp;
3343 cp.handle = ev->handle;
3345 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* First-time connections get a name request before mgmt "connected". */
3350 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3351 struct hci_cp_remote_name_req cp;
3352 memset(&cp, 0, sizeof(cp));
3353 bacpy(&cp.bdaddr, &conn->dst);
3354 cp.pscan_rep_mode = 0x02;
3355 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3356 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3357 mgmt_device_connected(hdev, conn, NULL, 0);
3359 if (!hci_outgoing_auth_needed(hdev, conn)) {
3360 conn->state = BT_CONNECTED;
3361 hci_connect_cfm(conn, ev->status);
3362 hci_conn_drop(conn);
3366 hci_dev_unlock(hdev);
/* Refresh command-flow-control accounting after a command event:
 * cancels the command timeout and, outside of a reset, updates the
 * command credit counter / ncmd watchdog timer.
 * NOTE(review): the if/else structure around ncmd appears gapped in
 * this excerpt — confirm against the full file.
 */
3369 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3371 cancel_delayed_work(&hdev->cmd_timer);
3373 if (!test_bit(HCI_RESET, &hdev->flags)) {
3375 cancel_delayed_work(&hdev->ncmd_timer);
3376 atomic_set(&hdev->cmd_cnt, 1);
3378 schedule_delayed_work(&hdev->ncmd_timer,
/* Handle the HCI Command Complete event.
 *
 * Extracts the completed opcode and status, dispatches the (status +
 * return-parameter) skb to the matching hci_cc_* handler, refreshes
 * the command credit/timer state from ev->ncmd, completes any pending
 * hci_request for the opcode, and re-kicks the command work queue if
 * credits and queued commands remain.
 */
3384 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3385 u16 *opcode, u8 *status,
3386 hci_req_complete_t *req_complete,
3387 hci_req_complete_skb_t *req_complete_skb)
3389 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3391 *opcode = __le16_to_cpu(ev->opcode);
/* Status is the first byte of the return parameters. */
3392 *status = skb->data[sizeof(*ev)];
/* Strip the event header so handlers see only the return parameters. */
3394 skb_pull(skb, sizeof(*ev));
3397 case HCI_OP_INQUIRY_CANCEL:
3398 hci_cc_inquiry_cancel(hdev, skb, status);
3401 case HCI_OP_PERIODIC_INQ:
3402 hci_cc_periodic_inq(hdev, skb);
3405 case HCI_OP_EXIT_PERIODIC_INQ:
3406 hci_cc_exit_periodic_inq(hdev, skb);
3409 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3410 hci_cc_remote_name_req_cancel(hdev, skb);
3413 case HCI_OP_ROLE_DISCOVERY:
3414 hci_cc_role_discovery(hdev, skb);
3417 case HCI_OP_READ_LINK_POLICY:
3418 hci_cc_read_link_policy(hdev, skb);
3421 case HCI_OP_WRITE_LINK_POLICY:
3422 hci_cc_write_link_policy(hdev, skb);
3425 case HCI_OP_READ_DEF_LINK_POLICY:
3426 hci_cc_read_def_link_policy(hdev, skb);
3429 case HCI_OP_WRITE_DEF_LINK_POLICY:
3430 hci_cc_write_def_link_policy(hdev, skb);
3434 hci_cc_reset(hdev, skb);
3437 case HCI_OP_READ_STORED_LINK_KEY:
3438 hci_cc_read_stored_link_key(hdev, skb);
3441 case HCI_OP_DELETE_STORED_LINK_KEY:
3442 hci_cc_delete_stored_link_key(hdev, skb);
3445 case HCI_OP_WRITE_LOCAL_NAME:
3446 hci_cc_write_local_name(hdev, skb);
3449 case HCI_OP_READ_LOCAL_NAME:
3450 hci_cc_read_local_name(hdev, skb);
3453 case HCI_OP_WRITE_AUTH_ENABLE:
3454 hci_cc_write_auth_enable(hdev, skb);
3457 case HCI_OP_WRITE_ENCRYPT_MODE:
3458 hci_cc_write_encrypt_mode(hdev, skb);
3461 case HCI_OP_WRITE_SCAN_ENABLE:
3462 hci_cc_write_scan_enable(hdev, skb);
3465 case HCI_OP_SET_EVENT_FLT:
3466 hci_cc_set_event_filter(hdev, skb);
3469 case HCI_OP_READ_CLASS_OF_DEV:
3470 hci_cc_read_class_of_dev(hdev, skb);
3473 case HCI_OP_WRITE_CLASS_OF_DEV:
3474 hci_cc_write_class_of_dev(hdev, skb);
3477 case HCI_OP_READ_VOICE_SETTING:
3478 hci_cc_read_voice_setting(hdev, skb);
3481 case HCI_OP_WRITE_VOICE_SETTING:
3482 hci_cc_write_voice_setting(hdev, skb);
3485 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3486 hci_cc_read_num_supported_iac(hdev, skb);
3489 case HCI_OP_WRITE_SSP_MODE:
3490 hci_cc_write_ssp_mode(hdev, skb);
3493 case HCI_OP_WRITE_SC_SUPPORT:
3494 hci_cc_write_sc_support(hdev, skb);
3497 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3498 hci_cc_read_auth_payload_timeout(hdev, skb);
3501 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3502 hci_cc_write_auth_payload_timeout(hdev, skb);
3505 case HCI_OP_READ_LOCAL_VERSION:
3506 hci_cc_read_local_version(hdev, skb);
3509 case HCI_OP_READ_LOCAL_COMMANDS:
3510 hci_cc_read_local_commands(hdev, skb);
3513 case HCI_OP_READ_LOCAL_FEATURES:
3514 hci_cc_read_local_features(hdev, skb);
3517 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3518 hci_cc_read_local_ext_features(hdev, skb);
3521 case HCI_OP_READ_BUFFER_SIZE:
3522 hci_cc_read_buffer_size(hdev, skb);
3525 case HCI_OP_READ_BD_ADDR:
3526 hci_cc_read_bd_addr(hdev, skb);
3529 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3530 hci_cc_read_local_pairing_opts(hdev, skb);
3533 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3534 hci_cc_read_page_scan_activity(hdev, skb);
3537 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3538 hci_cc_write_page_scan_activity(hdev, skb);
3541 case HCI_OP_READ_PAGE_SCAN_TYPE:
3542 hci_cc_read_page_scan_type(hdev, skb);
3545 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3546 hci_cc_write_page_scan_type(hdev, skb);
3549 case HCI_OP_READ_DATA_BLOCK_SIZE:
3550 hci_cc_read_data_block_size(hdev, skb);
3553 case HCI_OP_READ_FLOW_CONTROL_MODE:
3554 hci_cc_read_flow_control_mode(hdev, skb);
3557 case HCI_OP_READ_LOCAL_AMP_INFO:
3558 hci_cc_read_local_amp_info(hdev, skb);
3561 case HCI_OP_READ_CLOCK:
3562 hci_cc_read_clock(hdev, skb);
3565 case HCI_OP_READ_INQ_RSP_TX_POWER:
3566 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3569 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3570 hci_cc_read_def_err_data_reporting(hdev, skb);
3573 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3574 hci_cc_write_def_err_data_reporting(hdev, skb);
3577 case HCI_OP_PIN_CODE_REPLY:
3578 hci_cc_pin_code_reply(hdev, skb);
3581 case HCI_OP_PIN_CODE_NEG_REPLY:
3582 hci_cc_pin_code_neg_reply(hdev, skb);
3585 case HCI_OP_READ_LOCAL_OOB_DATA:
3586 hci_cc_read_local_oob_data(hdev, skb);
3589 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3590 hci_cc_read_local_oob_ext_data(hdev, skb);
3593 case HCI_OP_LE_READ_BUFFER_SIZE:
3594 hci_cc_le_read_buffer_size(hdev, skb);
3597 case HCI_OP_LE_READ_LOCAL_FEATURES:
3598 hci_cc_le_read_local_features(hdev, skb);
3601 case HCI_OP_LE_READ_ADV_TX_POWER:
3602 hci_cc_le_read_adv_tx_power(hdev, skb);
3605 case HCI_OP_USER_CONFIRM_REPLY:
3606 hci_cc_user_confirm_reply(hdev, skb);
3609 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3610 hci_cc_user_confirm_neg_reply(hdev, skb);
3613 case HCI_OP_USER_PASSKEY_REPLY:
3614 hci_cc_user_passkey_reply(hdev, skb);
3617 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3618 hci_cc_user_passkey_neg_reply(hdev, skb);
3621 case HCI_OP_LE_SET_RANDOM_ADDR:
3622 hci_cc_le_set_random_addr(hdev, skb);
3625 case HCI_OP_LE_SET_ADV_ENABLE:
3626 hci_cc_le_set_adv_enable(hdev, skb);
3629 case HCI_OP_LE_SET_SCAN_PARAM:
3630 hci_cc_le_set_scan_param(hdev, skb);
3633 case HCI_OP_LE_SET_SCAN_ENABLE:
3634 hci_cc_le_set_scan_enable(hdev, skb);
3637 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3638 hci_cc_le_read_accept_list_size(hdev, skb);
3641 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3642 hci_cc_le_clear_accept_list(hdev, skb);
3645 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3646 hci_cc_le_add_to_accept_list(hdev, skb);
3649 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3650 hci_cc_le_del_from_accept_list(hdev, skb);
3653 case HCI_OP_LE_READ_SUPPORTED_STATES:
3654 hci_cc_le_read_supported_states(hdev, skb);
3657 case HCI_OP_LE_READ_DEF_DATA_LEN:
3658 hci_cc_le_read_def_data_len(hdev, skb);
3661 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3662 hci_cc_le_write_def_data_len(hdev, skb);
3665 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3666 hci_cc_le_add_to_resolv_list(hdev, skb);
3669 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3670 hci_cc_le_del_from_resolv_list(hdev, skb);
3673 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3674 hci_cc_le_clear_resolv_list(hdev, skb);
3677 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3678 hci_cc_le_read_resolv_list_size(hdev, skb);
3681 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3682 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3685 case HCI_OP_LE_READ_MAX_DATA_LEN:
3686 hci_cc_le_read_max_data_len(hdev, skb);
3689 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3690 hci_cc_write_le_host_supported(hdev, skb);
3693 case HCI_OP_LE_SET_ADV_PARAM:
3694 hci_cc_set_adv_param(hdev, skb);
3697 case HCI_OP_READ_RSSI:
3698 hci_cc_read_rssi(hdev, skb);
3701 case HCI_OP_READ_TX_POWER:
3702 hci_cc_read_tx_power(hdev, skb);
3705 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3706 hci_cc_write_ssp_debug_mode(hdev, skb);
3709 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3710 hci_cc_le_set_ext_scan_param(hdev, skb);
3713 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3714 hci_cc_le_set_ext_scan_enable(hdev, skb);
3717 case HCI_OP_LE_SET_DEFAULT_PHY:
3718 hci_cc_le_set_default_phy(hdev, skb);
3721 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3722 hci_cc_le_read_num_adv_sets(hdev, skb);
3725 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3726 hci_cc_set_ext_adv_param(hdev, skb);
3729 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3730 hci_cc_le_set_ext_adv_enable(hdev, skb);
3733 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3734 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3737 case HCI_OP_LE_READ_TRANSMIT_POWER:
3738 hci_cc_le_read_transmit_power(hdev, skb);
3741 case HCI_OP_ENABLE_RSSI:
3742 hci_cc_enable_rssi(hdev, skb);
3745 case HCI_OP_GET_RAW_RSSI:
3746 hci_cc_get_raw_rssi(hdev, skb);
/* Unknown opcodes are merely logged. */
3750 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3754 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3756 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
/* A lingering CMD_PENDING here means an event arrived out of order. */
3759 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3761 "unexpected event for opcode 0x%4.4x", *opcode);
/* More credits and queued commands: keep the pipeline moving. */
3765 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3766 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event.
 *
 * Dispatches the status to the matching hci_cs_* handler for commands
 * that complete asynchronously, refreshes command credits/timers, and
 * completes the pending hci_request when the command failed (or when
 * no special completion event is awaited).
 */
3769 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3770 u16 *opcode, u8 *status,
3771 hci_req_complete_t *req_complete,
3772 hci_req_complete_skb_t *req_complete_skb)
3774 struct hci_ev_cmd_status *ev = (void *) skb->data;
3776 skb_pull(skb, sizeof(*ev));
3778 *opcode = __le16_to_cpu(ev->opcode);
3779 *status = ev->status;
3782 case HCI_OP_INQUIRY:
3783 hci_cs_inquiry(hdev, ev->status);
3786 case HCI_OP_CREATE_CONN:
3787 hci_cs_create_conn(hdev, ev->status);
3790 case HCI_OP_DISCONNECT:
3791 hci_cs_disconnect(hdev, ev->status);
3794 case HCI_OP_ADD_SCO:
3795 hci_cs_add_sco(hdev, ev->status);
3798 case HCI_OP_AUTH_REQUESTED:
3799 hci_cs_auth_requested(hdev, ev->status);
3802 case HCI_OP_SET_CONN_ENCRYPT:
3803 hci_cs_set_conn_encrypt(hdev, ev->status);
3806 case HCI_OP_REMOTE_NAME_REQ:
3807 hci_cs_remote_name_req(hdev, ev->status);
3810 case HCI_OP_READ_REMOTE_FEATURES:
3811 hci_cs_read_remote_features(hdev, ev->status);
3814 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3815 hci_cs_read_remote_ext_features(hdev, ev->status);
3818 case HCI_OP_SETUP_SYNC_CONN:
3819 hci_cs_setup_sync_conn(hdev, ev->status);
3822 case HCI_OP_SNIFF_MODE:
3823 hci_cs_sniff_mode(hdev, ev->status);
3826 case HCI_OP_EXIT_SNIFF_MODE:
3827 hci_cs_exit_sniff_mode(hdev, ev->status);
3830 case HCI_OP_SWITCH_ROLE:
3831 hci_cs_switch_role(hdev, ev->status);
3834 case HCI_OP_LE_CREATE_CONN:
3835 hci_cs_le_create_conn(hdev, ev->status);
3838 case HCI_OP_LE_READ_REMOTE_FEATURES:
3839 hci_cs_le_read_remote_features(hdev, ev->status);
3842 case HCI_OP_LE_START_ENC:
3843 hci_cs_le_start_enc(hdev, ev->status);
3846 case HCI_OP_LE_EXT_CREATE_CONN:
3847 hci_cs_le_ext_create_conn(hdev, ev->status);
/* Unknown opcodes are merely logged. */
3851 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3855 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3857 /* Indicate request completion if the command failed. Also, if
3858 * we're not waiting for a special event and we get a success
3859 * command status we should try to flag the request as completed
3860 * (since for this kind of commands there will not be a command
3864 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3865 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
/* A lingering CMD_PENDING here means an event arrived out of order. */
3868 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3870 "unexpected event for opcode 0x%4.4x", *opcode);
/* More credits and queued commands: keep the pipeline moving. */
3874 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3875 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: record the controller's error
 * code and schedule the error-reset work to recover the device.
 */
3878 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3880 struct hci_ev_hardware_error *ev = (void *) skb->data;
3882 hdev->hw_error_code = ev->code;
3884 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: update the connection's stored
 * role, clear the pending role-switch flag and notify listeners.
 */
3887 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3889 struct hci_ev_role_change *ev = (void *) skb->data;
3890 struct hci_conn *conn;
3892 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3896 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3899 conn->role = ev->role;
3901 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3903 hci_role_switch_cfm(conn, ev->status, ev->role);
3906 hci_dev_unlock(hdev);
/* Handle the HCI Number Of Completed Packets event (packet-based flow
 * control only).
 *
 * For each (handle, count) pair the per-connection in-flight counter
 * is decremented and the matching per-device credit pool (ACL, LE or
 * SCO) is replenished, clamped to the controller-advertised maximum,
 * before the TX work is rescheduled.
 */
3909 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3911 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only meaningful in packet-based flow control mode. */
3914 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3915 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the variable-length handle array before iterating. */
3919 if (skb->len < sizeof(*ev) ||
3920 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3921 BT_DBG("%s bad parameters", hdev->name);
3925 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3927 for (i = 0; i < ev->num_hndl; i++) {
3928 struct hci_comp_pkts_info *info = &ev->handles[i];
3929 struct hci_conn *conn;
3930 __u16 handle, count;
3932 handle = __le16_to_cpu(info->handle);
3933 count = __le16_to_cpu(info->count);
3935 conn = hci_conn_hash_lookup_handle(hdev, handle);
3939 conn->sent -= count;
3941 switch (conn->type) {
3943 hdev->acl_cnt += count;
3944 if (hdev->acl_cnt > hdev->acl_pkts)
3945 hdev->acl_cnt = hdev->acl_pkts;
/* Controllers without a dedicated LE pool share the ACL credits. */
3949 if (hdev->le_pkts) {
3950 hdev->le_cnt += count;
3951 if (hdev->le_cnt > hdev->le_pkts)
3952 hdev->le_cnt = hdev->le_pkts;
3954 hdev->acl_cnt += count;
3955 if (hdev->acl_cnt > hdev->acl_pkts)
3956 hdev->acl_cnt = hdev->acl_pkts;
3961 hdev->sco_cnt += count;
3962 if (hdev->sco_cnt > hdev->sco_pkts)
3963 hdev->sco_cnt = hdev->sco_pkts;
3967 bt_dev_err(hdev, "unknown type %d conn %p",
3973 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a connection, dispatching on device type:
 * primary controllers look up an hci_conn, AMP controllers look up an
 * hci_chan (its hci_conn is presumably returned — TODO confirm, the
 * line is gapped in this excerpt).
 */
3976 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3979 struct hci_chan *chan;
3981 switch (hdev->dev_type) {
3983 return hci_conn_hash_lookup_handle(hdev, handle);
3985 chan = hci_chan_lookup_handle(hdev, handle);
3990 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle the HCI Number Of Completed Data Blocks event (block-based
 * flow control only).
 *
 * Mirrors hci_num_comp_pkts_evt but accounts in data blocks: each
 * entry credits the shared block pool (clamped to num_blocks) after
 * decrementing the connection's in-flight counter.
 */
3997 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3999 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only meaningful in block-based flow control mode. */
4002 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4003 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the variable-length handle array before iterating. */
4007 if (skb->len < sizeof(*ev) ||
4008 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4009 BT_DBG("%s bad parameters", hdev->name);
4013 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
4016 for (i = 0; i < ev->num_hndl; i++) {
4017 struct hci_comp_blocks_info *info = &ev->handles[i];
4018 struct hci_conn *conn = NULL;
4019 __u16 handle, block_count;
4021 handle = __le16_to_cpu(info->handle);
4022 block_count = __le16_to_cpu(info->blocks);
4024 conn = __hci_conn_lookup_handle(hdev, handle);
4028 conn->sent -= block_count;
4030 switch (conn->type) {
4033 hdev->block_cnt += block_count;
4034 if (hdev->block_cnt > hdev->num_blocks)
4035 hdev->block_cnt = hdev->num_blocks;
4039 bt_dev_err(hdev, "unknown type %d conn %p",
4045 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event: record the new mode
 * (active/sniff), adjust the power-save flag for unsolicited changes,
 * and finish any SCO setup that was deferred behind the mode change.
 */
4048 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4050 struct hci_ev_mode_change *ev = (void *) skb->data;
4051 struct hci_conn *conn;
4053 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4057 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4059 conn->mode = ev->mode;
/* Only track power-save state when the change was not requested by us. */
4061 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4063 if (conn->mode == HCI_CM_ACTIVE)
4064 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4066 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4069 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4070 hci_sco_setup(conn, ev->status);
4073 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event.
 *
 * Extends the disconnect timeout for the pairing, rejects the request
 * outright when the device is non-bondable and we did not initiate
 * authentication, otherwise forwards the request (with a secure-PIN
 * hint) to userspace via mgmt.
 */
4076 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4078 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4079 struct hci_conn *conn;
4081 BT_DBG("%s", hdev->name);
4085 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Give the pairing dialog time before the idle disconnect fires. */
4089 if (conn->state == BT_CONNECTED) {
4090 hci_conn_hold(conn);
4091 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4092 hci_conn_drop(conn);
/* Non-bondable and not our auth attempt: refuse the PIN request. */
4095 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4096 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4097 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4098 sizeof(ev->bdaddr), &ev->bdaddr);
4099 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High pending security implies a 16-digit (secure) PIN. */
4102 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4107 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4111 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type (unit/debug/short
 * combination keys cap at medium, authenticated P-192 gives high,
 * authenticated P-256 gives FIPS). HCI_LK_CHANGED_COMBINATION is
 * special-cased up front (the gapped line presumably keeps the
 * existing type — TODO confirm against the full file).
 */
4114 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4116 if (key_type == HCI_LK_CHANGED_COMBINATION)
4119 conn->pin_length = pin_len;
4120 conn->key_type = key_type;
4123 case HCI_LK_LOCAL_UNIT:
4124 case HCI_LK_REMOTE_UNIT:
4125 case HCI_LK_DEBUG_COMBINATION:
4127 case HCI_LK_COMBINATION:
/* Legacy combination keys: 16-digit PINs rate high, shorter medium. */
4129 conn->pending_sec_level = BT_SECURITY_HIGH;
4131 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4133 case HCI_LK_UNAUTH_COMBINATION_P192:
4134 case HCI_LK_UNAUTH_COMBINATION_P256:
4135 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4137 case HCI_LK_AUTH_COMBINATION_P192:
4138 conn->pending_sec_level = BT_SECURITY_HIGH;
4140 case HCI_LK_AUTH_COMBINATION_P256:
4141 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event.
 *
 * Looks up a stored link key for the requesting address and replies
 * with it, unless the key is too weak for the connection's pending
 * security level (unauthenticated key where MITM was requested, or a
 * short-PIN combination key for high/FIPS security) — in which case,
 * or when no key exists, a negative reply is sent.
 */
4146 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4148 struct hci_ev_link_key_req *ev = (void *) skb->data;
4149 struct hci_cp_link_key_reply cp;
4150 struct hci_conn *conn;
4151 struct link_key *key;
4153 BT_DBG("%s", hdev->name);
4155 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4160 key = hci_find_link_key(hdev, &ev->bdaddr);
4162 BT_DBG("%s link key not found for %pMR", hdev->name,
4167 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4170 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4172 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Refuse an unauthenticated key when MITM protection was requested. */
4174 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4175 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4176 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4177 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Short-PIN legacy keys are not acceptable for high/FIPS security. */
4181 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4182 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4183 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4184 BT_DBG("%s ignoring key unauthenticated for high security",
4189 conn_set_key(conn, key->type, key->pin_len);
4192 bacpy(&cp.bdaddr, &ev->bdaddr);
4193 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4195 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4197 hci_dev_unlock(hdev);
/* No usable key: tell the controller so pairing can start instead. */
4202 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4203 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event.
 *
 * Records the new key on the connection, stores it in the kernel key
 * list, notifies userspace via mgmt, and immediately discards debug
 * keys unless HCI_KEEP_DEBUG_KEYS is set. The key's persistence also
 * drives the per-connection flush-on-disconnect flag.
 */
4206 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4208 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4209 struct hci_conn *conn;
4210 struct link_key *key;
4214 BT_DBG("%s", hdev->name);
4218 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4222 hci_conn_hold(conn);
4223 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4224 hci_conn_drop(conn);
4226 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4227 conn_set_key(conn, ev->key_type, conn->pin_length);
4229 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4232 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4233 ev->key_type, pin_len, &persistent);
4237 /* Update connection information since adding the key will have
4238 * fixed up the type in the case of changed combination keys.
4240 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4241 conn_set_key(conn, key->type, key->pin_len);
4243 mgmt_new_link_key(hdev, key, persistent);
4245 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4246 * is set. If it's not set simply remove the key from the kernel
4247 * list (we've still notified user space about it but with
4248 * store_hint being 0).
4250 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4251 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4252 list_del_rcu(&key->list);
4253 kfree_rcu(key, rcu);
/* Persistent keys survive disconnect; others are flushed with it. */
4258 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4260 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4263 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: on success refresh
 * the inquiry-cache entry for the peer with the new clock offset.
 */
4266 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4268 struct hci_ev_clock_offset *ev = (void *) skb->data;
4269 struct hci_conn *conn;
4271 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4275 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4276 if (conn && !ev->status) {
4277 struct inquiry_entry *ie;
4279 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4281 ie->data.clock_offset = ev->clock_offset;
4282 ie->timestamp = jiffies;
4286 hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: on success
 * store the newly negotiated packet types on the connection.
 */
4289 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4291 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4292 struct hci_conn *conn;
4294 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4298 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4299 if (conn && !ev->status)
4300 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4302 hci_dev_unlock(hdev);
/* Handle the HCI Page Scan Repetition Mode Change event: refresh the
 * peer's inquiry-cache entry with the announced page-scan mode.
 */
4305 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4307 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4308 struct inquiry_entry *ie;
4310 BT_DBG("%s", hdev->name);
4314 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4316 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4317 ie->timestamp = jiffies;
4320 hci_dev_unlock(hdev);
/* Handle the HCI Inquiry Result with RSSI event.
 *
 * The event comes in two wire formats — with or without a trailing
 * pscan_mode byte per record — distinguished by dividing the payload
 * length by the record count. Each record updates the inquiry cache
 * and is reported to mgmt as a found device. Results are ignored
 * during periodic inquiry.
 */
4323 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4324 struct sk_buff *skb)
4326 struct inquiry_data data;
4327 int num_rsp = *((__u8 *) skb->data);
4329 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4334 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Payload length per response decides which record layout was sent. */
4339 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4340 struct inquiry_info_with_rssi_and_pscan_mode *info;
4341 info = (void *) (skb->data + 1);
/* Reject events whose payload is shorter than claimed. */
4343 if (skb->len < num_rsp * sizeof(*info) + 1)
4346 for (; num_rsp; num_rsp--, info++) {
4349 bacpy(&data.bdaddr, &info->bdaddr);
4350 data.pscan_rep_mode = info->pscan_rep_mode;
4351 data.pscan_period_mode = info->pscan_period_mode;
4352 data.pscan_mode = info->pscan_mode;
4353 memcpy(data.dev_class, info->dev_class, 3);
4354 data.clock_offset = info->clock_offset;
4355 data.rssi = info->rssi;
4356 data.ssp_mode = 0x00;
4358 flags = hci_inquiry_cache_update(hdev, &data, false);
4360 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4361 info->dev_class, info->rssi,
4362 flags, NULL, 0, NULL, 0);
4365 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4367 if (skb->len < num_rsp * sizeof(*info) + 1)
4370 for (; num_rsp; num_rsp--, info++) {
4373 bacpy(&data.bdaddr, &info->bdaddr);
4374 data.pscan_rep_mode = info->pscan_rep_mode;
4375 data.pscan_period_mode = info->pscan_period_mode;
4376 data.pscan_mode = 0x00;
4377 memcpy(data.dev_class, info->dev_class, 3);
4378 data.clock_offset = info->clock_offset;
4379 data.rssi = info->rssi;
4380 data.ssp_mode = 0x00;
4382 flags = hci_inquiry_cache_update(hdev, &data, false);
4384 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4385 info->dev_class, info->rssi,
4386 flags, NULL, 0, NULL, 0);
4391 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Stores the requested feature page, and for page 1 syncs the SSP and
 * Secure Connections host-support flags onto the connection (and the
 * inquiry cache). While in BT_CONFIG, continues connection setup the
 * same way as hci_remote_features_evt: name request or mgmt
 * "connected", then connect confirmation when no auth is needed.
 */
4394 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4395 struct sk_buff *skb)
4397 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4398 struct hci_conn *conn;
4400 BT_DBG("%s", hdev->name);
4404 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4408 if (ev->page < HCI_MAX_PAGES)
4409 memcpy(conn->features[ev->page], ev->features, 8);
4411 if (!ev->status && ev->page == 0x01) {
4412 struct inquiry_entry *ie;
4414 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4416 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4418 if (ev->features[0] & LMP_HOST_SSP) {
4419 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4421 /* It is mandatory by the Bluetooth specification that
4422 * Extended Inquiry Results are only used when Secure
4423 * Simple Pairing is enabled, but some devices violate
4426 * To make these devices work, the internal SSP
4427 * enabled flag needs to be cleared if the remote host
4428 * features do not indicate SSP support */
4429 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4432 if (ev->features[0] & LMP_HOST_SC)
4433 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4436 if (conn->state != BT_CONFIG)
/* First-time connections get a name request before mgmt "connected". */
4439 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4440 struct hci_cp_remote_name_req cp;
4441 memset(&cp, 0, sizeof(cp));
4442 bacpy(&cp.bdaddr, &conn->dst);
4443 cp.pscan_rep_mode = 0x02;
4444 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4445 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4446 mgmt_device_connected(hdev, conn, NULL, 0);
4448 if (!hci_outgoing_auth_needed(hdev, conn)) {
4449 conn->state = BT_CONNECTED;
4450 hci_connect_cfm(conn, ev->status);
4451 hci_conn_drop(conn);
4455 hci_dev_unlock(hdev);
/* Handle HCI Synchronous Connection Complete (SCO/eSCO). Validates the link
 * type, resolves the connection object (falling back from SCO to eSCO),
 * registers a successful connection exactly once, retries eSCO setup with
 * downgraded packet types on specific failure codes, and notifies the driver
 * of the negotiated air mode.
 * NOTE(review): elided listing — the "if (!conn) goto unlock" guards, the
 * retry/close fall-through structure and goto labels are not visible here.
 */
4458 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4459 struct sk_buff *skb)
4461 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4462 struct hci_conn *conn;
4464 switch (ev->link_type) {
4469 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4470 * for HCI_Synchronous_Connection_Complete is limited to
4471 * either SCO or eSCO
4473 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4477 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4481 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4483 if (ev->link_type == ESCO_LINK)
4486 /* When the link type in the event indicates SCO connection
4487 * and lookup of the connection object fails, then check
4488 * if an eSCO connection object exists.
4490 * The core limits the synchronous connections to either
4491 * SCO or eSCO. The eSCO connection is preferred and tried
4492 * to be setup first and until successfully established,
4493 * the link type will be hinted as eSCO.
4495 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4500 switch (ev->status) {
4502 /* The synchronous connection complete event should only be
4503 * sent once per new connection. Receiving a successful
4504 * complete event when the connection status is already
4505 * BT_CONNECTED means that the device is misbehaving and sent
4506 * multiple complete event packets for the same new connection.
4508 * Registering the device more than once can corrupt kernel
4509 * memory, hence upon detecting this invalid event, we report
4510 * an error and ignore the packet.
4512 if (conn->state == BT_CONNECTED) {
4513 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4517 conn->handle = __le16_to_cpu(ev->handle);
4518 conn->state = BT_CONNECTED;
4519 conn->type = ev->link_type;
4521 hci_debugfs_create_conn(conn);
4522 hci_conn_add_sysfs(conn);
/* These failure codes trigger a retry with a restricted packet-type mask. */
4525 case 0x10: /* Connection Accept Timeout */
4526 case 0x0d: /* Connection Rejected due to Limited Resources */
4527 case 0x11: /* Unsupported Feature or Parameter Value */
4528 case 0x1c: /* SCO interval rejected */
4529 case 0x1a: /* Unsupported Remote Feature */
4530 case 0x1e: /* Invalid LMP Parameters */
4531 case 0x1f: /* Unspecified error */
4532 case 0x20: /* Unsupported LMP Parameter value */
4534 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4535 (hdev->esco_type & EDR_ESCO_MASK);
/* Retry setup over the parent ACL link; on failure fall through to close. */
4536 if (hci_setup_sync(conn, conn->link->handle))
4542 conn->state = BT_CLOSED;
4546 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Let the driver configure its audio path for the negotiated codec. */
4548 switch (ev->air_mode) {
4551 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4555 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4559 hci_connect_cfm(conn, ev->status);
4564 hci_dev_unlock(hdev);
/* Walk the EIR (Extended Inquiry Response) TLV structure and return the
 * number of bytes actually used, stopping at the first zero-length field.
 * NOTE(review): elided listing — the zero-length-field break and the final
 * return statement are not visible in this view.
 */
4567 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4571 while (parsed < eir_len) {
/* Each field is: 1 length byte followed by field_len payload bytes. */
4572 u8 field_len = eir[0];
4577 parsed += field_len + 1;
4578 eir += field_len + 1;
/* Handle HCI Extended Inquiry Result: for each response, update the inquiry
 * cache (marking ssp_mode = 0x01 since EIR implies SSP) and report the
 * device to mgmt together with its EIR data.
 */
4584 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4585 struct sk_buff *skb)
4587 struct inquiry_data data;
4588 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4589 int num_rsp = *((__u8 *) skb->data);
4592 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject truncated events before dereferencing info[]. */
4594 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
/* Periodic inquiry results are not forwarded. */
4597 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4602 for (; num_rsp; num_rsp--, info++) {
4606 bacpy(&data.bdaddr, &info->bdaddr);
4607 data.pscan_rep_mode = info->pscan_rep_mode;
4608 data.pscan_period_mode = info->pscan_period_mode;
4609 data.pscan_mode = 0x00;
4610 memcpy(data.dev_class, info->dev_class, 3);
4611 data.clock_offset = info->clock_offset;
4612 data.rssi = info->rssi;
/* EIR is only sent by SSP-capable devices. */
4613 data.ssp_mode = 0x01;
/* name_known: whether the EIR already carries the complete device name. */
4615 if (hci_dev_test_flag(hdev, HCI_MGMT))
4616 name_known = eir_get_data(info->data,
4618 EIR_NAME_COMPLETE, NULL);
4622 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4624 eir_len = eir_get_length(info->data, sizeof(info->data));
4626 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4627 info->dev_class, info->rssi,
4628 flags, info->data, eir_len, NULL, 0);
4631 hci_dev_unlock(hdev);
/* Handle HCI Encryption Key Refresh Complete. Only acted upon for LE links
 * (BR/EDR is handled via the auth_complete event): promote the pending
 * security level, disconnect on failure, and confirm connection/auth.
 * NOTE(review): elided listing — the "if (!conn) goto unlock" guard and
 * goto labels are not visible here.
 */
4634 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4635 struct sk_buff *skb)
4637 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4638 struct hci_conn *conn;
4640 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4641 __le16_to_cpu(ev->handle));
4645 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4649 /* For BR/EDR the necessary steps are taken through the
4650 * auth_complete event.
4652 if (conn->type != LE_LINK)
4656 conn->sec_level = conn->pending_sec_level;
4658 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Key refresh failed on a live link: tear the connection down. */
4660 if (ev->status && conn->state == BT_CONNECTED) {
4661 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4662 hci_conn_drop(conn);
4666 if (conn->state == BT_CONFIG) {
4668 conn->state = BT_CONNECTED;
4670 hci_connect_cfm(conn, ev->status);
4671 hci_conn_drop(conn);
4673 hci_auth_cfm(conn, ev->status);
4675 hci_conn_hold(conn);
4676 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4677 hci_conn_drop(conn);
4681 hci_dev_unlock(hdev);
/* Compute the authentication requirement to send in an IO Capability Reply,
 * combining the remote's requested requirement with our own MITM bit
 * (bit 0 of the auth_type encodes MITM protection).
 */
4684 static u8 hci_get_auth_req(struct hci_conn *conn)
4686 /* If remote requests no-bonding follow that lead */
4687 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4688 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4689 return conn->remote_auth | (conn->auth_type & 0x01);
4691 /* If both remote and local have enough IO capabilities, require
4694 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4695 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4696 return conn->remote_auth | 0x01;
4698 /* No MITM protection possible so ignore remote requirement */
4699 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Return the OOB-data-present value for an IO Capability Reply: checks the
 * stored remote OOB data for the peer, preferring P-256 (Secure Connections)
 * values and requiring them in SC Only mode.
 * NOTE(review): elided listing — the early "if (!data) return 0" and the
 * final return values are not visible in this view.
 */
4702 static u8 bredr_oob_data_present(struct hci_conn *conn)
4704 struct hci_dev *hdev = conn->hdev;
4705 struct oob_data *data;
4707 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4711 if (bredr_sc_enabled(hdev)) {
4712 /* When Secure Connections is enabled, then just
4713 * return the present value stored with the OOB
4714 * data. The stored value contains the right present
4715 * information. However it can only be trusted when
4716 * not in Secure Connection Only mode.
4718 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4719 return data->present;
4721 /* When Secure Connections Only mode is enabled, then
4722 * the P-256 values are required. If they are not
4723 * available, then do not declare that OOB data is
4726 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4727 !memcmp(data->hash256, ZERO_KEY, 16))
4733 /* When Secure Connections is not enabled or actually
4734 * not supported by the hardware, then check that if
4735 * P-192 data values are present.
4737 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4738 !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle HCI IO Capability Request: if pairing is allowed (bondable, we
 * initiated, or remote asks for no-bonding) reply with our IO capability
 * and computed authentication requirement; otherwise send a negative reply
 * with "pairing not allowed".
 */
4744 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4746 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4747 struct hci_conn *conn;
4749 BT_DBG("%s", hdev->name);
4753 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive across the pairing exchange. */
4757 hci_conn_hold(conn);
4759 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4762 /* Allow pairing if we're pairable, the initiators of the
4763 * pairing or if the remote is not requesting bonding.
4765 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4766 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4767 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4768 struct hci_cp_io_capability_reply cp;
4770 bacpy(&cp.bdaddr, &ev->bdaddr);
4771 /* Change the IO capability from KeyboardDisplay
4772 * to DisplayYesNo as it is not supported by BT spec. */
4773 cp.capability = (conn->io_capability == 0x04) ?
4774 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4776 /* If we are initiators, there is no remote information yet */
4777 if (conn->remote_auth == 0xff) {
4778 /* Request MITM protection if our IO caps allow it
4779 * except for the no-bonding case.
4781 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4782 conn->auth_type != HCI_AT_NO_BONDING)
4783 conn->auth_type |= 0x01;
4785 conn->auth_type = hci_get_auth_req(conn);
4788 /* If we're not bondable, force one of the non-bondable
4789 * authentication requirement values.
4791 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4792 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4794 cp.authentication = conn->auth_type;
4795 cp.oob_data = bredr_oob_data_present(conn);
4797 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4800 struct hci_cp_io_capability_neg_reply cp;
4802 bacpy(&cp.bdaddr, &ev->bdaddr);
4803 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4805 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4810 hci_dev_unlock(hdev);
/* Handle HCI IO Capability Response: record the remote device's IO
 * capability and authentication requirement on the connection for use
 * during the subsequent pairing steps.
 */
4813 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4815 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4816 struct hci_conn *conn;
4818 BT_DBG("%s", hdev->name);
4822 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4826 conn->remote_cap = ev->capability;
4827 conn->remote_auth = ev->authentication;
4830 hci_dev_unlock(hdev);
/* Handle HCI User Confirmation Request (numeric comparison). Rejects the
 * request when MITM is required but the remote cannot provide it,
 * auto-accepts (optionally after a configured delay) when neither side
 * requires MITM and no link key exists, and otherwise forwards the request
 * to user space via mgmt with an appropriate confirm_hint.
 */
4833 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4834 struct sk_buff *skb)
4836 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4837 int loc_mitm, rem_mitm, confirm_hint = 0;
4838 struct hci_conn *conn;
4840 BT_DBG("%s", hdev->name);
4844 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4847 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement encodes MITM protection. */
4851 loc_mitm = (conn->auth_type & 0x01);
4852 rem_mitm = (conn->remote_auth & 0x01);
4854 /* If we require MITM but the remote device can't provide that
4855 * (it has NoInputNoOutput) then reject the confirmation
4856 * request. We check the security level here since it doesn't
4857 * necessarily match conn->auth_type.
4859 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4860 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4861 BT_DBG("Rejecting request: remote device can't provide MITM");
4862 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4863 sizeof(ev->bdaddr), &ev->bdaddr);
4867 /* If no side requires MITM protection; auto-accept */
4868 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4869 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4871 /* If we're not the initiators request authorization to
4872 * proceed from user space (mgmt_user_confirm with
4873 * confirm_hint set to 1). The exception is if neither
4874 * side had MITM or if the local IO capability is
4875 * NoInputNoOutput, in which case we do auto-accept
4877 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4878 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4879 (loc_mitm || rem_mitm)) {
4880 BT_DBG("Confirming auto-accept as acceptor");
4885 /* If there already exists link key in local host, leave the
4886 * decision to user space since the remote device could be
4887 * legitimate or malicious.
4889 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4890 bt_dev_dbg(hdev, "Local host already has link key");
4895 BT_DBG("Auto-accept of user confirmation with %ums delay",
4896 hdev->auto_accept_delay);
/* Delayed auto-accept gives L2CAP a chance to reach a stable state. */
4898 if (hdev->auto_accept_delay > 0) {
4899 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4900 queue_delayed_work(conn->hdev->workqueue,
4901 &conn->auto_accept_work, delay);
4905 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4906 sizeof(ev->bdaddr), &ev->bdaddr);
4911 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4912 le32_to_cpu(ev->passkey), confirm_hint);
4915 hci_dev_unlock(hdev);
/* Handle HCI User Passkey Request: simply forward it to user space via
 * mgmt when the mgmt interface is in use.
 */
4918 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4919 struct sk_buff *skb)
4921 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4923 BT_DBG("%s", hdev->name);
4925 if (hci_dev_test_flag(hdev, HCI_MGMT))
4926 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle HCI User Passkey Notification: store the passkey to display on
 * the connection, reset the entered-digit counter and notify user space.
 */
4929 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4930 struct sk_buff *skb)
4932 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4933 struct hci_conn *conn;
4935 BT_DBG("%s", hdev->name);
4937 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4941 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4942 conn->passkey_entered = 0;
4944 if (hci_dev_test_flag(hdev, HCI_MGMT))
4945 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4946 conn->dst_type, conn->passkey_notify,
4947 conn->passkey_entered);
/* Handle HCI Keypress Notification: track how many passkey digits the
 * remote device has entered/erased and relay the count to user space.
 */
4950 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4952 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4953 struct hci_conn *conn;
4955 BT_DBG("%s", hdev->name);
4957 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4962 case HCI_KEYPRESS_STARTED:
4963 conn->passkey_entered = 0;
4966 case HCI_KEYPRESS_ENTERED:
4967 conn->passkey_entered++;
4970 case HCI_KEYPRESS_ERASED:
4971 conn->passkey_entered--;
4974 case HCI_KEYPRESS_CLEARED:
4975 conn->passkey_entered = 0;
4978 case HCI_KEYPRESS_COMPLETED:
4982 if (hci_dev_test_flag(hdev, HCI_MGMT))
4983 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4984 conn->dst_type, conn->passkey_notify,
4985 conn->passkey_entered);
/* Handle HCI Simple Pairing Complete: reset the remote auth requirement to
 * "unknown" and report a pairing failure to mgmt, avoiding duplicate
 * auth_failed events when we initiated authentication ourselves.
 */
4988 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4989 struct sk_buff *skb)
4991 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4992 struct hci_conn *conn;
4994 BT_DBG("%s", hdev->name);
4998 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5002 /* Reset the authentication requirement to unknown */
5003 conn->remote_auth = 0xff;
5005 /* To avoid duplicate auth_failed events to user space we check
5006 * the HCI_CONN_AUTH_PEND flag which will be set if we
5007 * initiated the authentication. A traditional auth_complete
5008 * event gets always produced as initiator and is also mapped to
5009 * the mgmt_auth_failed event */
5010 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5011 mgmt_auth_failed(conn, ev->status);
/* Drop the reference taken when pairing started (io_capa_request). */
5013 hci_conn_drop(conn);
5016 hci_dev_unlock(hdev);
/* Handle HCI Remote Host Supported Features Notification: cache the remote
 * host features on an existing connection (page 1) and record the remote's
 * SSP support in the inquiry cache.
 */
5019 static void hci_remote_host_features_evt(struct hci_dev *hdev,
5020 struct sk_buff *skb)
5022 struct hci_ev_remote_host_features *ev = (void *) skb->data;
5023 struct inquiry_entry *ie;
5024 struct hci_conn *conn;
5026 BT_DBG("%s", hdev->name);
5030 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5032 memcpy(conn->features[1], ev->features, 8);
5034 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5036 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5038 hci_dev_unlock(hdev);
/* Handle HCI Remote OOB Data Request: reply with the locally stored OOB
 * hash/randomizer values for the peer (extended P-192+P-256 form when
 * Secure Connections is enabled, zeroing P-192 in SC Only mode), or send
 * a negative reply if no OOB data is stored.
 */
5041 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5042 struct sk_buff *skb)
5044 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5045 struct oob_data *data;
5047 BT_DBG("%s", hdev->name);
5051 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5054 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data for this peer: negative reply. */
5056 struct hci_cp_remote_oob_data_neg_reply cp;
5058 bacpy(&cp.bdaddr, &ev->bdaddr);
5059 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5064 if (bredr_sc_enabled(hdev)) {
5065 struct hci_cp_remote_oob_ext_data_reply cp;
5067 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC Only mode the legacy P-192 values must not be used. */
5068 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5069 memset(cp.hash192, 0, sizeof(cp.hash192));
5070 memset(cp.rand192, 0, sizeof(cp.rand192));
5072 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5073 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5075 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5076 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5078 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5081 struct hci_cp_remote_oob_data_reply cp;
5083 bacpy(&cp.bdaddr, &ev->bdaddr);
5084 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5085 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5087 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5092 hci_dev_unlock(hdev);
5095 #if IS_ENABLED(CONFIG_BT_HS)
/* (CONFIG_BT_HS) Handle AMP Channel Selected event: look up the physical
 * link by handle and continue with reading the local AMP assoc data.
 */
5096 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5098 struct hci_ev_channel_selected *ev = (void *)skb->data;
5099 struct hci_conn *hcon;
5101 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5103 skb_pull(skb, sizeof(*ev));
5105 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5109 amp_read_loc_assoc_final_data(hdev, hcon);
/* (CONFIG_BT_HS) Handle AMP Physical Link Complete: on success mark the AMP
 * connection as connected, copy the peer address from the underlying BR/EDR
 * link, register it in debugfs/sysfs and confirm the physical link to the
 * AMP manager.
 * NOTE(review): elided listing — the status/amp_mgr error paths between the
 * lookup and the bredr_hcon dereference are not visible here.
 */
5112 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5113 struct sk_buff *skb)
5115 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5116 struct hci_conn *hcon, *bredr_hcon;
5118 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5123 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5135 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5137 hcon->state = BT_CONNECTED;
5138 bacpy(&hcon->dst, &bredr_hcon->dst);
5140 hci_conn_hold(hcon);
5141 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5142 hci_conn_drop(hcon);
5144 hci_debugfs_create_conn(hcon);
5145 hci_conn_add_sysfs(hcon);
5147 amp_physical_cfm(bredr_hcon, hcon);
5150 hci_dev_unlock(hdev);
/* (CONFIG_BT_HS) Handle AMP Logical Link Complete: create an hci_chan for
 * the new logical link on the physical connection and, if an L2CAP bredr
 * channel is waiting in the AMP manager, confirm the logical link to L2CAP.
 */
5153 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5155 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5156 struct hci_conn *hcon;
5157 struct hci_chan *hchan;
5158 struct amp_mgr *mgr;
5160 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5161 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5164 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5168 /* Create AMP hchan */
5169 hchan = hci_chan_create(hcon);
5173 hchan->handle = le16_to_cpu(ev->handle);
5176 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5178 mgr = hcon->amp_mgr;
5179 if (mgr && mgr->bredr_chan) {
5180 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5182 l2cap_chan_lock(bredr_chan);
/* AMP links use block-based flow control; adopt the controller's MTU. */
5184 bredr_chan->conn->mtu = hdev->block_mtu;
5185 l2cap_logical_cfm(bredr_chan, hchan, 0);
5186 hci_conn_hold(hcon);
5188 l2cap_chan_unlock(bredr_chan);
/* (CONFIG_BT_HS) Handle AMP Disconnection Logical Link Complete: find the
 * AMP channel by logical-link handle and destroy it with the given reason.
 */
5192 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5193 struct sk_buff *skb)
5195 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5196 struct hci_chan *hchan;
5198 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5199 le16_to_cpu(ev->handle), ev->status);
5206 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5207 if (!hchan || !hchan->amp)
5210 amp_destroy_logical_link(hchan, ev->reason);
5213 hci_dev_unlock(hdev);
/* (CONFIG_BT_HS) Handle AMP Disconnection Physical Link Complete: close the
 * AMP connection and notify the disconnect to upper layers.
 */
5216 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5217 struct sk_buff *skb)
5219 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5220 struct hci_conn *hcon;
5222 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5229 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5230 if (hcon && hcon->type == AMP_LINK) {
5231 hcon->state = BT_CLOSED;
5232 hci_disconn_cfm(hcon, ev->reason);
5236 hci_dev_unlock(hdev);
/* Fill in the initiator/responder addresses of a new LE connection based on
 * our role: as central the peer is the responder and our own address comes
 * from the controller-provided local RPA, hdev->rpa or the identity address;
 * as peripheral the roles are reversed and the default connection interval
 * bounds are seeded for a later connection-parameter update check.
 * NOTE(review): elided listing — the role branch (central vs peripheral)
 * selection lines are not visible in this view.
 */
5240 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5241 u8 bdaddr_type, bdaddr_t *local_rpa)
5244 conn->dst_type = bdaddr_type;
5245 conn->resp_addr_type = bdaddr_type;
5246 bacpy(&conn->resp_addr, bdaddr);
5248 /* Check if the controller has set a Local RPA then it must be
5249 * used instead or hdev->rpa.
5251 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5252 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5253 bacpy(&conn->init_addr, local_rpa);
5254 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5255 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5256 bacpy(&conn->init_addr, &conn->hdev->rpa);
5258 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5259 &conn->init_addr_type);
5262 conn->resp_addr_type = conn->hdev->adv_addr_type;
5263 /* Check if the controller has set a Local RPA then it must be
5264 * used instead or hdev->rpa.
5266 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5267 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5268 bacpy(&conn->resp_addr, local_rpa);
5269 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5270 /* In case of ext adv, resp_addr will be updated in
5271 * Adv Terminated event.
5273 if (!ext_adv_capable(conn->hdev))
5274 bacpy(&conn->resp_addr,
5275 &conn->hdev->random_addr);
5277 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5280 conn->init_addr_type = bdaddr_type;
5281 bacpy(&conn->init_addr, bdaddr);
5283 /* For incoming connections, set the default minimum
5284 * and maximum connection interval. They will be used
5285 * to check if the parameters are in range and if not
5286 * trigger the connection update procedure.
5288 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5289 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE (Enhanced) Connection Complete. Creates or resolves
 * the hci_conn, fixes up initiator/responder addresses, converts RPAs back
 * to identity addresses via the IRK store, drops blocked devices, announces
 * the connection to mgmt, optionally reads remote features, and releases
 * any pending connection parameters that triggered this connection.
 * NOTE(review): elided listing — lock acquisition, several guard checks and
 * goto labels are not visible in this view.
 */
5293 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5294 bdaddr_t *bdaddr, u8 bdaddr_type,
5295 bdaddr_t *local_rpa, u8 role, u16 handle,
5296 u16 interval, u16 latency,
5297 u16 supervision_timeout)
5299 struct hci_conn_params *params;
5300 struct hci_conn *conn;
5301 struct smp_irk *irk;
5306 /* All controllers implicitly stop advertising in the event of a
5307 * connection, so ensure that the state bit is cleared.
5309 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5311 conn = hci_lookup_le_connect(hdev);
/* No pending connect object: this is a new (likely incoming) connection. */
5313 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5315 bt_dev_err(hdev, "no memory for new connection");
5319 conn->dst_type = bdaddr_type;
5321 /* If we didn't have a hci_conn object previously
5322 * but we're in central role this must be something
5323 * initiated using an accept list. Since accept list based
5324 * connections are not "first class citizens" we don't
5325 * have full tracking of them. Therefore, we go ahead
5326 * with a "best effort" approach of determining the
5327 * initiator address based on the HCI_PRIVACY flag.
5330 conn->resp_addr_type = bdaddr_type;
5331 bacpy(&conn->resp_addr, bdaddr);
5332 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5333 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5334 bacpy(&conn->init_addr, &hdev->rpa);
5336 hci_copy_identity_address(hdev,
5338 &conn->init_addr_type);
/* A pending connect object existed: stop its connection timeout. */
5342 cancel_delayed_work(&conn->le_conn_timeout);
5345 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5347 /* Lookup the identity address from the stored connection
5348 * address and address type.
5350 * When establishing connections to an identity address, the
5351 * connection procedure will store the resolvable random
5352 * address first. Now if it can be converted back into the
5353 * identity address, start using the identity address from
5356 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5358 bacpy(&conn->dst, &irk->bdaddr);
5359 conn->dst_type = irk->addr_type;
5362 /* When using controller based address resolution, then the new
5363 * address types 0x02 and 0x03 are used. These types need to be
5364 * converted back into either public address or random address type
5366 if (use_ll_privacy(hdev) &&
5367 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5368 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5369 switch (conn->dst_type) {
5370 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5371 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5373 case ADDR_LE_DEV_RANDOM_RESOLVED:
5374 conn->dst_type = ADDR_LE_DEV_RANDOM;
/* Controller reported a failed connection attempt. */
5380 hci_le_conn_failed(conn, status);
5384 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5385 addr_type = BDADDR_LE_PUBLIC;
5387 addr_type = BDADDR_LE_RANDOM;
5389 /* Drop the connection if the device is blocked */
5390 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5391 hci_conn_drop(conn);
5395 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5396 mgmt_device_connected(hdev, conn, NULL, 0);
5398 conn->sec_level = BT_SECURITY_LOW;
5399 conn->handle = handle;
5400 conn->state = BT_CONFIG;
5402 /* Store current advertising instance as connection advertising instance
5403 * when sotfware rotation is in use so it can be re-enabled when
5406 if (!ext_adv_capable(hdev))
5407 conn->adv_instance = hdev->cur_adv_instance;
5409 conn->le_conn_interval = interval;
5410 conn->le_conn_latency = latency;
5411 conn->le_supv_timeout = supervision_timeout;
5413 hci_debugfs_create_conn(conn);
5414 hci_conn_add_sysfs(conn);
5416 /* The remote features procedure is defined for central
5417 * role only. So only in case of an initiated connection
5418 * request the remote features.
5420 * If the local controller supports peripheral-initiated features
5421 * exchange, then requesting the remote features in peripheral
5422 * role is possible. Otherwise just transition into the
5423 * connected state without requesting the remote features.
5426 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5427 struct hci_cp_le_read_remote_features cp;
5429 cp.handle = __cpu_to_le16(conn->handle);
5431 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5434 hci_conn_hold(conn);
5436 conn->state = BT_CONNECTED;
5437 hci_connect_cfm(conn, status);
/* Release the pend_le_conns entry that triggered this connection. */
5440 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5443 list_del_init(&params->action);
5445 hci_conn_drop(params->conn);
5446 hci_conn_put(params->conn);
5447 params->conn = NULL;
5452 hci_update_background_scan(hdev);
5453 hci_dev_unlock(hdev);
/* Handle the legacy HCI LE Connection Complete event: unpack the event
 * fields and delegate to the common le_conn_complete_evt() handler
 * (no local RPA is available in the legacy event, hence NULL).
 */
5456 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5458 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5460 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5462 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5463 NULL, ev->role, le16_to_cpu(ev->handle),
5464 le16_to_cpu(ev->interval),
5465 le16_to_cpu(ev->latency),
5466 le16_to_cpu(ev->supervision_timeout));
/* Handle HCI LE Enhanced Connection Complete: delegate to the common
 * handler, passing the controller-provided local RPA, then disable
 * controller address resolution while connected when LL privacy is in use.
 */
5469 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5470 struct sk_buff *skb)
5472 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5474 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5476 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5477 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5478 le16_to_cpu(ev->interval),
5479 le16_to_cpu(ev->latency),
5480 le16_to_cpu(ev->supervision_timeout));
5482 if (use_ll_privacy(hdev) &&
5483 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5484 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5485 hci_req_disable_address_resolution(hdev);
/* Handle HCI LE Advertising Set Terminated: on error remove the advertising
 * instance; on success (terminated due to a connection) mark the set
 * disabled, remember the instance on the new connection, and fix up the
 * connection's responder address from the set's (or the global) random
 * address when advertising used a random address.
 * NOTE(review): elided listing — the status/error branch structure between
 * these lines is not visible in this view.
 */
5488 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5490 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5491 struct hci_conn *conn;
5492 struct adv_info *adv;
5494 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5496 adv = hci_find_adv_instance(hdev, ev->handle);
5502 /* Remove advertising as it has been terminated */
5503 hci_remove_adv_instance(hdev, ev->handle);
5504 mgmt_advertising_removed(NULL, hdev, ev->handle);
5510 adv->enabled = false;
5512 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5514 /* Store handle in the connection so the correct advertising
5515 * instance can be re-enabled when disconnected.
5517 conn->adv_instance = ev->handle;
/* Only patch resp_addr when advertising used a random address and the
 * address has not been set already.
 */
5519 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5520 bacmp(&conn->resp_addr, BDADDR_ANY))
5524 bacpy(&conn->resp_addr, &hdev->random_addr);
5529 bacpy(&conn->resp_addr, &adv->random_addr);
/* Handle HCI LE Connection Update Complete: report a failed update to mgmt,
 * otherwise store the new interval/latency/supervision timeout on the
 * connection and notify mgmt of the updated parameters.
 */
5533 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5534 struct sk_buff *skb)
5536 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5537 struct hci_conn *conn;
5539 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5546 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Failure path: unlock before reporting to mgmt. */
5550 hci_dev_unlock(hdev);
5551 mgmt_le_conn_update_failed(hdev, &conn->dst,
5552 conn->type, conn->dst_type, ev->status);
5556 conn->le_conn_interval = le16_to_cpu(ev->interval);
5557 conn->le_conn_latency = le16_to_cpu(ev->latency);
5558 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5561 hci_dev_unlock(hdev);
5564 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
5565 conn->dst_type, conn->le_conn_interval,
5566 conn->le_conn_latency, conn->le_supv_timeout);
/* This function requires the caller holds hdev->lock */
/* Decide whether an advertising report should trigger an outgoing LE
 * connection: only for connectable advertising, not for blocked devices,
 * not while a peripheral-role connection limits us, and only when a
 * matching pend_le_conns entry (with a compatible auto_connect policy)
 * exists. Returns the new hci_conn on success, NULL otherwise.
 * NOTE(review): elided listing — several early "return NULL" paths and the
 * final return are not visible in this view.
 */
5571 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5573 u8 addr_type, u8 adv_type,
5574 bdaddr_t *direct_rpa)
5576 struct hci_conn *conn;
5577 struct hci_conn_params *params;
5579 /* If the event is not connectable don't proceed further */
5580 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5583 /* Ignore if the device is blocked */
5584 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5587 /* Most controller will fail if we try to create new connections
5588 * while we have an existing one in peripheral role.
5590 if (hdev->conn_hash.le_num_peripheral > 0 &&
5591 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5592 !(hdev->le_states[3] & 0x10)))
5595 /* If we're not connectable only connect devices that we have in
5596 * our pend_le_conns list.
5598 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5603 if (!params->explicit_connect) {
5604 switch (params->auto_connect) {
5605 case HCI_AUTO_CONN_DIRECT:
5606 /* Only devices advertising with ADV_DIRECT_IND are
5607 * triggering a connection attempt. This is allowing
5608 * incoming connections from peripheral devices.
5610 if (adv_type != LE_ADV_DIRECT_IND)
5613 case HCI_AUTO_CONN_ALWAYS:
5614 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5615 * are triggering a connection attempt. This means
5616 * that incoming connections from peripheral device are
5617 * accepted and also outgoing connections to peripheral
5618 * devices are established when found.
5626 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5627 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5629 if (!IS_ERR(conn)) {
5630 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5631 * by higher layer that tried to connect, if no then
5632 * store the pointer since we don't really have any
5633 * other owner of the object besides the params that
5634 * triggered it. This way we can abort the connection if
5635 * the parameters get removed and keep the reference
5636 * count consistent once the connection is established.
5639 if (!params->explicit_connect)
5640 params->conn = hci_conn_get(conn);
5645 switch (PTR_ERR(conn)) {
5647 /* If hci_connect() returns -EBUSY it means there is already
5648 * an LE connection attempt going on. Since controllers don't
5649 * support more than one connection attempt at the time, we
5650 * don't consider this an error case.
5654 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single advertising report (legacy or extended, after translation
 * to a legacy PDU type): validate the type and length, resolve an RPA to its
 * identity address when an IRK is known, trigger any pending LE connection,
 * and forward the report to mgmt as a device-found event — merging a scan
 * response with a previously cached advertising report when possible.
 *
 * direct_addr is non-NULL only for LE Direct Advertising Report events.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;

	/* Accept only the known legacy advertising PDU types; anything else
	 * is reported (rate limited) and dropped.
	 */
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
				       "type: 0x%02x", type);

	/* Legacy advertising PDUs carry at most 31 bytes of AD data. */
	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))

		/* Non-connectable PDU types are flagged as such to mgmt. */
		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
/* Handle HCI LE Advertising Report events: walk the packed array of
 * hci_ev_le_advertising_info entries, bounds-check each one against the
 * skb tail, and hand valid reports to process_adv_report().
 *
 * Wire format per entry: the fixed header, ev->length bytes of AD data,
 * then one trailing RSSI byte — hence the "+ 1" in both the RSSI read
 * and the pointer advance.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		/* Reject reports whose fixed header would run past the
		 * end of the skb (malformed/hostile controller data).
		 */
		if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
			bt_dev_err(hdev, "Malicious advertising data.");

		/* AD data must fit the legacy limit and stay inside the
		 * skb before the report is processed.
		 */
		if (ev->length <= HCI_MAX_AD_LENGTH &&
		    ev->data + ev->length <= skb_tail_pointer(skb)) {
			/* RSSI is the single byte following the AD data */
			rssi = ev->data[ev->length];
			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, rssi,
					   ev->data, ev->length, false);
			bt_dev_err(hdev, "Dropping invalid advertising data");

		ptr += sizeof(*ev) + ev->length + 1;

	hci_dev_unlock(hdev);
/* Map an extended advertising report event type (bitfield) to the
 * equivalent legacy advertising PDU type, so extended reports can be
 * funneled through the common process_adv_report() path.
 *
 * Returns LE_ADV_INVALID for combinations that have no legacy equivalent.
 */
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
	/* Legacy PDUs carry the exact legacy type in the event type field */
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		case LE_LEGACY_ADV_IND:
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;

	/* Non-legacy PDUs: derive the closest legacy type from the
	 * connectable/scannable/directed property bits.
	 */
	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",

	return LE_ADV_INVALID;
/* Handle HCI LE Extended Advertising Report events: translate each
 * report's extended event type to a legacy PDU type and forward it to
 * process_adv_report(); reports with no legacy equivalent are skipped.
 *
 * Unlike legacy reports there is no trailing RSSI byte — RSSI is a
 * field of the report header — so the pointer advances by the header
 * plus ev->length only.
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	while (num_reports--) {
		struct hci_ev_le_ext_adv_report *ev = ptr;

		evt_type = __le16_to_cpu(ev->evt_type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			/* ext_adv=true only for truly extended PDUs; legacy
			 * PDUs delivered via this event keep legacy limits.
			 */
			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, ev->rssi,
					   ev->data, ev->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));

		ptr += sizeof(*ev) + ev->length;

	hci_dev_unlock(hdev);
/* Handle HCI LE Read Remote Features Complete: record the remote LE
 * feature bits on the connection and, if the connection was still in
 * BT_CONFIG, complete its setup via hci_connect_cfm().
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
		/* Cache the remote LE features page on the connection */
		memcpy(conn->features[0], ev->features, 8);

		if (conn->state == BT_CONFIG) {

			/* If the local controller supports peripheral-initiated
			 * features exchange, but the remote controller does
			 * not, then it is possible that the error code 0x1a
			 * for unsupported remote feature gets returned.
			 *
			 * In this specific case, allow the connection to
			 * transition into connected state and mark it as
			 */
			if (!conn->out && ev->status == 0x1a &&
			    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
				status = ev->status;

			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Handle HCI LE Long Term Key Request: look up the stored LTK for the
 * connection and answer with LE_LTK_REPLY carrying the key, or with
 * LE_LTK_NEG_REPLY when no matching key exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)

	/* Copy the key and zero-pad the reply up to the full LTK size */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* One-shot key: drop the STK from the key list (RCU-safe) */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);

	hci_dev_unlock(hdev);

	/* No usable key: reject the request for this handle */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
/* Reject a remote LE connection parameter request for @handle by sending
 * LE_CONN_PARAM_REQ_NEG_REPLY. Callers pass an HCI error code (e.g.
 * HCI_ERROR_UNKNOWN_CONN_ID) as the rejection reason — NOTE(review): the
 * reason parameter/assignment lines fall outside this excerpt; confirm
 * against the full file.
 */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle HCI LE Remote Connection Parameter Request: validate the handle
 * and the requested parameters, reject invalid requests with a negative
 * reply, otherwise (as central) remember the parameters, notify mgmt and
 * accept the request with a positive reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	/* Only an established connection may renegotiate parameters */
	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject parameter combinations outside the spec-valid ranges */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;

		/* Persist the accepted parameters for this peer so future
		 * connections use them as well.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);

	/* Accept: echo the requested (still little-endian) values back */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle HCI LE Direct Advertising Report events: validate that the skb
 * actually holds num_reports fixed-size entries, then forward each one
 * to process_adv_report() with its direct (initiator) address so RPA
 * matching against the local controller can be performed.
 */
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	/* Entries are fixed-size (no AD data), so a single length check
	 * covers the whole array; "+ 1" accounts for the count byte.
	 */
	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)

	for (; num_reports; num_reports--, ev++)
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,

	hci_dev_unlock(hdev);
/* Handle HCI LE PHY Update Complete: record the negotiated TX/RX PHYs
 * on the connection identified by the event handle.
 */
static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

	hci_dev_unlock(hdev);
/* Dispatch an HCI LE Meta event: strip the meta header from the skb and
 * route to the subevent-specific handler. The skb passed on points at
 * the subevent parameters.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
/* Verify that @skb (a pristine copy of the last event) is the Command
 * Complete event for @opcode that a request was waiting on, advancing
 * skb->data past the event headers so the caller sees the return
 * parameters. Returns whether the skb should be handed to the request's
 * completion callback.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* A request may wait on a specific (non Command Complete) event */
	if (hdr->evt != event)

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
6293 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6294 struct sk_buff *skb)
6296 struct hci_ev_le_advertising_info *adv;
6297 struct hci_ev_le_direct_adv_info *direct_adv;
6298 struct hci_ev_le_ext_adv_report *ext_adv;
6299 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6300 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6304 /* If we are currently suspended and this is the first BT event seen,
6305 * save the wake reason associated with the event.
6307 if (!hdev->suspended || hdev->wake_reason)
6310 /* Default to remote wake. Values for wake_reason are documented in the
6311 * Bluez mgmt api docs.
6313 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6315 /* Once configured for remote wakeup, we should only wake up for
6316 * reconnections. It's useful to see which device is waking us up so
6317 * keep track of the bdaddr of the connection event that woke us up.
6319 if (event == HCI_EV_CONN_REQUEST) {
6320 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6321 hdev->wake_addr_type = BDADDR_BREDR;
6322 } else if (event == HCI_EV_CONN_COMPLETE) {
6323 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6324 hdev->wake_addr_type = BDADDR_BREDR;
6325 } else if (event == HCI_EV_LE_META) {
6326 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6327 u8 subevent = le_ev->subevent;
6328 u8 *ptr = &skb->data[sizeof(*le_ev)];
6329 u8 num_reports = *ptr;
6331 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6332 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6333 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6335 adv = (void *)(ptr + 1);
6336 direct_adv = (void *)(ptr + 1);
6337 ext_adv = (void *)(ptr + 1);
6340 case HCI_EV_LE_ADVERTISING_REPORT:
6341 bacpy(&hdev->wake_addr, &adv->bdaddr);
6342 hdev->wake_addr_type = adv->bdaddr_type;
6344 case HCI_EV_LE_DIRECT_ADV_REPORT:
6345 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6346 hdev->wake_addr_type = direct_adv->bdaddr_type;
6348 case HCI_EV_LE_EXT_ADV_REPORT:
6349 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6350 hdev->wake_addr_type = ext_adv->bdaddr_type;
6355 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6359 hci_dev_unlock(hdev);
6362 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6364 struct hci_event_hdr *hdr = (void *) skb->data;
6365 hci_req_complete_t req_complete = NULL;
6366 hci_req_complete_skb_t req_complete_skb = NULL;
6367 struct sk_buff *orig_skb = NULL;
6368 u8 status = 0, event = hdr->evt, req_evt = 0;
6369 u16 opcode = HCI_OP_NOP;
6372 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6376 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6377 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6378 opcode = __le16_to_cpu(cmd_hdr->opcode);
6379 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6384 /* If it looks like we might end up having to call
6385 * req_complete_skb, store a pristine copy of the skb since the
6386 * various handlers may modify the original one through
6387 * skb_pull() calls, etc.
6389 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6390 event == HCI_EV_CMD_COMPLETE)
6391 orig_skb = skb_clone(skb, GFP_KERNEL);
6393 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6395 /* Store wake reason if we're suspended */
6396 hci_store_wake_reason(hdev, event, skb);
6399 case HCI_EV_INQUIRY_COMPLETE:
6400 hci_inquiry_complete_evt(hdev, skb);
6403 case HCI_EV_INQUIRY_RESULT:
6404 hci_inquiry_result_evt(hdev, skb);
6407 case HCI_EV_CONN_COMPLETE:
6408 hci_conn_complete_evt(hdev, skb);
6411 case HCI_EV_CONN_REQUEST:
6412 hci_conn_request_evt(hdev, skb);
6415 case HCI_EV_DISCONN_COMPLETE:
6416 hci_disconn_complete_evt(hdev, skb);
6419 case HCI_EV_AUTH_COMPLETE:
6420 hci_auth_complete_evt(hdev, skb);
6423 case HCI_EV_REMOTE_NAME:
6424 hci_remote_name_evt(hdev, skb);
6427 case HCI_EV_ENCRYPT_CHANGE:
6428 hci_encrypt_change_evt(hdev, skb);
6431 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6432 hci_change_link_key_complete_evt(hdev, skb);
6435 case HCI_EV_REMOTE_FEATURES:
6436 hci_remote_features_evt(hdev, skb);
6439 case HCI_EV_CMD_COMPLETE:
6440 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6441 &req_complete, &req_complete_skb);
6444 case HCI_EV_CMD_STATUS:
6445 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6449 case HCI_EV_HARDWARE_ERROR:
6450 hci_hardware_error_evt(hdev, skb);
6453 case HCI_EV_ROLE_CHANGE:
6454 hci_role_change_evt(hdev, skb);
6457 case HCI_EV_NUM_COMP_PKTS:
6458 hci_num_comp_pkts_evt(hdev, skb);
6461 case HCI_EV_MODE_CHANGE:
6462 hci_mode_change_evt(hdev, skb);
6465 case HCI_EV_PIN_CODE_REQ:
6466 hci_pin_code_request_evt(hdev, skb);
6469 case HCI_EV_LINK_KEY_REQ:
6470 hci_link_key_request_evt(hdev, skb);
6473 case HCI_EV_LINK_KEY_NOTIFY:
6474 hci_link_key_notify_evt(hdev, skb);
6477 case HCI_EV_CLOCK_OFFSET:
6478 hci_clock_offset_evt(hdev, skb);
6481 case HCI_EV_PKT_TYPE_CHANGE:
6482 hci_pkt_type_change_evt(hdev, skb);
6485 case HCI_EV_PSCAN_REP_MODE:
6486 hci_pscan_rep_mode_evt(hdev, skb);
6489 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6490 hci_inquiry_result_with_rssi_evt(hdev, skb);
6493 case HCI_EV_REMOTE_EXT_FEATURES:
6494 hci_remote_ext_features_evt(hdev, skb);
6497 case HCI_EV_SYNC_CONN_COMPLETE:
6498 hci_sync_conn_complete_evt(hdev, skb);
6501 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6502 hci_extended_inquiry_result_evt(hdev, skb);
6505 case HCI_EV_KEY_REFRESH_COMPLETE:
6506 hci_key_refresh_complete_evt(hdev, skb);
6509 case HCI_EV_IO_CAPA_REQUEST:
6510 hci_io_capa_request_evt(hdev, skb);
6513 case HCI_EV_IO_CAPA_REPLY:
6514 hci_io_capa_reply_evt(hdev, skb);
6517 case HCI_EV_USER_CONFIRM_REQUEST:
6518 hci_user_confirm_request_evt(hdev, skb);
6521 case HCI_EV_USER_PASSKEY_REQUEST:
6522 hci_user_passkey_request_evt(hdev, skb);
6525 case HCI_EV_USER_PASSKEY_NOTIFY:
6526 hci_user_passkey_notify_evt(hdev, skb);
6529 case HCI_EV_KEYPRESS_NOTIFY:
6530 hci_keypress_notify_evt(hdev, skb);
6533 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6534 hci_simple_pair_complete_evt(hdev, skb);
6537 case HCI_EV_REMOTE_HOST_FEATURES:
6538 hci_remote_host_features_evt(hdev, skb);
6541 case HCI_EV_LE_META:
6542 hci_le_meta_evt(hdev, skb);
6545 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6546 hci_remote_oob_data_request_evt(hdev, skb);
6549 #if IS_ENABLED(CONFIG_BT_HS)
6550 case HCI_EV_CHANNEL_SELECTED:
6551 hci_chan_selected_evt(hdev, skb);
6554 case HCI_EV_PHY_LINK_COMPLETE:
6555 hci_phy_link_complete_evt(hdev, skb);
6558 case HCI_EV_LOGICAL_LINK_COMPLETE:
6559 hci_loglink_complete_evt(hdev, skb);
6562 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6563 hci_disconn_loglink_complete_evt(hdev, skb);
6566 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6567 hci_disconn_phylink_complete_evt(hdev, skb);
6571 case HCI_EV_NUM_COMP_BLOCKS:
6572 hci_num_comp_blocks_evt(hdev, skb);
6576 msft_vendor_evt(hdev, skb);
6580 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6585 req_complete(hdev, status, opcode);
6586 } else if (req_complete_skb) {
6587 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6588 kfree_skb(orig_skb);
6591 req_complete_skb(hdev, status, opcode, orig_skb);
6595 kfree_skb(orig_skb);
6597 hdev->stat.evt_rx++;