2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
/* 16 zero bytes; presumably compared against link keys to detect a
 * blank/invalid key — usage is not visible in this extract.
 */
40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Convert whole seconds to jiffies by way of msecs_to_jiffies(). */
43 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45 /* Handle HCI Event packets */
/* Command Complete handler for Inquiry Cancel: clears HCI_INQUIRY,
 * wakes bit-waiters, moves discovery to DISCOVERY_STOPPED (unless an LE
 * active scan is running) and kicks pending connection attempts.
 * NOTE(review): this extract has lines missing (gaps in the original
 * numbering, e.g. braces and the status-error path); code left as-is.
 */
47 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%2.2x", hdev->name, status);
54 /* It is possible that we receive Inquiry Complete event right
55 * before we receive Inquiry Cancel Command Complete event, in
56 * which case the latter event should have status of Command
57 * Disallowed (0x0c). This should not be treated as error, since
58 * we actually achieve what Inquiry Cancel wants to achieve,
59 * which is to end the last Inquiry session.
61 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
62 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
73 wake_up_bit(&hdev->flags, HCI_INQUIRY);
76 /* Set discovery state to stopped if we're not doing LE active
79 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
80 hdev->le_scan_type != LE_SCAN_ACTIVE)
81 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
84 hci_conn_check_pending(hdev);
/* Command Complete handler for Periodic Inquiry Mode: on success
 * (guard lines missing from this extract) sets HCI_PERIODIC_INQ.
 */
87 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 __u8 status = *((__u8 *) skb->data);
91 BT_DBG("%s status 0x%2.2x", hdev->name, status);
96 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete handler for Exit Periodic Inquiry Mode: clears
 * HCI_PERIODIC_INQ and kicks any pending connection attempts.
 */
99 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 __u8 status = *((__u8 *) skb->data);
103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
108 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 hci_conn_check_pending(hdev);
/* Command Complete handler for Remote Name Request Cancel: debug log
 * only — no state is updated here.
 */
113 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
116 BT_DBG("%s", hdev->name);
/* Command Complete handler for Role Discovery: looks up the connection
 * by handle and records its current role (under hci_dev lock; the
 * lock/guard lines are missing from this extract).
 */
119 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 struct hci_rp_role_discovery *rp = (void *) skb->data;
122 struct hci_conn *conn;
124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 conn->role = rp->role;
135 hci_dev_unlock(hdev);
/* Command Complete handler for Read Link Policy: stores the reported
 * per-connection link policy on the matching hci_conn.
 */
138 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 struct hci_rp_read_link_policy *rp = (void *) skb->data;
141 struct hci_conn *conn;
143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = __le16_to_cpu(rp->policy);
154 hci_dev_unlock(hdev);
/* Command Complete handler for Write Link Policy: re-reads the policy
 * value from the command we sent (offset +2 past the handle) and
 * caches it on the matching connection.
 */
157 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_write_link_policy *rp = (void *) skb->data;
160 struct hci_conn *conn;
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
168 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
174 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
176 conn->link_policy = get_unaligned_le16(sent + 2);
178 hci_dev_unlock(hdev);
/* Command Complete handler for Read Default Link Policy: caches the
 * controller-wide default link policy on hdev.
 */
181 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
184 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
186 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
191 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete handler for Write Default Link Policy: mirrors the
 * value from the sent command into hdev->link_policy.
 */
194 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
197 __u8 status = *((__u8 *) skb->data);
200 BT_DBG("%s status 0x%2.2x", hdev->name, status);
205 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
209 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete handler for HCI Reset: clears HCI_RESET and all
 * volatile flags, resets discovery state, invalidates cached TX power
 * values, wipes advertising/scan-response data, restores passive LE
 * scanning and empties the LE accept/resolving lists.
 */
212 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
214 __u8 status = *((__u8 *) skb->data);
216 BT_DBG("%s status 0x%2.2x", hdev->name, status);
218 clear_bit(HCI_RESET, &hdev->flags);
223 /* Reset all non-persistent flags */
224 hci_dev_clear_volatile_flags(hdev);
226 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
228 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
229 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
231 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
232 hdev->adv_data_len = 0;
234 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
235 hdev->scan_rsp_data_len = 0;
237 hdev->le_scan_type = LE_SCAN_PASSIVE;
239 hdev->ssp_debug_mode = 0;
241 hci_bdaddr_list_clear(&hdev->le_accept_list);
242 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete handler for Read Stored Link Key: when the request
 * asked for all keys (read_all == 0x01) and succeeded, caches the
 * controller's max/current stored-key counts.
 */
245 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
249 struct hci_cp_read_stored_link_key *sent;
251 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
253 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
257 if (!rp->status && sent->read_all == 0x01) {
258 hdev->stored_max_keys = rp->max_keys;
259 hdev->stored_num_keys = rp->num_keys;
/* Command Complete handler for Delete Stored Link Key: decrements the
 * cached stored-key count, clamping at zero to avoid underflow.
 */
263 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
266 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
268 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
273 if (rp->num_keys <= hdev->stored_num_keys)
274 hdev->stored_num_keys -= rp->num_keys;
276 hdev->stored_num_keys = 0;
/* Command Complete handler for Write Local Name: notifies mgmt of the
 * result and, on success (guard missing from this extract), copies the
 * name we sent into hdev->dev_name.
 */
279 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
281 __u8 status = *((__u8 *) skb->data);
284 BT_DBG("%s status 0x%2.2x", hdev->name, status);
286 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
292 if (hci_dev_test_flag(hdev, HCI_MGMT))
293 mgmt_set_local_name_complete(hdev, sent, status);
295 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
297 hci_dev_unlock(hdev);
/* Command Complete handler for Read Local Name: only during initial
 * SETUP/CONFIG does the controller's name overwrite hdev->dev_name.
 */
300 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
302 struct hci_rp_read_local_name *rp = (void *) skb->data;
304 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
309 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
310 hci_dev_test_flag(hdev, HCI_CONFIG))
311 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete handler for Write Authentication Enable: mirrors
 * the requested parameter into the HCI_AUTH flag and tells mgmt.
 */
314 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
316 __u8 status = *((__u8 *) skb->data);
319 BT_DBG("%s status 0x%2.2x", hdev->name, status);
321 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
328 __u8 param = *((__u8 *) sent);
330 if (param == AUTH_ENABLED)
331 set_bit(HCI_AUTH, &hdev->flags);
333 clear_bit(HCI_AUTH, &hdev->flags);
336 if (hci_dev_test_flag(hdev, HCI_MGMT))
337 mgmt_auth_enable_complete(hdev, status);
339 hci_dev_unlock(hdev);
/* Command Complete handler for Write Encryption Mode: mirrors the sent
 * parameter into the HCI_ENCRYPT device flag.
 */
342 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
344 __u8 status = *((__u8 *) skb->data);
348 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
357 param = *((__u8 *) sent);
360 set_bit(HCI_ENCRYPT, &hdev->flags);
362 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete handler for Write Scan Enable: translates the sent
 * SCAN_INQUIRY/SCAN_PAGE bits into the HCI_ISCAN/HCI_PSCAN flags and
 * (on the path shown) zeroes the discoverable timeout.
 */
365 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
367 __u8 status = *((__u8 *) skb->data);
371 BT_DBG("%s status 0x%2.2x", hdev->name, status);
373 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
377 param = *((__u8 *) sent);
382 hdev->discov_timeout = 0;
386 if (param & SCAN_INQUIRY)
387 set_bit(HCI_ISCAN, &hdev->flags);
389 clear_bit(HCI_ISCAN, &hdev->flags);
391 if (param & SCAN_PAGE)
392 set_bit(HCI_PSCAN, &hdev->flags);
394 clear_bit(HCI_PSCAN, &hdev->flags);
397 hci_dev_unlock(hdev);
/* Command Complete handler for Set Event Filter: tracks whether any
 * filter is configured via the HCI_EVENT_FILTER_CONFIGURED flag
 * (cleared when the sent filter type was "clear all").
 */
400 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
402 __u8 status = *((__u8 *)skb->data);
403 struct hci_cp_set_event_filter *cp;
406 BT_DBG("%s status 0x%2.2x", hdev->name, status);
411 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
415 cp = (struct hci_cp_set_event_filter *)sent;
417 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
418 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
420 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete handler for Read Class of Device: caches the 3-byte
 * class value and logs it (bytes printed MSB first).
 */
423 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
425 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
427 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
432 memcpy(hdev->dev_class, rp->dev_class, 3);
434 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
435 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete handler for Write Class of Device: copies the class
 * we sent into hdev->dev_class and notifies mgmt of the result.
 */
438 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
440 __u8 status = *((__u8 *) skb->data);
443 BT_DBG("%s status 0x%2.2x", hdev->name, status);
445 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
452 memcpy(hdev->dev_class, sent, 3);
454 if (hci_dev_test_flag(hdev, HCI_MGMT))
455 mgmt_set_class_of_dev_complete(hdev, sent, status);
457 hci_dev_unlock(hdev);
/* Command Complete handler for Read Voice Setting: caches the setting
 * and, when it changed, notifies the driver via hdev->notify().
 */
460 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
462 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
465 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
470 setting = __le16_to_cpu(rp->voice_setting);
472 if (hdev->voice_setting == setting)
475 hdev->voice_setting = setting;
477 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
480 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for Write Voice Setting: same update path
 * as the read variant, but sourced from the command we sent.
 */
483 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
486 __u8 status = *((__u8 *) skb->data);
490 BT_DBG("%s status 0x%2.2x", hdev->name, status);
495 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
499 setting = get_unaligned_le16(sent);
501 if (hdev->voice_setting == setting)
504 hdev->voice_setting = setting;
506 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
509 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for Read Number of Supported IAC: caches
 * how many inquiry access codes the controller supports.
 */
512 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
515 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
522 hdev->num_iac = rp->num_iac;
524 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete handler for Write Simple Pairing Mode: updates the
 * LMP_HOST_SSP feature bit and HCI_SSP_ENABLED flag to match the mode
 * we sent, and reports the result to mgmt.
 */
527 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
529 __u8 status = *((__u8 *) skb->data);
530 struct hci_cp_write_ssp_mode *sent;
532 BT_DBG("%s status 0x%2.2x", hdev->name, status);
534 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
542 hdev->features[1][0] |= LMP_HOST_SSP;
544 hdev->features[1][0] &= ~LMP_HOST_SSP;
547 if (hci_dev_test_flag(hdev, HCI_MGMT))
548 mgmt_ssp_enable_complete(hdev, sent->mode, status);
551 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
553 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
556 hci_dev_unlock(hdev);
/* Command Complete handler for Write Secure Connections Host Support:
 * updates the LMP_HOST_SC feature bit; when mgmt is not managing the
 * device and the command succeeded, also toggles HCI_SC_ENABLED.
 */
559 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
561 u8 status = *((u8 *) skb->data);
562 struct hci_cp_write_sc_support *sent;
564 BT_DBG("%s status 0x%2.2x", hdev->name, status);
566 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
574 hdev->features[1][0] |= LMP_HOST_SC;
576 hdev->features[1][0] &= ~LMP_HOST_SC;
579 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
581 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
583 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
586 hci_dev_unlock(hdev);
/* Command Complete handler for Read Local Version Information: during
 * SETUP/CONFIG, caches HCI/LMP versions, revision and manufacturer.
 */
589 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
591 struct hci_rp_read_local_version *rp = (void *) skb->data;
593 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
598 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
599 hci_dev_test_flag(hdev, HCI_CONFIG)) {
600 hdev->hci_ver = rp->hci_ver;
601 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
602 hdev->lmp_ver = rp->lmp_ver;
603 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
604 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete handler for Read Local Supported Commands: during
 * SETUP/CONFIG, caches the supported-commands bitmask.
 */
608 static void hci_cc_read_local_commands(struct hci_dev *hdev,
611 struct hci_rp_read_local_commands *rp = (void *) skb->data;
613 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
618 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
619 hci_dev_test_flag(hdev, HCI_CONFIG))
620 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete handler for Read Authenticated Payload Timeout:
 * stores the timeout on the connection matching the returned handle.
 */
623 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
626 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
627 struct hci_conn *conn;
629 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
636 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
638 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
640 hci_dev_unlock(hdev);
/* Command Complete handler for Write Authenticated Payload Timeout:
 * caches the timeout from the sent command (offset +2 past the handle)
 * on the matching connection.
 */
643 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
646 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
647 struct hci_conn *conn;
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
655 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
661 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
663 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
665 hci_dev_unlock(hdev);
/* Command Complete handler for Read Local Supported Features: caches
 * the feature page and derives the supported ACL packet types and
 * (e)SCO link types from the individual LMP feature bits.
 */
668 static void hci_cc_read_local_features(struct hci_dev *hdev,
671 struct hci_rp_read_local_features *rp = (void *) skb->data;
673 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
678 memcpy(hdev->features, rp->features, 8);
680 /* Adjust default settings according to features
681 * supported by device. */
683 if (hdev->features[0][0] & LMP_3SLOT)
684 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
686 if (hdev->features[0][0] & LMP_5SLOT)
687 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
689 if (hdev->features[0][1] & LMP_HV2) {
690 hdev->pkt_type |= (HCI_HV2);
691 hdev->esco_type |= (ESCO_HV2);
694 if (hdev->features[0][1] & LMP_HV3) {
695 hdev->pkt_type |= (HCI_HV3);
696 hdev->esco_type |= (ESCO_HV3);
699 if (lmp_esco_capable(hdev))
700 hdev->esco_type |= (ESCO_EV3);
702 if (hdev->features[0][4] & LMP_EV4)
703 hdev->esco_type |= (ESCO_EV4);
705 if (hdev->features[0][4] & LMP_EV5)
706 hdev->esco_type |= (ESCO_EV5);
708 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
709 hdev->esco_type |= (ESCO_2EV3);
711 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
712 hdev->esco_type |= (ESCO_3EV3);
714 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
715 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete handler for Read Local Extended Features: records
 * the highest feature page seen and caches the returned page's bits
 * (bounds-checked against HCI_MAX_PAGES).
 */
718 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
721 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
723 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
728 if (hdev->max_page < rp->max_page)
729 hdev->max_page = rp->max_page;
731 if (rp->page < HCI_MAX_PAGES)
732 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete handler for Read Flow Control Mode: caches the
 * controller's flow-control mode (packet vs data-block based).
 */
735 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
738 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
740 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
745 hdev->flow_ctl_mode = rp->mode;
/* Command Complete handler for Read Buffer Size: caches ACL/SCO MTUs
 * and packet counts and initializes the free-packet counters. The
 * HCI_QUIRK_FIXUP_BUFFER_SIZE branch body is missing from this
 * extract.
 */
748 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
750 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
752 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
757 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
758 hdev->sco_mtu = rp->sco_mtu;
759 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
760 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
762 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
767 hdev->acl_cnt = hdev->acl_pkts;
768 hdev->sco_cnt = hdev->sco_pkts;
770 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
771 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete handler for Read BD_ADDR: during HCI init records
 * the controller address; during SETUP also keeps a pristine copy in
 * setup_addr.
 */
774 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
776 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
778 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
783 if (test_bit(HCI_INIT, &hdev->flags))
784 bacpy(&hdev->bdaddr, &rp->bdaddr);
786 if (hci_dev_test_flag(hdev, HCI_SETUP))
787 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete handler for Read Local Pairing Options: during
 * SETUP/CONFIG, caches pairing options and max encryption key size.
 */
790 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
793 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
795 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
800 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
801 hci_dev_test_flag(hdev, HCI_CONFIG)) {
802 hdev->pairing_opts = rp->pairing_opts;
803 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete handler for Read Page Scan Activity: during HCI
 * init, caches the page scan interval and window.
 */
807 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
810 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
812 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
817 if (test_bit(HCI_INIT, &hdev->flags)) {
818 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
819 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete handler for Write Page Scan Activity: mirrors the
 * interval/window we sent into the cached values.
 */
823 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
826 u8 status = *((u8 *) skb->data);
827 struct hci_cp_write_page_scan_activity *sent;
829 BT_DBG("%s status 0x%2.2x", hdev->name, status);
834 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
838 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
839 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete handler for Read Page Scan Type: during HCI init,
 * caches the controller's page scan type.
 */
842 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
845 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
847 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
852 if (test_bit(HCI_INIT, &hdev->flags))
853 hdev->page_scan_type = rp->type;
/* Command Complete handler for Write Page Scan Type: mirrors the type
 * byte we sent into hdev->page_scan_type.
 */
856 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
859 u8 status = *((u8 *) skb->data);
862 BT_DBG("%s status 0x%2.2x", hdev->name, status);
867 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
869 hdev->page_scan_type = *type;
/* Command Complete handler for Read Data Block Size: caches block MTU,
 * block length and block count for block-based flow control, and
 * initializes the free-block counter.
 */
872 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
875 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
877 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
882 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
883 hdev->block_len = __le16_to_cpu(rp->block_len);
884 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
886 hdev->block_cnt = hdev->num_blocks;
888 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
889 hdev->block_cnt, hdev->block_len);
/* Command Complete handler for Read Clock: validates the reply length,
 * then stores either the local clock (cp->which == 0x00) on hdev or
 * the piconet clock + accuracy on the matching connection.
 */
892 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
894 struct hci_rp_read_clock *rp = (void *) skb->data;
895 struct hci_cp_read_clock *cp;
896 struct hci_conn *conn;
898 BT_DBG("%s", hdev->name);
900 if (skb->len < sizeof(*rp))
908 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
912 if (cp->which == 0x00) {
913 hdev->clock = le32_to_cpu(rp->clock);
917 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
919 conn->clock = le32_to_cpu(rp->clock);
920 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
924 hci_dev_unlock(hdev);
/* Command Complete handler for Read Local AMP Info: caches all AMP
 * controller parameters (status, bandwidth, latency, PDU size, type,
 * PAL capabilities, assoc size, flush timeouts) on hdev.
 */
927 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
930 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
932 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
937 hdev->amp_status = rp->amp_status;
938 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
939 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
940 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
941 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
942 hdev->amp_type = rp->amp_type;
943 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
944 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
945 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
946 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete handler for Read Inquiry Response TX Power: caches
 * the reported TX power level.
 */
949 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
952 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
954 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
959 hdev->inq_tx_power = rp->tx_power;
/* Command Complete handler for Read Default Erroneous Data Reporting:
 * caches the reporting mode.
 */
962 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
965 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
967 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
972 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete handler for Write Default Erroneous Data Reporting:
 * mirrors the mode we sent into hdev->err_data_reporting.
 */
975 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
978 __u8 status = *((__u8 *)skb->data);
979 struct hci_cp_write_def_err_data_reporting *cp;
981 BT_DBG("%s status 0x%2.2x", hdev->name, status);
986 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
990 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete handler for PIN Code Request Reply: notifies mgmt
 * of the result and records the PIN length on the ACL connection to
 * the peer address from the sent command.
 */
993 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
995 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
996 struct hci_cp_pin_code_reply *cp;
997 struct hci_conn *conn;
999 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1003 if (hci_dev_test_flag(hdev, HCI_MGMT))
1004 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1009 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1013 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1015 conn->pin_length = cp->pin_len;
1018 hci_dev_unlock(hdev);
/* Command Complete handler for PIN Code Request Negative Reply:
 * forwards the result to mgmt.
 */
1021 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1023 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1025 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1029 if (hci_dev_test_flag(hdev, HCI_MGMT))
1030 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1033 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Buffer Size: caches LE MTU and
 * packet count and initializes the LE free-packet counter.
 */
1036 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1037 struct sk_buff *skb)
1039 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1041 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1047 hdev->le_pkts = rp->le_max_pkt;
1049 hdev->le_cnt = hdev->le_pkts;
1051 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete handler for LE Read Local Supported Features:
 * caches the 8-byte LE feature mask.
 */
1054 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1055 struct sk_buff *skb)
1057 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1059 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1064 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete handler for LE Read Advertising Channel TX Power:
 * caches the advertising TX power level.
 */
1067 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1070 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1072 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1077 hdev->adv_tx_power = rp->tx_power;
/* Command Complete handler for User Confirmation Request Reply:
 * forwards the result to mgmt for the ACL link.
 */
1080 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1082 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1084 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1088 if (hci_dev_test_flag(hdev, HCI_MGMT))
1089 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1092 hci_dev_unlock(hdev);
/* Command Complete handler for User Confirmation Request Negative
 * Reply: forwards the result to mgmt for the ACL link.
 */
1095 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1096 struct sk_buff *skb)
1098 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1100 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1104 if (hci_dev_test_flag(hdev, HCI_MGMT))
1105 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1106 ACL_LINK, 0, rp->status);
1108 hci_dev_unlock(hdev);
/* Command Complete handler for User Passkey Request Reply: forwards
 * the result to mgmt for the ACL link.
 */
1111 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1113 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1119 if (hci_dev_test_flag(hdev, HCI_MGMT))
1120 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1123 hci_dev_unlock(hdev);
/* Command Complete handler for User Passkey Request Negative Reply:
 * forwards the result to mgmt for the ACL link.
 */
1126 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1127 struct sk_buff *skb)
1129 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1131 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1135 if (hci_dev_test_flag(hdev, HCI_MGMT))
1136 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1137 ACL_LINK, 0, rp->status);
1139 hci_dev_unlock(hdev);
/* Command Complete handler for Read Local OOB Data: only logs here;
 * any further processing is not visible in this extract.
 */
1142 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1143 struct sk_buff *skb)
1145 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1147 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete handler for Read Local OOB Extended Data: only logs
 * here; any further processing is not visible in this extract.
 */
1150 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1151 struct sk_buff *skb)
1153 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1155 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete handler for LE Set Random Address: records the new
 * random address; if it equals the generated RPA, clears the expired
 * flag and re-arms the RPA expiry timer.
 */
1158 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1160 __u8 status = *((__u8 *) skb->data);
1163 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1168 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1174 bacpy(&hdev->random_addr, sent);
1176 if (!bacmp(&hdev->rpa, sent)) {
1177 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1178 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1179 secs_to_jiffies(hdev->rpa_timeout));
1182 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Default PHY: caches the TX/RX
 * PHY preferences we sent.
 */
1185 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1187 __u8 status = *((__u8 *) skb->data);
1188 struct hci_cp_le_set_default_phy *cp;
1190 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1195 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1201 hdev->le_tx_def_phys = cp->tx_phys;
1202 hdev->le_rx_def_phys = cp->rx_phys;
1204 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Set Random Address:
 * for advertising instances other than handle 0x00, stores the address
 * on the adv instance and re-arms its RPA expiry work when the address
 * is the current RPA.
 */
1207 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1208 struct sk_buff *skb)
1210 __u8 status = *((__u8 *) skb->data);
1211 struct hci_cp_le_set_adv_set_rand_addr *cp;
1212 struct adv_info *adv;
1217 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1218 /* Update only in case the adv instance since handle 0x00 shall be using
1219 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1220 * non-extended adverting.
1222 if (!cp || !cp->handle)
1227 adv = hci_find_adv_instance(hdev, cp->handle)
1229 bacpy(&adv->random_addr, &cp->bdaddr);
1230 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1231 adv->rpa_expired = false;
1232 queue_delayed_work(hdev->workqueue,
1233 &adv->rpa_expired_cb,
1234 secs_to_jiffies(hdev->rpa_timeout));
1238 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Transmit Power: caches the
 * controller's min and max LE TX power levels.
 */
1241 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1242 struct sk_buff *skb)
1244 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1246 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1251 hdev->min_le_tx_power = rp->min_le_tx_power;
1252 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete handler for LE Set Advertising Enable: toggles
 * HCI_LE_ADV and, when enabling while a peripheral-role connection
 * attempt is in flight, arms that connection's timeout work.
 */
1255 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1257 __u8 *sent, status = *((__u8 *) skb->data);
1259 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1264 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1270 /* If we're doing connection initiation as peripheral. Set a
1271 * timeout in case something goes wrong.
1274 struct hci_conn *conn;
1276 hci_dev_set_flag(hdev, HCI_LE_ADV);
1278 conn = hci_lookup_le_connect(hdev);
1280 queue_delayed_work(hdev->workqueue,
1281 &conn->le_conn_timeout,
1282 conn->conn_timeout);
1284 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1287 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Advertising Enable:
 * updates per-instance enabled state and the global HCI_LE_ADV flag;
 * on enable, also arms the LE connection timeout when a peripheral
 * connection attempt is pending. When disabling, HCI_LE_ADV is only
 * cleared once no instance remains enabled.
 */
1290 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1291 struct sk_buff *skb)
1293 struct hci_cp_le_set_ext_adv_enable *cp;
1294 struct hci_cp_ext_adv_set *set;
1295 __u8 status = *((__u8 *) skb->data);
1296 struct adv_info *adv = NULL, *n;
1298 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1303 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1307 set = (void *)cp->data;
1311 if (cp->num_of_sets)
1312 adv = hci_find_adv_instance(hdev, set->handle);
1315 struct hci_conn *conn;
1317 hci_dev_set_flag(hdev, HCI_LE_ADV);
1320 adv->enabled = true;
1322 conn = hci_lookup_le_connect(hdev);
1324 queue_delayed_work(hdev->workqueue,
1325 &conn->le_conn_timeout,
1326 conn->conn_timeout);
1328 if (cp->num_of_sets) {
1330 adv->enabled = false;
1332 /* If just one instance was disabled check if there are
1333 * any other instance enabled before clearing HCI_LE_ADV
1335 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1341 /* All instances shall be considered disabled */
1342 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1344 adv->enabled = false;
1347 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1351 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Scan Parameters: caches the scan
 * type (active/passive) we requested.
 */
1354 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1356 struct hci_cp_le_set_scan_param *cp;
1357 __u8 status = *((__u8 *) skb->data);
1359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1364 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1370 hdev->le_scan_type = cp->type;
1372 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Scan Parameters: pulls
 * the scan type from the first per-PHY parameter block we sent.
 */
1375 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1376 struct sk_buff *skb)
1378 struct hci_cp_le_set_ext_scan_params *cp;
1379 __u8 status = *((__u8 *) skb->data);
1380 struct hci_cp_le_scan_phy_params *phy_param;
1382 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1387 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1391 phy_param = (void *)cp->data;
1395 hdev->le_scan_type = phy_param->type;
1397 hci_dev_unlock(hdev);
/* True when a deferred advertising report is buffered in discovery
 * state (last_adv_addr differs from BDADDR_ANY).
 */
1400 static bool has_pending_adv_report(struct hci_dev *hdev)
1402 struct discovery_state *d = &hdev->discovery;
1404 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report by resetting the stored address
 * to BDADDR_ANY and zeroing the cached data length.
 */
1407 static void clear_pending_adv_report(struct hci_dev *hdev)
1409 struct discovery_state *d = &hdev->discovery;
1411 bacpy(&d->last_adv_addr, BDADDR_ANY);
1412 d->last_adv_data_len = 0;
/* Buffer an advertising report in discovery state so it can be merged
 * with a later scan response; reports longer than HCI_MAX_AD_LENGTH
 * are rejected (early-return line missing from this extract).
 */
1415 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1416 u8 bdaddr_type, s8 rssi, u32 flags,
1419 struct discovery_state *d = &hdev->discovery;
1421 if (len > HCI_MAX_AD_LENGTH)
1424 bacpy(&d->last_adv_addr, bdaddr);
1425 d->last_adv_addr_type = bdaddr_type;
1426 d->last_adv_rssi = rssi;
1427 d->last_adv_flags = flags;
1428 memcpy(d->last_adv_data, data, len);
1429 d->last_adv_data_len = len;
/* Common completion logic for LE scan enable/disable (shared by the
 * legacy and extended variants). On enable: sets HCI_LE_SCAN and drops
 * any stale buffered report for active scans. On disable: flushes a
 * pending report to mgmt, cancels the scan-disable timer, clears
 * HCI_LE_SCAN, and either stops discovery (scan interrupted by a
 * connect) or re-enables advertising. The TIZEN_BT ifdef selects the
 * out-of-tree discovery-state helper.
 */
1432 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1437 case LE_SCAN_ENABLE:
1438 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1439 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1440 clear_pending_adv_report(hdev);
1443 case LE_SCAN_DISABLE:
1444 /* We do this here instead of when setting DISCOVERY_STOPPED
1445 * since the latter would potentially require waiting for
1446 * inquiry to stop too.
1448 if (has_pending_adv_report(hdev)) {
1449 struct discovery_state *d = &hdev->discovery;
1451 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1452 d->last_adv_addr_type, NULL,
1453 d->last_adv_rssi, d->last_adv_flags,
1455 d->last_adv_data_len, NULL, 0);
1458 /* Cancel this timer so that we don't try to disable scanning
1459 * when it's already disabled.
1461 cancel_delayed_work(&hdev->le_scan_disable);
1463 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1465 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1466 * interrupted scanning due to a connect request. Mark
1467 * therefore discovery as stopped. If this was not
1468 * because of a connect request advertising might have
1469 * been disabled because of active scanning, so
1470 * re-enable it again if necessary.
1472 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1473 #ifndef TIZEN_BT /* The below line is kernel bug. */
1474 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1476 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1478 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1479 hdev->discovery.state == DISCOVERY_FINDING)
1480 hci_req_reenable_advertising(hdev);
1485 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1490 hci_dev_unlock(hdev);
/* Command Complete handler for (legacy) LE Set Scan Enable: delegates
 * to the shared le_set_scan_enable_complete() helper.
 */
1493 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1494 struct sk_buff *skb)
1496 struct hci_cp_le_set_scan_enable *cp;
1497 __u8 status = *((__u8 *) skb->data);
1499 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1504 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1508 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for LE Set Extended Scan Enable: delegates
 * to the shared le_set_scan_enable_complete() helper.
 */
1511 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1512 struct sk_buff *skb)
1514 struct hci_cp_le_set_ext_scan_enable *cp;
1515 __u8 status = *((__u8 *) skb->data);
1517 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1522 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1526 le_set_scan_enable_complete(hdev, cp->enable);
/* Cache the controller's number of supported advertising sets from the
 * LE Read Number of Supported Advertising Sets response.
 */
1529 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1530 struct sk_buff *skb)
1532 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1534 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1540 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Cache the size of the controller's LE accept (filter) list. */
1543 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1544 struct sk_buff *skb)
1546 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1548 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1553 hdev->le_accept_list_size = rp->size;
/* The controller's LE accept list was cleared; empty the host-side
 * mirror (hdev->le_accept_list) to keep the two in sync. The list is
 * manipulated under the hdev lock (unlock visible below).
 */
1556 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1557 struct sk_buff *skb)
1559 __u8 status = *((__u8 *) skb->data);
1561 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1567 hci_bdaddr_list_clear(&hdev->le_accept_list);
1568 hci_dev_unlock(hdev);
/* Mirror a successful LE Add Device To Accept List command into the
 * host-side list, using the address from the sent command parameters.
 */
1571 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1572 struct sk_buff *skb)
1574 struct hci_cp_le_add_to_accept_list *sent;
1575 __u8 status = *((__u8 *) skb->data);
1577 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1582 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1587 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1589 hci_dev_unlock(hdev);
/* Mirror a successful LE Remove Device From Accept List command by
 * deleting the same address from the host-side list.
 */
1592 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1593 struct sk_buff *skb)
1595 struct hci_cp_le_del_from_accept_list *sent;
1596 __u8 status = *((__u8 *) skb->data);
1598 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1603 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1608 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1610 hci_dev_unlock(hdev);
/* Store the controller's 8-byte LE supported-states bitmask. */
1613 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1614 struct sk_buff *skb)
1616 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1618 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1623 memcpy(hdev->le_states, rp->le_states, 8);
/* Cache the suggested default LE data length (TX octets and TX time),
 * converting from little-endian wire format.
 */
1626 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1627 struct sk_buff *skb)
1629 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1631 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1636 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1637 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* LE Write Suggested Default Data Length completed: the response carries
 * only a status, so the cached values are taken from the parameters we
 * sent rather than from the reply.
 */
1640 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1641 struct sk_buff *skb)
1643 struct hci_cp_le_write_def_data_len *sent;
1644 __u8 status = *((__u8 *) skb->data);
1646 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1651 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1655 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1656 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Mirror a successful LE Add Device To Resolving List command into the
 * host-side resolving list, including the peer IRK from the sent
 * parameters.
 */
1659 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1660 struct sk_buff *skb)
1662 struct hci_cp_le_add_to_resolv_list *sent;
1663 __u8 status = *((__u8 *) skb->data);
1665 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1670 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1675 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1676 sent->bdaddr_type, sent->peer_irk,
1678 hci_dev_unlock(hdev);
/* Mirror a successful LE Remove Device From Resolving List command by
 * deleting the matching entry (address + IRK bookkeeping) host-side.
 */
1681 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1682 struct sk_buff *skb)
1684 struct hci_cp_le_del_from_resolv_list *sent;
1685 __u8 status = *((__u8 *) skb->data);
1687 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1692 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1697 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1699 hci_dev_unlock(hdev);
/* The controller's LE resolving list was cleared; empty the host-side
 * mirror to match.
 */
1702 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1703 struct sk_buff *skb)
1705 __u8 status = *((__u8 *) skb->data);
1707 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1713 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1714 hci_dev_unlock(hdev);
/* Cache the size of the controller's LE address resolving list. */
1717 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1718 struct sk_buff *skb)
1720 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1722 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1727 hdev->le_resolv_list_size = rp->size;
/* LE Set Address Resolution Enable completed: set or clear the
 * HCI_LL_RPA_RESOLUTION device flag to track whether controller-based
 * RPA resolution is now active. The branch condition on *sent is not
 * visible in this listing (presumably "if (*sent)") — confirm against
 * the full source.
 */
1730 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1731 struct sk_buff *skb)
1733 __u8 *sent, status = *((__u8 *) skb->data);
1735 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1740 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1747 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1749 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1751 hci_dev_unlock(hdev);
/* Cache the controller's maximum LE data lengths and times (TX and RX),
 * converting each field from little-endian wire format.
 */
1754 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1755 struct sk_buff *skb)
1757 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1759 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1764 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1765 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1766 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1767 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Write LE Host Supported completed: sync the cached host feature bits
 * and device flags with the values we sent. Enabling LE sets
 * LMP_HOST_LE and HCI_LE_ENABLED; disabling also clears
 * HCI_ADVERTISING since advertising cannot remain active without LE.
 * The simultaneous LE+BR/EDR bit is tracked via LMP_HOST_LE_BREDR.
 */
1770 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1771 struct sk_buff *skb)
1773 struct hci_cp_write_le_host_supported *sent;
1774 __u8 status = *((__u8 *) skb->data);
1776 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1781 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1788 hdev->features[1][0] |= LMP_HOST_LE;
1789 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1791 hdev->features[1][0] &= ~LMP_HOST_LE;
1792 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1793 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1797 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1799 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1801 hci_dev_unlock(hdev);
/* LE Set Advertising Parameters completed: remember which own-address
 * type the controller will use while advertising.
 */
1804 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1806 struct hci_cp_le_set_adv_param *cp;
1807 u8 status = *((u8 *) skb->data);
1809 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1814 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1819 hdev->adv_addr_type = cp->own_address_type;
1820 hci_dev_unlock(hdev);
/* LE Set Extended Advertising Parameters completed: record the own
 * address type, store the selected TX power both on hdev (instance 0)
 * and on the matching adv_info instance (if found), then refresh the
 * advertising data now that the TX power is known.
 */
1823 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1825 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1826 struct hci_cp_le_set_ext_adv_params *cp;
1827 struct adv_info *adv_instance;
1829 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1834 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1839 hdev->adv_addr_type = cp->own_addr_type;
1841 /* Store in hdev for instance 0 */
1842 hdev->adv_tx_power = rp->tx_power;
1844 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1846 adv_instance->tx_power = rp->tx_power;
1848 /* Update adv data as tx power is known now */
1849 hci_req_update_adv_data(hdev, cp->handle);
1851 hci_dev_unlock(hdev);
/* Tizen vendor extension: forward the Enable RSSI command-complete
 * response to the mgmt layer.
 */
1855 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1856 struct sk_buff *skb)
1858 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1860 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1861 hdev->name, rp->status, rp->le_ext_opcode);
1863 mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Tizen vendor extension: forward the Get Raw RSSI command-complete
 * response (connection handle + RSSI in dBm) to the mgmt layer.
 */
1866 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1867 struct sk_buff *skb)
1869 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1871 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1872 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1874 mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Tizen vendor extension: second-level dispatcher for LE-extended
 * vendor group events. Strips the group header from the skb and
 * dispatches on the sub-code; LE_RSSI_LINK_ALERT is forwarded to
 * mgmt_rssi_alert_evt(). Other sub-codes are not handled here (default
 * branch not visible in this listing).
 */
1877 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
1878 struct sk_buff *skb)
1880 struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
1881 __u8 event_le_ext_sub_code;
1883 BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
1884 LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
1886 skb_pull(skb, sizeof(*ev));
1887 event_le_ext_sub_code = ev->event_le_ext_sub_code;
1889 switch (event_le_ext_sub_code) {
1890 case LE_RSSI_LINK_ALERT:
1891 BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X",
1892 LE_RSSI_LINK_ALERT);
1893 mgmt_rssi_alert_evt(hdev, skb);
/* Tizen vendor extension: top-level dispatcher for vendor-specific HCI
 * events. Strips the vendor event header and routes group events to
 * hci_vendor_specific_group_ext_evt().
 */
1901 static void hci_vendor_specific_evt(struct hci_dev *hdev, struct sk_buff *skb)
1903 struct hci_ev_vendor_specific *ev = (void *)skb->data;
1904 __u8 event_sub_code;
1906 BT_DBG("hci_vendor_specific_evt");
1908 skb_pull(skb, sizeof(*ev));
1909 event_sub_code = ev->event_sub_code;
1911 switch (event_sub_code) {
1912 case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
1913 hci_vendor_specific_group_ext_evt(hdev, skb);
/* Read RSSI completed: look up the connection by handle and store the
 * reported RSSI on it.
 */
1922 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1924 struct hci_rp_read_rssi *rp = (void *) skb->data;
1925 struct hci_conn *conn;
1927 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1934 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1936 conn->rssi = rp->rssi;
1938 hci_dev_unlock(hdev);
/* Read Transmit Power Level completed: depending on the 'type' field of
 * the sent command (current vs. maximum level), store the reported
 * power on the matching connection as tx_power or max_tx_power.
 */
1941 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1943 struct hci_cp_read_tx_power *sent;
1944 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1945 struct hci_conn *conn;
1947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1952 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1958 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1962 switch (sent->type) {
1964 conn->tx_power = rp->tx_power;
1967 conn->max_tx_power = rp->tx_power;
1972 hci_dev_unlock(hdev);
/* Write SSP Debug Mode completed: cache the mode value we sent. */
1975 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1977 u8 status = *((u8 *) skb->data);
1980 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1985 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1987 hdev->ssp_debug_mode = *mode;
/* Command Status for HCI_OP_INQUIRY: re-check any pending connection
 * attempts and, on the path visible here, mark inquiry as in progress
 * via the HCI_INQUIRY flag.
 */
1990 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1992 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1995 hci_conn_check_pending(hdev);
1999 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for HCI_OP_CREATE_CONN. For an existing connection in
 * BT_CONNECT: a failure other than Command Disallowed (0x0c), or too
 * many attempts, closes it and notifies via hci_connect_cfm();
 * otherwise it moves to BT_CONNECT2. A success path with no existing
 * conn object adds a new ACL connection for the target address.
 * NOTE(review): the status==0 / conn==NULL branching lines are not
 * fully visible in this listing — confirm against the full source.
 */
2002 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2004 struct hci_cp_create_conn *cp;
2005 struct hci_conn *conn;
2007 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2009 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2015 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2017 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
2020 if (conn && conn->state == BT_CONNECT) {
2021 if (status != 0x0c || conn->attempt > 2) {
2022 conn->state = BT_CLOSED;
2023 hci_connect_cfm(conn, status);
2026 conn->state = BT_CONNECT2;
2030 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2033 bt_dev_err(hdev, "no memory for new connection");
2037 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_ADD_SCO (error path): look up the ACL
 * connection by the handle from the sent command and close the SCO
 * link hanging off it, notifying via hci_connect_cfm().
 */
2040 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2042 struct hci_cp_add_sco *cp;
2043 struct hci_conn *acl, *sco;
2046 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2051 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2055 handle = __le16_to_cpu(cp->handle);
2057 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2061 acl = hci_conn_hash_lookup_handle(hdev, handle);
2065 sco->state = BT_CLOSED;
2067 hci_connect_cfm(sco, status);
2072 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_AUTH_REQUESTED (error path): if the
 * connection is still in BT_CONFIG, complete the connect attempt with
 * the failure status and drop the reference.
 */
2075 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2077 struct hci_cp_auth_requested *cp;
2078 struct hci_conn *conn;
2080 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2085 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2091 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2093 if (conn->state == BT_CONFIG) {
2094 hci_connect_cfm(conn, status);
2095 hci_conn_drop(conn);
2099 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SET_CONN_ENCRYPT (error path): mirrors
 * hci_cs_auth_requested() — complete a BT_CONFIG connection with the
 * failure status and drop the reference.
 */
2102 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2104 struct hci_cp_set_conn_encrypt *cp;
2105 struct hci_conn *conn;
2107 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2112 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2118 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2120 if (conn->state == BT_CONFIG) {
2121 hci_connect_cfm(conn, status);
2122 hci_conn_drop(conn);
2126 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection in BT_CONFIG still needs
 * authentication before being reported up. Returns false for inbound
 * or non-BT_CONFIG connections and for SDP-only security; for non-SSP
 * links without MITM it only requires auth at MEDIUM or higher
 * pending security levels.
 */
2129 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2130 struct hci_conn *conn)
2132 if (conn->state != BT_CONFIG || !conn->out)
2135 if (conn->pending_sec_level == BT_SECURITY_SDP)
2138 /* Only request authentication for SSP connections or non-SSP
2139 * devices with sec_level MEDIUM or HIGH or if MITM protection
2142 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2143 conn->pending_sec_level != BT_SECURITY_FIPS &&
2144 conn->pending_sec_level != BT_SECURITY_HIGH &&
2145 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan parameters and clock offset discovered during inquiry.
 * Returns the result of hci_send_cmd() (0 on success).
 */
2151 static int hci_resolve_name(struct hci_dev *hdev,
2152 struct inquiry_entry *e)
2154 struct hci_cp_remote_name_req cp;
2156 memset(&cp, 0, sizeof(cp));
2158 bacpy(&cp.bdaddr, &e->data.bdaddr);
2159 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2160 cp.pscan_mode = e->data.pscan_mode;
2161 cp.clock_offset = e->data.clock_offset;
2163 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next cache entry still needing a
 * name. Returns true when a request was successfully started (entry
 * moves to NAME_PENDING), false when the resolve list is empty or the
 * request could not be sent.
 */
2166 static bool hci_resolve_next_name(struct hci_dev *hdev)
2168 struct discovery_state *discov = &hdev->discovery;
2169 struct inquiry_entry *e;
2171 if (list_empty(&discov->resolve))
2174 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2178 if (hci_resolve_name(hdev, e) == 0) {
2179 e->name_state = NAME_PENDING;
/* Reconcile discovery/name-resolution state after a remote name event
 * (or a failed name request). Updates mgmt connected/name state for a
 * live connection, then advances the RESOLVING state machine: mark the
 * matching pending entry known/not-known, continue with the next name,
 * or declare discovery stopped. Two mgmt_device_connected() call shapes
 * are visible below — presumably alternate branches of an #ifdef not
 * shown in this listing; confirm against the full source.
 */
2186 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2187 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2189 struct discovery_state *discov = &hdev->discovery;
2190 struct inquiry_entry *e;
2193 /* Update the mgmt connected state if necessary. Be careful with
2194 * conn objects that exist but are not (yet) connected however.
2195 * Only those in BT_CONFIG or BT_CONNECTED states can be
2196 * considered connected.
2199 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2200 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2201 mgmt_device_connected(hdev, conn, 0, name, name_len);
2203 mgmt_device_name_update(hdev, bdaddr, name, name_len);
2207 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2208 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2209 mgmt_device_connected(hdev, conn, name, name_len);
2212 if (discov->state == DISCOVERY_STOPPED)
2215 if (discov->state == DISCOVERY_STOPPING)
2216 goto discov_complete;
2218 if (discov->state != DISCOVERY_RESOLVING)
2221 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2222 /* If the device was not found in a list of found devices names of which
2223 * are pending, there is no need to continue resolving a next name as it
2224 * will be done upon receiving another Remote Name Request Complete
2231 e->name_state = NAME_KNOWN;
2232 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2233 e->data.rssi, name, name_len);
2235 e->name_state = NAME_NOT_KNOWN;
2238 if (hci_resolve_next_name(hdev))
2242 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for HCI_OP_REMOTE_NAME_REQ. On the failure path the
 * pending-name bookkeeping is flushed via hci_check_pending_name()
 * (mgmt only), and if the target connection still needs outgoing
 * authentication an Auth Requested command is issued directly.
 */
2245 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2247 struct hci_cp_remote_name_req *cp;
2248 struct hci_conn *conn;
2250 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2252 /* If successful wait for the name req complete event before
2253 * checking for the need to do authentication */
2257 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2263 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2265 if (hci_dev_test_flag(hdev, HCI_MGMT))
2266 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2271 if (!hci_outgoing_auth_needed(hdev, conn))
2274 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2275 struct hci_cp_auth_requested auth_cp;
2277 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2279 auth_cp.handle = __cpu_to_le16(conn->handle);
2280 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2281 sizeof(auth_cp), &auth_cp);
2285 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_READ_REMOTE_FEATURES (error path): complete
 * a BT_CONFIG connection with the failure status and drop the ref.
 */
2288 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2290 struct hci_cp_read_remote_features *cp;
2291 struct hci_conn *conn;
2293 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2298 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2304 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2306 if (conn->state == BT_CONFIG) {
2307 hci_connect_cfm(conn, status);
2308 hci_conn_drop(conn);
2312 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_READ_REMOTE_EXT_FEATURES (error path):
 * identical handling to hci_cs_read_remote_features().
 */
2315 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2317 struct hci_cp_read_remote_ext_features *cp;
2318 struct hci_conn *conn;
2320 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2325 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2331 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2333 if (conn->state == BT_CONFIG) {
2334 hci_connect_cfm(conn, status);
2335 hci_conn_drop(conn);
2339 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SETUP_SYNC_CONN (error path): find the ACL
 * by the sent handle and close the SCO/eSCO link riding on it,
 * notifying via hci_connect_cfm(). Same structure as hci_cs_add_sco().
 */
2342 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2344 struct hci_cp_setup_sync_conn *cp;
2345 struct hci_conn *acl, *sco;
2348 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2353 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2357 handle = __le16_to_cpu(cp->handle);
2359 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2363 acl = hci_conn_hash_lookup_handle(hdev, handle);
2367 sco->state = BT_CLOSED;
2369 hci_connect_cfm(sco, status);
2374 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SNIFF_MODE (error path): clear the pending
 * mode-change flag and, if a SCO setup was waiting on the mode change,
 * run it now with the failure status.
 */
2377 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2379 struct hci_cp_sniff_mode *cp;
2380 struct hci_conn *conn;
2382 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2387 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2393 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2395 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2397 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2398 hci_sco_setup(conn, status);
2401 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_EXIT_SNIFF_MODE (error path): mirror image
 * of hci_cs_sniff_mode() — clear pending flags and run any deferred
 * SCO setup.
 */
2404 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2406 struct hci_cp_exit_sniff_mode *cp;
2407 struct hci_conn *conn;
2409 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2414 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2420 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2422 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2424 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2425 hci_sco_setup(conn, status);
2428 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_DISCONNECT (failure path): report the
 * failed disconnect to mgmt and, for LE links, restore the advertising
 * instance that the connection had displaced. The upper layer does not
 * retry failed disconnects, so this is the only cleanup point.
 */
2431 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2433 struct hci_cp_disconnect *cp;
2434 struct hci_conn *conn;
2439 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2445 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2447 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2448 conn->dst_type, status);
2450 if (conn->type == LE_LINK) {
2451 hdev->cur_adv_instance = conn->adv_instance;
2452 hci_req_reenable_advertising(hdev);
2455 /* If the disconnection failed for any reason, the upper layer
2456 * does not retry to disconnect in current implementation.
2457 * Hence, we need to do some basic cleanup here and re-enable
2458 * advertising if necessary.
2463 hci_dev_unlock(hdev);
/* Common bookkeeping after an LE Create Connection command has been
 * accepted by the controller (shared by the legacy and extended
 * variants). Records the initiator/responder address information SMP
 * needs for the lifetime of the connection, normalising the new
 * controller-resolved own-address types (0x02/0x03) back to
 * public/random when LL privacy is in use, and arms the LE connection
 * timeout for directed (non-accept-list) connection attempts.
 */
2466 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2467 u8 peer_addr_type, u8 own_address_type,
2470 struct hci_conn *conn;
2472 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2477 /* When using controller based address resolution, then the new
2478 * address types 0x02 and 0x03 are used. These types need to be
2479 * converted back into either public address or random address type
2481 if (use_ll_privacy(hdev) &&
2482 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2483 switch (own_address_type) {
2484 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2485 own_address_type = ADDR_LE_DEV_PUBLIC;
2487 case ADDR_LE_DEV_RANDOM_RESOLVED:
2488 own_address_type = ADDR_LE_DEV_RANDOM;
2493 /* Store the initiator and responder address information which
2494 * is needed for SMP. These values will not change during the
2495 * lifetime of the connection.
2497 conn->init_addr_type = own_address_type;
2498 if (own_address_type == ADDR_LE_DEV_RANDOM)
2499 bacpy(&conn->init_addr, &hdev->random_addr);
2501 bacpy(&conn->init_addr, &hdev->bdaddr);
2503 conn->resp_addr_type = peer_addr_type;
2504 bacpy(&conn->resp_addr, peer_addr);
2506 /* We don't want the connection attempt to stick around
2507 * indefinitely since LE doesn't have a page timeout concept
2508 * like BR/EDR. Set a timer for any connection that doesn't use
2509 * the accept list for connecting.
2511 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2512 queue_delayed_work(conn->hdev->workqueue,
2513 &conn->le_conn_timeout,
2514 conn->conn_timeout);
/* Command Status for HCI_OP_LE_CREATE_CONN: failures are handled by
 * hci_le_conn_failed() via the request completion callbacks, so this
 * only forwards the sent parameters to cs_le_create_conn().
 */
2517 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2519 struct hci_cp_le_create_conn *cp;
2521 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2523 /* All connection failure handling is taken care of by the
2524 * hci_le_conn_failed function which is triggered by the HCI
2525 * request completion callbacks used for connecting.
2530 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2536 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2537 cp->own_address_type, cp->filter_policy);
2539 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_LE_EXT_CREATE_CONN: extended-advertising
 * counterpart of hci_cs_le_create_conn(); same delegation to
 * cs_le_create_conn() (note the field is own_addr_type here).
 */
2542 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2544 struct hci_cp_le_ext_create_conn *cp;
2546 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2548 /* All connection failure handling is taken care of by the
2549 * hci_le_conn_failed function which is triggered by the HCI
2550 * request completion callbacks used for connecting.
2555 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2561 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2562 cp->own_addr_type, cp->filter_policy);
2564 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_LE_READ_REMOTE_FEATURES (error path): same
 * pattern as the BR/EDR feature-read handlers — complete a BT_CONFIG
 * connection with the failure status and drop the reference.
 */
2567 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2569 struct hci_cp_le_read_remote_features *cp;
2570 struct hci_conn *conn;
2572 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2577 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2583 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2585 if (conn->state == BT_CONFIG) {
2586 hci_connect_cfm(conn, status);
2587 hci_conn_drop(conn);
2591 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_LE_START_ENC (error path): if the link is
 * still connected, tear it down with an authentication-failure reason,
 * since encryption could not even be started.
 */
2594 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2596 struct hci_cp_le_start_enc *cp;
2597 struct hci_conn *conn;
2599 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2606 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2610 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2614 if (conn->state != BT_CONNECTED)
2617 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2618 hci_conn_drop(conn);
2621 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SWITCH_ROLE (error path): clear the pending
 * role-switch flag so future switch attempts are not blocked.
 */
2624 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2626 struct hci_cp_switch_role *cp;
2627 struct hci_conn *conn;
2629 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2634 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2640 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2642 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2644 hci_dev_unlock(hdev);
/* Inquiry Complete event: clear HCI_INQUIRY, wake any waiters on that
 * bit, then (mgmt only) advance the discovery state machine — either
 * start name resolution for the next NAME_NEEDED entry, or mark
 * discovery stopped unless a simultaneous LE scan is still running
 * (HCI_QUIRK_SIMULTANEOUS_DISCOVERY).
 */
2647 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2649 __u8 status = *((__u8 *) skb->data);
2650 struct discovery_state *discov = &hdev->discovery;
2651 struct inquiry_entry *e;
2653 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2655 hci_conn_check_pending(hdev);
2657 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2660 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2661 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2663 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2668 if (discov->state != DISCOVERY_FINDING)
2671 if (list_empty(&discov->resolve)) {
2672 /* When BR/EDR inquiry is active and no LE scanning is in
2673 * progress, then change discovery state to indicate completion.
2675 * When running LE scanning and BR/EDR inquiry simultaneously
2676 * and the LE scan already finished, then change the discovery
2677 * state to indicate completion.
2679 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2680 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2681 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2685 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2686 if (e && hci_resolve_name(hdev, e) == 0) {
2687 e->name_state = NAME_PENDING;
2688 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2690 /* When BR/EDR inquiry is active and no LE scanning is in
2691 * progress, then change discovery state to indicate completion.
2693 * When running LE scanning and BR/EDR inquiry simultaneously
2694 * and the LE scan already finished, then change the discovery
2695 * state to indicate completion.
2697 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2698 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2699 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2703 hci_dev_unlock(hdev);
/* Inquiry Result event: the first byte is the response count, followed
 * by that many inquiry_info records. Validates the skb length against
 * the count, skips processing during periodic inquiry, then for each
 * record updates the inquiry cache and reports the device to mgmt with
 * RSSI marked invalid (this event variant carries no RSSI).
 */
2706 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2708 struct inquiry_data data;
2709 struct inquiry_info *info = (void *) (skb->data + 1);
2710 int num_rsp = *((__u8 *) skb->data);
2712 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2714 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2717 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2722 for (; num_rsp; num_rsp--, info++) {
2725 bacpy(&data.bdaddr, &info->bdaddr);
2726 data.pscan_rep_mode = info->pscan_rep_mode;
2727 data.pscan_period_mode = info->pscan_period_mode;
2728 data.pscan_mode = info->pscan_mode;
2729 memcpy(data.dev_class, info->dev_class, 3);
2730 data.clock_offset = info->clock_offset;
2731 data.rssi = HCI_RSSI_INVALID;
2732 data.ssp_mode = 0x00;
2734 flags = hci_inquiry_cache_update(hdev, &data, false);
2736 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2737 info->dev_class, HCI_RSSI_INVALID,
2738 flags, NULL, 0, NULL, 0);
2741 hci_dev_unlock(hdev);
/* Connection Complete event. Looks up (or, for filter-accepted
 * auto-connections and late eSCO fallbacks, creates/re-types) the conn
 * object, then on success assigns the handle, moves ACL links to
 * BT_CONFIG (with pairing/disconnect timeout selection based on link
 * keys and SSP) and others to BT_CONNECTED, creates debugfs/sysfs
 * entries, propagates the adapter AUTH/ENCRYPT flags, kicks off a
 * remote-features read for ACL, and fixes the packet type on pre-2.0
 * controllers. On failure it closes the conn and reports to mgmt.
 * Finally notifies hci_connect_cfm(), handles SCO air-mode
 * notification, and re-checks pending connections.
 * NOTE(review): several branch/brace lines are not visible in this
 * listing — the exact success/failure split should be confirmed
 * against the full source.
 */
2744 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2746 struct hci_ev_conn_complete *ev = (void *) skb->data;
2747 struct hci_conn *conn;
2749 BT_DBG("%s", hdev->name);
2753 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2755 /* Connection may not exist if auto-connected. Check the bredr
2756 * allowlist to see if this device is allowed to auto connect.
2757 * If link is an ACL type, create a connection class
2760 * Auto-connect will only occur if the event filter is
2761 * programmed with a given address. Right now, event filter is
2762 * only used during suspend.
2764 if (ev->link_type == ACL_LINK &&
2765 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2768 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2771 bt_dev_err(hdev, "no memory for new conn");
2775 if (ev->link_type != SCO_LINK)
2778 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2783 conn->type = SCO_LINK;
2788 conn->handle = __le16_to_cpu(ev->handle);
2790 if (conn->type == ACL_LINK) {
2791 conn->state = BT_CONFIG;
2792 hci_conn_hold(conn);
2794 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2795 !hci_find_link_key(hdev, &ev->bdaddr))
2796 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2798 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2800 conn->state = BT_CONNECTED;
2802 hci_debugfs_create_conn(conn);
2803 hci_conn_add_sysfs(conn);
2805 if (test_bit(HCI_AUTH, &hdev->flags))
2806 set_bit(HCI_CONN_AUTH, &conn->flags);
2808 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2809 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2811 /* Get remote features */
2812 if (conn->type == ACL_LINK) {
2813 struct hci_cp_read_remote_features cp;
2814 cp.handle = ev->handle;
2815 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2818 hci_req_update_scan(hdev);
2821 /* Set packet type for incoming connection */
2822 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2823 struct hci_cp_change_conn_ptype cp;
2824 cp.handle = ev->handle;
2825 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2826 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2830 conn->state = BT_CLOSED;
2831 if (conn->type == ACL_LINK)
2832 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2833 conn->dst_type, ev->status);
2836 if (conn->type == ACL_LINK)
2837 hci_sco_setup(conn, ev->status);
2840 hci_connect_cfm(conn, ev->status);
2842 } else if (ev->link_type == SCO_LINK) {
2843 switch (conn->setting & SCO_AIRMODE_MASK) {
2844 case SCO_AIRMODE_CVSD:
2846 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2850 hci_connect_cfm(conn, ev->status);
2854 hci_dev_unlock(hdev);
2856 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address with
 * reason "bad address" (HCI_ERROR_REJ_BAD_ADDR).
 */
2859 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2861 struct hci_cp_reject_conn_req cp;
2863 bacpy(&cp.bdaddr, bdaddr);
2864 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2865 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event. Rejects the request if the L2CAP layer
 * does not accept it, if the peer is on the reject list, or (mgmt
 * only) if the adapter is not connectable and the peer is not on the
 * accept list. Otherwise it refreshes the inquiry-cache device class,
 * finds or creates the conn object, and responds: Accept Connection
 * for ACL (choosing central vs. peripheral role by local role-switch
 * capability), Accept Synchronous Connection with default CVSD
 * bandwidth/latency parameters for SCO when not deferred, or defers to
 * the upper layer via hci_connect_cfm() in BT_CONNECT2.
 */
2868 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2870 struct hci_ev_conn_request *ev = (void *) skb->data;
2871 int mask = hdev->link_mode;
2872 struct inquiry_entry *ie;
2873 struct hci_conn *conn;
2876 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2879 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2882 if (!(mask & HCI_LM_ACCEPT)) {
2883 hci_reject_conn(hdev, &ev->bdaddr);
2889 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2891 hci_reject_conn(hdev, &ev->bdaddr);
2895 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2896 * connection. These features are only touched through mgmt so
2897 * only do the checks if HCI_MGMT is set.
2899 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2900 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2901 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2903 hci_reject_conn(hdev, &ev->bdaddr);
2907 /* Connection accepted */
2909 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2911 memcpy(ie->data.dev_class, ev->dev_class, 3);
2913 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2916 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2919 bt_dev_err(hdev, "no memory for new connection");
2924 memcpy(conn->dev_class, ev->dev_class, 3);
2926 hci_dev_unlock(hdev);
2928 if (ev->link_type == ACL_LINK ||
2929 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2930 struct hci_cp_accept_conn_req cp;
2931 conn->state = BT_CONNECT;
2933 bacpy(&cp.bdaddr, &ev->bdaddr);
2935 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2936 cp.role = 0x00; /* Become central */
2938 cp.role = 0x01; /* Remain peripheral */
2940 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2941 } else if (!(flags & HCI_PROTO_DEFER)) {
2942 struct hci_cp_accept_sync_conn_req cp;
2943 conn->state = BT_CONNECT;
2945 bacpy(&cp.bdaddr, &ev->bdaddr);
2946 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2948 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2949 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2950 cp.max_latency = cpu_to_le16(0xffff);
2951 cp.content_format = cpu_to_le16(hdev->voice_setting);
2952 cp.retrans_effort = 0xff;
2954 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2957 conn->state = BT_CONNECT2;
2958 hci_connect_cfm(conn, 0);
2963 hci_dev_unlock(hdev);
/* Map an HCI disconnect error code to the corresponding mgmt
 * disconnect reason; anything unrecognised becomes
 * MGMT_DEV_DISCONN_UNKNOWN.
 */
2966 static u8 hci_to_mgmt_reason(u8 err)
2969 case HCI_ERROR_CONNECTION_TIMEOUT:
2970 return MGMT_DEV_DISCONN_TIMEOUT;
2971 case HCI_ERROR_REMOTE_USER_TERM:
2972 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2973 case HCI_ERROR_REMOTE_POWER_OFF:
2974 return MGMT_DEV_DISCONN_REMOTE;
2975 case HCI_ERROR_LOCAL_HOST_TERM:
2976 return MGMT_DEV_DISCONN_LOCAL_HOST;
2978 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event. On failure, reports a failed
 * disconnect to mgmt. On success: closes the conn, translates the HCI
 * reason for mgmt (auth failures take precedence), notifies mgmt of
 * the disconnection, and for ACL links removes a flush-marked link key
 * and refreshes page/inquiry scan. Connection parameters with
 * auto-connect DIRECT/ALWAYS (or LINK_LOSS after a timeout) are moved
 * back onto the pending-LE-connections list and background scanning is
 * re-evaluated. Finally it signals the suspend waiter once the
 * connection hash drains, and for LE links re-enables the advertising
 * instance that the connection had suppressed (per the core spec,
 * advertising stops when a connection is created).
 */
2982 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2984 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2986 struct hci_conn_params *params;
2987 struct hci_conn *conn;
2988 bool mgmt_connected;
2990 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2994 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2999 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3000 conn->dst_type, ev->status);
3004 conn->state = BT_CLOSED;
3006 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3008 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3009 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3011 reason = hci_to_mgmt_reason(ev->reason);
3013 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3014 reason, mgmt_connected);
3016 if (conn->type == ACL_LINK) {
3017 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3018 hci_remove_link_key(hdev, &conn->dst);
3020 hci_req_update_scan(hdev);
3023 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3025 switch (params->auto_connect) {
3026 case HCI_AUTO_CONN_LINK_LOSS:
3027 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3031 case HCI_AUTO_CONN_DIRECT:
3032 case HCI_AUTO_CONN_ALWAYS:
3033 list_del_init(&params->action);
3034 list_add(&params->action, &hdev->pend_le_conns);
3035 hci_update_background_scan(hdev);
3043 hci_disconn_cfm(conn, ev->reason);
3045 /* The suspend notifier is waiting for all devices to disconnect so
3046 * clear the bit from pending tasks and inform the wait queue.
3048 if (list_empty(&hdev->conn_hash.list) &&
3049 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
3050 wake_up(&hdev->suspend_wait_q);
3053 /* Re-enable advertising if necessary, since it might
3054 * have been disabled by the connection. From the
3055 * HCI_LE_Set_Advertise_Enable command description in
3056 * the core specification (v4.0):
3057 * "The Controller shall continue advertising until the Host
3058 * issues an LE_Set_Advertise_Enable command with
3059 * Advertising_Enable set to 0x00 (Advertising is disabled)
3060 * or until a connection is created or until the Advertising
3061 * is timed out due to Directed Advertising."
3063 if (conn->type == LE_LINK) {
3064 hdev->cur_adv_instance = conn->adv_instance;
3065 hci_req_reenable_advertising(hdev);
3071 hci_dev_unlock(hdev);
3074 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3076 struct hci_ev_auth_complete *ev = (void *) skb->data;
3077 struct hci_conn *conn;
3079 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3083 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3088 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3090 if (!hci_conn_ssp_enabled(conn) &&
3091 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3092 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3094 set_bit(HCI_CONN_AUTH, &conn->flags);
3095 conn->sec_level = conn->pending_sec_level;
3098 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3099 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3101 mgmt_auth_failed(conn, ev->status);
3104 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3105 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3107 if (conn->state == BT_CONFIG) {
3108 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3109 struct hci_cp_set_conn_encrypt cp;
3110 cp.handle = ev->handle;
3112 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3115 conn->state = BT_CONNECTED;
3116 hci_connect_cfm(conn, ev->status);
3117 hci_conn_drop(conn);
3120 hci_auth_cfm(conn, ev->status);
3122 hci_conn_hold(conn);
3123 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3124 hci_conn_drop(conn);
3127 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3129 struct hci_cp_set_conn_encrypt cp;
3130 cp.handle = ev->handle;
3132 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3135 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3136 hci_encrypt_cfm(conn, ev->status);
3141 hci_dev_unlock(hdev);
3144 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3146 struct hci_ev_remote_name *ev = (void *) skb->data;
3147 struct hci_conn *conn;
3149 BT_DBG("%s", hdev->name);
3151 hci_conn_check_pending(hdev);
3155 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3157 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3160 if (ev->status == 0)
3161 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3162 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3164 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3170 if (!hci_outgoing_auth_needed(hdev, conn))
3173 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3174 struct hci_cp_auth_requested cp;
3176 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3178 cp.handle = __cpu_to_le16(conn->handle);
3179 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3183 hci_dev_unlock(hdev);
3186 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3187 u16 opcode, struct sk_buff *skb)
3189 const struct hci_rp_read_enc_key_size *rp;
3190 struct hci_conn *conn;
3193 BT_DBG("%s status 0x%02x", hdev->name, status);
3195 if (!skb || skb->len < sizeof(*rp)) {
3196 bt_dev_err(hdev, "invalid read key size response");
3200 rp = (void *)skb->data;
3201 handle = le16_to_cpu(rp->handle);
3205 conn = hci_conn_hash_lookup_handle(hdev, handle);
3209 /* While unexpected, the read_enc_key_size command may fail. The most
3210 * secure approach is to then assume the key size is 0 to force a
3214 bt_dev_err(hdev, "failed to read key size for handle %u",
3216 conn->enc_key_size = 0;
3218 conn->enc_key_size = rp->key_size;
3221 hci_encrypt_cfm(conn, 0);
3224 hci_dev_unlock(hdev);
3227 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3229 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3230 struct hci_conn *conn;
3232 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3236 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3242 /* Encryption implies authentication */
3243 set_bit(HCI_CONN_AUTH, &conn->flags);
3244 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3245 conn->sec_level = conn->pending_sec_level;
3247 /* P-256 authentication key implies FIPS */
3248 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3249 set_bit(HCI_CONN_FIPS, &conn->flags);
3251 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3252 conn->type == LE_LINK)
3253 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3255 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3256 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3260 /* We should disregard the current RPA and generate a new one
3261 * whenever the encryption procedure fails.
3263 if (ev->status && conn->type == LE_LINK) {
3264 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3265 hci_adv_instances_set_rpa_expired(hdev, true);
3268 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3270 /* Check link security requirements are met */
3271 if (!hci_conn_check_link_mode(conn))
3272 ev->status = HCI_ERROR_AUTH_FAILURE;
3274 if (ev->status && conn->state == BT_CONNECTED) {
3275 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3276 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3278 /* Notify upper layers so they can cleanup before
3281 hci_encrypt_cfm(conn, ev->status);
3282 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3283 hci_conn_drop(conn);
3287 /* Try reading the encryption key size for encrypted ACL links */
3288 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3289 struct hci_cp_read_enc_key_size cp;
3290 struct hci_request req;
3292 /* Only send HCI_Read_Encryption_Key_Size if the
3293 * controller really supports it. If it doesn't, assume
3294 * the default size (16).
3296 if (!(hdev->commands[20] & 0x10)) {
3297 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3301 hci_req_init(&req, hdev);
3303 cp.handle = cpu_to_le16(conn->handle);
3304 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3306 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3307 bt_dev_err(hdev, "sending read key size failed");
3308 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3315 /* Set the default Authenticated Payload Timeout after
3316 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3317 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3318 * sent when the link is active and Encryption is enabled, the conn
3319 * type can be either LE or ACL and controller must support LMP Ping.
3320 * Ensure for AES-CCM encryption as well.
3322 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3323 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3324 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3325 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3326 struct hci_cp_write_auth_payload_to cp;
3328 cp.handle = cpu_to_le16(conn->handle);
3329 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3330 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3335 hci_encrypt_cfm(conn, ev->status);
3338 hci_dev_unlock(hdev);
3341 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3342 struct sk_buff *skb)
3344 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3345 struct hci_conn *conn;
3347 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3351 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3354 set_bit(HCI_CONN_SECURE, &conn->flags);
3356 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3358 hci_key_change_cfm(conn, ev->status);
3361 hci_dev_unlock(hdev);
3364 static void hci_remote_features_evt(struct hci_dev *hdev,
3365 struct sk_buff *skb)
3367 struct hci_ev_remote_features *ev = (void *) skb->data;
3368 struct hci_conn *conn;
3370 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3374 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3379 memcpy(conn->features[0], ev->features, 8);
3381 if (conn->state != BT_CONFIG)
3384 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3385 lmp_ext_feat_capable(conn)) {
3386 struct hci_cp_read_remote_ext_features cp;
3387 cp.handle = ev->handle;
3389 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3394 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3395 struct hci_cp_remote_name_req cp;
3396 memset(&cp, 0, sizeof(cp));
3397 bacpy(&cp.bdaddr, &conn->dst);
3398 cp.pscan_rep_mode = 0x02;
3399 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3400 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3401 mgmt_device_connected(hdev, conn, NULL, 0);
3403 if (!hci_outgoing_auth_needed(hdev, conn)) {
3404 conn->state = BT_CONNECTED;
3405 hci_connect_cfm(conn, ev->status);
3406 hci_conn_drop(conn);
3410 hci_dev_unlock(hdev);
3413 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3415 cancel_delayed_work(&hdev->cmd_timer);
3417 if (!test_bit(HCI_RESET, &hdev->flags)) {
3419 cancel_delayed_work(&hdev->ncmd_timer);
3420 atomic_set(&hdev->cmd_cnt, 1);
3422 schedule_delayed_work(&hdev->ncmd_timer,
3428 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3429 u16 *opcode, u8 *status,
3430 hci_req_complete_t *req_complete,
3431 hci_req_complete_skb_t *req_complete_skb)
3433 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3435 *opcode = __le16_to_cpu(ev->opcode);
3436 *status = skb->data[sizeof(*ev)];
3438 skb_pull(skb, sizeof(*ev));
3441 case HCI_OP_INQUIRY_CANCEL:
3442 hci_cc_inquiry_cancel(hdev, skb, status);
3445 case HCI_OP_PERIODIC_INQ:
3446 hci_cc_periodic_inq(hdev, skb);
3449 case HCI_OP_EXIT_PERIODIC_INQ:
3450 hci_cc_exit_periodic_inq(hdev, skb);
3453 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3454 hci_cc_remote_name_req_cancel(hdev, skb);
3457 case HCI_OP_ROLE_DISCOVERY:
3458 hci_cc_role_discovery(hdev, skb);
3461 case HCI_OP_READ_LINK_POLICY:
3462 hci_cc_read_link_policy(hdev, skb);
3465 case HCI_OP_WRITE_LINK_POLICY:
3466 hci_cc_write_link_policy(hdev, skb);
3469 case HCI_OP_READ_DEF_LINK_POLICY:
3470 hci_cc_read_def_link_policy(hdev, skb);
3473 case HCI_OP_WRITE_DEF_LINK_POLICY:
3474 hci_cc_write_def_link_policy(hdev, skb);
3478 hci_cc_reset(hdev, skb);
3481 case HCI_OP_READ_STORED_LINK_KEY:
3482 hci_cc_read_stored_link_key(hdev, skb);
3485 case HCI_OP_DELETE_STORED_LINK_KEY:
3486 hci_cc_delete_stored_link_key(hdev, skb);
3489 case HCI_OP_WRITE_LOCAL_NAME:
3490 hci_cc_write_local_name(hdev, skb);
3493 case HCI_OP_READ_LOCAL_NAME:
3494 hci_cc_read_local_name(hdev, skb);
3497 case HCI_OP_WRITE_AUTH_ENABLE:
3498 hci_cc_write_auth_enable(hdev, skb);
3501 case HCI_OP_WRITE_ENCRYPT_MODE:
3502 hci_cc_write_encrypt_mode(hdev, skb);
3505 case HCI_OP_WRITE_SCAN_ENABLE:
3506 hci_cc_write_scan_enable(hdev, skb);
3509 case HCI_OP_SET_EVENT_FLT:
3510 hci_cc_set_event_filter(hdev, skb);
3513 case HCI_OP_READ_CLASS_OF_DEV:
3514 hci_cc_read_class_of_dev(hdev, skb);
3517 case HCI_OP_WRITE_CLASS_OF_DEV:
3518 hci_cc_write_class_of_dev(hdev, skb);
3521 case HCI_OP_READ_VOICE_SETTING:
3522 hci_cc_read_voice_setting(hdev, skb);
3525 case HCI_OP_WRITE_VOICE_SETTING:
3526 hci_cc_write_voice_setting(hdev, skb);
3529 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3530 hci_cc_read_num_supported_iac(hdev, skb);
3533 case HCI_OP_WRITE_SSP_MODE:
3534 hci_cc_write_ssp_mode(hdev, skb);
3537 case HCI_OP_WRITE_SC_SUPPORT:
3538 hci_cc_write_sc_support(hdev, skb);
3541 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3542 hci_cc_read_auth_payload_timeout(hdev, skb);
3545 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3546 hci_cc_write_auth_payload_timeout(hdev, skb);
3549 case HCI_OP_READ_LOCAL_VERSION:
3550 hci_cc_read_local_version(hdev, skb);
3553 case HCI_OP_READ_LOCAL_COMMANDS:
3554 hci_cc_read_local_commands(hdev, skb);
3557 case HCI_OP_READ_LOCAL_FEATURES:
3558 hci_cc_read_local_features(hdev, skb);
3561 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3562 hci_cc_read_local_ext_features(hdev, skb);
3565 case HCI_OP_READ_BUFFER_SIZE:
3566 hci_cc_read_buffer_size(hdev, skb);
3569 case HCI_OP_READ_BD_ADDR:
3570 hci_cc_read_bd_addr(hdev, skb);
3573 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3574 hci_cc_read_local_pairing_opts(hdev, skb);
3577 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3578 hci_cc_read_page_scan_activity(hdev, skb);
3581 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3582 hci_cc_write_page_scan_activity(hdev, skb);
3585 case HCI_OP_READ_PAGE_SCAN_TYPE:
3586 hci_cc_read_page_scan_type(hdev, skb);
3589 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3590 hci_cc_write_page_scan_type(hdev, skb);
3593 case HCI_OP_READ_DATA_BLOCK_SIZE:
3594 hci_cc_read_data_block_size(hdev, skb);
3597 case HCI_OP_READ_FLOW_CONTROL_MODE:
3598 hci_cc_read_flow_control_mode(hdev, skb);
3601 case HCI_OP_READ_LOCAL_AMP_INFO:
3602 hci_cc_read_local_amp_info(hdev, skb);
3605 case HCI_OP_READ_CLOCK:
3606 hci_cc_read_clock(hdev, skb);
3609 case HCI_OP_READ_INQ_RSP_TX_POWER:
3610 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3613 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3614 hci_cc_read_def_err_data_reporting(hdev, skb);
3617 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3618 hci_cc_write_def_err_data_reporting(hdev, skb);
3621 case HCI_OP_PIN_CODE_REPLY:
3622 hci_cc_pin_code_reply(hdev, skb);
3625 case HCI_OP_PIN_CODE_NEG_REPLY:
3626 hci_cc_pin_code_neg_reply(hdev, skb);
3629 case HCI_OP_READ_LOCAL_OOB_DATA:
3630 hci_cc_read_local_oob_data(hdev, skb);
3633 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3634 hci_cc_read_local_oob_ext_data(hdev, skb);
3637 case HCI_OP_LE_READ_BUFFER_SIZE:
3638 hci_cc_le_read_buffer_size(hdev, skb);
3641 case HCI_OP_LE_READ_LOCAL_FEATURES:
3642 hci_cc_le_read_local_features(hdev, skb);
3645 case HCI_OP_LE_READ_ADV_TX_POWER:
3646 hci_cc_le_read_adv_tx_power(hdev, skb);
3649 case HCI_OP_USER_CONFIRM_REPLY:
3650 hci_cc_user_confirm_reply(hdev, skb);
3653 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3654 hci_cc_user_confirm_neg_reply(hdev, skb);
3657 case HCI_OP_USER_PASSKEY_REPLY:
3658 hci_cc_user_passkey_reply(hdev, skb);
3661 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3662 hci_cc_user_passkey_neg_reply(hdev, skb);
3665 case HCI_OP_LE_SET_RANDOM_ADDR:
3666 hci_cc_le_set_random_addr(hdev, skb);
3669 case HCI_OP_LE_SET_ADV_ENABLE:
3670 hci_cc_le_set_adv_enable(hdev, skb);
3673 case HCI_OP_LE_SET_SCAN_PARAM:
3674 hci_cc_le_set_scan_param(hdev, skb);
3677 case HCI_OP_LE_SET_SCAN_ENABLE:
3678 hci_cc_le_set_scan_enable(hdev, skb);
3681 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3682 hci_cc_le_read_accept_list_size(hdev, skb);
3685 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3686 hci_cc_le_clear_accept_list(hdev, skb);
3689 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3690 hci_cc_le_add_to_accept_list(hdev, skb);
3693 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3694 hci_cc_le_del_from_accept_list(hdev, skb);
3697 case HCI_OP_LE_READ_SUPPORTED_STATES:
3698 hci_cc_le_read_supported_states(hdev, skb);
3701 case HCI_OP_LE_READ_DEF_DATA_LEN:
3702 hci_cc_le_read_def_data_len(hdev, skb);
3705 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3706 hci_cc_le_write_def_data_len(hdev, skb);
3709 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3710 hci_cc_le_add_to_resolv_list(hdev, skb);
3713 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3714 hci_cc_le_del_from_resolv_list(hdev, skb);
3717 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3718 hci_cc_le_clear_resolv_list(hdev, skb);
3721 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3722 hci_cc_le_read_resolv_list_size(hdev, skb);
3725 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3726 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3729 case HCI_OP_LE_READ_MAX_DATA_LEN:
3730 hci_cc_le_read_max_data_len(hdev, skb);
3733 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3734 hci_cc_write_le_host_supported(hdev, skb);
3737 case HCI_OP_LE_SET_ADV_PARAM:
3738 hci_cc_set_adv_param(hdev, skb);
3741 case HCI_OP_READ_RSSI:
3742 hci_cc_read_rssi(hdev, skb);
3745 case HCI_OP_READ_TX_POWER:
3746 hci_cc_read_tx_power(hdev, skb);
3749 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3750 hci_cc_write_ssp_debug_mode(hdev, skb);
3753 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3754 hci_cc_le_set_ext_scan_param(hdev, skb);
3757 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3758 hci_cc_le_set_ext_scan_enable(hdev, skb);
3761 case HCI_OP_LE_SET_DEFAULT_PHY:
3762 hci_cc_le_set_default_phy(hdev, skb);
3765 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3766 hci_cc_le_read_num_adv_sets(hdev, skb);
3769 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3770 hci_cc_set_ext_adv_param(hdev, skb);
3773 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3774 hci_cc_le_set_ext_adv_enable(hdev, skb);
3777 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3778 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3781 case HCI_OP_LE_READ_TRANSMIT_POWER:
3782 hci_cc_le_read_transmit_power(hdev, skb);
3785 case HCI_OP_ENABLE_RSSI:
3786 hci_cc_enable_rssi(hdev, skb);
3789 case HCI_OP_GET_RAW_RSSI:
3790 hci_cc_get_raw_rssi(hdev, skb);
3794 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3798 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3800 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3803 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3805 "unexpected event for opcode 0x%4.4x", *opcode);
3809 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3810 queue_work(hdev->workqueue, &hdev->cmd_work);
3813 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3814 u16 *opcode, u8 *status,
3815 hci_req_complete_t *req_complete,
3816 hci_req_complete_skb_t *req_complete_skb)
3818 struct hci_ev_cmd_status *ev = (void *) skb->data;
3820 skb_pull(skb, sizeof(*ev));
3822 *opcode = __le16_to_cpu(ev->opcode);
3823 *status = ev->status;
3826 case HCI_OP_INQUIRY:
3827 hci_cs_inquiry(hdev, ev->status);
3830 case HCI_OP_CREATE_CONN:
3831 hci_cs_create_conn(hdev, ev->status);
3834 case HCI_OP_DISCONNECT:
3835 hci_cs_disconnect(hdev, ev->status);
3838 case HCI_OP_ADD_SCO:
3839 hci_cs_add_sco(hdev, ev->status);
3842 case HCI_OP_AUTH_REQUESTED:
3843 hci_cs_auth_requested(hdev, ev->status);
3846 case HCI_OP_SET_CONN_ENCRYPT:
3847 hci_cs_set_conn_encrypt(hdev, ev->status);
3850 case HCI_OP_REMOTE_NAME_REQ:
3851 hci_cs_remote_name_req(hdev, ev->status);
3854 case HCI_OP_READ_REMOTE_FEATURES:
3855 hci_cs_read_remote_features(hdev, ev->status);
3858 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3859 hci_cs_read_remote_ext_features(hdev, ev->status);
3862 case HCI_OP_SETUP_SYNC_CONN:
3863 hci_cs_setup_sync_conn(hdev, ev->status);
3866 case HCI_OP_SNIFF_MODE:
3867 hci_cs_sniff_mode(hdev, ev->status);
3870 case HCI_OP_EXIT_SNIFF_MODE:
3871 hci_cs_exit_sniff_mode(hdev, ev->status);
3874 case HCI_OP_SWITCH_ROLE:
3875 hci_cs_switch_role(hdev, ev->status);
3878 case HCI_OP_LE_CREATE_CONN:
3879 hci_cs_le_create_conn(hdev, ev->status);
3882 case HCI_OP_LE_READ_REMOTE_FEATURES:
3883 hci_cs_le_read_remote_features(hdev, ev->status);
3886 case HCI_OP_LE_START_ENC:
3887 hci_cs_le_start_enc(hdev, ev->status);
3890 case HCI_OP_LE_EXT_CREATE_CONN:
3891 hci_cs_le_ext_create_conn(hdev, ev->status);
3895 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3899 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3901 /* Indicate request completion if the command failed. Also, if
3902 * we're not waiting for a special event and we get a success
3903 * command status we should try to flag the request as completed
3904 * (since for this kind of commands there will not be a command
3908 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3909 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3912 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3914 "unexpected event for opcode 0x%4.4x", *opcode);
3918 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3919 queue_work(hdev->workqueue, &hdev->cmd_work);
3922 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3924 struct hci_ev_hardware_error *ev = (void *) skb->data;
3928 mgmt_hardware_error(hdev, ev->code);
3929 hci_dev_unlock(hdev);
3931 hdev->hw_error_code = ev->code;
3933 queue_work(hdev->req_workqueue, &hdev->error_reset);
3936 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3938 struct hci_ev_role_change *ev = (void *) skb->data;
3939 struct hci_conn *conn;
3941 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3945 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3948 conn->role = ev->role;
3950 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3952 hci_role_switch_cfm(conn, ev->status, ev->role);
3955 hci_dev_unlock(hdev);
3958 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3960 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3963 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3964 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3968 if (skb->len < sizeof(*ev) ||
3969 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3970 BT_DBG("%s bad parameters", hdev->name);
3974 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3976 for (i = 0; i < ev->num_hndl; i++) {
3977 struct hci_comp_pkts_info *info = &ev->handles[i];
3978 struct hci_conn *conn;
3979 __u16 handle, count;
3981 handle = __le16_to_cpu(info->handle);
3982 count = __le16_to_cpu(info->count);
3984 conn = hci_conn_hash_lookup_handle(hdev, handle);
3988 conn->sent -= count;
3990 switch (conn->type) {
3992 hdev->acl_cnt += count;
3993 if (hdev->acl_cnt > hdev->acl_pkts)
3994 hdev->acl_cnt = hdev->acl_pkts;
3998 if (hdev->le_pkts) {
3999 hdev->le_cnt += count;
4000 if (hdev->le_cnt > hdev->le_pkts)
4001 hdev->le_cnt = hdev->le_pkts;
4003 hdev->acl_cnt += count;
4004 if (hdev->acl_cnt > hdev->acl_pkts)
4005 hdev->acl_cnt = hdev->acl_pkts;
4010 hdev->sco_cnt += count;
4011 if (hdev->sco_cnt > hdev->sco_pkts)
4012 hdev->sco_cnt = hdev->sco_pkts;
4016 bt_dev_err(hdev, "unknown type %d conn %p",
4022 queue_work(hdev->workqueue, &hdev->tx_work);
4025 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4028 struct hci_chan *chan;
4030 switch (hdev->dev_type) {
4032 return hci_conn_hash_lookup_handle(hdev, handle);
4034 chan = hci_chan_lookup_handle(hdev, handle);
4039 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4046 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
4048 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
4051 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4052 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4056 if (skb->len < sizeof(*ev) ||
4057 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4058 BT_DBG("%s bad parameters", hdev->name);
4062 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
4065 for (i = 0; i < ev->num_hndl; i++) {
4066 struct hci_comp_blocks_info *info = &ev->handles[i];
4067 struct hci_conn *conn = NULL;
4068 __u16 handle, block_count;
4070 handle = __le16_to_cpu(info->handle);
4071 block_count = __le16_to_cpu(info->blocks);
4073 conn = __hci_conn_lookup_handle(hdev, handle);
4077 conn->sent -= block_count;
4079 switch (conn->type) {
4082 hdev->block_cnt += block_count;
4083 if (hdev->block_cnt > hdev->num_blocks)
4084 hdev->block_cnt = hdev->num_blocks;
4088 bt_dev_err(hdev, "unknown type %d conn %p",
4094 queue_work(hdev->workqueue, &hdev->tx_work);
4097 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4099 struct hci_ev_mode_change *ev = (void *) skb->data;
4100 struct hci_conn *conn;
4102 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4106 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4108 conn->mode = ev->mode;
4110 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4112 if (conn->mode == HCI_CM_ACTIVE)
4113 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4115 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4118 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4119 hci_sco_setup(conn, ev->status);
4122 hci_dev_unlock(hdev);
4125 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4127 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4128 struct hci_conn *conn;
4130 BT_DBG("%s", hdev->name);
4134 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4138 if (conn->state == BT_CONNECTED) {
4139 hci_conn_hold(conn);
4140 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4141 hci_conn_drop(conn);
4144 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4145 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4146 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4147 sizeof(ev->bdaddr), &ev->bdaddr);
4148 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4151 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4156 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4160 hci_dev_unlock(hdev);
4163 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4165 if (key_type == HCI_LK_CHANGED_COMBINATION)
4168 conn->pin_length = pin_len;
4169 conn->key_type = key_type;
4172 case HCI_LK_LOCAL_UNIT:
4173 case HCI_LK_REMOTE_UNIT:
4174 case HCI_LK_DEBUG_COMBINATION:
4176 case HCI_LK_COMBINATION:
4178 conn->pending_sec_level = BT_SECURITY_HIGH;
4180 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4182 case HCI_LK_UNAUTH_COMBINATION_P192:
4183 case HCI_LK_UNAUTH_COMBINATION_P256:
4184 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4186 case HCI_LK_AUTH_COMBINATION_P192:
4187 conn->pending_sec_level = BT_SECURITY_HIGH;
4189 case HCI_LK_AUTH_COMBINATION_P256:
4190 conn->pending_sec_level = BT_SECURITY_FIPS;
4195 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4197 struct hci_ev_link_key_req *ev = (void *) skb->data;
4198 struct hci_cp_link_key_reply cp;
4199 struct hci_conn *conn;
4200 struct link_key *key;
4202 BT_DBG("%s", hdev->name);
4204 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4209 key = hci_find_link_key(hdev, &ev->bdaddr);
4211 BT_DBG("%s link key not found for %pMR", hdev->name,
4216 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4219 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4221 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4223 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4224 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4225 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4226 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4230 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4231 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4232 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4233 BT_DBG("%s ignoring key unauthenticated for high security",
4238 conn_set_key(conn, key->type, key->pin_len);
4241 bacpy(&cp.bdaddr, &ev->bdaddr);
4242 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4244 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4246 hci_dev_unlock(hdev);
4251 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4252 hci_dev_unlock(hdev);
4255 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4257 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4258 struct hci_conn *conn;
4259 struct link_key *key;
4263 BT_DBG("%s", hdev->name);
4267 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4271 hci_conn_hold(conn);
4272 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4273 hci_conn_drop(conn);
4275 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4276 conn_set_key(conn, ev->key_type, conn->pin_length);
4278 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4281 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4282 ev->key_type, pin_len, &persistent);
4286 /* Update connection information since adding the key will have
4287 * fixed up the type in the case of changed combination keys.
4289 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4290 conn_set_key(conn, key->type, key->pin_len);
4292 mgmt_new_link_key(hdev, key, persistent);
4294 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4295 * is set. If it's not set simply remove the key from the kernel
4296 * list (we've still notified user space about it but with
4297 * store_hint being 0).
4299 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4300 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4301 list_del_rcu(&key->list);
4302 kfree_rcu(key, rcu);
4307 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4309 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4312 hci_dev_unlock(hdev);
4315 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4317 struct hci_ev_clock_offset *ev = (void *) skb->data;
4318 struct hci_conn *conn;
4320 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4324 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4325 if (conn && !ev->status) {
4326 struct inquiry_entry *ie;
4328 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4330 ie->data.clock_offset = ev->clock_offset;
4331 ie->timestamp = jiffies;
4335 hci_dev_unlock(hdev);
4338 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4340 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4341 struct hci_conn *conn;
4343 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4347 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4348 if (conn && !ev->status)
4349 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4351 hci_dev_unlock(hdev);
4354 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4356 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4357 struct inquiry_entry *ie;
4359 BT_DBG("%s", hdev->name);
4363 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4365 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4366 ie->timestamp = jiffies;
4369 hci_dev_unlock(hdev);
/* Inquiry Result with RSSI event: parse one or more inquiry responses,
 * update the inquiry cache and report each discovered device to the
 * management interface.  Two wire formats exist; they are distinguished
 * by the per-response record size.
 * NOTE(review): the elided lines should include a "!num_rsp" early
 * return — without it the division below is a divide-by-zero; verify.
 */
4372 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4373 struct sk_buff *skb)
4375 struct inquiry_data data;
/* First byte of the event is the number of responses that follow */
4376 int num_rsp = *((__u8 *) skb->data);
4378 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results from periodic inquiry are not forwarded */
4383 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Record size discriminates the variant that carries a pscan_mode field */
4388 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4389 struct inquiry_info_with_rssi_and_pscan_mode *info;
4390 info = (void *) (skb->data + 1);
/* Bounds check: refuse to read past the end of the skb (OOB guard) */
4392 if (skb->len < num_rsp * sizeof(*info) + 1)
4395 for (; num_rsp; num_rsp--, info++) {
4398 bacpy(&data.bdaddr, &info->bdaddr);
4399 data.pscan_rep_mode = info->pscan_rep_mode;
4400 data.pscan_period_mode = info->pscan_period_mode;
4401 data.pscan_mode = info->pscan_mode;
4402 memcpy(data.dev_class, info->dev_class, 3);
4403 data.clock_offset = info->clock_offset;
4404 data.rssi = info->rssi;
/* Plain inquiry results carry no SSP/EIR indication */
4405 data.ssp_mode = 0x00;
4407 flags = hci_inquiry_cache_update(hdev, &data, false);
4409 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4410 info->dev_class, info->rssi,
4411 flags, NULL, 0, NULL, 0);
/* Variant without the pscan_mode field */
4414 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
/* Same OOB guard for the smaller record layout */
4416 if (skb->len < num_rsp * sizeof(*info) + 1)
4419 for (; num_rsp; num_rsp--, info++) {
4422 bacpy(&data.bdaddr, &info->bdaddr);
4423 data.pscan_rep_mode = info->pscan_rep_mode;
4424 data.pscan_period_mode = info->pscan_period_mode;
4425 data.pscan_mode = 0x00;
4426 memcpy(data.dev_class, info->dev_class, 3);
4427 data.clock_offset = info->clock_offset;
4428 data.rssi = info->rssi;
4429 data.ssp_mode = 0x00;
4431 flags = hci_inquiry_cache_update(hdev, &data, false);
4433 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4434 info->dev_class, info->rssi,
4435 flags, NULL, 0, NULL, 0);
4440 hci_dev_unlock(hdev);
/* Remote Extended Features Complete event: store the reported feature
 * page on the connection and, for page 1 (host features), derive the
 * SSP / Secure Connections state.  While the connection is still in
 * BT_CONFIG this also continues connection setup (remote name request
 * or mgmt connected notification, then connect confirmation).
 */
4443 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4444 struct sk_buff *skb)
4446 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4447 struct hci_conn *conn;
4449 BT_DBG("%s", hdev->name);
4453 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache the feature page; page index is bounded to the array size */
4457 if (ev->page < HCI_MAX_PAGES)
4458 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 0x01 carries the remote *host* features (SSP/SC support) */
4460 if (!ev->status && ev->page == 0x01) {
4461 struct inquiry_entry *ie;
4463 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4465 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4467 if (ev->features[0] & LMP_HOST_SSP) {
4468 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4470 /* It is mandatory by the Bluetooth specification that
4471 * Extended Inquiry Results are only used when Secure
4472 * Simple Pairing is enabled, but some devices violate
4475 * To make these devices work, the internal SSP
4476 * enabled flag needs to be cleared if the remote host
4477 * features do not indicate SSP support */
4478 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4481 if (ev->features[0] & LMP_HOST_SC)
4482 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Below here only runs while connection setup is still in progress */
4485 if (conn->state != BT_CONFIG)
4488 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4489 struct hci_cp_remote_name_req cp;
4490 memset(&cp, 0, sizeof(cp));
4491 bacpy(&cp.bdaddr, &conn->dst);
/* 0x02 = R2 page scan repetition mode (most conservative paging) */
4492 cp.pscan_rep_mode = 0x02;
4493 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4494 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4495 mgmt_device_connected(hdev, conn, NULL, 0);
/* If no outgoing authentication is required, setup is complete */
4497 if (!hci_outgoing_auth_needed(hdev, conn)) {
4498 conn->state = BT_CONNECTED;
4499 hci_connect_cfm(conn, ev->status);
4500 hci_conn_drop(conn);
4504 hci_dev_unlock(hdev);
/* Synchronous Connection Complete event: finish SCO/eSCO setup.
 * Validates the link type, finds (or re-finds as eSCO) the pending
 * connection object, then either registers the new connection, retries
 * with a downgraded packet type on selected failure codes, or closes
 * the connection.  Finally notifies the driver of the negotiated air
 * mode and confirms the connection to upper layers.
 */
4507 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4508 struct sk_buff *skb)
4510 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4511 struct hci_conn *conn;
4513 switch (ev->link_type) {
4518 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4519 * for HCI_Synchronous_Connection_Complete is limited to
4520 * either SCO or eSCO
4522 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4526 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4530 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4532 if (ev->link_type == ESCO_LINK)
4535 /* When the link type in the event indicates SCO connection
4536 * and lookup of the connection object fails, then check
4537 * if an eSCO connection object exists.
4539 * The core limits the synchronous connections to either
4540 * SCO or eSCO. The eSCO connection is preferred and tried
4541 * to be setup first and until successfully established,
4542 * the link type will be hinted as eSCO.
4544 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4549 switch (ev->status) {
4551 /* The synchronous connection complete event should only be
4552 * sent once per new connection. Receiving a successful
4553 * complete event when the connection status is already
4554 * BT_CONNECTED means that the device is misbehaving and sent
4555 * multiple complete event packets for the same new connection.
4557 * Registering the device more than once can corrupt kernel
4558 * memory, hence upon detecting this invalid event, we report
4559 * an error and ignore the packet.
4561 if (conn->state == BT_CONNECTED) {
4562 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
/* Success: record handle/type and register the new connection */
4566 conn->handle = __le16_to_cpu(ev->handle);
4567 conn->state = BT_CONNECTED;
4568 conn->type = ev->link_type;
4570 hci_debugfs_create_conn(conn);
4571 hci_conn_add_sysfs(conn);
/* These failure codes are worth one retry with relaxed packet types */
4574 case 0x10: /* Connection Accept Timeout */
4575 case 0x0d: /* Connection Rejected due to Limited Resources */
4576 case 0x11: /* Unsupported Feature or Parameter Value */
4577 case 0x1c: /* SCO interval rejected */
4578 case 0x1a: /* Unsupported Remote Feature */
4579 case 0x1e: /* Invalid LMP Parameters */
4580 case 0x1f: /* Unspecified error */
4581 case 0x20: /* Unsupported LMP Parameter value */
/* Downgrade the packet type mask and retry the sync connection setup */
4583 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4584 (hdev->esco_type & EDR_ESCO_MASK);
4585 if (hci_setup_sync(conn, conn->link->handle))
/* Any other failure: give up on the connection */
4591 conn->state = BT_CLOSED;
4595 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Tell the driver which SCO codec routing to enable */
4597 switch (ev->air_mode) {
4600 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4604 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4608 hci_connect_cfm(conn, ev->status);
4613 hci_dev_unlock(hdev);
/* Walk the length-prefixed EIR (Extended Inquiry Response) fields and
 * return the number of bytes actually used, so trailing padding is not
 * reported.  Each field is one length byte followed by field_len bytes.
 * NOTE(review): the elided lines should include the "field_len == 0"
 * terminator check and the return statements — verify against full file.
 */
4616 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4620 while (parsed < eir_len) {
4621 u8 field_len = eir[0];
/* Advance past the length byte plus the field payload */
4626 parsed += field_len + 1;
4627 eir += field_len + 1;
/* Extended Inquiry Result event: like the RSSI variant but each record
 * additionally carries EIR data.  Updates the inquiry cache and reports
 * devices (with their EIR payload) to the management interface.
 */
4633 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4634 struct sk_buff *skb)
4636 struct inquiry_data data;
4637 struct extended_inquiry_info *info = (void *) (skb->data + 1);
/* First byte of the event is the number of responses that follow */
4638 int num_rsp = *((__u8 *) skb->data);
4641 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject empty events and bounds-check the payload (OOB guard) */
4643 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4646 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4651 for (; num_rsp; num_rsp--, info++) {
4655 bacpy(&data.bdaddr, &info->bdaddr);
4656 data.pscan_rep_mode = info->pscan_rep_mode;
4657 data.pscan_period_mode = info->pscan_period_mode;
4658 data.pscan_mode = 0x00;
4659 memcpy(data.dev_class, info->dev_class, 3);
4660 data.clock_offset = info->clock_offset;
4661 data.rssi = info->rssi;
/* EIR results imply the remote has SSP enabled */
4662 data.ssp_mode = 0x01;
/* A name-resolution round can be skipped if EIR has the complete name */
4664 if (hci_dev_test_flag(hdev, HCI_MGMT))
4665 name_known = eir_get_data(info->data,
4667 EIR_NAME_COMPLETE, NULL);
4671 flags = hci_inquiry_cache_update(hdev, &data, name_known);
/* Trim padded zero bytes from the EIR data before reporting it */
4673 eir_len = eir_get_length(info->data, sizeof(info->data));
4675 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4676 info->dev_class, info->rssi,
4677 flags, info->data, eir_len, NULL, 0);
4680 hci_dev_unlock(hdev);
/* Encryption Key Refresh Complete event (LE path): promote the pending
 * security level, and either finish connection setup (BT_CONFIG) or
 * confirm authentication.  On failure while connected the link is
 * disconnected with an authentication-failure reason.
 */
4683 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4684 struct sk_buff *skb)
4686 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4687 struct hci_conn *conn;
4689 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4690 __le16_to_cpu(ev->handle));
4694 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4698 /* For BR/EDR the necessary steps are taken through the
4699 * auth_complete event.
4701 if (conn->type != LE_LINK)
4705 conn->sec_level = conn->pending_sec_level;
4707 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Refresh failed on a live link: tear it down as an auth failure */
4709 if (ev->status && conn->state == BT_CONNECTED) {
4710 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4711 hci_conn_drop(conn);
4715 if (conn->state == BT_CONFIG) {
4717 conn->state = BT_CONNECTED;
4719 hci_connect_cfm(conn, ev->status);
4720 hci_conn_drop(conn);
4722 hci_auth_cfm(conn, ev->status);
/* Keep the connection around for the disconnect timeout window */
4724 hci_conn_hold(conn);
4725 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4726 hci_conn_drop(conn);
4730 hci_dev_unlock(hdev);
/* Compute the authentication requirement to send in an IO Capability
 * Reply, combining the remote's requested requirement with our own.
 * Bit 0 of the requirement is the MITM-protection flag.
 */
4733 static u8 hci_get_auth_req(struct hci_conn *conn)
4735 /* If remote requests no-bonding follow that lead */
4736 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4737 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
/* Keep remote's bonding choice, but carry over our own MITM bit */
4738 return conn->remote_auth | (conn->auth_type & 0x01);
4740 /* If both remote and local have enough IO capabilities, require
4743 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4744 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
/* Both sides can do MITM: force the MITM bit on */
4745 return conn->remote_auth | 0x01;
4747 /* No MITM protection possible so ignore remote requirement */
4748 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Decide the OOB_Data_Present value for an IO Capability Reply: checks
 * whether stored remote OOB data exists and whether the P-256 (Secure
 * Connections) or P-192 values it contains are actually usable.
 */
4751 static u8 bredr_oob_data_present(struct hci_conn *conn)
4753 struct hci_dev *hdev = conn->hdev;
4754 struct oob_data *data;
4756 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4760 if (bredr_sc_enabled(hdev)) {
4761 /* When Secure Connections is enabled, then just
4762 * return the present value stored with the OOB
4763 * data. The stored value contains the right present
4764 * information. However it can only be trusted when
4765 * not in Secure Connection Only mode.
4767 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4768 return data->present;
4770 /* When Secure Connections Only mode is enabled, then
4771 * the P-256 values are required. If they are not
4772 * available, then do not declare that OOB data is
4775 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4776 !memcmp(data->hash256, ZERO_KEY, 16))
4782 /* When Secure Connections is not enabled or actually
4783 * not supported by the hardware, then check that if
4784 * P-192 data values are present.
4786 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4787 !memcmp(data->hash192, ZERO_KEY, 16))
/* IO Capability Request event: the controller is asking us how to pair.
 * If pairing is permitted, reply with our IO capability, authentication
 * requirements and OOB-data presence; otherwise send a negative reply
 * rejecting the pairing.
 */
4793 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4795 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4796 struct hci_conn *conn;
4798 BT_DBG("%s", hdev->name);
4802 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold the connection for the duration of the pairing exchange */
4806 hci_conn_hold(conn);
4808 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4811 /* Allow pairing if we're pairable, the initiators of the
4812 * pairing or if the remote is not requesting bonding.
4814 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4815 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4816 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4817 struct hci_cp_io_capability_reply cp;
4819 bacpy(&cp.bdaddr, &ev->bdaddr);
4820 /* Change the IO capability from KeyboardDisplay
4821 * to DisplayYesNo as it is not supported by BT spec. */
4822 cp.capability = (conn->io_capability == 0x04) ?
4823 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4825 /* If we are initiators, there is no remote information yet */
4826 if (conn->remote_auth == 0xff) {
4827 /* Request MITM protection if our IO caps allow it
4828 * except for the no-bonding case.
4830 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4831 conn->auth_type != HCI_AT_NO_BONDING)
4832 conn->auth_type |= 0x01;
/* As responder, derive requirements from both sides' info */
4834 conn->auth_type = hci_get_auth_req(conn);
4837 /* If we're not bondable, force one of the non-bondable
4838 * authentication requirement values.
4840 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4841 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4843 cp.authentication = conn->auth_type;
4844 cp.oob_data = bredr_oob_data_present(conn);
4846 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
/* Pairing not allowed: send a negative reply with the reason */
4849 struct hci_cp_io_capability_neg_reply cp;
4851 bacpy(&cp.bdaddr, &ev->bdaddr);
4852 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4854 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4859 hci_dev_unlock(hdev);
/* IO Capability Response event: the remote side has told us its IO
 * capability and authentication requirements — cache both on the
 * connection for use by the pairing logic.
 */
4862 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4864 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4865 struct hci_conn *conn;
4867 BT_DBG("%s", hdev->name);
4871 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4875 conn->remote_cap = ev->capability;
4876 conn->remote_auth = ev->authentication;
4879 hci_dev_unlock(hdev);
/* User Confirmation Request event (numeric comparison pairing): decide
 * whether to auto-accept/auto-reject in the kernel or to forward the
 * confirmation decision to user space via mgmt, depending on the MITM
 * requirements of both sides and any existing link key.
 */
4882 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4883 struct sk_buff *skb)
4885 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4886 int loc_mitm, rem_mitm, confirm_hint = 0;
4887 struct hci_conn *conn;
4889 BT_DBG("%s", hdev->name);
4893 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4896 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM-protection flag */
4900 loc_mitm = (conn->auth_type & 0x01);
4901 rem_mitm = (conn->remote_auth & 0x01);
4903 /* If we require MITM but the remote device can't provide that
4904 * (it has NoInputNoOutput) then reject the confirmation
4905 * request. We check the security level here since it doesn't
4906 * necessarily match conn->auth_type.
4908 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4909 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4910 BT_DBG("Rejecting request: remote device can't provide MITM");
4911 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4912 sizeof(ev->bdaddr), &ev->bdaddr);
4916 /* If no side requires MITM protection; auto-accept */
4917 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4918 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4920 /* If we're not the initiators request authorization to
4921 * proceed from user space (mgmt_user_confirm with
4922 * confirm_hint set to 1). The exception is if neither
4923 * side had MITM or if the local IO capability is
4924 * NoInputNoOutput, in which case we do auto-accept
4926 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4927 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4928 (loc_mitm || rem_mitm)) {
4929 BT_DBG("Confirming auto-accept as acceptor");
4934 /* If there already exists link key in local host, leave the
4935 * decision to user space since the remote device could be
4936 * legitimate or malicious.
4938 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4939 bt_dev_dbg(hdev, "Local host already has link key");
4944 BT_DBG("Auto-accept of user confirmation with %ums delay",
4945 hdev->auto_accept_delay);
/* Optional configurable delay before the kernel auto-accepts */
4947 if (hdev->auto_accept_delay > 0) {
4948 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4949 queue_delayed_work(conn->hdev->workqueue,
4950 &conn->auto_accept_work, delay);
4954 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4955 sizeof(ev->bdaddr), &ev->bdaddr);
/* Otherwise hand the confirmation decision to user space */
4960 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4961 le32_to_cpu(ev->passkey), confirm_hint);
4964 hci_dev_unlock(hdev);
/* User Passkey Request event: forward the passkey entry request to
 * user space via the management interface (only when mgmt is active).
 */
4967 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4968 struct sk_buff *skb)
4970 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4972 BT_DBG("%s", hdev->name);
4974 if (hci_dev_test_flag(hdev, HCI_MGMT))
4975 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* User Passkey Notification event: remember the passkey the remote must
 * type, reset the entered-digit counter, and notify user space so it
 * can display the passkey.
 */
4978 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4979 struct sk_buff *skb)
4981 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4982 struct hci_conn *conn;
4984 BT_DBG("%s", hdev->name);
4986 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4990 conn->passkey_notify = __le32_to_cpu(ev->passkey);
/* No digits have been typed on the remote yet */
4991 conn->passkey_entered = 0;
4993 if (hci_dev_test_flag(hdev, HCI_MGMT))
4994 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4995 conn->dst_type, conn->passkey_notify,
4996 conn->passkey_entered);
/* Keypress Notification event: track how many passkey digits the remote
 * has entered/erased during passkey entry and relay the progress to
 * user space for display.
 */
4999 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
5001 struct hci_ev_keypress_notify *ev = (void *) skb->data;
5002 struct hci_conn *conn;
5004 BT_DBG("%s", hdev->name);
5006 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5011 case HCI_KEYPRESS_STARTED:
5012 conn->passkey_entered = 0;
5015 case HCI_KEYPRESS_ENTERED:
5016 conn->passkey_entered++;
5019 case HCI_KEYPRESS_ERASED:
5020 conn->passkey_entered--;
5023 case HCI_KEYPRESS_CLEARED:
5024 conn->passkey_entered = 0;
5027 case HCI_KEYPRESS_COMPLETED:
/* Report the updated digit count to user space */
5031 if (hci_dev_test_flag(hdev, HCI_MGMT))
5032 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5033 conn->dst_type, conn->passkey_notify,
5034 conn->passkey_entered);
/* Simple Pairing Complete event: reset the cached remote authentication
 * requirement and, for responder-side failures, report the auth failure
 * to user space (initiator failures are reported via auth_complete).
 */
5037 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
5038 struct sk_buff *skb)
5040 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
5041 struct hci_conn *conn;
5043 BT_DBG("%s", hdev->name);
5047 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5051 /* Reset the authentication requirement to unknown */
5052 conn->remote_auth = 0xff;
5054 /* To avoid duplicate auth_failed events to user space we check
5055 * the HCI_CONN_AUTH_PEND flag which will be set if we
5056 * initiated the authentication. A traditional auth_complete
5057 * event gets always produced as initiator and is also mapped to
5058 * the mgmt_auth_failed event */
5059 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5060 mgmt_auth_failed(conn, ev->status);
/* Drop the reference taken when the IO capability request arrived */
5062 hci_conn_drop(conn);
5065 hci_dev_unlock(hdev);
/* Remote Host Supported Features Notification event: cache the remote
 * host feature page on the connection and record the remote's SSP
 * support in the inquiry cache.
 * NOTE(review): elided lines likely include NULL checks on conn/ie —
 * confirm against the complete file.
 */
5068 static void hci_remote_host_features_evt(struct hci_dev *hdev,
5069 struct sk_buff *skb)
5071 struct hci_ev_remote_host_features *ev = (void *) skb->data;
5072 struct inquiry_entry *ie;
5073 struct hci_conn *conn;
5075 BT_DBG("%s", hdev->name);
5079 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Host features land on feature page 1 */
5081 memcpy(conn->features[1], ev->features, 8);
5083 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5085 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5087 hci_dev_unlock(hdev);
/* Remote OOB Data Request event: the controller wants the OOB pairing
 * values for the remote device.  Reply with stored hash/rand data —
 * extended (P-192 + P-256) when Secure Connections is enabled, P-192
 * only otherwise — or send a negative reply when no data is stored.
 */
5090 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5091 struct sk_buff *skb)
5093 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5094 struct oob_data *data;
5096 BT_DBG("%s", hdev->name);
5100 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5103 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data: negative reply */
5105 struct hci_cp_remote_oob_data_neg_reply cp;
5107 bacpy(&cp.bdaddr, &ev->bdaddr);
5108 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5113 if (bredr_sc_enabled(hdev)) {
5114 struct hci_cp_remote_oob_ext_data_reply cp;
5116 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-only mode the P-192 values must not be used: send zeros */
5117 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5118 memset(cp.hash192, 0, sizeof(cp.hash192));
5119 memset(cp.rand192, 0, sizeof(cp.rand192));
5121 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5122 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5124 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5125 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5127 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy pairing: P-192 values only */
5130 struct hci_cp_remote_oob_data_reply cp;
5132 bacpy(&cp.bdaddr, &ev->bdaddr);
5133 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5134 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5136 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5141 hci_dev_unlock(hdev);
5144 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (CONFIG_BT_HS): look up the physical link
 * by handle and kick off reading the final local AMP assoc data.
 */
5145 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5147 struct hci_ev_channel_selected *ev = (void *)skb->data;
5148 struct hci_conn *hcon;
5150 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5152 skb_pull(skb, sizeof(*ev));
5154 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5158 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: mark the AMP physical link as
 * connected, inherit the destination address from the underlying BR/EDR
 * connection, register the connection and confirm to the AMP manager.
 * NOTE(review): elided lines likely handle lookup failure and non-zero
 * status — confirm against the complete file.
 */
5161 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5162 struct sk_buff *skb)
5164 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5165 struct hci_conn *hcon, *bredr_hcon;
5167 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5172 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* The AMP link hangs off the BR/EDR connection of its AMP manager */
5184 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5186 hcon->state = BT_CONNECTED;
5187 bacpy(&hcon->dst, &bredr_hcon->dst);
5189 hci_conn_hold(hcon);
5190 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5191 hci_conn_drop(hcon);
5193 hci_debugfs_create_conn(hcon);
5194 hci_conn_add_sysfs(hcon);
5196 amp_physical_cfm(bredr_hcon, hcon);
5199 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create an hci_chan for the new
 * logical link on the physical connection and, if an L2CAP bredr
 * channel is waiting on the AMP manager, confirm the logical link and
 * propagate the controller's block MTU to the L2CAP connection.
 */
5202 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5204 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5205 struct hci_conn *hcon;
5206 struct hci_chan *hchan;
5207 struct amp_mgr *mgr;
5209 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5210 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5213 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5217 /* Create AMP hchan */
5218 hchan = hci_chan_create(hcon);
5222 hchan->handle = le16_to_cpu(ev->handle);
5225 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5227 mgr = hcon->amp_mgr;
5228 if (mgr && mgr->bredr_chan) {
5229 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5231 l2cap_chan_lock(bredr_chan);
/* The AMP controller's block MTU now bounds the L2CAP connection */
5233 bredr_chan->conn->mtu = hdev->block_mtu;
5234 l2cap_logical_cfm(bredr_chan, hchan, 0);
5235 hci_conn_hold(hcon);
5237 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event: find the AMP channel
 * by logical-link handle and tear it down with the reported reason.
 */
5241 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5242 struct sk_buff *skb)
5244 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5245 struct hci_chan *hchan;
5247 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5248 le16_to_cpu(ev->handle), ev->status);
/* Only AMP channels are handled here */
5255 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5256 if (!hchan || !hchan->amp)
5259 amp_destroy_logical_link(hchan, ev->reason);
5262 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: close the AMP-type
 * connection matching the physical handle and notify upper layers of
 * the disconnect reason.
 */
5265 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5266 struct sk_buff *skb)
5268 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5269 struct hci_conn *hcon;
5271 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5278 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* Guard against handle collisions with non-AMP link types */
5279 if (hcon && hcon->type == AMP_LINK) {
5280 hcon->state = BT_CLOSED;
5281 hci_disconn_cfm(hcon, ev->reason);
5285 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address pair on an LE connection.
 * The two top-level branches correspond to our role: as initiator we
 * are init_addr (preferring a controller-provided local RPA, then our
 * own RPA, then the identity address); as responder we are resp_addr
 * and additionally seed the default connection-interval bounds for
 * incoming connections.
 * NOTE(review): the elided lines include the role test that selects
 * between the two branches — confirm against the complete file.
 */
5289 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5290 u8 bdaddr_type, bdaddr_t *local_rpa)
/* Initiator branch: peer is the responder */
5293 conn->dst_type = bdaddr_type;
5294 conn->resp_addr_type = bdaddr_type;
5295 bacpy(&conn->resp_addr, bdaddr);
5297 /* Check if the controller has set a Local RPA then it must be
5298 * used instead or hdev->rpa.
5300 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5301 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5302 bacpy(&conn->init_addr, local_rpa);
5303 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5304 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5305 bacpy(&conn->init_addr, &conn->hdev->rpa);
/* No privacy: fall back to the controller's identity address */
5307 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5308 &conn->init_addr_type);
/* Responder branch: we answered advertising, so we are resp_addr */
5311 conn->resp_addr_type = conn->hdev->adv_addr_type;
5312 /* Check if the controller has set a Local RPA then it must be
5313 * used instead or hdev->rpa.
5315 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5316 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5317 bacpy(&conn->resp_addr, local_rpa);
5318 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5319 /* In case of ext adv, resp_addr will be updated in
5320 * Adv Terminated event.
5322 if (!ext_adv_capable(conn->hdev))
5323 bacpy(&conn->resp_addr,
5324 &conn->hdev->random_addr);
5326 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5329 conn->init_addr_type = bdaddr_type;
5330 bacpy(&conn->init_addr, bdaddr);
5332 /* For incoming connections, set the default minimum
5333 * and maximum connection interval. They will be used
5334 * to check if the parameters are in range and if not
5335 * trigger the connection update procedure.
5337 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5338 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common worker for the legacy and enhanced LE Connection Complete
 * events.  Finds or creates the hci_conn, resolves addresses (RPA and
 * IRK lookup, controller-based resolution types), applies the reject
 * list, registers the connection, optionally requests remote features,
 * and releases any pending auto-connect parameter entry.
 * NOTE(review): several guards (status check, role test, lookup
 * failure paths, goto labels) sit on elided lines — treat the flow
 * annotations below as a map, not the full control flow.
 */
5342 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5343 bdaddr_t *bdaddr, u8 bdaddr_type,
5344 bdaddr_t *local_rpa, u8 role, u16 handle,
5345 u16 interval, u16 latency,
5346 u16 supervision_timeout)
5348 struct hci_conn_params *params;
5349 struct hci_conn *conn;
5350 struct smp_irk *irk;
5355 /* All controllers implicitly stop advertising in the event of a
5356 * connection, so ensure that the state bit is cleared.
5358 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5360 conn = hci_lookup_le_connect(hdev);
/* No pending connection object: create one for this event */
5362 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5364 bt_dev_err(hdev, "no memory for new connection");
5368 conn->dst_type = bdaddr_type;
5370 /* If we didn't have a hci_conn object previously
5371 * but we're in central role this must be something
5372 * initiated using an accept list. Since accept list based
5373 * connections are not "first class citizens" we don't
5374 * have full tracking of them. Therefore, we go ahead
5375 * with a "best effort" approach of determining the
5376 * initiator address based on the HCI_PRIVACY flag.
5379 conn->resp_addr_type = bdaddr_type;
5380 bacpy(&conn->resp_addr, bdaddr);
5381 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5382 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5383 bacpy(&conn->init_addr, &hdev->rpa);
5385 hci_copy_identity_address(hdev,
5387 &conn->init_addr_type);
/* Connection arrived before the LE connect timeout fired */
5391 cancel_delayed_work(&conn->le_conn_timeout);
5394 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5396 /* Lookup the identity address from the stored connection
5397 * address and address type.
5399 * When establishing connections to an identity address, the
5400 * connection procedure will store the resolvable random
5401 * address first. Now if it can be converted back into the
5402 * identity address, start using the identity address from
5405 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5407 bacpy(&conn->dst, &irk->bdaddr);
5408 conn->dst_type = irk->addr_type;
5411 /* When using controller based address resolution, then the new
5412 * address types 0x02 and 0x03 are used. These types need to be
5413 * converted back into either public address or random address type
5415 if (use_ll_privacy(hdev) &&
5416 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5417 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5418 switch (conn->dst_type) {
5419 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5420 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5422 case ADDR_LE_DEV_RANDOM_RESOLVED:
5423 conn->dst_type = ADDR_LE_DEV_RANDOM;
/* Non-zero event status: record the failure and bail out */
5429 hci_le_conn_failed(conn, status);
5433 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5434 addr_type = BDADDR_LE_PUBLIC;
5436 addr_type = BDADDR_LE_RANDOM;
5438 /* Drop the connection if the device is blocked */
5439 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5440 hci_conn_drop(conn);
5444 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5445 mgmt_device_connected(hdev, conn, NULL, 0);
5447 conn->sec_level = BT_SECURITY_LOW;
5448 conn->handle = handle;
5449 conn->state = BT_CONFIG;
5451 /* Store current advertising instance as connection advertising instance
5452 * when sotfware rotation is in use so it can be re-enabled when
5455 if (!ext_adv_capable(hdev))
5456 conn->adv_instance = hdev->cur_adv_instance;
5458 conn->le_conn_interval = interval;
5459 conn->le_conn_latency = latency;
5460 conn->le_supv_timeout = supervision_timeout;
5462 hci_debugfs_create_conn(conn);
5463 hci_conn_add_sysfs(conn);
5465 /* The remote features procedure is defined for central
5466 * role only. So only in case of an initiated connection
5467 * request the remote features.
5469 * If the local controller supports peripheral-initiated features
5470 * exchange, then requesting the remote features in peripheral
5471 * role is possible. Otherwise just transition into the
5472 * connected state without requesting the remote features.
5475 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5476 struct hci_cp_le_read_remote_features cp;
5478 cp.handle = __cpu_to_le16(conn->handle);
5480 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5483 hci_conn_hold(conn);
/* No feature exchange possible: go straight to connected */
5485 conn->state = BT_CONNECTED;
5486 hci_connect_cfm(conn, status);
/* Release the pending auto-connect entry that triggered this link */
5489 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5492 list_del_init(&params->action);
5494 hci_conn_drop(params->conn);
5495 hci_conn_put(params->conn);
5496 params->conn = NULL;
5501 hci_update_background_scan(hdev);
5502 hci_dev_unlock(hdev);
/* LE Connection Complete event: thin wrapper that unpacks the legacy
 * event (no local RPA field) and defers to le_conn_complete_evt().
 */
5505 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5507 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5509 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5511 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5512 NULL, ev->role, le16_to_cpu(ev->handle),
5513 le16_to_cpu(ev->interval),
5514 le16_to_cpu(ev->latency),
5515 le16_to_cpu(ev->supervision_timeout));
/* LE Enhanced Connection Complete event: like the legacy variant but
 * the event carries the controller-chosen local RPA, which is passed
 * through to le_conn_complete_evt().  When controller-based address
 * resolution is active it is disabled again after connection setup.
 */
5518 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5519 struct sk_buff *skb)
5521 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5523 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5525 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5526 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5527 le16_to_cpu(ev->interval),
5528 le16_to_cpu(ev->latency),
5529 le16_to_cpu(ev->supervision_timeout));
5531 if (use_ll_privacy(hdev) &&
5532 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5533 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5534 hci_req_disable_address_resolution(hdev);
/* LE Advertising Set Terminated event: mark the advertising instance
 * disabled (or remove it on error status), and when the termination is
 * due to an incoming connection, bind the adv instance to that
 * connection and fix up its responder address.
 * NOTE(review): elided lines include the status test and the "if
 * (conn)" / adv-NULL guards — confirm against the complete file.
 */
5537 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5539 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5540 struct hci_conn *conn;
5541 struct adv_info *adv;
5543 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5545 adv = hci_find_adv_instance(hdev, ev->handle);
5551 /* Remove advertising as it has been terminated */
5552 hci_remove_adv_instance(hdev, ev->handle);
5553 mgmt_advertising_removed(NULL, hdev, ev->handle);
5559 adv->enabled = false;
5561 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5563 /* Store handle in the connection so the correct advertising
5564 * instance can be re-enabled when disconnected.
5566 conn->adv_instance = ev->handle;
/* resp_addr only needs patching when random addressing was used
 * and it has not been set yet */
5568 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5569 bacmp(&conn->resp_addr, BDADDR_ANY))
5573 bacpy(&conn->resp_addr, &hdev->random_addr);
5578 bacpy(&conn->resp_addr, &adv->random_addr);
/* LE Connection Update Complete event: on failure notify user space of
 * the failed update; on success store the new interval/latency/timeout
 * on the connection and report the updated parameters via mgmt.
 */
5582 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5583 struct sk_buff *skb)
5585 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5586 struct hci_conn *conn;
5588 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5595 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Failure path: drop the lock before calling out to mgmt */
5599 hci_dev_unlock(hdev);
5600 mgmt_le_conn_update_failed(hdev, &conn->dst,
5601 conn->type, conn->dst_type, ev->status);
/* Success: adopt the negotiated connection parameters */
5605 conn->le_conn_interval = le16_to_cpu(ev->interval);
5606 conn->le_conn_latency = le16_to_cpu(ev->latency);
5607 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5610 hci_dev_unlock(hdev);
5613 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
5614 conn->dst_type, conn->le_conn_interval,
5615 conn->le_conn_latency, conn->le_supv_timeout);
5619 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertisement should trigger an outgoing
 * LE connection attempt: only connectable adv types, non-blocked
 * devices, and addresses with a matching pend_le_conns entry (whose
 * auto_connect policy accepts this adv type) proceed.  Returns the
 * resulting hci_conn or NULL.
 */
5620 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5622 u8 addr_type, u8 adv_type,
5623 bdaddr_t *direct_rpa)
5625 struct hci_conn *conn;
5626 struct hci_conn_params *params;
5628 /* If the event is not connectable don't proceed further */
5629 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5632 /* Ignore if the device is blocked */
5633 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5636 /* Most controller will fail if we try to create new connections
5637 * while we have an existing one in peripheral role.
5639 if (hdev->conn_hash.le_num_peripheral > 0 &&
5640 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5641 !(hdev->le_states[3] & 0x10)))
5644 /* If we're not connectable only connect devices that we have in
5645 * our pend_le_conns list.
5647 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5652 if (!params->explicit_connect) {
5653 switch (params->auto_connect) {
5654 case HCI_AUTO_CONN_DIRECT:
5655 /* Only devices advertising with ADV_DIRECT_IND are
5656 * triggering a connection attempt. This is allowing
5657 * incoming connections from peripheral devices.
5659 if (adv_type != LE_ADV_DIRECT_IND)
5662 case HCI_AUTO_CONN_ALWAYS:
5663 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5664 * are triggering a connection attempt. This means
5665 * that incoming connections from peripheral device are
5666 * accepted and also outgoing connections to peripheral
5667 * devices are established when found.
5675 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5676 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5678 if (!IS_ERR(conn)) {
5679 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5680 * by higher layer that tried to connect, if no then
5681 * store the pointer since we don't really have any
5682 * other owner of the object besides the params that
5683 * triggered it. This way we can abort the connection if
5684 * the parameters get removed and keep the reference
5685 * count consistent once the connection is established.
5688 if (!params->explicit_connect)
5689 params->conn = hci_conn_get(conn);
5694 switch (PTR_ERR(conn)) {
5696 /* If hci_connect() returns -EBUSY it means there is already
5697 * an LE connection attempt going on. Since controllers don't
5698 * support more than one connection attempt at the time, we
5699 * don't consider this an error case.
5703 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Normalise and dispatch one LE advertising report: validate the PDU type
 * and (for legacy PDUs) the 31-byte data limit, trim zero-padded data,
 * verify directed-advertising RPAs against the local IRK, resolve the
 * advertiser's RPA to an identity address, kick off any pending LE
 * connection, and emit (or merge ADV + SCAN_RSP into) a mgmt device-found
 * event.
 *
 * NOTE(review): this listing elides short lines (returns, breaks, braces);
 * the leading numbers are original file line numbers, not code.
 */
5710 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5711 u8 bdaddr_type, bdaddr_t *direct_addr,
5712 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5715 struct discovery_state *d = &hdev->discovery;
5716 struct smp_irk *irk;
5717 struct hci_conn *conn;
/* switch on the advertising PDU type: only the known types below are
 * accepted; anything else is logged (rate-limited) and dropped.
 * The switch head and LE_ADV_IND case are elided from this listing.
 */
5724 case LE_ADV_DIRECT_IND:
5725 case LE_ADV_SCAN_IND:
5726 case LE_ADV_NONCONN_IND:
5727 case LE_ADV_SCAN_RSP:
5730 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5731 "type: 0x%02x", type);
/* Legacy (non-extended) advertising data is capped at 31 bytes by the
 * Core spec; larger reports are controller misbehaviour.
 */
5735 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5736 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5740 /* Find the end of the data in case the report contains padded zero
5741 * bytes at the end causing an invalid length value.
5743 * When data is NULL, len is 0 so there is no need for extra ptr
5744 * check as 'ptr < data + 0' is already false in such case.
/* Walk the AD structures (each is length byte + payload); stop at the
 * first zero length byte or truncated element.
 */
5746 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5747 if (ptr + 1 + *ptr > data + len)
5751 /* Adjust for actual length. This handles the case when remote
5752 * device is advertising with incorrect data length.
5756 /* If the direct address is present, then this report is from
5757 * a LE Direct Advertising Report event. In that case it is
5758 * important to see if the address is matching the local
5759 * controller address.
5762 /* Only resolvable random addresses are valid for these
5763 * kind of reports and others can be ignored.
5765 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5768 /* If the controller is not using resolvable random
5769 * addresses, then this report can be ignored.
5771 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5774 /* If the local IRK of the controller does not match
5775 * with the resolvable random address provided, then
5776 * this report can be ignored.
5778 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5782 /* Check if we need to convert to identity address */
5783 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5785 bdaddr = &irk->bdaddr;
5786 bdaddr_type = irk->addr_type;
5789 /* Check if we have been requested to connect to this device.
5791 * direct_addr is set only for directed advertising reports (it is NULL
5792 * for advertising reports) and is already verified to be RPA above.
5794 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
/* Cache the ADV_IND payload on the new connection so that
 * mgmt_device_connected can include it later (legacy PDUs only).
 */
5796 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5797 /* Store report for later inclusion by
5798 * mgmt_device_connected
5800 memcpy(conn->le_adv_data, data, len);
5801 conn->le_adv_data_len = len;
5804 /* Passive scanning shouldn't trigger any device found events,
5805 * except for devices marked as CONN_REPORT for which we do send
5806 * device found events, or advertisement monitoring requested.
5808 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5809 if (type == LE_ADV_DIRECT_IND)
5812 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5813 bdaddr, bdaddr_type) &&
5814 idr_is_empty(&hdev->adv_monitors_idr))
5817 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5818 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5821 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5822 rssi, flags, data, len, NULL, 0);
5826 /* When receiving non-connectable or scannable undirected
5827 * advertising reports, this means that the remote device is
5828 * not connectable and then clearly indicate this in the
5829 * device found event.
5831 * When receiving a scan response, then there is no way to
5832 * know if the remote device is connectable or not. However
5833 * since scan responses are merged with a previously seen
5834 * advertising report, the flags field from that report
5837 * In the really unlikely case that a controller get confused
5838 * and just sends a scan response event, then it is marked as
5839 * not connectable as well.
5841 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5842 type == LE_ADV_SCAN_RSP)
5843 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5847 /* If there's nothing pending either store the data from this
5848 * event or send an immediate device found event if the data
5849 * should not be stored for later.
5851 if (!ext_adv && !has_pending_adv_report(hdev)) {
5852 /* If the report will trigger a SCAN_REQ store it for
/* ...later merging with the expected SCAN_RSP (comment tail elided). */
5855 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5856 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5857 rssi, flags, data, len);
5861 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5862 rssi, flags, data, len, NULL, 0);
5866 /* Check if the pending report is for the same device as the new one */
5867 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5868 bdaddr_type == d->last_adv_addr_type);
5870 /* If the pending data doesn't match this report or this isn't a
5871 * scan response (e.g. we got a duplicate ADV_IND) then force
5872 * sending of the pending data.
5874 if (type != LE_ADV_SCAN_RSP || !match) {
5875 /* Send out whatever is in the cache, but skip duplicates */
5877 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5878 d->last_adv_addr_type, NULL,
5879 d->last_adv_rssi, d->last_adv_flags,
5881 d->last_adv_data_len, NULL, 0);
5883 /* If the new report will trigger a SCAN_REQ store it for
/* ...later merging (comment tail elided). */
5886 if (!ext_adv && (type == LE_ADV_IND ||
5887 type == LE_ADV_SCAN_IND)) {
5888 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5889 rssi, flags, data, len);
5893 /* The advertising reports cannot be merged, so clear
5894 * the pending report and send out a device found event.
5896 clear_pending_adv_report(hdev);
5897 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5898 rssi, flags, data, len, NULL, 0);
5902 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5903 * the new event is a SCAN_RSP. We can therefore proceed with
5904 * sending a merged device found event.
5906 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5907 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5908 d->last_adv_data, d->last_adv_data_len, data, len);
5909 clear_pending_adv_report(hdev);
/* Handle the LE Advertising Report meta event: iterate over the packed
 * reports in the skb, bounds-checking each header and payload before
 * forwarding to process_adv_report(). Note the layout: each report's
 * data is followed by one trailing RSSI byte, hence the "+ 1" stride.
 *
 * NOTE(review): listing elides short lines (braces, returns); leading
 * numbers are original file line numbers.
 */
5912 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5914 u8 num_reports = skb->data[0];
5915 void *ptr = &skb->data[1];
5919 while (num_reports--) {
5920 struct hci_ev_le_advertising_info *ev = ptr;
/* Reject reports whose fixed header would run past the skb. */
5923 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
5924 bt_dev_err(hdev, "Malicious advertising data.");
/* Accept only reports with spec-conformant length whose payload
 * (and the RSSI byte read just below) fits in the skb.
 */
5928 if (ev->length <= HCI_MAX_AD_LENGTH &&
5929 ev->data + ev->length <= skb_tail_pointer(skb)) {
/* RSSI is the single byte immediately after the AD data. */
5930 rssi = ev->data[ev->length];
5931 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5932 ev->bdaddr_type, NULL, 0, rssi,
5933 ev->data, ev->length, false);
5935 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past header + variable data + RSSI byte. */
5938 ptr += sizeof(*ev) + ev->length + 1;
5941 hci_dev_unlock(hdev);
/* Map an extended advertising report's 16-bit event-type bitfield to the
 * corresponding legacy PDU type so extended reports can share
 * process_adv_report() with legacy ones. Returns LE_ADV_INVALID for
 * combinations that have no legacy equivalent.
 *
 * NOTE(review): listing elides the inner switch head, several return/break
 * lines and closing braces; leading numbers are original file line numbers.
 */
5944 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
/* Legacy-PDU bit set: the low bits encode the exact legacy PDU. */
5946 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5948 case LE_LEGACY_ADV_IND:
5950 case LE_LEGACY_ADV_DIRECT_IND:
5951 return LE_ADV_DIRECT_IND;
5952 case LE_LEGACY_ADV_SCAN_IND:
5953 return LE_ADV_SCAN_IND;
5954 case LE_LEGACY_NONCONN_IND:
5955 return LE_ADV_NONCONN_IND;
5956 case LE_LEGACY_SCAN_RSP_ADV:
5957 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5958 return LE_ADV_SCAN_RSP;
/* Otherwise derive the closest legacy type from the property bits,
 * in priority order: connectable, scan response, scannable, then
 * non-connectable (possibly directed).
 */
5964 if (evt_type & LE_EXT_ADV_CONN_IND) {
5965 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5966 return LE_ADV_DIRECT_IND;
5971 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5972 return LE_ADV_SCAN_RSP;
5974 if (evt_type & LE_EXT_ADV_SCAN_IND)
5975 return LE_ADV_SCAN_IND;
5977 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5978 evt_type & LE_EXT_ADV_DIRECT_IND)
5979 return LE_ADV_NONCONN_IND;
5982 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
5985 return LE_ADV_INVALID;
/* Handle the LE Extended Advertising Report meta event: walk the packed
 * reports, translate each extended event type to its legacy equivalent,
 * and forward valid reports to process_adv_report(). Unlike the legacy
 * event, the RSSI is a struct field and there is no trailing RSSI byte.
 *
 * NOTE(review): listing elides short lines (braces); leading numbers are
 * original file line numbers.
 */
5988 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5990 u8 num_reports = skb->data[0];
5991 void *ptr = &skb->data[1];
5995 while (num_reports--) {
5996 struct hci_ev_le_ext_adv_report *ev = ptr;
6000 evt_type = __le16_to_cpu(ev->evt_type);
6001 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
/* Drop reports whose type has no legacy mapping. */
6002 if (legacy_evt_type != LE_ADV_INVALID) {
6003 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
6004 ev->bdaddr_type, NULL, 0, ev->rssi,
6005 ev->data, ev->length,
/* ext_adv flag: true only for non-legacy PDUs. */
6006 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
6009 ptr += sizeof(*ev) + ev->length;
6012 hci_dev_unlock(hdev);
/* Handle LE Read Remote Features Complete: cache the remote feature mask
 * on the connection and, if the connection is still in BT_CONFIG, move it
 * to BT_CONNECTED and confirm to upper layers — tolerating error 0x1a
 * (Unsupported Remote Feature) for peripheral-initiated exchanges.
 *
 * NOTE(review): listing elides short lines (braces, status reset); leading
 * numbers are original file line numbers.
 */
6015 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
6016 struct sk_buff *skb)
6018 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
6019 struct hci_conn *conn;
6021 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6025 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Page 0 of the remote feature mask is 8 bytes. */
6028 memcpy(conn->features[0], ev->features, 8);
6030 if (conn->state == BT_CONFIG) {
6033 /* If the local controller supports peripheral-initiated
6034 * features exchange, but the remote controller does
6035 * not, then it is possible that the error code 0x1a
6036 * for unsupported remote feature gets returned.
6038 * In this specific case, allow the connection to
6039 * transition into connected state and mark it as
/* ...successful (comment tail elided). */
6042 if (!conn->out && ev->status == 0x1a &&
6043 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6046 status = ev->status;
6048 conn->state = BT_CONNECTED;
6049 hci_connect_cfm(conn, status);
/* Drop the reference taken while the exchange was pending. */
6050 hci_conn_drop(conn);
6054 hci_dev_unlock(hdev);
/* Handle the LE Long Term Key Request meta event: look up a matching LTK
 * for the connection, validate EDiv/Rand (both must be zero for Secure
 * Connections keys), reply with the key, and discard one-time STKs after
 * use. Falls through to a negative reply when no usable key exists.
 *
 * NOTE(review): listing elides goto targets, braces and returns; leading
 * numbers are original file line numbers.
 */
6057 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
6059 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
6060 struct hci_cp_le_ltk_reply cp;
6061 struct hci_cp_le_ltk_neg_reply neg;
6062 struct hci_conn *conn;
6063 struct smp_ltk *ltk;
6065 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
6069 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6073 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6077 if (smp_ltk_is_sc(ltk)) {
6078 /* With SC both EDiv and Rand are set to zero */
6079 if (ev->ediv || ev->rand)
6082 /* For non-SC keys check that EDiv and Rand match */
6083 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key, zero-padding beyond the negotiated key size. */
6087 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6088 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6089 cp.handle = cpu_to_le16(conn->handle);
6091 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6093 conn->enc_key_size = ltk->enc_size;
6095 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6097 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6098 * temporary key used to encrypt a connection following
6099 * pairing. It is used during the Encrypted Session Setup to
6100 * distribute the keys. Later, security can be re-established
6101 * using a distributed LTK.
6103 if (ltk->type == SMP_STK) {
6104 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
/* NOTE(review): "<k->list" is an encoding artifact of "&ltk->list"
 * (an '&' was mangled during extraction) — the call should read
 * list_del_rcu(&ltk->list);
 */
6105 list_del_rcu(<k->list);
6106 kfree_rcu(ltk, rcu);
6108 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6111 hci_dev_unlock(hdev);
/* not_found path (label elided): send a negative LTK reply. */
6116 neg.handle = ev->handle;
6117 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6118 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for the
 * given handle. NOTE(review): this listing elides the 'reason' parameter
 * line and the 'cp.reason = reason;' assignment (original lines 6122/6127
 * area) — confirm against the full source; leading numbers are original
 * file line numbers.
 */
6121 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6124 struct hci_cp_le_conn_param_req_neg_reply cp;
6126 cp.handle = cpu_to_le16(handle);
6129 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the LE Remote Connection Parameter Request meta event: reject
 * unknown handles and out-of-range parameters with a negative reply;
 * otherwise, when acting as central, remember the requested parameters,
 * notify mgmt, and accept the request with a positive reply.
 *
 * NOTE(review): listing elides braces, the store_hint assignment and the
 * hci_dev_lock call; leading numbers are original file line numbers.
 */
6133 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
6134 struct sk_buff *skb)
6136 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
6137 struct hci_cp_le_conn_param_req_reply cp;
6138 struct hci_conn *hcon;
6139 u16 handle, min, max, latency, timeout;
6141 handle = le16_to_cpu(ev->handle);
6142 min = le16_to_cpu(ev->interval_min);
6143 max = le16_to_cpu(ev->interval_max);
6144 latency = le16_to_cpu(ev->latency);
6145 timeout = le16_to_cpu(ev->timeout);
6147 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6148 if (!hcon || hcon->state != BT_CONNECTED)
6149 return send_conn_param_neg_reply(hdev, handle,
6150 HCI_ERROR_UNKNOWN_CONN_ID);
/* hci_check_conn_params() returns non-zero for invalid parameters. */
6152 if (hci_check_conn_params(min, max, latency, timeout))
6153 return send_conn_param_neg_reply(hdev, handle,
6154 HCI_ERROR_INVALID_LL_PARAMS);
/* Only the central stores the peer's preferred parameters. */
6156 if (hcon->role == HCI_ROLE_MASTER) {
6157 struct hci_conn_params *params;
6162 params = hci_conn_params_lookup(hdev, &hcon->dst,
6165 params->conn_min_interval = min;
6166 params->conn_max_interval = max;
6167 params->conn_latency = latency;
6168 params->supervision_timeout = timeout;
6174 hci_dev_unlock(hdev);
6176 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6177 store_hint, min, max, latency, timeout);
/* Echo the requested (still little-endian) values back in the reply. */
6180 cp.handle = ev->handle;
6181 cp.interval_min = ev->interval_min;
6182 cp.interval_max = ev->interval_max;
6183 cp.latency = ev->latency;
6184 cp.timeout = ev->timeout;
6188 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle the LE Direct Advertising Report meta event: validate that the
 * claimed report count fits in the skb (fixed-size entries, no variable
 * data), then forward each report to process_adv_report() with the
 * direct (target) address set so RPA/IRK checks run there.
 *
 * NOTE(review): listing elides the early return and braces; leading
 * numbers are original file line numbers.
 */
6191 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
6192 struct sk_buff *skb)
6194 u8 num_reports = skb->data[0];
6195 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
/* Bounds check: count byte + num_reports fixed-size entries. */
6197 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6202 for (; num_reports; num_reports--, ev++)
6203 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6204 ev->bdaddr_type, &ev->direct_addr,
/* Direct reports carry no AD data, hence NULL/0. */
6205 ev->direct_addr_type, ev->rssi, NULL, 0,
6208 hci_dev_unlock(hdev);
/* Handle LE PHY Update Complete: on success, record the newly negotiated
 * TX and RX PHYs on the connection.
 *
 * NOTE(review): listing elides the status check and braces; leading
 * numbers are original file line numbers.
 */
6211 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6213 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6214 struct hci_conn *conn;
6216 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6223 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6227 conn->le_tx_phy = ev->tx_phy;
6228 conn->le_rx_phy = ev->rx_phy;
6231 hci_dev_unlock(hdev);
/* Dispatch an LE Meta event: strip the meta header from the skb and route
 * the payload to the subevent-specific handler. Each handler receives the
 * skb positioned at its own payload.
 *
 * NOTE(review): listing elides break statements and braces; leading
 * numbers are original file line numbers.
 */
6234 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
6236 struct hci_ev_le_meta *le_ev = (void *) skb->data;
6238 skb_pull(skb, sizeof(*le_ev));
6240 switch (le_ev->subevent) {
6241 case HCI_EV_LE_CONN_COMPLETE:
6242 hci_le_conn_complete_evt(hdev, skb);
6245 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6246 hci_le_conn_update_complete_evt(hdev, skb);
6249 case HCI_EV_LE_ADVERTISING_REPORT:
6250 hci_le_adv_report_evt(hdev, skb);
6253 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6254 hci_le_remote_feat_complete_evt(hdev, skb);
6257 case HCI_EV_LE_LTK_REQ:
6258 hci_le_ltk_request_evt(hdev, skb);
6261 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6262 hci_le_remote_conn_param_req_evt(hdev, skb);
6265 case HCI_EV_LE_DIRECT_ADV_REPORT:
6266 hci_le_direct_adv_report_evt(hdev, skb);
6269 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6270 hci_le_phy_update_evt(hdev, skb);
6273 case HCI_EV_LE_EXT_ADV_REPORT:
6274 hci_le_ext_adv_report_evt(hdev, skb);
6277 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6278 hci_le_enh_conn_complete_evt(hdev, skb);
6281 case HCI_EV_LE_EXT_ADV_SET_TERM:
6282 hci_le_ext_adv_term_evt(hdev, skb);
/* Check whether the given skb carries the Command Complete event for
 * 'opcode' (or, when a specific 'event' was awaited, that event). Pulls
 * the event header — and the cmd_complete header when applicable — off
 * the skb so the caller's completion callback sees only the parameters.
 * Returns true when the skb completes the request.
 *
 * NOTE(review): listing elides return statements and braces; leading
 * numbers are original file line numbers.
 */
6290 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6291 u8 event, struct sk_buff *skb)
6293 struct hci_ev_cmd_complete *ev;
6294 struct hci_event_hdr *hdr;
6299 if (skb->len < sizeof(*hdr)) {
6300 bt_dev_err(hdev, "too short HCI event");
6304 hdr = (void *) skb->data;
6305 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* When a specific event was requested, only that event completes. */
6308 if (hdr->evt != event)
6313 /* Check if request ended in Command Status - no way to retrieve
6314 * any extra parameters in this case.
6316 if (hdr->evt == HCI_EV_CMD_STATUS)
6319 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6320 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6325 if (skb->len < sizeof(*ev)) {
6326 bt_dev_err(hdev, "too short cmd_complete event");
6330 ev = (void *) skb->data;
6331 skb_pull(skb, sizeof(*ev));
/* A different opcode means this completes some other command. */
6333 if (opcode != __le16_to_cpu(ev->opcode)) {
6334 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6335 __le16_to_cpu(ev->opcode));
6342 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6343 struct sk_buff *skb)
6345 struct hci_ev_le_advertising_info *adv;
6346 struct hci_ev_le_direct_adv_info *direct_adv;
6347 struct hci_ev_le_ext_adv_report *ext_adv;
6348 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6349 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6353 /* If we are currently suspended and this is the first BT event seen,
6354 * save the wake reason associated with the event.
6356 if (!hdev->suspended || hdev->wake_reason)
6359 /* Default to remote wake. Values for wake_reason are documented in the
6360 * Bluez mgmt api docs.
6362 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6364 /* Once configured for remote wakeup, we should only wake up for
6365 * reconnections. It's useful to see which device is waking us up so
6366 * keep track of the bdaddr of the connection event that woke us up.
6368 if (event == HCI_EV_CONN_REQUEST) {
6369 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6370 hdev->wake_addr_type = BDADDR_BREDR;
6371 } else if (event == HCI_EV_CONN_COMPLETE) {
6372 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6373 hdev->wake_addr_type = BDADDR_BREDR;
6374 } else if (event == HCI_EV_LE_META) {
6375 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6376 u8 subevent = le_ev->subevent;
6377 u8 *ptr = &skb->data[sizeof(*le_ev)];
6378 u8 num_reports = *ptr;
6380 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6381 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6382 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6384 adv = (void *)(ptr + 1);
6385 direct_adv = (void *)(ptr + 1);
6386 ext_adv = (void *)(ptr + 1);
6389 case HCI_EV_LE_ADVERTISING_REPORT:
6390 bacpy(&hdev->wake_addr, &adv->bdaddr);
6391 hdev->wake_addr_type = adv->bdaddr_type;
6393 case HCI_EV_LE_DIRECT_ADV_REPORT:
6394 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6395 hdev->wake_addr_type = direct_adv->bdaddr_type;
6397 case HCI_EV_LE_EXT_ADV_REPORT:
6398 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6399 hdev->wake_addr_type = ext_adv->bdaddr_type;
6404 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6408 hci_dev_unlock(hdev);
6411 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6413 struct hci_event_hdr *hdr = (void *) skb->data;
6414 hci_req_complete_t req_complete = NULL;
6415 hci_req_complete_skb_t req_complete_skb = NULL;
6416 struct sk_buff *orig_skb = NULL;
6417 u8 status = 0, event = hdr->evt, req_evt = 0;
6418 u16 opcode = HCI_OP_NOP;
6421 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6425 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6426 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6427 opcode = __le16_to_cpu(cmd_hdr->opcode);
6428 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6433 /* If it looks like we might end up having to call
6434 * req_complete_skb, store a pristine copy of the skb since the
6435 * various handlers may modify the original one through
6436 * skb_pull() calls, etc.
6438 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6439 event == HCI_EV_CMD_COMPLETE)
6440 orig_skb = skb_clone(skb, GFP_KERNEL);
6442 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6444 /* Store wake reason if we're suspended */
6445 hci_store_wake_reason(hdev, event, skb);
6448 case HCI_EV_INQUIRY_COMPLETE:
6449 hci_inquiry_complete_evt(hdev, skb);
6452 case HCI_EV_INQUIRY_RESULT:
6453 hci_inquiry_result_evt(hdev, skb);
6456 case HCI_EV_CONN_COMPLETE:
6457 hci_conn_complete_evt(hdev, skb);
6460 case HCI_EV_CONN_REQUEST:
6461 hci_conn_request_evt(hdev, skb);
6464 case HCI_EV_DISCONN_COMPLETE:
6465 hci_disconn_complete_evt(hdev, skb);
6468 case HCI_EV_AUTH_COMPLETE:
6469 hci_auth_complete_evt(hdev, skb);
6472 case HCI_EV_REMOTE_NAME:
6473 hci_remote_name_evt(hdev, skb);
6476 case HCI_EV_ENCRYPT_CHANGE:
6477 hci_encrypt_change_evt(hdev, skb);
6480 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6481 hci_change_link_key_complete_evt(hdev, skb);
6484 case HCI_EV_REMOTE_FEATURES:
6485 hci_remote_features_evt(hdev, skb);
6488 case HCI_EV_CMD_COMPLETE:
6489 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6490 &req_complete, &req_complete_skb);
6493 case HCI_EV_CMD_STATUS:
6494 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6498 case HCI_EV_HARDWARE_ERROR:
6499 hci_hardware_error_evt(hdev, skb);
6502 case HCI_EV_ROLE_CHANGE:
6503 hci_role_change_evt(hdev, skb);
6506 case HCI_EV_NUM_COMP_PKTS:
6507 hci_num_comp_pkts_evt(hdev, skb);
6510 case HCI_EV_MODE_CHANGE:
6511 hci_mode_change_evt(hdev, skb);
6514 case HCI_EV_PIN_CODE_REQ:
6515 hci_pin_code_request_evt(hdev, skb);
6518 case HCI_EV_LINK_KEY_REQ:
6519 hci_link_key_request_evt(hdev, skb);
6522 case HCI_EV_LINK_KEY_NOTIFY:
6523 hci_link_key_notify_evt(hdev, skb);
6526 case HCI_EV_CLOCK_OFFSET:
6527 hci_clock_offset_evt(hdev, skb);
6530 case HCI_EV_PKT_TYPE_CHANGE:
6531 hci_pkt_type_change_evt(hdev, skb);
6534 case HCI_EV_PSCAN_REP_MODE:
6535 hci_pscan_rep_mode_evt(hdev, skb);
6538 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6539 hci_inquiry_result_with_rssi_evt(hdev, skb);
6542 case HCI_EV_REMOTE_EXT_FEATURES:
6543 hci_remote_ext_features_evt(hdev, skb);
6546 case HCI_EV_SYNC_CONN_COMPLETE:
6547 hci_sync_conn_complete_evt(hdev, skb);
6550 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6551 hci_extended_inquiry_result_evt(hdev, skb);
6554 case HCI_EV_KEY_REFRESH_COMPLETE:
6555 hci_key_refresh_complete_evt(hdev, skb);
6558 case HCI_EV_IO_CAPA_REQUEST:
6559 hci_io_capa_request_evt(hdev, skb);
6562 case HCI_EV_IO_CAPA_REPLY:
6563 hci_io_capa_reply_evt(hdev, skb);
6566 case HCI_EV_USER_CONFIRM_REQUEST:
6567 hci_user_confirm_request_evt(hdev, skb);
6570 case HCI_EV_USER_PASSKEY_REQUEST:
6571 hci_user_passkey_request_evt(hdev, skb);
6574 case HCI_EV_USER_PASSKEY_NOTIFY:
6575 hci_user_passkey_notify_evt(hdev, skb);
6578 case HCI_EV_KEYPRESS_NOTIFY:
6579 hci_keypress_notify_evt(hdev, skb);
6582 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6583 hci_simple_pair_complete_evt(hdev, skb);
6586 case HCI_EV_REMOTE_HOST_FEATURES:
6587 hci_remote_host_features_evt(hdev, skb);
6590 case HCI_EV_LE_META:
6591 hci_le_meta_evt(hdev, skb);
6594 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6595 hci_remote_oob_data_request_evt(hdev, skb);
6598 #if IS_ENABLED(CONFIG_BT_HS)
6599 case HCI_EV_CHANNEL_SELECTED:
6600 hci_chan_selected_evt(hdev, skb);
6603 case HCI_EV_PHY_LINK_COMPLETE:
6604 hci_phy_link_complete_evt(hdev, skb);
6607 case HCI_EV_LOGICAL_LINK_COMPLETE:
6608 hci_loglink_complete_evt(hdev, skb);
6611 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6612 hci_disconn_loglink_complete_evt(hdev, skb);
6615 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6616 hci_disconn_phylink_complete_evt(hdev, skb);
6620 case HCI_EV_NUM_COMP_BLOCKS:
6621 hci_num_comp_blocks_evt(hdev, skb);
6625 case HCI_EV_VENDOR_SPECIFIC:
6626 hci_vendor_specific_evt(hdev, skb);
6630 msft_vendor_evt(hdev, skb);
6635 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6640 req_complete(hdev, status, opcode);
6641 } else if (req_complete_skb) {
6642 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6643 kfree_skb(orig_skb);
6646 req_complete_skb(hdev, status, opcode, orig_skb);
6650 kfree_skb(orig_skb);
6652 hdev->stat.evt_rx++;