/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
/* 128-bit all-zero value, used to detect and reject blank link keys */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Convert whole seconds to jiffies via the millisecond helper */
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45 /* Handle HCI Event packets */
47 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%2.2x", hdev->name, status);
54 /* It is possible that we receive Inquiry Complete event right
55 * before we receive Inquiry Cancel Command Complete event, in
56 * which case the latter event should have status of Command
57 * Disallowed (0x0c). This should not be treated as error, since
58 * we actually achieve what Inquiry Cancel wants to achieve,
59 * which is to end the last Inquiry session.
61 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
62 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
73 wake_up_bit(&hdev->flags, HCI_INQUIRY);
76 /* Set discovery state to stopped if we're not doing LE active
79 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
80 hdev->le_scan_type != LE_SCAN_ACTIVE)
81 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
84 hci_conn_check_pending(hdev);
87 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 __u8 status = *((__u8 *) skb->data);
91 BT_DBG("%s status 0x%2.2x", hdev->name, status);
96 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
99 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 __u8 status = *((__u8 *) skb->data);
103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
108 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 hci_conn_check_pending(hdev);
113 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
116 BT_DBG("%s", hdev->name);
119 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 struct hci_rp_role_discovery *rp = (void *) skb->data;
122 struct hci_conn *conn;
124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 conn->role = rp->role;
135 hci_dev_unlock(hdev);
138 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 struct hci_rp_read_link_policy *rp = (void *) skb->data;
141 struct hci_conn *conn;
143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = __le16_to_cpu(rp->policy);
154 hci_dev_unlock(hdev);
157 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_write_link_policy *rp = (void *) skb->data;
160 struct hci_conn *conn;
163 struct hci_cp_write_link_policy cp;
164 struct hci_conn *sco_conn;
167 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
172 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
178 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180 conn->link_policy = get_unaligned_le16(sent + 2);
183 sco_conn = hci_conn_hash_lookup_sco(hdev);
184 if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
185 conn->link_policy & HCI_LP_SNIFF) {
186 BT_ERR("SNIFF is not allowed during sco connection");
187 cp.handle = __cpu_to_le16(conn->handle);
188 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
189 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
193 hci_dev_unlock(hdev);
196 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
199 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
201 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
206 hdev->link_policy = __le16_to_cpu(rp->policy);
209 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
212 __u8 status = *((__u8 *) skb->data);
215 BT_DBG("%s status 0x%2.2x", hdev->name, status);
220 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
224 hdev->link_policy = get_unaligned_le16(sent);
227 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
229 __u8 status = *((__u8 *) skb->data);
231 BT_DBG("%s status 0x%2.2x", hdev->name, status);
233 clear_bit(HCI_RESET, &hdev->flags);
238 /* Reset all non-persistent flags */
239 hci_dev_clear_volatile_flags(hdev);
241 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
243 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
244 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
246 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
247 hdev->adv_data_len = 0;
249 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
250 hdev->scan_rsp_data_len = 0;
252 hdev->le_scan_type = LE_SCAN_PASSIVE;
254 hdev->ssp_debug_mode = 0;
256 hci_bdaddr_list_clear(&hdev->le_accept_list);
257 hci_bdaddr_list_clear(&hdev->le_resolv_list);
260 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
263 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
264 struct hci_cp_read_stored_link_key *sent;
266 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
272 if (!rp->status && sent->read_all == 0x01) {
273 hdev->stored_max_keys = rp->max_keys;
274 hdev->stored_num_keys = rp->num_keys;
278 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
281 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
283 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
288 if (rp->num_keys <= hdev->stored_num_keys)
289 hdev->stored_num_keys -= rp->num_keys;
291 hdev->stored_num_keys = 0;
294 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
296 __u8 status = *((__u8 *) skb->data);
299 BT_DBG("%s status 0x%2.2x", hdev->name, status);
301 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
307 if (hci_dev_test_flag(hdev, HCI_MGMT))
308 mgmt_set_local_name_complete(hdev, sent, status);
310 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
312 hci_dev_unlock(hdev);
315 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
317 struct hci_rp_read_local_name *rp = (void *) skb->data;
319 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
324 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
325 hci_dev_test_flag(hdev, HCI_CONFIG))
326 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
329 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
331 __u8 status = *((__u8 *) skb->data);
334 BT_DBG("%s status 0x%2.2x", hdev->name, status);
336 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
343 __u8 param = *((__u8 *) sent);
345 if (param == AUTH_ENABLED)
346 set_bit(HCI_AUTH, &hdev->flags);
348 clear_bit(HCI_AUTH, &hdev->flags);
351 if (hci_dev_test_flag(hdev, HCI_MGMT))
352 mgmt_auth_enable_complete(hdev, status);
354 hci_dev_unlock(hdev);
357 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
359 __u8 status = *((__u8 *) skb->data);
363 BT_DBG("%s status 0x%2.2x", hdev->name, status);
368 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
372 param = *((__u8 *) sent);
375 set_bit(HCI_ENCRYPT, &hdev->flags);
377 clear_bit(HCI_ENCRYPT, &hdev->flags);
380 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
382 __u8 status = *((__u8 *) skb->data);
386 BT_DBG("%s status 0x%2.2x", hdev->name, status);
388 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
392 param = *((__u8 *) sent);
397 hdev->discov_timeout = 0;
401 if (param & SCAN_INQUIRY)
402 set_bit(HCI_ISCAN, &hdev->flags);
404 clear_bit(HCI_ISCAN, &hdev->flags);
406 if (param & SCAN_PAGE)
407 set_bit(HCI_PSCAN, &hdev->flags);
409 clear_bit(HCI_PSCAN, &hdev->flags);
412 hci_dev_unlock(hdev);
415 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
417 __u8 status = *((__u8 *)skb->data);
418 struct hci_cp_set_event_filter *cp;
421 BT_DBG("%s status 0x%2.2x", hdev->name, status);
426 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
430 cp = (struct hci_cp_set_event_filter *)sent;
432 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
433 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
435 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
438 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
440 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
442 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
447 memcpy(hdev->dev_class, rp->dev_class, 3);
449 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
450 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
453 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
455 __u8 status = *((__u8 *) skb->data);
458 BT_DBG("%s status 0x%2.2x", hdev->name, status);
460 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
467 memcpy(hdev->dev_class, sent, 3);
469 if (hci_dev_test_flag(hdev, HCI_MGMT))
470 mgmt_set_class_of_dev_complete(hdev, sent, status);
472 hci_dev_unlock(hdev);
475 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
477 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
480 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
485 setting = __le16_to_cpu(rp->voice_setting);
487 if (hdev->voice_setting == setting)
490 hdev->voice_setting = setting;
492 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
495 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
498 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
501 __u8 status = *((__u8 *) skb->data);
505 BT_DBG("%s status 0x%2.2x", hdev->name, status);
510 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
514 setting = get_unaligned_le16(sent);
516 if (hdev->voice_setting == setting)
519 hdev->voice_setting = setting;
521 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
524 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
527 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
530 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
532 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
537 hdev->num_iac = rp->num_iac;
539 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
542 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
544 __u8 status = *((__u8 *) skb->data);
545 struct hci_cp_write_ssp_mode *sent;
547 BT_DBG("%s status 0x%2.2x", hdev->name, status);
549 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
557 hdev->features[1][0] |= LMP_HOST_SSP;
559 hdev->features[1][0] &= ~LMP_HOST_SSP;
562 if (hci_dev_test_flag(hdev, HCI_MGMT))
563 mgmt_ssp_enable_complete(hdev, sent->mode, status);
566 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
568 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
571 hci_dev_unlock(hdev);
574 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
576 u8 status = *((u8 *) skb->data);
577 struct hci_cp_write_sc_support *sent;
579 BT_DBG("%s status 0x%2.2x", hdev->name, status);
581 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
589 hdev->features[1][0] |= LMP_HOST_SC;
591 hdev->features[1][0] &= ~LMP_HOST_SC;
594 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
596 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
598 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
601 hci_dev_unlock(hdev);
604 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
606 struct hci_rp_read_local_version *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
614 hci_dev_test_flag(hdev, HCI_CONFIG)) {
615 hdev->hci_ver = rp->hci_ver;
616 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
617 hdev->lmp_ver = rp->lmp_ver;
618 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
619 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
623 static void hci_cc_read_local_commands(struct hci_dev *hdev,
626 struct hci_rp_read_local_commands *rp = (void *) skb->data;
628 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
634 hci_dev_test_flag(hdev, HCI_CONFIG))
635 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
638 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
641 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
642 struct hci_conn *conn;
644 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
653 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
655 hci_dev_unlock(hdev);
658 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
661 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
662 struct hci_conn *conn;
665 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
670 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
676 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
678 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
680 hci_dev_unlock(hdev);
683 static void hci_cc_read_local_features(struct hci_dev *hdev,
686 struct hci_rp_read_local_features *rp = (void *) skb->data;
688 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
693 memcpy(hdev->features, rp->features, 8);
695 /* Adjust default settings according to features
696 * supported by device. */
698 if (hdev->features[0][0] & LMP_3SLOT)
699 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
701 if (hdev->features[0][0] & LMP_5SLOT)
702 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
704 if (hdev->features[0][1] & LMP_HV2) {
705 hdev->pkt_type |= (HCI_HV2);
706 hdev->esco_type |= (ESCO_HV2);
709 if (hdev->features[0][1] & LMP_HV3) {
710 hdev->pkt_type |= (HCI_HV3);
711 hdev->esco_type |= (ESCO_HV3);
714 if (lmp_esco_capable(hdev))
715 hdev->esco_type |= (ESCO_EV3);
717 if (hdev->features[0][4] & LMP_EV4)
718 hdev->esco_type |= (ESCO_EV4);
720 if (hdev->features[0][4] & LMP_EV5)
721 hdev->esco_type |= (ESCO_EV5);
723 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
724 hdev->esco_type |= (ESCO_2EV3);
726 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
727 hdev->esco_type |= (ESCO_3EV3);
729 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
730 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
733 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
736 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 if (hdev->max_page < rp->max_page)
744 hdev->max_page = rp->max_page;
746 if (rp->page < HCI_MAX_PAGES)
747 memcpy(hdev->features[rp->page], rp->features, 8);
750 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
753 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
755 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
760 hdev->flow_ctl_mode = rp->mode;
763 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
765 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
767 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
772 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
773 hdev->sco_mtu = rp->sco_mtu;
774 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
775 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
777 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
782 hdev->acl_cnt = hdev->acl_pkts;
783 hdev->sco_cnt = hdev->sco_pkts;
785 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
786 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
789 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
791 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
793 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798 if (test_bit(HCI_INIT, &hdev->flags))
799 bacpy(&hdev->bdaddr, &rp->bdaddr);
801 if (hci_dev_test_flag(hdev, HCI_SETUP))
802 bacpy(&hdev->setup_addr, &rp->bdaddr);
805 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
808 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
816 hci_dev_test_flag(hdev, HCI_CONFIG)) {
817 hdev->pairing_opts = rp->pairing_opts;
818 hdev->max_enc_key_size = rp->max_key_size;
822 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
825 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832 if (test_bit(HCI_INIT, &hdev->flags)) {
833 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
834 hdev->page_scan_window = __le16_to_cpu(rp->window);
838 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
841 u8 status = *((u8 *) skb->data);
842 struct hci_cp_write_page_scan_activity *sent;
844 BT_DBG("%s status 0x%2.2x", hdev->name, status);
849 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
853 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
854 hdev->page_scan_window = __le16_to_cpu(sent->window);
857 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
860 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
862 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
867 if (test_bit(HCI_INIT, &hdev->flags))
868 hdev->page_scan_type = rp->type;
871 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
874 u8 status = *((u8 *) skb->data);
877 BT_DBG("%s status 0x%2.2x", hdev->name, status);
882 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
884 hdev->page_scan_type = *type;
887 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
890 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
892 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
897 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
898 hdev->block_len = __le16_to_cpu(rp->block_len);
899 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
901 hdev->block_cnt = hdev->num_blocks;
903 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
904 hdev->block_cnt, hdev->block_len);
907 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
909 struct hci_rp_read_clock *rp = (void *) skb->data;
910 struct hci_cp_read_clock *cp;
911 struct hci_conn *conn;
913 BT_DBG("%s", hdev->name);
915 if (skb->len < sizeof(*rp))
923 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
927 if (cp->which == 0x00) {
928 hdev->clock = le32_to_cpu(rp->clock);
932 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
934 conn->clock = le32_to_cpu(rp->clock);
935 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
939 hci_dev_unlock(hdev);
942 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
945 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
952 hdev->amp_status = rp->amp_status;
953 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
954 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
955 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
956 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
957 hdev->amp_type = rp->amp_type;
958 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
959 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
960 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
961 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
964 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
967 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
969 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
974 hdev->inq_tx_power = rp->tx_power;
977 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
980 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
987 hdev->err_data_reporting = rp->err_data_reporting;
990 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
993 __u8 status = *((__u8 *)skb->data);
994 struct hci_cp_write_def_err_data_reporting *cp;
996 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1001 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1005 hdev->err_data_reporting = cp->err_data_reporting;
1008 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
1010 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
1011 struct hci_cp_pin_code_reply *cp;
1012 struct hci_conn *conn;
1014 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1018 if (hci_dev_test_flag(hdev, HCI_MGMT))
1019 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1024 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1028 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1030 conn->pin_length = cp->pin_len;
1033 hci_dev_unlock(hdev);
1036 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1038 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1040 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1044 if (hci_dev_test_flag(hdev, HCI_MGMT))
1045 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1048 hci_dev_unlock(hdev);
1051 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1052 struct sk_buff *skb)
1054 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1056 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1062 hdev->le_pkts = rp->le_max_pkt;
1064 hdev->le_cnt = hdev->le_pkts;
1066 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1069 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1070 struct sk_buff *skb)
1072 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1074 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1079 memcpy(hdev->le_features, rp->features, 8);
1082 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1083 struct sk_buff *skb)
1085 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1087 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1092 hdev->adv_tx_power = rp->tx_power;
1095 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1097 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1099 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1103 if (hci_dev_test_flag(hdev, HCI_MGMT))
1104 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1107 hci_dev_unlock(hdev);
1110 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1111 struct sk_buff *skb)
1113 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1119 if (hci_dev_test_flag(hdev, HCI_MGMT))
1120 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1121 ACL_LINK, 0, rp->status);
1123 hci_dev_unlock(hdev);
1126 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1128 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1134 if (hci_dev_test_flag(hdev, HCI_MGMT))
1135 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1138 hci_dev_unlock(hdev);
1141 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1142 struct sk_buff *skb)
1144 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1146 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1150 if (hci_dev_test_flag(hdev, HCI_MGMT))
1151 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1152 ACL_LINK, 0, rp->status);
1154 hci_dev_unlock(hdev);
1157 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1158 struct sk_buff *skb)
1160 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1165 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1166 struct sk_buff *skb)
1168 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1170 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1173 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1175 __u8 status = *((__u8 *) skb->data);
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1183 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1189 bacpy(&hdev->random_addr, sent);
1191 if (!bacmp(&hdev->rpa, sent)) {
1192 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1193 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1194 secs_to_jiffies(hdev->rpa_timeout));
1197 hci_dev_unlock(hdev);
1200 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1202 __u8 status = *((__u8 *) skb->data);
1203 struct hci_cp_le_set_default_phy *cp;
1205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1210 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1216 hdev->le_tx_def_phys = cp->tx_phys;
1217 hdev->le_rx_def_phys = cp->rx_phys;
1219 hci_dev_unlock(hdev);
1222 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1223 struct sk_buff *skb)
1225 __u8 status = *((__u8 *) skb->data);
1226 struct hci_cp_le_set_adv_set_rand_addr *cp;
1227 struct adv_info *adv;
1232 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1233 /* Update only in case the adv instance since handle 0x00 shall be using
1234 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1235 * non-extended adverting.
1237 if (!cp || !cp->handle)
1242 adv = hci_find_adv_instance(hdev, cp->handle);
1244 bacpy(&adv->random_addr, &cp->bdaddr);
1245 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1246 adv->rpa_expired = false;
1247 queue_delayed_work(hdev->workqueue,
1248 &adv->rpa_expired_cb,
1249 secs_to_jiffies(hdev->rpa_timeout));
1253 hci_dev_unlock(hdev);
1256 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1257 struct sk_buff *skb)
1259 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1261 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1266 hdev->min_le_tx_power = rp->min_le_tx_power;
1267 hdev->max_le_tx_power = rp->max_le_tx_power;
1270 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1272 __u8 *sent, status = *((__u8 *) skb->data);
1274 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1279 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1285 /* If we're doing connection initiation as peripheral. Set a
1286 * timeout in case something goes wrong.
1289 struct hci_conn *conn;
1291 hci_dev_set_flag(hdev, HCI_LE_ADV);
1293 conn = hci_lookup_le_connect(hdev);
1295 queue_delayed_work(hdev->workqueue,
1296 &conn->le_conn_timeout,
1297 conn->conn_timeout);
1299 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1302 hci_dev_unlock(hdev);
1305 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1306 struct sk_buff *skb)
1308 struct hci_cp_le_set_ext_adv_enable *cp;
1309 struct hci_cp_ext_adv_set *set;
1310 __u8 status = *((__u8 *) skb->data);
1311 struct adv_info *adv = NULL, *n;
1313 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1318 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1322 set = (void *)cp->data;
1326 if (cp->num_of_sets)
1327 adv = hci_find_adv_instance(hdev, set->handle);
1330 struct hci_conn *conn;
1332 hci_dev_set_flag(hdev, HCI_LE_ADV);
1335 adv->enabled = true;
1337 conn = hci_lookup_le_connect(hdev);
1339 queue_delayed_work(hdev->workqueue,
1340 &conn->le_conn_timeout,
1341 conn->conn_timeout);
1343 if (cp->num_of_sets) {
1345 adv->enabled = false;
1347 /* If just one instance was disabled check if there are
1348 * any other instance enabled before clearing HCI_LE_ADV
1350 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1356 /* All instances shall be considered disabled */
1357 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1359 adv->enabled = false;
1362 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1366 hci_dev_unlock(hdev);
1369 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1371 struct hci_cp_le_set_scan_param *cp;
1372 __u8 status = *((__u8 *) skb->data);
1374 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1379 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1385 hdev->le_scan_type = cp->type;
1387 hci_dev_unlock(hdev);
1390 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1391 struct sk_buff *skb)
1393 struct hci_cp_le_set_ext_scan_params *cp;
1394 __u8 status = *((__u8 *) skb->data);
1395 struct hci_cp_le_scan_phy_params *phy_param;
1397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1402 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1406 phy_param = (void *)cp->data;
1410 hdev->le_scan_type = phy_param->type;
1412 hci_dev_unlock(hdev);
1415 static bool has_pending_adv_report(struct hci_dev *hdev)
1417 struct discovery_state *d = &hdev->discovery;
1419 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1422 static void clear_pending_adv_report(struct hci_dev *hdev)
1424 struct discovery_state *d = &hdev->discovery;
1426 bacpy(&d->last_adv_addr, BDADDR_ANY);
1427 d->last_adv_data_len = 0;
1431 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1432 u8 bdaddr_type, s8 rssi, u32 flags,
1435 struct discovery_state *d = &hdev->discovery;
1437 if (len > HCI_MAX_AD_LENGTH)
1440 bacpy(&d->last_adv_addr, bdaddr);
1441 d->last_adv_addr_type = bdaddr_type;
1442 d->last_adv_rssi = rssi;
1443 d->last_adv_flags = flags;
1444 memcpy(d->last_adv_data, data, len);
1445 d->last_adv_data_len = len;
/* Common completion handler for the legacy and extended LE Set Scan
 * Enable commands.  Maintains the HCI_LE_SCAN device flag, flushes any
 * pending advertising report to mgmt when scanning stops, and updates
 * the discovery state machine.  The enclosing switch (enable) and some
 * braces are elided in this listing.
 */
1449 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1454 case LE_SCAN_ENABLE:
1455 hci_dev_set_flag(hdev, HCI_LE_SCAN);
/* Active scanning merges ADV_IND + SCAN_RSP, so drop any half-built report. */
1456 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1457 clear_pending_adv_report(hdev);
1460 case LE_SCAN_DISABLE:
1461 /* We do this here instead of when setting DISCOVERY_STOPPED
1462 * since the latter would potentially require waiting for
1463 * inquiry to stop too.
1465 if (has_pending_adv_report(hdev)) {
1466 struct discovery_state *d = &hdev->discovery;
1468 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1469 d->last_adv_addr_type, NULL,
1470 d->last_adv_rssi, d->last_adv_flags,
1472 d->last_adv_data_len, NULL, 0);
1475 /* Cancel this timer so that we don't try to disable scanning
1476 * when it's already disabled.
1478 cancel_delayed_work(&hdev->le_scan_disable);
1480 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1482 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1483 * interrupted scanning due to a connect request. Mark
1484 * therefore discovery as stopped. If this was not
1485 * because of a connect request advertising might have
1486 * been disabled because of active scanning, so
1487 * re-enable it again if necessary.
1489 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
/* Tizen patch: route through the LE-specific discovery state setter. */
1490 #ifndef TIZEN_BT /* The below line is kernel bug. */
1491 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1493 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1495 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1496 hdev->discovery.state == DISCOVERY_FINDING)
1497 hci_req_reenable_advertising(hdev);
/* default case of the switch: reserved parameter value. */
1502 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1507 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE: recover the
 * sent command parameters and delegate to le_set_scan_enable_complete().
 * The status/NULL early returns are elided in this listing.
 */
1510 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1511 struct sk_buff *skb)
1513 struct hci_cp_le_set_scan_enable *cp;
1514 __u8 status = *((__u8 *) skb->data);
1516 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1521 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1525 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE — the
 * extended-scanning counterpart of hci_cc_le_set_scan_enable(); both
 * funnel into le_set_scan_enable_complete().
 */
1528 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1529 struct sk_buff *skb)
1531 struct hci_cp_le_set_ext_scan_enable *cp;
1532 __u8 status = *((__u8 *) skb->data);
1534 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1539 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1543 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for LE Read Number of Supported Advertising
 * Sets: record the controller's advertising-set capacity in hdev.
 */
1546 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1547 struct sk_buff *skb)
1549 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1551 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1557 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete handler for LE Read Accept (Filter Accept) List
 * Size: cache the controller's accept-list capacity.
 */
1560 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1561 struct sk_buff *skb)
1563 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1565 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1570 hdev->le_accept_list_size = rp->size;
/* Command Complete handler for LE Clear Accept List: mirror the
 * controller's now-empty filter accept list in the host's copy.
 */
1573 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1574 struct sk_buff *skb)
1576 __u8 status = *((__u8 *) skb->data);
1578 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1584 hci_bdaddr_list_clear(&hdev->le_accept_list);
1585 hci_dev_unlock(hdev);
/* Command Complete handler for LE Add Device To Accept List: on
 * success, add the address from the sent command to the host's shadow
 * of the controller's filter accept list.
 */
1588 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1589 struct sk_buff *skb)
1591 struct hci_cp_le_add_to_accept_list *sent;
1592 __u8 status = *((__u8 *) skb->data);
1594 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1599 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1604 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1606 hci_dev_unlock(hdev);
/* Command Complete handler for LE Remove Device From Accept List:
 * remove the address from the host's shadow of the filter accept list.
 */
1609 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1610 struct sk_buff *skb)
1612 struct hci_cp_le_del_from_accept_list *sent;
1613 __u8 status = *((__u8 *) skb->data);
1615 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1620 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1625 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1627 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Supported States: copy the
 * 8-byte LE state-combination bitmask into hdev->le_states.
 */
1630 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1631 struct sk_buff *skb)
1633 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1635 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1640 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete handler for LE Read Suggested Default Data Length:
 * cache the default TX octets/time (little-endian on the wire).
 */
1643 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1644 struct sk_buff *skb)
1646 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1653 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1654 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete handler for LE Write Suggested Default Data Length:
 * on success, adopt the values we sent as the new cached defaults.
 */
1657 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1658 struct sk_buff *skb)
1660 struct hci_cp_le_write_def_data_len *sent;
1661 __u8 status = *((__u8 *) skb->data);
1663 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1668 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1672 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1673 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete handler for LE Add Device To Resolving List: mirror
 * the new entry (address, type and peer IRK) in the host's copy of the
 * controller's resolving list.
 */
1676 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1677 struct sk_buff *skb)
1679 struct hci_cp_le_add_to_resolv_list *sent;
1680 __u8 status = *((__u8 *) skb->data);
1682 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1687 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1692 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1693 sent->bdaddr_type, sent->peer_irk,
1695 hci_dev_unlock(hdev);
/* Command Complete handler for LE Remove Device From Resolving List:
 * drop the matching entry from the host's shadow resolving list.
 */
1698 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1699 struct sk_buff *skb)
1701 struct hci_cp_le_del_from_resolv_list *sent;
1702 __u8 status = *((__u8 *) skb->data);
1704 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1709 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1714 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1716 hci_dev_unlock(hdev);
/* Command Complete handler for LE Clear Resolving List: empty the
 * host's shadow of the controller's resolving list.
 */
1719 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1720 struct sk_buff *skb)
1722 __u8 status = *((__u8 *) skb->data);
1724 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1730 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1731 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Resolving List Size: cache the
 * controller's resolving-list capacity.
 */
1734 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1735 struct sk_buff *skb)
1737 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1739 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1744 hdev->le_resolv_list_size = rp->size;
/* Command Complete handler for LE Set Address Resolution Enable: track
 * whether controller-based RPA resolution is active via the
 * HCI_LL_RPA_RESOLUTION flag.  The if/else testing the sent parameter
 * value is elided in this listing; presumably *sent selects which
 * branch runs — confirm against the full source.
 */
1747 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1748 struct sk_buff *skb)
1750 __u8 *sent, status = *((__u8 *) skb->data);
1752 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1757 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1764 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1766 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1768 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Maximum Data Length: cache the
 * controller's TX/RX octet and time maxima for data length extension.
 */
1771 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1772 struct sk_buff *skb)
1774 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1776 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1781 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1782 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1783 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1784 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete handler for Write LE Host Supported: keep the local
 * extended-features page 1 bits (LE host support, simultaneous
 * LE+BR/EDR) and the HCI_LE_ENABLED/HCI_ADVERTISING flags in sync with
 * what was written.  The conditions on sent->le / sent->simul are
 * elided in this listing.
 */
1787 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1788 struct sk_buff *skb)
1790 struct hci_cp_write_le_host_supported *sent;
1791 __u8 status = *((__u8 *) skb->data);
1793 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1798 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
/* LE host support enabled branch. */
1805 hdev->features[1][0] |= LMP_HOST_LE;
1806 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* LE host support disabled branch — advertising cannot stay on. */
1808 hdev->features[1][0] &= ~LMP_HOST_LE;
1809 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1810 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1814 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1816 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1818 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Parameters: remember
 * which own-address type the controller will advertise with.
 */
1821 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1823 struct hci_cp_le_set_adv_param *cp;
1824 u8 status = *((u8 *) skb->data);
1826 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1831 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1836 hdev->adv_addr_type = cp->own_address_type;
1837 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Advertising Parameters:
 * record the own-address type and the TX power the controller selected,
 * both globally (instance 0) and on the matching adv_info instance,
 * then refresh the advertising data now that TX power is known.
 */
1840 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1842 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1843 struct hci_cp_le_set_ext_adv_params *cp;
1844 struct adv_info *adv_instance;
1846 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1851 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1856 hdev->adv_addr_type = cp->own_addr_type;
1858 /* Store in hdev for instance 0 */
1859 hdev->adv_tx_power = rp->tx_power;
/* NOTE(review): the NULL-check on adv_instance is elided in this listing. */
1861 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1863 adv_instance->tx_power = rp->tx_power;
1865 /* Update adv data as tx power is known now */
1866 hci_req_update_adv_data(hdev, cp->handle);
1868 hci_dev_unlock(hdev);
/* Tizen vendor extension: Command Complete handler for the Enable RSSI
 * monitoring vendor command — forwards the response to mgmt.
 */
1872 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1873 struct sk_buff *skb)
1875 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1877 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1878 hdev->name, rp->status, rp->le_ext_opcode);
1880 mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Tizen vendor extension: Command Complete handler for the Get Raw
 * RSSI vendor command — relays connection handle and dBm value to mgmt.
 */
1883 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1884 struct sk_buff *skb)
1886 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1888 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1889 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1891 mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Tizen vendor extension: handle an RSSI link-alert vendor event and
 * forward connection handle plus alert type to mgmt.
 */
1894 static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
1895 struct sk_buff *skb)
1897 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
1899 BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);
1901 mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
/* Tizen vendor extension: second-level dispatcher for LE vendor group
 * events — strips the group header and routes on the ext sub-code.
 * NOTE(review): ev->event_le_ext_sub_code is read after skb_pull();
 * that is only safe because ev still points at the old data — the
 * ordering mirrors hci_vendor_specific_evt() below.
 */
1905 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
1906 struct sk_buff *skb)
1908 struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
1909 __u8 event_le_ext_sub_code;
1911 BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
1912 LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
1914 skb_pull(skb, sizeof(*ev));
1915 event_le_ext_sub_code = ev->event_le_ext_sub_code;
1917 switch (event_le_ext_sub_code) {
1918 case LE_RSSI_LINK_ALERT:
1919 hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
/* Tizen vendor extension: handle a multi-advertising state-change
 * vendor event and forward instance, reason and handle to mgmt.
 */
1927 static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
1928 struct sk_buff *skb)
1930 struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;
1932 BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");
1934 mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
1935 ev->state_change_reason,
1936 ev->connection_handle);
/* Tizen vendor extension: top-level dispatcher for HCI vendor-specific
 * events — strips the vendor header and routes on the sub-code to the
 * group-extension or multi-adv handlers above.
 */
1939 static void hci_vendor_specific_evt(struct hci_dev *hdev, struct sk_buff *skb)
1941 struct hci_ev_vendor_specific *ev = (void *)skb->data;
1942 __u8 event_sub_code;
1944 BT_DBG("hci_vendor_specific_evt");
1946 skb_pull(skb, sizeof(*ev));
1947 event_sub_code = ev->event_sub_code;
1949 switch (event_sub_code) {
1950 case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
1951 hci_vendor_specific_group_ext_evt(hdev, skb);
1954 case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
1955 hci_vendor_multi_adv_state_change_evt(hdev, skb);
/* Command Complete handler for Read RSSI: store the reported RSSI on
 * the matching connection (conn NULL-check elided in this listing).
 */
1964 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1966 struct hci_rp_read_rssi *rp = (void *) skb->data;
1967 struct hci_conn *conn;
1969 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1976 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1978 conn->rssi = rp->rssi;
1980 hci_dev_unlock(hdev);
/* Command Complete handler for Read Transmit Power Level: store the
 * reported value as either the current or the maximum TX power of the
 * connection, depending on the level type we asked for.  The case
 * labels of the switch are elided in this listing — presumably 0x00 is
 * current and 0x01 maximum, per the HCI spec; confirm in full source.
 */
1983 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1985 struct hci_cp_read_tx_power *sent;
1986 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1987 struct hci_conn *conn;
1989 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1994 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2000 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2004 switch (sent->type) {
2006 conn->tx_power = rp->tx_power;
2009 conn->max_tx_power = rp->tx_power;
2014 hci_dev_unlock(hdev);
/* Command Complete handler for Write SSP Debug Mode: cache the mode
 * byte that was written so debugfs/state queries reflect it.
 */
2017 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
2019 u8 status = *((u8 *) skb->data);
2022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2027 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2029 hdev->ssp_debug_mode = *mode;
/* Command Status handler for Inquiry: kick any pending connection
 * attempts and, on success (status check elided in this listing), mark
 * the device as actively inquiring.
 */
2032 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2034 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2037 hci_conn_check_pending(hdev);
2041 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for Create Connection (BR/EDR ACL).  On
 * failure: status 0x0c (Command Disallowed) with attempts left moves
 * the connection back to BT_CONNECT2 for a retry, otherwise the
 * connection is closed and callers are notified.  On success with no
 * existing hci_conn, one is created for the outgoing attempt.
 */
2044 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2046 struct hci_cp_create_conn *cp;
2047 struct hci_conn *conn;
2049 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2051 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2057 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2059 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
/* Failure path (enclosing status check elided in this listing). */
2062 if (conn && conn->state == BT_CONNECT) {
2063 if (status != 0x0c || conn->attempt > 2) {
2064 conn->state = BT_CLOSED;
2065 hci_connect_cfm(conn, status);
2068 conn->state = BT_CONNECT2;
/* Success path: allocate a conn object if none was tracked yet. */
2072 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2075 bt_dev_err(hdev, "no memory for new connection");
2079 hci_dev_unlock(hdev);
/* Command Status handler for Add SCO Connection: on failure, locate
 * the ACL carrying the pending SCO link (lookup of sco from acl->link
 * is elided in this listing), close it and notify callers.
 */
2082 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2084 struct hci_cp_add_sco *cp;
2085 struct hci_conn *acl, *sco;
2088 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2093 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2097 handle = __le16_to_cpu(cp->handle);
2099 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2103 acl = hci_conn_hash_lookup_handle(hdev, handle);
2107 sco->state = BT_CLOSED;
2109 hci_connect_cfm(sco, status);
2114 hci_dev_unlock(hdev);
/* Command Status handler for Authentication Requested: on failure
 * during connection setup (BT_CONFIG), report the error to callers and
 * drop the reference held for the request.
 */
2117 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2119 struct hci_cp_auth_requested *cp;
2120 struct hci_conn *conn;
2122 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2127 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2135 if (conn->state == BT_CONFIG) {
2136 hci_connect_cfm(conn, status);
2137 hci_conn_drop(conn);
2141 hci_dev_unlock(hdev);
/* Command Status handler for Set Connection Encryption: mirrors
 * hci_cs_auth_requested() — on failure during BT_CONFIG, notify
 * callers and drop the request reference.
 */
2144 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2146 struct hci_cp_set_conn_encrypt *cp;
2147 struct hci_conn *conn;
2149 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2154 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2160 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2162 if (conn->state == BT_CONFIG) {
2163 hci_connect_cfm(conn, status);
2164 hci_conn_drop(conn);
2168 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication
 * before it can be reported up.  Returns non-zero (elided) when auth
 * should be requested; SDP-level security and non-SSP links without
 * MITM or medium/high security requirements are exempt.
 */
2171 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2172 struct hci_conn *conn)
2174 if (conn->state != BT_CONFIG || !conn->out)
2177 if (conn->pending_sec_level == BT_SECURITY_SDP)
2180 /* Only request authentication for SSP connections or non-SSP
2181 * devices with sec_level MEDIUM or HIGH or if MITM protection
2184 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2185 conn->pending_sec_level != BT_SECURITY_FIPS &&
2186 conn->pending_sec_level != BT_SECURITY_HIGH &&
2187 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan parameters and clock offset learned during inquiry.
 * Returns the hci_send_cmd() result (0 on success).
 */
2193 static int hci_resolve_name(struct hci_dev *hdev,
2194 struct inquiry_entry *e)
2196 struct hci_cp_remote_name_req cp;
2198 memset(&cp, 0, sizeof(cp));
2200 bacpy(&cp.bdaddr, &e->data.bdaddr);
2201 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2202 cp.pscan_mode = e->data.pscan_mode;
2203 cp.clock_offset = e->data.clock_offset;
2205 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Start resolving the next NAME_NEEDED entry on the discovery resolve
 * list.  Returns true if a name request was successfully sent (that
 * entry moves to NAME_PENDING), false when nothing is left to resolve.
 */
2208 static bool hci_resolve_next_name(struct hci_dev *hdev)
2210 struct discovery_state *discov = &hdev->discovery;
2211 struct inquiry_entry *e;
2213 if (list_empty(&discov->resolve))
2216 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2220 if (hci_resolve_name(hdev, e) == 0) {
2221 e->name_state = NAME_PENDING;
/* Process the outcome of a remote-name lookup: update mgmt's view of
 * the device (connected state and/or name), then drive the discovery
 * state machine — mark the cache entry NAME_KNOWN/NAME_NOT_KNOWN and
 * either resolve the next pending name or finish discovery.  Several
 * guard lines and the discov_complete label are elided in this listing.
 */
2228 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2229 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2231 struct discovery_state *discov = &hdev->discovery;
2232 struct inquiry_entry *e;
2235 /* Update the mgmt connected state if necessary. Be careful with
2236 * conn objects that exist but are not (yet) connected however.
2237 * Only those in BT_CONFIG or BT_CONNECTED states can be
2238 * considered connected.
2241 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2242 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2243 mgmt_device_connected(hdev, conn, 0, name, name_len);
2245 mgmt_device_name_update(hdev, bdaddr, name, name_len);
/* NOTE(review): this second connected-state block appears to be an
 * alternate (patched) variant of the one above; the surrounding
 * preprocessor conditionals are elided — confirm in full source. */
2249 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2250 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2251 mgmt_device_connected(hdev, conn, name, name_len);
2254 if (discov->state == DISCOVERY_STOPPED)
2257 if (discov->state == DISCOVERY_STOPPING)
2258 goto discov_complete;
2260 if (discov->state != DISCOVERY_RESOLVING)
2263 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2264 /* If the device was not found in a list of found devices names of which
2265 * are pending. there is no need to continue resolving a next name as it
2266 * will be done upon receiving another Remote Name Request Complete
2273 e->name_state = NAME_KNOWN;
2274 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2275 e->data.rssi, name, name_len);
2277 e->name_state = NAME_NOT_KNOWN;
2280 if (hci_resolve_next_name(hdev))
2284 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for Remote Name Request.  On failure, let
 * hci_check_pending_name() clean up mgmt/discovery state; then, if the
 * connection still needs authentication (see hci_outgoing_auth_needed),
 * kick off an Authentication Requested command.
 */
2287 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2289 struct hci_cp_remote_name_req *cp;
2290 struct hci_conn *conn;
2292 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2294 /* If successful wait for the name req complete event before
2295 * checking for the need to do authentication */
2299 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2305 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2307 if (hci_dev_test_flag(hdev, HCI_MGMT))
2308 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2313 if (!hci_outgoing_auth_needed(hdev, conn))
2316 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2317 struct hci_cp_auth_requested auth_cp;
2319 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2321 auth_cp.handle = __cpu_to_le16(conn->handle);
2322 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2323 sizeof(auth_cp), &auth_cp);
2327 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Supported Features: on
 * failure during BT_CONFIG, notify callers and drop the reference.
 */
2330 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2332 struct hci_cp_read_remote_features *cp;
2333 struct hci_conn *conn;
2335 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2340 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2346 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2348 if (conn->state == BT_CONFIG) {
2349 hci_connect_cfm(conn, status);
2350 hci_conn_drop(conn);
2354 hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Extended Features: same
 * failure handling as hci_cs_read_remote_features().
 */
2357 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2359 struct hci_cp_read_remote_ext_features *cp;
2360 struct hci_conn *conn;
2362 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2367 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2373 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2375 if (conn->state == BT_CONFIG) {
2376 hci_connect_cfm(conn, status);
2377 hci_conn_drop(conn);
2381 hci_dev_unlock(hdev);
/* Command Status handler for Setup Synchronous Connection: on failure,
 * find the SCO/eSCO link hanging off the ACL (derivation of sco from
 * acl is elided in this listing), close it and notify callers.
 */
2384 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2386 struct hci_cp_setup_sync_conn *cp;
2387 struct hci_conn *acl, *sco;
2390 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2395 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2399 handle = __le16_to_cpu(cp->handle);
2401 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2405 acl = hci_conn_hash_lookup_handle(hdev, handle);
2409 sco->state = BT_CLOSED;
2411 hci_connect_cfm(sco, status);
2416 hci_dev_unlock(hdev);
/* Command Status handler for Sniff Mode: on failure, clear the pending
 * mode-change flag and, if a SCO setup was waiting on the mode change,
 * let it proceed with the error status.
 */
2419 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2421 struct hci_cp_sniff_mode *cp;
2422 struct hci_conn *conn;
2424 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2429 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2435 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2437 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2439 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2440 hci_sco_setup(conn, status);
2443 hci_dev_unlock(hdev);
/* Command Status handler for Exit Sniff Mode: identical failure
 * handling to hci_cs_sniff_mode().
 */
2446 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2448 struct hci_cp_exit_sniff_mode *cp;
2449 struct hci_conn *conn;
2451 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2456 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2462 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2464 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2466 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2467 hci_sco_setup(conn, status);
2470 hci_dev_unlock(hdev);
/* Command Status handler for Disconnect.  On failure, report the
 * failed disconnect to mgmt and — because the upper layer does not
 * retry — perform basic cleanup, including re-enabling advertising for
 * LE links that had parked it on their instance.
 */
2473 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2475 struct hci_cp_disconnect *cp;
2476 struct hci_conn *conn;
2481 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2487 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2489 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2490 conn->dst_type, status);
2492 if (conn->type == LE_LINK) {
2493 hdev->cur_adv_instance = conn->adv_instance;
2494 hci_req_reenable_advertising(hdev);
2497 /* If the disconnection failed for any reason, the upper layer
2498 * does not retry to disconnect in current implementation.
2499 * Hence, we need to do some basic cleanup here and re-enable
2500 * advertising if necessary.
2505 hci_dev_unlock(hdev);
/* Common Command Status bookkeeping for LE (Extended) Create
 * Connection: record the initiator/responder address info that SMP
 * will later need, normalising controller-resolved address types back
 * to public/random, and arm the LE connection timeout for directed
 * (non-accept-list) connection attempts.
 */
2508 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2509 u8 peer_addr_type, u8 own_address_type,
2512 struct hci_conn *conn;
2514 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2519 /* When using controller based address resolution, then the new
2520 * address types 0x02 and 0x03 are used. These types need to be
2521 * converted back into either public address or random address type
2523 if (use_ll_privacy(hdev) &&
2524 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2525 switch (own_address_type) {
2526 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2527 own_address_type = ADDR_LE_DEV_PUBLIC;
2529 case ADDR_LE_DEV_RANDOM_RESOLVED:
2530 own_address_type = ADDR_LE_DEV_RANDOM;
2535 /* Store the initiator and responder address information which
2536 * is needed for SMP. These values will not change during the
2537 * lifetime of the connection.
2539 conn->init_addr_type = own_address_type;
2540 if (own_address_type == ADDR_LE_DEV_RANDOM)
2541 bacpy(&conn->init_addr, &hdev->random_addr);
2543 bacpy(&conn->init_addr, &hdev->bdaddr);
2545 conn->resp_addr_type = peer_addr_type;
2546 bacpy(&conn->resp_addr, peer_addr);
2548 /* We don't want the connection attempt to stick around
2549 * indefinitely since LE doesn't have a page timeout concept
2550 * like BR/EDR. Set a timer for any connection that doesn't use
2551 * the accept list for connecting.
2553 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2554 queue_delayed_work(conn->hdev->workqueue,
2555 &conn->le_conn_timeout,
2556 conn->conn_timeout);
/* Command Status handler for LE Create Connection: on success, record
 * address/timeout bookkeeping via cs_le_create_conn().  Failures are
 * fully handled elsewhere (hci_le_conn_failed via request callbacks).
 */
2559 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2561 struct hci_cp_le_create_conn *cp;
2563 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2565 /* All connection failure handling is taken care of by the
2566 * hci_le_conn_failed function which is triggered by the HCI
2567 * request completion callbacks used for connecting.
2572 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2578 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2579 cp->own_address_type, cp->filter_policy);
2581 hci_dev_unlock(hdev);
/* Command Status handler for LE Extended Create Connection — the
 * extended-command twin of hci_cs_le_create_conn(); note the field is
 * own_addr_type here versus own_address_type in the legacy command.
 */
2584 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2586 struct hci_cp_le_ext_create_conn *cp;
2588 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2590 /* All connection failure handling is taken care of by the
2591 * hci_le_conn_failed function which is triggered by the HCI
2592 * request completion callbacks used for connecting.
2597 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2603 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2604 cp->own_addr_type, cp->filter_policy);
2606 hci_dev_unlock(hdev);
/* Command Status handler for LE Read Remote Features: on failure
 * during BT_CONFIG, notify callers and drop the request reference.
 */
2609 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2611 struct hci_cp_le_read_remote_features *cp;
2612 struct hci_conn *conn;
2614 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2619 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2625 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2627 if (conn->state == BT_CONFIG) {
2628 hci_connect_cfm(conn, status);
2629 hci_conn_drop(conn);
2633 hci_dev_unlock(hdev);
/* Command Status handler for LE Start Encryption: if the command was
 * rejected while the link is still up, treat it as an authentication
 * failure — disconnect the link and drop the reference.
 */
2636 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2638 struct hci_cp_le_start_enc *cp;
2639 struct hci_conn *conn;
2641 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2648 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2652 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2656 if (conn->state != BT_CONNECTED)
2659 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2660 hci_conn_drop(conn);
2663 hci_dev_unlock(hdev);
/* Command Status handler for Switch Role: on failure, clear the
 * pending role-switch flag so future switch attempts aren't blocked.
 */
2666 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2668 struct hci_cp_switch_role *cp;
2669 struct hci_conn *conn;
2671 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2676 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2682 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2684 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2686 hci_dev_unlock(hdev);
/* Handle the Inquiry Complete event: clear HCI_INQUIRY and wake any
 * waiters, then drive mgmt discovery — either start resolving pending
 * names or, when nothing is left (and no simultaneous LE scan is still
 * running), mark discovery stopped.
 */
2689 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2691 __u8 status = *((__u8 *) skb->data);
2692 struct discovery_state *discov = &hdev->discovery;
2693 struct inquiry_entry *e;
2695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2697 hci_conn_check_pending(hdev);
2699 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2702 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2703 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2705 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2710 if (discov->state != DISCOVERY_FINDING)
2713 if (list_empty(&discov->resolve)) {
2714 /* When BR/EDR inquiry is active and no LE scanning is in
2715 * progress, then change discovery state to indicate completion.
2717 * When running LE scanning and BR/EDR inquiry simultaneously
2718 * and the LE scan already finished, then change the discovery
2719 * state to indicate completion.
2721 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2722 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2723 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2727 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2728 if (e && hci_resolve_name(hdev, e) == 0) {
2729 e->name_state = NAME_PENDING;
2730 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2732 /* When BR/EDR inquiry is active and no LE scanning is in
2733 * progress, then change discovery state to indicate completion.
2735 * When running LE scanning and BR/EDR inquiry simultaneously
2736 * and the LE scan already finished, then change the discovery
2737 * state to indicate completion.
2739 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2740 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2741 hci_discovery_set_state(hdev, DISCOVERY_STOPPED)
2745 hci_dev_unlock(hdev);
/* Handle the (standard) Inquiry Result event: validate the response
 * count against the skb length, then for each inquiry_info record
 * update the inquiry cache and notify mgmt of the found device.
 * Standard inquiry results carry no RSSI, hence HCI_RSSI_INVALID.
 */
2748 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2750 struct inquiry_data data;
2751 struct inquiry_info *info = (void *) (skb->data + 1);
2752 int num_rsp = *((__u8 *) skb->data);
2754 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Guard against truncated or empty event payloads. */
2756 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
/* Periodic inquiry results are not forwarded to mgmt. */
2759 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2764 for (; num_rsp; num_rsp--, info++) {
2767 bacpy(&data.bdaddr, &info->bdaddr);
2768 data.pscan_rep_mode = info->pscan_rep_mode;
2769 data.pscan_period_mode = info->pscan_period_mode;
2770 data.pscan_mode = info->pscan_mode;
2771 memcpy(data.dev_class, info->dev_class, 3);
2772 data.clock_offset = info->clock_offset;
2773 data.rssi = HCI_RSSI_INVALID;
2774 data.ssp_mode = 0x00;
2776 flags = hci_inquiry_cache_update(hdev, &data, false);
2778 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2779 info->dev_class, HCI_RSSI_INVALID,
2780 flags, NULL, 0, NULL, 0);
2783 hci_dev_unlock(hdev);
/* Handle the Connection Complete event for BR/EDR ACL and SCO links.
 * Locates (or, for allow-listed auto-connects, creates) the hci_conn,
 * then on success assigns the handle, moves ACLs to BT_CONFIG and
 * kicks off Read Remote Features, while SCO links go straight to
 * BT_CONNECTED; on failure, the connection is closed and mgmt is
 * informed.  Several guard lines, braces and success/failure branch
 * markers are elided in this listing.
 */
2786 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2788 struct hci_ev_conn_complete *ev = (void *) skb->data;
2789 struct hci_conn *conn;
2791 BT_DBG("%s", hdev->name);
2795 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2797 /* Connection may not exist if auto-connected. Check the bredr
2798 * allowlist to see if this device is allowed to auto connect.
2799 * If link is an ACL type, create a connection class
2802 * Auto-connect will only occur if the event filter is
2803 * programmed with a given address. Right now, event filter is
2804 * only used during suspend.
2806 if (ev->link_type == ACL_LINK &&
2807 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2810 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2813 bt_dev_err(hdev, "no memory for new conn")
2817 if (ev->link_type != SCO_LINK)
/* Some controllers report an eSCO request completing as SCO; fix up. */
2820 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2825 conn->type = SCO_LINK;
/* Success path (ev->status check elided in this listing). */
2830 conn->handle = __le16_to_cpu(ev->handle);
2832 if (conn->type == ACL_LINK) {
2833 conn->state = BT_CONFIG;
2834 hci_conn_hold(conn);
/* Keep unauthenticated incoming links alive long enough to pair. */
2836 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2837 !hci_find_link_key(hdev, &ev->bdaddr))
2838 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2840 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2842 conn->state = BT_CONNECTED;
2844 hci_debugfs_create_conn(conn);
2845 hci_conn_add_sysfs(conn);
2847 if (test_bit(HCI_AUTH, &hdev->flags))
2848 set_bit(HCI_CONN_AUTH, &conn->flags);
2850 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2851 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2853 /* Get remote features */
2854 if (conn->type == ACL_LINK) {
2855 struct hci_cp_read_remote_features cp;
2856 cp.handle = ev->handle;
2857 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2860 hci_req_update_scan(hdev);
2863 /* Set packet type for incoming connection */
2864 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2865 struct hci_cp_change_conn_ptype cp;
2866 cp.handle = ev->handle;
2867 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2868 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Tizen addition: enforce a supervision timeout when we are central. */
2872 if (get_link_mode(conn) & HCI_LM_MASTER)
2873 hci_conn_change_supervision_timeout(conn,
2874 LINK_SUPERVISION_TIMEOUT);
/* Failure path. */
2877 conn->state = BT_CLOSED;
2878 if (conn->type == ACL_LINK)
2879 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2880 conn->dst_type, ev->status);
2883 if (conn->type == ACL_LINK)
2884 hci_sco_setup(conn, ev->status);
2887 hci_connect_cfm(conn, ev->status);
2889 } else if (ev->link_type == SCO_LINK) {
2890 switch (conn->setting & SCO_AIRMODE_MASK) {
2891 case SCO_AIRMODE_CVSD:
2893 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2897 hci_connect_cfm(conn, ev->status);
2901 hci_dev_unlock(hdev);
2903 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address with
 * reason "unacceptable BD_ADDR".
 */
2906 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2908 struct hci_cp_reject_conn_req cp;
2910 bacpy(&cp.bdaddr, bdaddr);
2911 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2912 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Handle the Connection Request event: consult the L2CAP/SCO layers
 * and policy (reject list, HCI_CONNECTABLE, accept list) to decide
 * whether to accept.  Accepted ACL requests are answered immediately
 * with a role preference; sync (SCO/eSCO) requests are either answered
 * with default parameters or deferred to the profile (BT_CONNECT2).
 * A concurrent-SCO guard rejects a second sync link.  Some braces and
 * guard lines are elided in this listing.
 */
2915 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2917 struct hci_ev_conn_request *ev = (void *) skb->data;
2918 int mask = hdev->link_mode;
2919 struct inquiry_entry *ie;
2920 struct hci_conn *conn;
2923 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Ask the protocol layers whether they will take this link. */
2926 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2929 if (!(mask & HCI_LM_ACCEPT)) {
2930 hci_reject_conn(hdev, &ev->bdaddr);
2936 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2938 hci_reject_conn(hdev, &ev->bdaddr);
2942 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2943 * connection. These features are only touched through mgmt so
2944 * only do the checks if HCI_MGMT is set.
2946 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2947 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2948 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2950 hci_reject_conn(hdev, &ev->bdaddr);
2954 /* Connection accepted */
2956 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2958 memcpy(ie->data.dev_class, ev->dev_class, 3);
/* Tizen addition: only one synchronous link at a time is allowed. */
2961 if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
2962 hci_conn_hash_lookup_sco(hdev)) {
2963 struct hci_cp_reject_conn_req cp;
2965 bacpy(&cp.bdaddr, &ev->bdaddr);
2966 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2967 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
2969 hci_dev_unlock(hdev);
2974 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2977 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2980 bt_dev_err(hdev, "no memory for new connection");
2985 memcpy(conn->dev_class, ev->dev_class, 3);
2987 hci_dev_unlock(hdev);
2989 if (ev->link_type == ACL_LINK ||
2990 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2991 struct hci_cp_accept_conn_req cp;
2992 conn->state = BT_CONNECT;
2994 bacpy(&cp.bdaddr, &ev->bdaddr);
2996 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2997 cp.role = 0x00; /* Become central */
2999 cp.role = 0x01; /* Remain peripheral */
3001 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3002 } else if (!(flags & HCI_PROTO_DEFER)) {
3003 struct hci_cp_accept_sync_conn_req cp;
3004 conn->state = BT_CONNECT;
3006 bacpy(&cp.bdaddr, &ev->bdaddr);
3007 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Default eSCO parameters: 8 kB/s bandwidth, no latency/retrans limits. */
3009 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
3010 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
3011 cp.max_latency = cpu_to_le16(0xffff);
3012 cp.content_format = cpu_to_le16(hdev->voice_setting);
3013 cp.retrans_effort = 0xff;
3015 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Deferred: let the profile decide whether to accept (BT_CONNECT2). */
3018 conn->state = BT_CONNECT2;
3019 hci_connect_cfm(conn, 0);
3024 hci_dev_unlock(hdev);
/* Map an HCI disconnect reason code to the MGMT_DEV_DISCONN_* value
 * reported to userspace in the Device Disconnected mgmt event.
 * Unrecognized codes fall through to MGMT_DEV_DISCONN_UNKNOWN.
 */
3027 static u8 hci_to_mgmt_reason(u8 err)
3030 case HCI_ERROR_CONNECTION_TIMEOUT:
3031 return MGMT_DEV_DISCONN_TIMEOUT;
/* All three remote-initiated termination codes collapse to "remote". */
3032 case HCI_ERROR_REMOTE_USER_TERM:
3033 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3034 case HCI_ERROR_REMOTE_POWER_OFF:
3035 return MGMT_DEV_DISCONN_REMOTE;
3036 case HCI_ERROR_LOCAL_HOST_TERM:
3037 return MGMT_DEV_DISCONN_LOCAL_HOST;
3039 return MGMT_DEV_DISCONN_UNKNOWN;
/* Handle the HCI Disconnection Complete event: mark the connection
 * closed, notify mgmt of the disconnect (or of a failed disconnect if
 * ev->status is set), update auto-connect bookkeeping, and re-enable
 * advertising for LE links if it was suppressed by the connection.
 */
3043 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3045 struct hci_ev_disconn_complete *ev = (void *) skb->data;
3047 struct hci_conn_params *params;
3048 struct hci_conn *conn;
3049 bool mgmt_connected;
3051 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3055 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status means the disconnect itself failed; tell mgmt. */
3060 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3061 conn->dst_type, ev->status);
3065 conn->state = BT_CLOSED;
3067 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
/* An earlier auth failure overrides the HCI reason reported to mgmt. */
3069 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3070 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3072 reason = hci_to_mgmt_reason(ev->reason);
3074 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3075 reason, mgmt_connected);
3077 if (conn->type == ACL_LINK) {
3078 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3079 hci_remove_link_key(hdev, &conn->dst);
3081 hci_req_update_scan(hdev);
/* Re-queue the peer for background connect depending on its
 * auto_connect policy.
 */
3084 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3086 switch (params->auto_connect) {
3087 case HCI_AUTO_CONN_LINK_LOSS:
/* LINK_LOSS policy only reconnects on supervision timeout. */
3088 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3092 case HCI_AUTO_CONN_DIRECT:
3093 case HCI_AUTO_CONN_ALWAYS:
3094 list_del_init(&params->action);
3095 list_add(&params->action, &hdev->pend_le_conns);
3096 hci_update_background_scan(hdev);
3104 hci_disconn_cfm(conn, ev->reason);
3106 /* The suspend notifier is waiting for all devices to disconnect so
3107 * clear the bit from pending tasks and inform the wait queue.
3109 if (list_empty(&hdev->conn_hash.list) &&
3110 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
3111 wake_up(&hdev->suspend_wait_q);
3114 /* Re-enable advertising if necessary, since it might
3115 * have been disabled by the connection. From the
3116 * HCI_LE_Set_Advertise_Enable command description in
3117 * the core specification (v4.0):
3118 * "The Controller shall continue advertising until the Host
3119 * issues an LE_Set_Advertise_Enable command with
3120 * Advertising_Enable set to 0x00 (Advertising is disabled)
3121 * or until a connection is created or until the Advertising
3122 * is timed out due to Directed Advertising."
3124 if (conn->type == LE_LINK) {
3125 hdev->cur_adv_instance = conn->adv_instance;
3126 hci_req_reenable_advertising(hdev);
3132 hci_dev_unlock(hdev);
/* Handle the HCI Authentication Complete event: update the connection's
 * auth/security state, kick off link encryption for SSP connections in
 * BT_CONFIG, and report success or failure to the upper layers.
 */
3135 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3137 struct hci_ev_auth_complete *ev = (void *) skb->data;
3138 struct hci_conn *conn;
3140 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3149 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
/* Legacy (pre-SSP) devices cannot be re-authenticated; note it and
 * carry on as if auth succeeded.
 */
3151 if (!hci_conn_ssp_enabled(conn) &&
3152 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3153 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3155 set_bit(HCI_CONN_AUTH, &conn->flags);
3156 conn->sec_level = conn->pending_sec_level;
/* Missing PIN/key is remembered so the later disconnect reports an
 * authentication failure to mgmt.
 */
3159 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3160 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3162 mgmt_auth_failed(conn, ev->status);
3165 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3166 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3168 if (conn->state == BT_CONFIG) {
/* For SSP links request encryption before declaring the
 * connection usable.
 */
3169 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3170 struct hci_cp_set_conn_encrypt cp;
3171 cp.handle = ev->handle;
3173 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3176 conn->state = BT_CONNECTED;
3177 hci_connect_cfm(conn, ev->status);
3178 hci_conn_drop(conn);
3181 hci_auth_cfm(conn, ev->status);
3183 hci_conn_hold(conn);
3184 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3185 hci_conn_drop(conn);
/* A pending encryption request is serviced now that auth is done. */
3188 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3190 struct hci_cp_set_conn_encrypt cp;
3191 cp.handle = ev->handle;
3193 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3196 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3197 hci_encrypt_cfm(conn, ev->status);
3202 hci_dev_unlock(hdev);
/* Handle the HCI Remote Name Request Complete event: forward the
 * resolved (or failed) name to mgmt's pending-name machinery and,
 * if the connection still needs it, start outgoing authentication.
 */
3205 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3207 struct hci_ev_remote_name *ev = (void *) skb->data;
3208 struct hci_conn *conn;
3210 BT_DBG("%s", hdev->name);
3212 hci_conn_check_pending(hdev);
3216 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Name resolution is only surfaced to userspace when mgmt is active. */
3218 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3221 if (ev->status == 0)
/* Name is not guaranteed NUL-terminated; bound with strnlen. */
3222 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3223 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3225 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3231 if (!hci_outgoing_auth_needed(hdev, conn))
/* Only one auth request per connection: guard with AUTH_PEND. */
3234 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3235 struct hci_cp_auth_requested cp;
3237 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3239 cp.handle = __cpu_to_le16(conn->handle);
3240 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3244 hci_dev_unlock(hdev);
/* Completion callback for HCI_Read_Encryption_Key_Size: record the
 * negotiated key size on the connection (0 on failure, forcing upper
 * layers to treat the link as unencrypted) and deliver the deferred
 * encrypt confirmation.
 */
3247 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3248 u16 opcode, struct sk_buff *skb)
3250 const struct hci_rp_read_enc_key_size *rp;
3251 struct hci_conn *conn;
3254 BT_DBG("%s status 0x%02x", hdev->name, status);
/* Validate the response length before touching rp fields. */
3256 if (!skb || skb->len < sizeof(*rp)) {
3257 bt_dev_err(hdev, "invalid read key size response");
3261 rp = (void *)skb->data;
3262 handle = le16_to_cpu(rp->handle);
3266 conn = hci_conn_hash_lookup_handle(hdev, handle);
3270 /* While unexpected, the read_enc_key_size command may fail. The most
3271 * secure approach is to then assume the key size is 0 to force a
3275 bt_dev_err(hdev, "failed to read key size for handle %u",
3277 conn->enc_key_size = 0;
3279 conn->enc_key_size = rp->key_size;
3282 hci_encrypt_cfm(conn, 0);
3285 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Change event: update the connection's
 * auth/encrypt/AES-CCM flags, verify link-security requirements,
 * optionally query the encryption key size for ACL links, set the
 * authenticated payload timeout where LMP/LE ping is supported, and
 * notify upper layers of the final encryption state.
 */
3288 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3290 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3291 struct hci_conn *conn;
3293 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3297 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3303 /* Encryption implies authentication */
3304 set_bit(HCI_CONN_AUTH, &conn->flags);
3305 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3306 conn->sec_level = conn->pending_sec_level;
3308 /* P-256 authentication key implies FIPS */
3309 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3310 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 on BR/EDR means AES-CCM; LE always uses AES-CCM. */
3312 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3313 conn->type == LE_LINK)
3314 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3316 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3317 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3321 /* We should disregard the current RPA and generate a new one
3322 * whenever the encryption procedure fails.
3324 if (ev->status && conn->type == LE_LINK) {
3325 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3326 hci_adv_instances_set_rpa_expired(hdev, true);
3329 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3331 /* Check link security requirements are met */
3332 if (!hci_conn_check_link_mode(conn))
/* Downgrade the event to an auth failure if the link mode is
 * insufficient for the requested security level.
 */
3333 ev->status = HCI_ERROR_AUTH_FAILURE;
3335 if (ev->status && conn->state == BT_CONNECTED) {
3336 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3337 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3339 /* Notify upper layers so they can cleanup before
3342 hci_encrypt_cfm(conn, ev->status);
3343 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3344 hci_conn_drop(conn);
3348 /* Try reading the encryption key size for encrypted ACL links */
3349 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3350 struct hci_cp_read_enc_key_size cp;
3351 struct hci_request req;
3353 /* Only send HCI_Read_Encryption_Key_Size if the
3354 * controller really supports it. If it doesn't, assume
3355 * the default size (16).
/* commands[20] bit 0x10 advertises Read_Encryption_Key_Size
 * support per the supported-commands bitmap.
 */
3357 if (!(hdev->commands[20] & 0x10)) {
3358 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3362 hci_req_init(&req, hdev);
3364 cp.handle = cpu_to_le16(conn->handle);
3365 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3367 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3368 bt_dev_err(hdev, "sending read key size failed");
3369 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3376 /* Set the default Authenticated Payload Timeout after
3377 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3378 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3379 * sent when the link is active and Encryption is enabled, the conn
3380 * type can be either LE or ACL and controller must support LMP Ping.
3381 * Ensure for AES-CCM encryption as well.
3383 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3384 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3385 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3386 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3387 struct hci_cp_write_auth_payload_to cp;
3389 cp.handle = cpu_to_le16(conn->handle);
3390 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3391 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3396 hci_encrypt_cfm(conn, ev->status);
3399 hci_dev_unlock(hdev);
/* Handle the HCI Change Connection Link Key Complete event: mark the
 * link secure, clear the pending-auth flag and notify upper layers.
 */
3402 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3403 struct sk_buff *skb)
3405 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3406 struct hci_conn *conn;
3408 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3412 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3415 set_bit(HCI_CONN_SECURE, &conn->flags);
3417 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3419 hci_key_change_cfm(conn, ev->status);
3422 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Supported Features Complete event: cache
 * the feature page, chain into extended-feature or remote-name reads
 * while still in BT_CONFIG, and complete the connection setup when no
 * outgoing authentication is needed.
 */
3425 static void hci_remote_features_evt(struct hci_dev *hdev,
3426 struct sk_buff *skb)
3428 struct hci_ev_remote_features *ev = (void *) skb->data;
3429 struct hci_conn *conn;
3431 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3435 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Page 0 of the remote feature mask (8 bytes). */
3440 memcpy(conn->features[0], ev->features, 8);
3442 if (conn->state != BT_CONFIG)
/* Both sides must support extended features to read page 1+. */
3445 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3446 lmp_ext_feat_capable(conn)) {
3447 struct hci_cp_read_remote_ext_features cp;
3448 cp.handle = ev->handle;
3450 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Otherwise resolve the remote name before telling mgmt about the
 * connection; pscan_rep_mode 0x02 = R2.
 */
3455 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3456 struct hci_cp_remote_name_req cp;
3457 memset(&cp, 0, sizeof(cp));
3458 bacpy(&cp.bdaddr, &conn->dst);
3459 cp.pscan_rep_mode = 0x02;
3460 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3461 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3462 mgmt_device_connected(hdev, conn, NULL, 0);
3464 if (!hci_outgoing_auth_needed(hdev, conn)) {
3465 conn->state = BT_CONNECTED;
3466 hci_connect_cfm(conn, ev->status);
3467 hci_conn_drop(conn);
3471 hci_dev_unlock(hdev);
/* Common bookkeeping after Command Complete/Status events: stop the
 * command timeout timer and refresh the command credit count and the
 * ncmd watchdog based on the controller-reported ncmd value.
 * NOTE(review): behavior during HCI_RESET appears intentionally
 * skipped here — confirm against the full function body.
 */
3474 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3476 cancel_delayed_work(&hdev->cmd_timer);
3478 if (!test_bit(HCI_RESET, &hdev->flags)) {
3480 cancel_delayed_work(&hdev->ncmd_timer);
3481 atomic_set(&hdev->cmd_cnt, 1);
3483 schedule_delayed_work(&hdev->ncmd_timer,
/* Handle the HCI Command Complete event: extract the opcode and status,
 * dispatch to the matching hci_cc_* handler, update command credits and
 * timers, resolve any pending request waiting on this opcode, and kick
 * the command work queue if more commands are queued.
 */
3489 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3490 u16 *opcode, u8 *status,
3491 hci_req_complete_t *req_complete,
3492 hci_req_complete_skb_t *req_complete_skb)
3494 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3496 *opcode = __le16_to_cpu(ev->opcode);
/* Status is the first byte of the command's return parameters,
 * immediately following the event header.
 */
3497 *status = skb->data[sizeof(*ev)];
3499 skb_pull(skb, sizeof(*ev));
/* Dispatch table: one hci_cc_* handler per completed opcode. */
3502 case HCI_OP_INQUIRY_CANCEL:
3503 hci_cc_inquiry_cancel(hdev, skb, status);
3506 case HCI_OP_PERIODIC_INQ:
3507 hci_cc_periodic_inq(hdev, skb);
3510 case HCI_OP_EXIT_PERIODIC_INQ:
3511 hci_cc_exit_periodic_inq(hdev, skb);
3514 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3515 hci_cc_remote_name_req_cancel(hdev, skb);
3518 case HCI_OP_ROLE_DISCOVERY:
3519 hci_cc_role_discovery(hdev, skb);
3522 case HCI_OP_READ_LINK_POLICY:
3523 hci_cc_read_link_policy(hdev, skb);
3526 case HCI_OP_WRITE_LINK_POLICY:
3527 hci_cc_write_link_policy(hdev, skb);
3530 case HCI_OP_READ_DEF_LINK_POLICY:
3531 hci_cc_read_def_link_policy(hdev, skb);
3534 case HCI_OP_WRITE_DEF_LINK_POLICY:
3535 hci_cc_write_def_link_policy(hdev, skb);
3539 hci_cc_reset(hdev, skb);
3542 case HCI_OP_READ_STORED_LINK_KEY:
3543 hci_cc_read_stored_link_key(hdev, skb);
3546 case HCI_OP_DELETE_STORED_LINK_KEY:
3547 hci_cc_delete_stored_link_key(hdev, skb);
3550 case HCI_OP_WRITE_LOCAL_NAME:
3551 hci_cc_write_local_name(hdev, skb);
3554 case HCI_OP_READ_LOCAL_NAME:
3555 hci_cc_read_local_name(hdev, skb);
3558 case HCI_OP_WRITE_AUTH_ENABLE:
3559 hci_cc_write_auth_enable(hdev, skb);
3562 case HCI_OP_WRITE_ENCRYPT_MODE:
3563 hci_cc_write_encrypt_mode(hdev, skb);
3566 case HCI_OP_WRITE_SCAN_ENABLE:
3567 hci_cc_write_scan_enable(hdev, skb);
3570 case HCI_OP_SET_EVENT_FLT:
3571 hci_cc_set_event_filter(hdev, skb);
3574 case HCI_OP_READ_CLASS_OF_DEV:
3575 hci_cc_read_class_of_dev(hdev, skb);
3578 case HCI_OP_WRITE_CLASS_OF_DEV:
3579 hci_cc_write_class_of_dev(hdev, skb);
3582 case HCI_OP_READ_VOICE_SETTING:
3583 hci_cc_read_voice_setting(hdev, skb);
3586 case HCI_OP_WRITE_VOICE_SETTING:
3587 hci_cc_write_voice_setting(hdev, skb);
3590 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3591 hci_cc_read_num_supported_iac(hdev, skb);
3594 case HCI_OP_WRITE_SSP_MODE:
3595 hci_cc_write_ssp_mode(hdev, skb);
3598 case HCI_OP_WRITE_SC_SUPPORT:
3599 hci_cc_write_sc_support(hdev, skb);
3602 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3603 hci_cc_read_auth_payload_timeout(hdev, skb);
3606 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3607 hci_cc_write_auth_payload_timeout(hdev, skb);
3610 case HCI_OP_READ_LOCAL_VERSION:
3611 hci_cc_read_local_version(hdev, skb);
3614 case HCI_OP_READ_LOCAL_COMMANDS:
3615 hci_cc_read_local_commands(hdev, skb);
3618 case HCI_OP_READ_LOCAL_FEATURES:
3619 hci_cc_read_local_features(hdev, skb);
3622 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3623 hci_cc_read_local_ext_features(hdev, skb);
3626 case HCI_OP_READ_BUFFER_SIZE:
3627 hci_cc_read_buffer_size(hdev, skb);
3630 case HCI_OP_READ_BD_ADDR:
3631 hci_cc_read_bd_addr(hdev, skb);
3634 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3635 hci_cc_read_local_pairing_opts(hdev, skb);
3638 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3639 hci_cc_read_page_scan_activity(hdev, skb);
3642 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3643 hci_cc_write_page_scan_activity(hdev, skb);
3646 case HCI_OP_READ_PAGE_SCAN_TYPE:
3647 hci_cc_read_page_scan_type(hdev, skb);
3650 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3651 hci_cc_write_page_scan_type(hdev, skb);
3654 case HCI_OP_READ_DATA_BLOCK_SIZE:
3655 hci_cc_read_data_block_size(hdev, skb);
3658 case HCI_OP_READ_FLOW_CONTROL_MODE:
3659 hci_cc_read_flow_control_mode(hdev, skb);
3662 case HCI_OP_READ_LOCAL_AMP_INFO:
3663 hci_cc_read_local_amp_info(hdev, skb);
3666 case HCI_OP_READ_CLOCK:
3667 hci_cc_read_clock(hdev, skb);
3670 case HCI_OP_READ_INQ_RSP_TX_POWER:
3671 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3674 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3675 hci_cc_read_def_err_data_reporting(hdev, skb);
3678 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3679 hci_cc_write_def_err_data_reporting(hdev, skb);
3682 case HCI_OP_PIN_CODE_REPLY:
3683 hci_cc_pin_code_reply(hdev, skb);
3686 case HCI_OP_PIN_CODE_NEG_REPLY:
3687 hci_cc_pin_code_neg_reply(hdev, skb);
3690 case HCI_OP_READ_LOCAL_OOB_DATA:
3691 hci_cc_read_local_oob_data(hdev, skb);
3694 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3695 hci_cc_read_local_oob_ext_data(hdev, skb);
/* LE controller commands. */
3698 case HCI_OP_LE_READ_BUFFER_SIZE:
3699 hci_cc_le_read_buffer_size(hdev, skb);
3702 case HCI_OP_LE_READ_LOCAL_FEATURES:
3703 hci_cc_le_read_local_features(hdev, skb);
3706 case HCI_OP_LE_READ_ADV_TX_POWER:
3707 hci_cc_le_read_adv_tx_power(hdev, skb);
3710 case HCI_OP_USER_CONFIRM_REPLY:
3711 hci_cc_user_confirm_reply(hdev, skb);
3714 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3715 hci_cc_user_confirm_neg_reply(hdev, skb);
3718 case HCI_OP_USER_PASSKEY_REPLY:
3719 hci_cc_user_passkey_reply(hdev, skb);
3722 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3723 hci_cc_user_passkey_neg_reply(hdev, skb);
3726 case HCI_OP_LE_SET_RANDOM_ADDR:
3727 hci_cc_le_set_random_addr(hdev, skb);
3730 case HCI_OP_LE_SET_ADV_ENABLE:
3731 hci_cc_le_set_adv_enable(hdev, skb);
3734 case HCI_OP_LE_SET_SCAN_PARAM:
3735 hci_cc_le_set_scan_param(hdev, skb);
3738 case HCI_OP_LE_SET_SCAN_ENABLE:
3739 hci_cc_le_set_scan_enable(hdev, skb);
3742 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3743 hci_cc_le_read_accept_list_size(hdev, skb);
3746 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3747 hci_cc_le_clear_accept_list(hdev, skb);
3750 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3751 hci_cc_le_add_to_accept_list(hdev, skb);
3754 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3755 hci_cc_le_del_from_accept_list(hdev, skb);
3758 case HCI_OP_LE_READ_SUPPORTED_STATES:
3759 hci_cc_le_read_supported_states(hdev, skb);
3762 case HCI_OP_LE_READ_DEF_DATA_LEN:
3763 hci_cc_le_read_def_data_len(hdev, skb);
3766 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3767 hci_cc_le_write_def_data_len(hdev, skb);
3770 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3771 hci_cc_le_add_to_resolv_list(hdev, skb);
3774 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3775 hci_cc_le_del_from_resolv_list(hdev, skb);
3778 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3779 hci_cc_le_clear_resolv_list(hdev, skb);
3782 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3783 hci_cc_le_read_resolv_list_size(hdev, skb);
3786 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3787 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3790 case HCI_OP_LE_READ_MAX_DATA_LEN:
3791 hci_cc_le_read_max_data_len(hdev, skb);
3794 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3795 hci_cc_write_le_host_supported(hdev, skb);
3798 case HCI_OP_LE_SET_ADV_PARAM:
3799 hci_cc_set_adv_param(hdev, skb);
3802 case HCI_OP_READ_RSSI:
3803 hci_cc_read_rssi(hdev, skb);
3806 case HCI_OP_READ_TX_POWER:
3807 hci_cc_read_tx_power(hdev, skb);
3810 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3811 hci_cc_write_ssp_debug_mode(hdev, skb);
/* Extended scanning / advertising (5.0) commands. */
3814 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3815 hci_cc_le_set_ext_scan_param(hdev, skb);
3818 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3819 hci_cc_le_set_ext_scan_enable(hdev, skb);
3822 case HCI_OP_LE_SET_DEFAULT_PHY:
3823 hci_cc_le_set_default_phy(hdev, skb);
3826 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3827 hci_cc_le_read_num_adv_sets(hdev, skb);
3830 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3831 hci_cc_set_ext_adv_param(hdev, skb);
3834 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3835 hci_cc_le_set_ext_adv_enable(hdev, skb);
3838 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3839 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3842 case HCI_OP_LE_READ_TRANSMIT_POWER:
3843 hci_cc_le_read_transmit_power(hdev, skb);
/* Vendor-specific RSSI monitoring commands. */
3846 case HCI_OP_ENABLE_RSSI:
3847 hci_cc_enable_rssi(hdev, skb);
3850 case HCI_OP_GET_RAW_RSSI:
3851 hci_cc_get_raw_rssi(hdev, skb);
3855 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* Refresh command credits / timers from the event's ncmd field. */
3859 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
/* Resolve any hci_request waiting on this opcode. */
3861 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3864 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3866 "unexpected event for opcode 0x%4.4x", *opcode);
/* Send the next queued command if credits allow. */
3870 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3871 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event: dispatch to the matching
 * hci_cs_* handler for commands that report status-only completion,
 * update command credits/timers, flag request completion where
 * appropriate, and kick the command work queue.
 */
3874 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3875 u16 *opcode, u8 *status,
3876 hci_req_complete_t *req_complete,
3877 hci_req_complete_skb_t *req_complete_skb)
3879 struct hci_ev_cmd_status *ev = (void *) skb->data;
3881 skb_pull(skb, sizeof(*ev));
3883 *opcode = __le16_to_cpu(ev->opcode);
3884 *status = ev->status;
/* Dispatch table: one hci_cs_* handler per status-reporting opcode. */
3887 case HCI_OP_INQUIRY:
3888 hci_cs_inquiry(hdev, ev->status);
3891 case HCI_OP_CREATE_CONN:
3892 hci_cs_create_conn(hdev, ev->status);
3895 case HCI_OP_DISCONNECT:
3896 hci_cs_disconnect(hdev, ev->status);
3899 case HCI_OP_ADD_SCO:
3900 hci_cs_add_sco(hdev, ev->status);
3903 case HCI_OP_AUTH_REQUESTED:
3904 hci_cs_auth_requested(hdev, ev->status);
3907 case HCI_OP_SET_CONN_ENCRYPT:
3908 hci_cs_set_conn_encrypt(hdev, ev->status);
3911 case HCI_OP_REMOTE_NAME_REQ:
3912 hci_cs_remote_name_req(hdev, ev->status);
3915 case HCI_OP_READ_REMOTE_FEATURES:
3916 hci_cs_read_remote_features(hdev, ev->status);
3919 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3920 hci_cs_read_remote_ext_features(hdev, ev->status);
3923 case HCI_OP_SETUP_SYNC_CONN:
3924 hci_cs_setup_sync_conn(hdev, ev->status);
3927 case HCI_OP_SNIFF_MODE:
3928 hci_cs_sniff_mode(hdev, ev->status);
3931 case HCI_OP_EXIT_SNIFF_MODE:
3932 hci_cs_exit_sniff_mode(hdev, ev->status);
3935 case HCI_OP_SWITCH_ROLE:
3936 hci_cs_switch_role(hdev, ev->status);
3939 case HCI_OP_LE_CREATE_CONN:
3940 hci_cs_le_create_conn(hdev, ev->status);
3943 case HCI_OP_LE_READ_REMOTE_FEATURES:
3944 hci_cs_le_read_remote_features(hdev, ev->status);
3947 case HCI_OP_LE_START_ENC:
3948 hci_cs_le_start_enc(hdev, ev->status);
3951 case HCI_OP_LE_EXT_CREATE_CONN:
3952 hci_cs_le_ext_create_conn(hdev, ev->status);
3956 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* Refresh command credits / timers from the event's ncmd field. */
3960 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3962 /* Indicate request completion if the command failed. Also, if
3963 * we're not waiting for a special event and we get a success
3964 * command status we should try to flag the request as completed
3965 * (since for this kind of commands there will not be a command
3969 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3970 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3973 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3975 "unexpected event for opcode 0x%4.4x", *opcode);
/* Send the next queued command if credits allow. */
3979 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3980 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: report the error code to mgmt,
 * record it on the device and schedule the error-reset work item.
 */
3983 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3985 struct hci_ev_hardware_error *ev = (void *) skb->data;
3989 mgmt_hardware_error(hdev, ev->code);
3990 hci_dev_unlock(hdev);
3992 hdev->hw_error_code = ev->code;
3994 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: record the new role on the matching
 * ACL connection, confirm any pending role switch, and bump the link
 * supervision timeout when we became central.
 */
3997 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3999 struct hci_ev_role_change *ev = (void *) skb->data;
4000 struct hci_conn *conn;
4002 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4006 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4009 conn->role = ev->role;
4011 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4013 hci_role_switch_cfm(conn, ev->status, ev->role);
/* Only the central sets the supervision timeout. */
4015 if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
4016 hci_conn_change_supervision_timeout(conn,
4017 LINK_SUPERVISION_TIMEOUT);
4021 hci_dev_unlock(hdev);
/* Handle the HCI Number Of Completed Packets event: return transmit
 * credits to the per-type counters (ACL/LE/SCO), clamped at the
 * controller-advertised maximum, and kick the TX work queue.
 */
4024 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
4026 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event only applies with packet-based flow control. */
4029 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4030 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the claimed handle count against the actual skb length. */
4034 if (skb->len < sizeof(*ev) ||
4035 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4036 BT_DBG("%s bad parameters", hdev->name);
4040 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
4042 for (i = 0; i < ev->num_hndl; i++) {
4043 struct hci_comp_pkts_info *info = &ev->handles[i];
4044 struct hci_conn *conn;
4045 __u16 handle, count;
4047 handle = __le16_to_cpu(info->handle);
4048 count = __le16_to_cpu(info->count);
4050 conn = hci_conn_hash_lookup_handle(hdev, handle);
4054 conn->sent -= count;
4056 switch (conn->type) {
4058 hdev->acl_cnt += count;
4059 if (hdev->acl_cnt > hdev->acl_pkts)
4060 hdev->acl_cnt = hdev->acl_pkts;
/* LE shares the ACL buffer pool when the controller reports no
 * dedicated LE buffers (le_pkts == 0).
 */
4064 if (hdev->le_pkts) {
4065 hdev->le_cnt += count;
4066 if (hdev->le_cnt > hdev->le_pkts)
4067 hdev->le_cnt = hdev->le_pkts;
4069 hdev->acl_cnt += count;
4070 if (hdev->acl_cnt > hdev->acl_pkts)
4071 hdev->acl_cnt = hdev->acl_pkts;
4076 hdev->sco_cnt += count;
4077 if (hdev->sco_cnt > hdev->sco_pkts)
4078 hdev->sco_cnt = hdev->sco_pkts;
4082 bt_dev_err(hdev, "unknown type %d conn %p",
/* Freed credits may unblock queued TX. */
4088 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a connection according to the device type:
 * primary controllers look up the connection hash directly, while AMP
 * controllers resolve through the channel (hci_chan) table.
 */
4091 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4094 struct hci_chan *chan;
4096 switch (hdev->dev_type) {
4098 return hci_conn_hash_lookup_handle(hdev, handle);
4100 chan = hci_chan_lookup_handle(hdev, handle);
4105 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle the HCI Number Of Completed Data Blocks event (block-based
 * flow control): return data-block credits to the shared pool, clamped
 * at the controller maximum, and kick the TX work queue.
 */
4112 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
4114 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event only applies with block-based flow control. */
4117 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4118 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the claimed handle count against the actual skb length. */
4122 if (skb->len < sizeof(*ev) ||
4123 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4124 BT_DBG("%s bad parameters", hdev->name);
4128 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
4131 for (i = 0; i < ev->num_hndl; i++) {
4132 struct hci_comp_blocks_info *info = &ev->handles[i];
4133 struct hci_conn *conn = NULL;
4134 __u16 handle, block_count;
4136 handle = __le16_to_cpu(info->handle);
4137 block_count = __le16_to_cpu(info->blocks);
4139 conn = __hci_conn_lookup_handle(hdev, handle);
4143 conn->sent -= block_count;
4145 switch (conn->type) {
4148 hdev->block_cnt += block_count;
4149 if (hdev->block_cnt > hdev->num_blocks)
4150 hdev->block_cnt = hdev->num_blocks;
4154 bt_dev_err(hdev, "unknown type %d conn %p",
/* Freed credits may unblock queued TX. */
4160 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event (active/sniff): track the power-save
 * flag unless a mode change we initiated is pending, and finish any
 * deferred SCO setup.
 */
4163 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4165 struct hci_ev_mode_change *ev = (void *) skb->data;
4166 struct hci_conn *conn;
4168 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4172 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4174 conn->mode = ev->mode;
/* Remote-initiated mode changes update POWER_SAVE; our own pending
 * change is consumed here instead.
 */
4176 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4178 if (conn->mode == HCI_CM_ACTIVE)
4179 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4181 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
/* SCO setup was deferred until the ACL left sniff mode. */
4184 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4185 hci_sco_setup(conn, ev->status);
4188 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event: reject when the device is not
 * bondable and we did not initiate authentication, otherwise forward
 * the request to userspace via mgmt (flagging whether a 16-digit
 * "secure" PIN is required).
 */
4191 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4193 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4194 struct hci_conn *conn;
4196 BT_DBG("%s", hdev->name);
4200 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive for the duration of pairing. */
4204 if (conn->state == BT_CONNECTED) {
4205 hci_conn_hold(conn);
4206 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4207 hci_conn_drop(conn);
4210 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4211 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4212 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4213 sizeof(ev->bdaddr), &ev->bdaddr);
4214 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4217 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4222 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4226 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level the key type can satisfy.
 */
4229 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
/* A changed-combination key keeps the connection's previous type. */
4231 if (key_type == HCI_LK_CHANGED_COMBINATION)
4234 conn->pin_length = pin_len;
4235 conn->key_type = key_type;
4238 case HCI_LK_LOCAL_UNIT:
4239 case HCI_LK_REMOTE_UNIT:
4240 case HCI_LK_DEBUG_COMBINATION:
/* Legacy combination keys only reach HIGH with a full-length PIN. */
4242 case HCI_LK_COMBINATION:
4244 conn->pending_sec_level = BT_SECURITY_HIGH;
4246 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4248 case HCI_LK_UNAUTH_COMBINATION_P192:
4249 case HCI_LK_UNAUTH_COMBINATION_P256:
4250 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4252 case HCI_LK_AUTH_COMBINATION_P192:
4253 conn->pending_sec_level = BT_SECURITY_HIGH;
/* Authenticated P-256 keys qualify for FIPS-level security. */
4255 case HCI_LK_AUTH_COMBINATION_P256:
4256 conn->pending_sec_level = BT_SECURITY_FIPS;
4261 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4263 struct hci_ev_link_key_req *ev = (void *) skb->data;
4264 struct hci_cp_link_key_reply cp;
4265 struct hci_conn *conn;
4266 struct link_key *key;
4268 BT_DBG("%s", hdev->name);
4270 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4275 key = hci_find_link_key(hdev, &ev->bdaddr);
4277 BT_DBG("%s link key not found for %pMR", hdev->name,
4282 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4285 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4287 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4289 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4290 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4291 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4292 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4296 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4297 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4298 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4299 BT_DBG("%s ignoring key unauthenticated for high security",
4304 conn_set_key(conn, key->type, key->pin_len);
4307 bacpy(&cp.bdaddr, &ev->bdaddr);
4308 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4310 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4312 hci_dev_unlock(hdev);
4317 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4318 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event: record the new key on the
 * connection, persist it through mgmt (dropping debug keys unless
 * HCI_KEEP_DEBUG_KEYS is set), and track whether it should be flushed
 * on disconnect.
 */
4321 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4323 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4324 struct hci_conn *conn;
4325 struct link_key *key;
4329 BT_DBG("%s", hdev->name);
4333 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection around briefly after key delivery. */
4337 hci_conn_hold(conn);
4338 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4339 hci_conn_drop(conn);
4341 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4342 conn_set_key(conn, ev->key_type, conn->pin_length);
4344 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4347 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4348 ev->key_type, pin_len, &persistent);
4352 /* Update connection information since adding the key will have
4353 * fixed up the type in the case of changed combination keys.
4355 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4356 conn_set_key(conn, key->type, key->pin_len);
4358 mgmt_new_link_key(hdev, key, persistent);
4360 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4361 * is set. If it's not set simply remove the key from the kernel
4362 * list (we've still notified user space about it but with
4363 * store_hint being 0).
4365 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4366 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4367 list_del_rcu(&key->list);
4368 kfree_rcu(key, rcu);
/* FLUSH_KEY marks non-persistent keys for removal on disconnect. */
4373 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4375 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4378 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: refresh the cached
 * clock offset in the inquiry cache entry for the peer, if present.
 */
4381 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4383 struct hci_ev_clock_offset *ev = (void *) skb->data;
4384 struct hci_conn *conn;
4386 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4390 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4391 if (conn && !ev->status) {
4392 struct inquiry_entry *ie;
4394 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4396 ie->data.clock_offset = ev->clock_offset;
4397 ie->timestamp = jiffies;
4401 hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: store the newly
 * negotiated packet type mask on the connection.
 */
4404 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4406 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4407 struct hci_conn *conn;
4409 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4413 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4414 if (conn && !ev->status)
4415 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4417 hci_dev_unlock(hdev);
/* Handle the HCI Page Scan Repetition Mode Change event: update the
 * inquiry cache entry for the peer with the new page-scan mode.
 */
4420 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4422 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4423 struct inquiry_entry *ie;
4425 BT_DBG("%s", hdev->name);
4429 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4431 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4432 ie->timestamp = jiffies;
4435 hci_dev_unlock(hdev);
/* HCI Inquiry Result with RSSI event: parse one or more inquiry responses,
 * update the inquiry cache, and notify the management interface of each
 * found device.
 *
 * The event comes in two wire formats; the size heuristic below
 * distinguishes responses that additionally carry a pscan_mode field from
 * the plain with-RSSI layout.
 * NOTE(review): this extract is missing interior lines (length-0 checks,
 * locking, loop-local declarations such as `flags`); verify against the
 * complete source.
 */
4438 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4439 struct sk_buff *skb)
4441 struct inquiry_data data;
4442 int num_rsp = *((__u8 *) skb->data);
4444 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results received during a periodic inquiry are not reported */
4449 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Per-response size decides which of the two event layouts we were sent */
4454 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4455 struct inquiry_info_with_rssi_and_pscan_mode *info;
4456 info = (void *) (skb->data + 1);
/* Bounds check: all advertised responses must fit inside the skb */
4458 if (skb->len < num_rsp * sizeof(*info) + 1)
4461 for (; num_rsp; num_rsp--, info++) {
4464 bacpy(&data.bdaddr, &info->bdaddr);
4465 data.pscan_rep_mode = info->pscan_rep_mode;
4466 data.pscan_period_mode = info->pscan_period_mode;
4467 data.pscan_mode = info->pscan_mode;
4468 memcpy(data.dev_class, info->dev_class, 3);
4469 data.clock_offset = info->clock_offset;
4470 data.rssi = info->rssi;
4471 data.ssp_mode = 0x00;
4473 flags = hci_inquiry_cache_update(hdev, &data, false);
4475 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4476 info->dev_class, info->rssi,
4477 flags, NULL, 0, NULL, 0);
/* Plain inquiry_info_with_rssi layout (no pscan_mode on the wire) */
4480 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4482 if (skb->len < num_rsp * sizeof(*info) + 1)
4485 for (; num_rsp; num_rsp--, info++) {
4488 bacpy(&data.bdaddr, &info->bdaddr);
4489 data.pscan_rep_mode = info->pscan_rep_mode;
4490 data.pscan_period_mode = info->pscan_period_mode;
4491 data.pscan_mode = 0x00;
4492 memcpy(data.dev_class, info->dev_class, 3);
4493 data.clock_offset = info->clock_offset;
4494 data.rssi = info->rssi;
4495 data.ssp_mode = 0x00;
4497 flags = hci_inquiry_cache_update(hdev, &data, false);
4499 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4500 info->dev_class, info->rssi,
4501 flags, NULL, 0, NULL, 0);
4506 hci_dev_unlock(hdev);
/* HCI Read Remote Extended Features Complete event: store the reported
 * feature page, derive SSP/SC enablement from host-feature page 1, and
 * continue connection setup (remote name request or connect confirmation).
 * NOTE(review): this extract is missing interior lines (locking, `if
 * (!conn)` guards, goto labels); verify against the complete source.
 */
4509 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4510 struct sk_buff *skb)
4512 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4513 struct hci_conn *conn;
4515 BT_DBG("%s", hdev->name);
4519 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Only cache feature pages we have storage for */
4523 if (ev->page < HCI_MAX_PAGES)
4524 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote host's SSP/SC support bits */
4526 if (!ev->status && ev->page == 0x01) {
4527 struct inquiry_entry *ie;
4529 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4531 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4533 if (ev->features[0] & LMP_HOST_SSP) {
4534 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4536 /* It is mandatory by the Bluetooth specification that
4537 * Extended Inquiry Results are only used when Secure
4538 * Simple Pairing is enabled, but some devices violate
4541 * To make these devices work, the internal SSP
4542 * enabled flag needs to be cleared if the remote host
4543 * features do not indicate SSP support */
4544 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4547 if (ev->features[0] & LMP_HOST_SC)
4548 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Remaining steps only apply while the connection is being configured */
4551 if (conn->state != BT_CONFIG)
4554 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4555 struct hci_cp_remote_name_req cp;
4556 memset(&cp, 0, sizeof(cp));
4557 bacpy(&cp.bdaddr, &conn->dst);
4558 cp.pscan_rep_mode = 0x02;
4559 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4560 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4561 mgmt_device_connected(hdev, conn, NULL, 0);
/* If no outgoing authentication is required, the link is fully up */
4563 if (!hci_outgoing_auth_needed(hdev, conn)) {
4564 conn->state = BT_CONNECTED;
4565 hci_connect_cfm(conn, ev->status);
4566 hci_conn_drop(conn);
4570 hci_dev_unlock(hdev);
/* HCI Synchronous Connection Complete event: validate the link type,
 * resolve the connection object (falling back from SCO to eSCO), then
 * either finalize the connection, retry SCO setup on specific failure
 * codes, or close it. Finally notifies the driver about the SCO air mode.
 * NOTE(review): this extract is missing interior lines (case labels,
 * `if (!conn)` guards, goto labels, locking); verify against the complete
 * source.
 */
4573 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4574 struct sk_buff *skb)
4576 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4577 struct hci_conn *conn;
4579 switch (ev->link_type) {
4584 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4585 * for HCI_Synchronous_Connection_Complete is limited to
4586 * either SCO or eSCO
4588 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4592 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4596 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4598 if (ev->link_type == ESCO_LINK)
4601 /* When the link type in the event indicates SCO connection
4602 * and lookup of the connection object fails, then check
4603 * if an eSCO connection object exists.
4605 * The core limits the synchronous connections to either
4606 * SCO or eSCO. The eSCO connection is preferred and tried
4607 * to be setup first and until successfully established,
4608 * the link type will be hinted as eSCO.
4610 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4615 switch (ev->status) {
4617 /* The synchronous connection complete event should only be
4618 * sent once per new connection. Receiving a successful
4619 * complete event when the connection status is already
4620 * BT_CONNECTED means that the device is misbehaving and sent
4621 * multiple complete event packets for the same new connection.
4623 * Registering the device more than once can corrupt kernel
4624 * memory, hence upon detecting this invalid event, we report
4625 * an error and ignore the packet.
4627 if (conn->state == BT_CONNECTED) {
4628 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4632 conn->handle = __le16_to_cpu(ev->handle);
4633 conn->state = BT_CONNECTED;
4634 conn->type = ev->link_type;
4636 hci_debugfs_create_conn(conn);
4637 hci_conn_add_sysfs(conn);
/* Failure codes for which a plain SCO fallback/retry is attempted */
4640 case 0x10: /* Connection Accept Timeout */
4641 case 0x0d: /* Connection Rejected due to Limited Resources */
4642 case 0x11: /* Unsupported Feature or Parameter Value */
4643 case 0x1c: /* SCO interval rejected */
4644 case 0x1a: /* Unsupported Remote Feature */
4645 case 0x1e: /* Invalid LMP Parameters */
4646 case 0x1f: /* Unspecified error */
4647 case 0x20: /* Unsupported LMP Parameter value */
4649 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4650 (hdev->esco_type & EDR_ESCO_MASK);
4651 if (hci_setup_sync(conn, conn->link->handle))
/* Any other failure: tear the connection down */
4657 conn->state = BT_CLOSED;
4661 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Let the driver switch its codec path based on the negotiated air mode */
4663 switch (ev->air_mode) {
4666 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4670 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4674 hci_connect_cfm(conn, ev->status);
4679 hci_dev_unlock(hdev);
/* Walk an Extended Inquiry Response buffer and return the number of bytes
 * actually occupied by valid length-prefixed EIR fields.
 * NOTE(review): the `parsed` initialization, zero-length terminator check
 * and return statement are not visible in this extract; verify against the
 * complete source.
 */
4682 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4686 while (parsed < eir_len) {
4687 u8 field_len = eir[0];
/* Each field occupies its length byte plus field_len data bytes */
4692 parsed += field_len + 1;
4693 eir += field_len + 1;
/* HCI Extended Inquiry Result event: parse each response (always SSP
 * capable), update the inquiry cache, and report the device with its EIR
 * data to the management interface.
 * NOTE(review): this extract is missing interior lines (declarations for
 * `flags`, `name_known`, `eir_len`, locking); verify against the complete
 * source.
 */
4699 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4700 struct sk_buff *skb)
4702 struct inquiry_data data;
4703 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4704 int num_rsp = *((__u8 *) skb->data);
4707 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Bounds check before touching any of the response records */
4709 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4712 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4717 for (; num_rsp; num_rsp--, info++) {
4721 bacpy(&data.bdaddr, &info->bdaddr);
4722 data.pscan_rep_mode = info->pscan_rep_mode;
4723 data.pscan_period_mode = info->pscan_period_mode;
4724 data.pscan_mode = 0x00;
4725 memcpy(data.dev_class, info->dev_class, 3);
4726 data.clock_offset = info->clock_offset;
4727 data.rssi = info->rssi;
/* Extended inquiry results imply Secure Simple Pairing support */
4728 data.ssp_mode = 0x01;
4730 if (hci_dev_test_flag(hdev, HCI_MGMT))
4731 name_known = eir_get_data(info->data,
4733 EIR_NAME_COMPLETE, NULL);
4737 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4739 eir_len = eir_get_length(info->data, sizeof(info->data));
4741 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4742 info->dev_class, info->rssi,
4743 flags, info->data, eir_len, NULL, 0);
4746 hci_dev_unlock(hdev);
/* HCI Encryption Key Refresh Complete event: for LE links, promote the
 * pending security level, finish connection setup if still configuring,
 * and on failure of an established link start a disconnect.
 * NOTE(review): this extract is missing interior lines (locking, `if
 * (!conn)` guard, goto labels, else branches); verify against the complete
 * source.
 */
4749 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4750 struct sk_buff *skb)
4752 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4753 struct hci_conn *conn;
4755 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4756 __le16_to_cpu(ev->handle));
4760 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4764 /* For BR/EDR the necessary steps are taken through the
4765 * auth_complete event.
4767 if (conn->type != LE_LINK)
4771 conn->sec_level = conn->pending_sec_level;
4773 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Refresh failure on an established link: force a disconnect */
4775 if (ev->status && conn->state == BT_CONNECTED) {
4776 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4777 hci_conn_drop(conn);
4781 if (conn->state == BT_CONFIG) {
4783 conn->state = BT_CONNECTED;
4785 hci_connect_cfm(conn, ev->status);
4786 hci_conn_drop(conn);
4788 hci_auth_cfm(conn, ev->status);
4790 hci_conn_hold(conn);
4791 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4792 hci_conn_drop(conn);
4796 hci_dev_unlock(hdev);
/* Compute the authentication requirement to send in an IO Capability Reply,
 * combining the remote's request with our own auth_type and IO capability.
 * Bit 0 of the requirement value encodes MITM protection.
 * NOTE(review): extract drops some original lines (braces/early returns);
 * verify against the complete source.
 */
4799 static u8 hci_get_auth_req(struct hci_conn *conn)
/* Remote asked for MITM-protected general bonding: honour it only if both
 * sides have IO capabilities that make MITM possible */
4802 if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
4803 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4804 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4805 return HCI_AT_GENERAL_BONDING_MITM;
4809 /* If remote requests no-bonding follow that lead */
4810 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4811 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4812 return conn->remote_auth | (conn->auth_type & 0x01);
4814 /* If both remote and local have enough IO capabilities, require
4817 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4818 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4819 return conn->remote_auth | 0x01;
4821 /* No MITM protection possible so ignore remote requirement */
4822 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB-data-present value for an IO Capability Reply, based
 * on stored remote OOB data and whether Secure Connections (and SC Only
 * mode) is in effect.
 * NOTE(review): the `if (!data)` guard and the return statements for the
 * present/absent outcomes are not visible in this extract; verify against
 * the complete source.
 */
4825 static u8 bredr_oob_data_present(struct hci_conn *conn)
4827 struct hci_dev *hdev = conn->hdev;
4828 struct oob_data *data;
4830 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4834 if (bredr_sc_enabled(hdev)) {
4835 /* When Secure Connections is enabled, then just
4836 * return the present value stored with the OOB
4837 * data. The stored value contains the right present
4838 * information. However it can only be trusted when
4839 * not in Secure Connection Only mode.
4841 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4842 return data->present;
4844 /* When Secure Connections Only mode is enabled, then
4845 * the P-256 values are required. If they are not
4846 * available, then do not declare that OOB data is
4849 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4850 !memcmp(data->hash256, ZERO_KEY, 16))
4856 /* When Secure Connections is not enabled or actually
4857 * not supported by the hardware, then check that if
4858 * P-192 data values are present.
4860 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4861 !memcmp(data->hash192, ZERO_KEY, 16))
/* HCI IO Capability Request event: decide whether to allow pairing. If
 * allowed, reply with our IO capability, authentication requirement and
 * OOB data presence; otherwise send a negative reply with
 * "pairing not allowed".
 * NOTE(review): this extract is missing interior lines (locking, `if
 * (!conn)` guard, goto labels, sizeof arguments to hci_send_cmd); verify
 * against the complete source.
 */
4867 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4869 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4870 struct hci_conn *conn;
4872 BT_DBG("%s", hdev->name);
4876 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4880 hci_conn_hold(conn);
4882 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4885 /* Allow pairing if we're pairable, the initiators of the
4886 * pairing or if the remote is not requesting bonding.
4888 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4889 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4890 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4891 struct hci_cp_io_capability_reply cp;
4893 bacpy(&cp.bdaddr, &ev->bdaddr);
4894 /* Change the IO capability from KeyboardDisplay
4895 * to DisplayYesNo as it is not supported by BT spec. */
4896 cp.capability = (conn->io_capability == 0x04) ?
4897 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4899 /* If we are initiators, there is no remote information yet */
4900 if (conn->remote_auth == 0xff) {
4901 /* Request MITM protection if our IO caps allow it
4902 * except for the no-bonding case.
4904 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4905 conn->auth_type != HCI_AT_NO_BONDING)
4906 conn->auth_type |= 0x01;
4908 conn->auth_type = hci_get_auth_req(conn);
4911 /* If we're not bondable, force one of the non-bondable
4912 * authentication requirement values.
4914 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4915 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4917 cp.authentication = conn->auth_type;
4918 cp.oob_data = bredr_oob_data_present(conn);
4920 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
/* Pairing not allowed: negative reply */
4923 struct hci_cp_io_capability_neg_reply cp;
4925 bacpy(&cp.bdaddr, &ev->bdaddr);
4926 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4928 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4933 hci_dev_unlock(hdev);
/* HCI IO Capability Response event: record the remote side's IO capability
 * and authentication requirement on the connection object.
 * NOTE(review): extract drops some original lines (locking, `if (!conn)`
 * guard); verify against the complete source.
 */
4936 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4938 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4939 struct hci_conn *conn;
4941 BT_DBG("%s", hdev->name);
4945 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4949 conn->remote_cap = ev->capability;
4950 conn->remote_auth = ev->authentication;
4953 hci_dev_unlock(hdev);
/* HCI User Confirmation Request event: apply the MITM/auto-accept policy.
 * Rejects when we require MITM but the remote cannot provide it,
 * auto-accepts (optionally after a configured delay) when neither side
 * needs MITM and no local link key exists, and otherwise forwards the
 * request to user space via mgmt.
 * NOTE(review): this extract is missing interior lines (locking, guards,
 * goto labels such as confirm/unlock); verify against the complete source.
 */
4956 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4957 struct sk_buff *skb)
4959 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4960 int loc_mitm, rem_mitm, confirm_hint = 0;
4961 struct hci_conn *conn;
4963 BT_DBG("%s", hdev->name);
4967 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4970 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement encodes MITM protection */
4974 loc_mitm = (conn->auth_type & 0x01);
4975 rem_mitm = (conn->remote_auth & 0x01);
4977 /* If we require MITM but the remote device can't provide that
4978 * (it has NoInputNoOutput) then reject the confirmation
4979 * request. We check the security level here since it doesn't
4980 * necessarily match conn->auth_type.
4982 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4983 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4984 BT_DBG("Rejecting request: remote device can't provide MITM");
4985 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4986 sizeof(ev->bdaddr), &ev->bdaddr);
4990 /* If no side requires MITM protection; auto-accept */
4991 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4992 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4994 /* If we're not the initiators request authorization to
4995 * proceed from user space (mgmt_user_confirm with
4996 * confirm_hint set to 1). The exception is if neither
4997 * side had MITM or if the local IO capability is
4998 * NoInputNoOutput, in which case we do auto-accept
5000 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5001 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5002 (loc_mitm || rem_mitm)) {
5003 BT_DBG("Confirming auto-accept as acceptor");
5008 /* If there already exists link key in local host, leave the
5009 * decision to user space since the remote device could be
5010 * legitimate or malicious.
5012 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5013 bt_dev_dbg(hdev, "Local host already has link key");
5018 BT_DBG("Auto-accept of user confirmation with %ums delay",
5019 hdev->auto_accept_delay);
/* Deferred auto-accept gives pairing UIs a chance to intervene */
5021 if (hdev->auto_accept_delay > 0) {
5022 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5023 queue_delayed_work(conn->hdev->workqueue,
5024 &conn->auto_accept_work, delay);
5028 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5029 sizeof(ev->bdaddr), &ev->bdaddr);
/* Fall through to user space for the final decision */
5034 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5035 le32_to_cpu(ev->passkey), confirm_hint);
5038 hci_dev_unlock(hdev);
/* HCI User Passkey Request event: forward the request to user space via
 * mgmt when the management interface is in use.
 */
5041 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
5042 struct sk_buff *skb)
5044 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
5046 BT_DBG("%s", hdev->name);
5048 if (hci_dev_test_flag(hdev, HCI_MGMT))
5049 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* HCI User Passkey Notification event: store the passkey to be displayed
 * (and reset the entered-digit counter) and notify user space via mgmt.
 * NOTE(review): extract drops some original lines (the `if (!conn)` guard);
 * verify against the complete source.
 */
5052 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
5053 struct sk_buff *skb)
5055 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
5056 struct hci_conn *conn;
5058 BT_DBG("%s", hdev->name);
5060 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5064 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5065 conn->passkey_entered = 0;
5067 if (hci_dev_test_flag(hdev, HCI_MGMT))
5068 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5069 conn->dst_type, conn->passkey_notify,
5070 conn->passkey_entered);
/* HCI Keypress Notification event: track how many passkey digits the
 * remote has entered so far and relay the progress to user space via mgmt.
 * NOTE(review): extract drops some original lines (the switch statement
 * header, break statements, `if (!conn)` guard); verify against the
 * complete source.
 */
5073 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
5075 struct hci_ev_keypress_notify *ev = (void *) skb->data;
5076 struct hci_conn *conn;
5078 BT_DBG("%s", hdev->name);
5080 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5085 case HCI_KEYPRESS_STARTED:
5086 conn->passkey_entered = 0;
5089 case HCI_KEYPRESS_ENTERED:
5090 conn->passkey_entered++;
5093 case HCI_KEYPRESS_ERASED:
5094 conn->passkey_entered--;
5097 case HCI_KEYPRESS_CLEARED:
5098 conn->passkey_entered = 0;
5101 case HCI_KEYPRESS_COMPLETED:
5105 if (hci_dev_test_flag(hdev, HCI_MGMT))
5106 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5107 conn->dst_type, conn->passkey_notify,
5108 conn->passkey_entered);
/* HCI Simple Pairing Complete event: reset the remembered remote auth
 * requirement, report an auth failure to mgmt if we did not initiate the
 * authentication, and drop the pairing reference on the connection.
 * NOTE(review): extract drops some original lines (locking, `if (!conn)`
 * guard); verify against the complete source.
 */
5111 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
5112 struct sk_buff *skb)
5114 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
5115 struct hci_conn *conn;
5117 BT_DBG("%s", hdev->name);
5121 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5125 /* Reset the authentication requirement to unknown */
5126 conn->remote_auth = 0xff;
5128 /* To avoid duplicate auth_failed events to user space we check
5129 * the HCI_CONN_AUTH_PEND flag which will be set if we
5130 * initiated the authentication. A traditional auth_complete
5131 * event gets always produced as initiator and is also mapped to
5132 * the mgmt_auth_failed event */
5133 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5134 mgmt_auth_failed(conn, ev->status);
5136 hci_conn_drop(conn);
5139 hci_dev_unlock(hdev);
/* HCI Remote Host Supported Features Notification event: cache the remote
 * host feature page on the connection (page 1) and mirror the SSP bit into
 * the inquiry cache entry.
 * NOTE(review): extract drops some original lines (locking, `if (conn)` /
 * `if (ie)` guards); verify against the complete source.
 */
5142 static void hci_remote_host_features_evt(struct hci_dev *hdev,
5143 struct sk_buff *skb)
5145 struct hci_ev_remote_host_features *ev = (void *) skb->data;
5146 struct inquiry_entry *ie;
5147 struct hci_conn *conn;
5149 BT_DBG("%s", hdev->name);
5153 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5155 memcpy(conn->features[1], ev->features, 8);
5157 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5159 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5161 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event: answer with stored OOB data. Sends a
 * negative reply when no data is stored; otherwise replies with the
 * extended (P-192 + P-256) form when Secure Connections is enabled
 * (zeroing the P-192 values in SC Only mode), or the legacy P-192 form.
 * NOTE(review): extract drops some original lines (locking, `if (!data)`
 * branch structure, hci_send_cmd size arguments, goto labels); verify
 * against the complete source.
 */
5164 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5165 struct sk_buff *skb)
5167 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5168 struct oob_data *data;
5170 BT_DBG("%s", hdev->name);
5174 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5177 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data: negative reply */
5179 struct hci_cp_remote_oob_data_neg_reply cp;
5181 bacpy(&cp.bdaddr, &ev->bdaddr);
5182 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5187 if (bredr_sc_enabled(hdev)) {
5188 struct hci_cp_remote_oob_ext_data_reply cp;
5190 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC Only mode, only the P-256 values may be used */
5191 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5192 memset(cp.hash192, 0, sizeof(cp.hash192));
5193 memset(cp.rand192, 0, sizeof(cp.rand192));
5195 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5196 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5198 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5199 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5201 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy (non-SC) reply: P-192 values only */
5204 struct hci_cp_remote_oob_data_reply cp;
5206 bacpy(&cp.bdaddr, &ev->bdaddr);
5207 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5208 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5210 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5215 hci_dev_unlock(hdev);
5218 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (CONFIG_BT_HS): look up the physical link and
 * kick off reading the final local AMP association data.
 * NOTE(review): extract drops some original lines (`if (!hcon)` guard);
 * verify against the complete source.
 */
5219 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5221 struct hci_ev_channel_selected *ev = (void *)skb->data;
5222 struct hci_conn *hcon;
5224 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5226 skb_pull(skb, sizeof(*ev));
5228 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5232 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: on success, mark the AMP link
 * connected, copy the peer address from the underlying BR/EDR connection,
 * register debugfs/sysfs entries and confirm the physical link to the AMP
 * manager.
 * NOTE(review): extract drops some original lines (locking, status/guard
 * checks between lookup and use); verify against the complete source.
 */
5235 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5236 struct sk_buff *skb)
5238 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5239 struct hci_conn *hcon, *bredr_hcon;
5241 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5246 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* The AMP link rides on top of an existing BR/EDR connection */
5258 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5260 hcon->state = BT_CONNECTED;
5261 bacpy(&hcon->dst, &bredr_hcon->dst);
5263 hci_conn_hold(hcon);
5264 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5265 hci_conn_drop(hcon);
5267 hci_debugfs_create_conn(hcon);
5268 hci_conn_add_sysfs(hcon);
5270 amp_physical_cfm(bredr_hcon, hcon);
5273 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create an hci_chan for the new logical
 * link and, if a bridged BR/EDR L2CAP channel is pending, confirm the
 * logical link to L2CAP with the AMP block MTU.
 * NOTE(review): extract drops some original lines (guards for `!hcon` /
 * `!hchan`, status checks, locking); verify against the complete source.
 */
5276 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5278 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5279 struct hci_conn *hcon;
5280 struct hci_chan *hchan;
5281 struct amp_mgr *mgr;
5283 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5284 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5287 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5291 /* Create AMP hchan */
5292 hchan = hci_chan_create(hcon);
5296 hchan->handle = le16_to_cpu(ev->handle);
5299 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5301 mgr = hcon->amp_mgr;
5302 if (mgr && mgr->bredr_chan) {
5303 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5305 l2cap_chan_lock(bredr_chan);
/* AMP data path uses the controller's block MTU */
5307 bredr_chan->conn->mtu = hdev->block_mtu;
5308 l2cap_logical_cfm(bredr_chan, hchan, 0);
5309 hci_conn_hold(hcon);
5311 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event: find the AMP channel for
 * the handle and tear the logical link down with the reported reason.
 * NOTE(review): extract drops some original lines (status check, locking,
 * goto label); verify against the complete source.
 */
5315 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5316 struct sk_buff *skb)
5318 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5319 struct hci_chan *hchan;
5321 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5322 le16_to_cpu(ev->handle), ev->status);
5329 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are handled here */
5330 if (!hchan || !hchan->amp)
5333 amp_destroy_logical_link(hchan, ev->reason);
5336 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: close the AMP connection
 * object and deliver the disconnect confirmation with the reported reason.
 * NOTE(review): extract drops some original lines (status check, locking);
 * verify against the complete source.
 */
5339 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5340 struct sk_buff *skb)
5342 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5343 struct hci_conn *hcon;
5345 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5352 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* Only act on actual AMP links for this physical handle */
5353 if (hcon && hcon->type == AMP_LINK) {
5354 hcon->state = BT_CLOSED;
5355 hci_disconn_cfm(hcon, ev->reason);
5359 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address fields of an LE connection based
 * on our role. As central (outgoing): peer is the responder, our initiator
 * address is the controller-reported local RPA, hdev->rpa (when privacy is
 * on) or the identity address. As peripheral (incoming): we are the
 * responder, using the local RPA, random address, or public address as
 * appropriate, and default connection interval bounds are seeded.
 * NOTE(review): extract drops some original lines (the role test that
 * selects between the two branches, closing braces); verify against the
 * complete source.
 */
5363 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5364 u8 bdaddr_type, bdaddr_t *local_rpa)
5367 conn->dst_type = bdaddr_type;
5368 conn->resp_addr_type = bdaddr_type;
5369 bacpy(&conn->resp_addr, bdaddr);
5371 /* Check if the controller has set a Local RPA then it must be
5372 * used instead or hdev->rpa.
5374 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5375 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5376 bacpy(&conn->init_addr, local_rpa);
5377 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5378 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5379 bacpy(&conn->init_addr, &conn->hdev->rpa);
5381 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5382 &conn->init_addr_type);
/* Incoming connection: we responded, so resp_addr is our own address */
5385 conn->resp_addr_type = conn->hdev->adv_addr_type;
5386 /* Check if the controller has set a Local RPA then it must be
5387 * used instead or hdev->rpa.
5389 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5390 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5391 bacpy(&conn->resp_addr, local_rpa);
5392 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5393 /* In case of ext adv, resp_addr will be updated in
5394 * Adv Terminated event.
5396 if (!ext_adv_capable(conn->hdev))
5397 bacpy(&conn->resp_addr,
5398 &conn->hdev->random_addr);
5400 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5403 conn->init_addr_type = bdaddr_type;
5404 bacpy(&conn->init_addr, bdaddr);
5406 /* For incoming connections, set the default minimum
5407 * and maximum connection interval. They will be used
5408 * to check if the parameters are in range and if not
5409 * trigger the connection update procedure.
5411 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5412 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common worker for the LE Connection Complete and LE Enhanced Connection
 * Complete events: locate or create the hci_conn, resolve the peer's
 * identity address via the IRK store, normalize controller-resolved
 * address types, reject blocked devices, transition the connection to
 * CONFIG/CONNECTED (optionally reading remote features), and clean up any
 * pending LE connection parameters.
 * NOTE(review): this extract is missing many interior lines (locking, role
 * checks, status handling, `addr_type` declaration, goto labels); verify
 * against the complete source.
 */
5416 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5417 bdaddr_t *bdaddr, u8 bdaddr_type,
5418 bdaddr_t *local_rpa, u8 role, u16 handle,
5419 u16 interval, u16 latency,
5420 u16 supervision_timeout)
5422 struct hci_conn_params *params;
5423 struct hci_conn *conn;
5424 struct smp_irk *irk;
5429 /* All controllers implicitly stop advertising in the event of a
5430 * connection, so ensure that the state bit is cleared.
5432 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5434 conn = hci_lookup_le_connect(hdev);
5436 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5438 bt_dev_err(hdev, "no memory for new connection");
5442 conn->dst_type = bdaddr_type;
5444 /* If we didn't have a hci_conn object previously
5445 * but we're in central role this must be something
5446 * initiated using an accept list. Since accept list based
5447 * connections are not "first class citizens" we don't
5448 * have full tracking of them. Therefore, we go ahead
5449 * with a "best effort" approach of determining the
5450 * initiator address based on the HCI_PRIVACY flag.
5453 conn->resp_addr_type = bdaddr_type;
5454 bacpy(&conn->resp_addr, bdaddr);
5455 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5456 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5457 bacpy(&conn->init_addr, &hdev->rpa);
5459 hci_copy_identity_address(hdev,
5461 &conn->init_addr_type);
5466 /* LE auto connect */
5467 bacpy(&conn->dst, bdaddr);
/* A connection attempt succeeded or failed: stop the LE connect timer */
5469 cancel_delayed_work(&conn->le_conn_timeout);
5472 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5474 /* Lookup the identity address from the stored connection
5475 * address and address type.
5477 * When establishing connections to an identity address, the
5478 * connection procedure will store the resolvable random
5479 * address first. Now if it can be converted back into the
5480 * identity address, start using the identity address from
5483 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5485 bacpy(&conn->dst, &irk->bdaddr);
5486 conn->dst_type = irk->addr_type;
5489 /* When using controller based address resolution, then the new
5490 * address types 0x02 and 0x03 are used. These types need to be
5491 * converted back into either public address or random address type
5493 if (use_ll_privacy(hdev) &&
5494 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5495 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5496 switch (conn->dst_type) {
5497 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5498 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5500 case ADDR_LE_DEV_RANDOM_RESOLVED:
5501 conn->dst_type = ADDR_LE_DEV_RANDOM;
/* Non-zero status: report the failed LE connection and bail out */
5507 hci_le_conn_failed(conn, status);
5511 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5512 addr_type = BDADDR_LE_PUBLIC;
5514 addr_type = BDADDR_LE_RANDOM;
5516 /* Drop the connection if the device is blocked */
5517 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5518 hci_conn_drop(conn);
5522 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5523 mgmt_device_connected(hdev, conn, NULL, 0);
5525 conn->sec_level = BT_SECURITY_LOW;
5526 conn->handle = handle;
5527 conn->state = BT_CONFIG;
5529 /* Store current advertising instance as connection advertising instance
5530 * when sotfware rotation is in use so it can be re-enabled when
5533 if (!ext_adv_capable(hdev))
5534 conn->adv_instance = hdev->cur_adv_instance;
5536 conn->le_conn_interval = interval;
5537 conn->le_conn_latency = latency;
5538 conn->le_supv_timeout = supervision_timeout;
5540 hci_debugfs_create_conn(conn);
5541 hci_conn_add_sysfs(conn);
5543 /* The remote features procedure is defined for central
5544 * role only. So only in case of an initiated connection
5545 * request the remote features.
5547 * If the local controller supports peripheral-initiated features
5548 * exchange, then requesting the remote features in peripheral
5549 * role is possible. Otherwise just transition into the
5550 * connected state without requesting the remote features.
5553 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5554 struct hci_cp_le_read_remote_features cp;
5556 cp.handle = __cpu_to_le16(conn->handle);
5558 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5561 hci_conn_hold(conn);
/* No feature exchange possible: go straight to connected */
5563 conn->state = BT_CONNECTED;
5564 hci_connect_cfm(conn, status);
/* A pending auto-connect entry has now served its purpose */
5567 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5570 list_del_init(&params->action);
5572 hci_conn_drop(params->conn);
5573 hci_conn_put(params->conn);
5574 params->conn = NULL;
5579 hci_update_background_scan(hdev);
5580 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event: thin wrapper that byte-swaps the event
 * fields and hands off to le_conn_complete_evt() (no local RPA available
 * in this legacy event, hence the NULL).
 */
5583 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5585 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5587 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5589 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5590 NULL, ev->role, le16_to_cpu(ev->handle),
5591 le16_to_cpu(ev->interval),
5592 le16_to_cpu(ev->latency),
5593 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Enhanced Connection Complete event: like the legacy variant but
 * also passes the controller-reported local RPA. When controller-based
 * address resolution was in use, disable it again after the connection
 * is processed.
 */
5596 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5597 struct sk_buff *skb)
5599 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5601 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5603 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5604 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5605 le16_to_cpu(ev->interval),
5606 le16_to_cpu(ev->latency),
5607 le16_to_cpu(ev->supervision_timeout));
5609 if (use_ll_privacy(hdev) &&
5610 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5611 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5612 hci_req_disable_address_resolution(hdev);
/* HCI LE Advertising Set Terminated event: on error, remove the
 * terminated advertising instance; on success (connection established),
 * mark the set disabled, remember the instance on the connection for
 * later re-enabling, and fix up our responder address from the set's
 * random address.
 * NOTE(review): this extract is missing interior lines (status branches,
 * `if (!conn)` guard, the legacy-vs-per-instance random address split);
 * verify against the complete source.
 */
5615 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5617 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5618 struct hci_conn *conn;
5619 struct adv_info *adv;
5621 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5623 adv = hci_find_adv_instance(hdev, ev->handle);
5629 /* Remove advertising as it has been terminated */
5630 hci_remove_adv_instance(hdev, ev->handle);
5631 mgmt_advertising_removed(NULL, hdev, ev->handle);
5637 adv->enabled = false;
5639 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5641 /* Store handle in the connection so the correct advertising
5642 * instance can be re-enabled when disconnected.
5644 conn->adv_instance = ev->handle;
5646 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5647 bacmp(&conn->resp_addr, BDADDR_ANY))
/* resp_addr was deferred from the connection complete event for ext adv */
5651 bacpy(&conn->resp_addr, &hdev->random_addr);
5656 bacpy(&conn->resp_addr, &adv->random_addr);
/* HCI LE Connection Update Complete event: on failure, report the failed
 * update to mgmt; on success, store the new connection parameters and
 * report the update to mgmt.
 * NOTE(review): extract drops some original lines (locking, `if (!conn)`
 * guard, status branch structure, early returns); verify against the
 * complete source.
 */
5660 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5661 struct sk_buff *skb)
5663 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5664 struct hci_conn *conn;
5666 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5673 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Failed update: notify user space with the error status */
5677 hci_dev_unlock(hdev);
5678 mgmt_le_conn_update_failed(hdev, &conn->dst,
5679 conn->type, conn->dst_type, ev->status);
/* Successful update: cache the newly negotiated parameters */
5683 conn->le_conn_interval = le16_to_cpu(ev->interval);
5684 conn->le_conn_latency = le16_to_cpu(ev->latency);
5685 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5688 hci_dev_unlock(hdev);
5691 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
5692 conn->dst_type, conn->le_conn_interval,
5693 conn->le_conn_latency, conn->le_supv_timeout);
/* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an outgoing
 * LE connection, and create it if so.  Returns the new hci_conn on success
 * (used by process_adv_report to attach the adv data), otherwise NULL —
 * the elided early-return lines are not visible in this extraction.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      u8 addr_type, u8 adv_type,
					      bdaddr_t *direct_rpa)
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in peripheral role.
	 * le_states[3] bit 4 advertises support for that combination.
	 */
	if (hdev->conn_hash.le_num_peripheral > 0 &&
	    (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
	     !(hdev->le_states[3] & 0x10)))

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,

	/* Explicit connects are driven by a higher layer and skip the
	 * auto-connect policy filtering below.
	 */
	if (!params->explicit_connect) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
			/* Only devices advertising with ADV_DIRECT_IND are
			 * triggering a connection attempt. This is allowing
			 * incoming connections from peripheral devices.
			 */
			if (adv_type != LE_ADV_DIRECT_IND)
		case HCI_AUTO_CONN_ALWAYS:
			/* Devices advertising with ADV_IND or ADV_DIRECT_IND
			 * are triggering a connection attempt. This means
			 * that incoming connections from peripheral device are
			 * accepted and also outgoing connections to peripheral
			 * devices are established when found.
			 */

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
	if (!IS_ERR(conn)) {
		/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
		 * by higher layer that tried to connect, if no then
		 * store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		if (!params->explicit_connect)
			params->conn = hci_conn_get(conn);

	switch (PTR_ERR(conn)) {
	/* If hci_connect() returns -EBUSY it means there is already
	 * an LE connection attempt going on. Since controllers don't
	 * support more than one connection attempt at the time, we
	 * don't consider this an error case.
	 */
	BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising report processing shared by the legacy, extended and
 * direct advertising report handlers.
 *
 * Responsibilities visible in this block:
 *  - validate the PDU type and (for legacy adv) the 31-byte length limit;
 *  - trim trailing zero padding from the adv data;
 *  - verify directed reports target our own RPA;
 *  - resolve the advertiser's RPA to an identity address via stored IRKs;
 *  - kick off a pending connection (check_pending_le_conn);
 *  - emit/merge mgmt Device Found events, caching one pending report so an
 *    ADV_IND/ADV_SCAN_IND can be merged with its following SCAN_RSP.
 *
 * Caller context: invoked from the LE meta event handlers with hdev locked
 * (the lock/unlock lines are elided in this extraction).
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;

	/* Sanity check on the advertising PDU type; unknown types are
	 * dropped with a rate-limited error.
	 */
	case LE_ADV_DIRECT_IND:
	case LE_ADV_SCAN_IND:
	case LE_ADV_NONCONN_IND:
	case LE_ADV_SCAN_RSP:
	bt_dev_err_ratelimited(hdev, "unknown advertising packet "
			       "type: 0x%02x", type);

	/* Legacy advertising payloads are capped at 31 bytes by the spec */
	if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
		bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");

	/* Find the end of the data in case the report contains padded zero
	 * bytes at the end causing an invalid length value.
	 *
	 * When data is NULL, len is 0 so there is no need for extra ptr
	 * check as 'ptr < data + 0' is already false in such case.
	 */
	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
		if (ptr + 1 + *ptr > data + len)

	/* Adjust for actual length. This handles the case when remote
	 * device is advertising with incorrect data length.
	 */

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	/* Only resolvable random addresses are valid for these
	 * kind of reports and others can be ignored.
	 */
	if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))

	/* If the controller is not using resolvable random
	 * addresses, then this report can be ignored.
	 */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))

	/* If the local IRK of the controller does not match
	 * with the resolvable random address provided, then
	 * this report can be ignored.
	 */
	if (!smp_irk_matches(hdev, hdev->irk, direct_addr))

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	bdaddr = &irk->bdaddr;
	bdaddr_type = irk->addr_type;

	/* Check if we have been requested to connect to this device.
	 *
	 * direct_addr is set only for directed advertising reports (it is NULL
	 * for advertising reports) and is already verified to be RPA above.
	 */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
	if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events, or advertisement monitoring requested.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)

		/* Handle all adv packet in platform */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type) &&
		    idr_is_empty(&hdev->adv_monitors_idr))

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;

		/* Vendor variant: mgmt_le_device_found carries the raw adv
		 * type in addition to the standard device-found payload.
		 */
		mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				     rssi, flags, data, len, NULL, 0, type);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;

	/* Disable adv ind and scan rsp merging */
	mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
			     rssi, flags, data, len, NULL, 0, type);

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!ext_adv && !has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging with the SCAN_RSP.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
				  d->last_adv_addr_type, NULL,
				  d->last_adv_rssi, d->last_adv_flags,
				  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (!ext_adv && (type == LE_ADV_IND ||
				 type == LE_ADV_SCAN_IND)) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
/* Handle LE Advertising Report event: iterate the packed reports and feed
 * each to process_adv_report().  Each report is a variable-length
 * hci_ev_le_advertising_info followed by one RSSI byte.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		/* Bounds check: the fixed header of the next report must fit
		 * inside the skb, otherwise the controller/peer is lying
		 * about num_reports.
		 */
		if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
			bt_dev_err(hdev, "Malicious advertising data.");

		/* ev->length must stay within both the spec limit and the
		 * actual skb; the RSSI byte sits immediately after the data.
		 */
		if (ev->length <= HCI_MAX_AD_LENGTH &&
		    ev->data + ev->length <= skb_tail_pointer(skb)) {
			rssi = ev->data[ev->length];
			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, rssi,
					   ev->data, ev->length, false);
			bt_dev_err(hdev, "Dropping invalid advertising data");

		/* +1 accounts for the trailing RSSI byte */
		ptr += sizeof(*ev) + ev->length + 1;

	hci_dev_unlock(hdev);
/* Map an extended advertising report's 16-bit event type to the legacy
 * single-byte advertising PDU type used by process_adv_report().
 * Returns LE_ADV_INVALID for combinations that have no legacy equivalent.
 */
static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
	/* Legacy PDUs carry an exact encoding; match it directly */
	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
		case LE_LEGACY_ADV_IND:
		case LE_LEGACY_ADV_DIRECT_IND:
			return LE_ADV_DIRECT_IND;
		case LE_LEGACY_ADV_SCAN_IND:
			return LE_ADV_SCAN_IND;
		case LE_LEGACY_NONCONN_IND:
			return LE_ADV_NONCONN_IND;
		case LE_LEGACY_SCAN_RSP_ADV:
		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
			return LE_ADV_SCAN_RSP;

	/* True extended PDUs: derive the closest legacy type from the
	 * individual property bits, most-specific first.
	 */
	if (evt_type & LE_EXT_ADV_CONN_IND) {
		if (evt_type & LE_EXT_ADV_DIRECT_IND)
			return LE_ADV_DIRECT_IND;

	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return LE_ADV_SCAN_RSP;

	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return LE_ADV_SCAN_IND;

	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
	    evt_type & LE_EXT_ADV_DIRECT_IND)
		return LE_ADV_NONCONN_IND;

	bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",

	return LE_ADV_INVALID;
/* Handle LE Extended Advertising Report event: translate each report's
 * extended event type to a legacy type and hand it to process_adv_report().
 * Unlike the legacy handler there is no trailing RSSI byte — RSSI is a
 * field of the report header.
 * NOTE(review): no explicit skb bounds check on ev/ev->length is visible
 * here (legacy handler has one) — confirm against the full file.
 */
static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	while (num_reports--) {
		struct hci_ev_le_ext_adv_report *ev = ptr;

		evt_type = __le16_to_cpu(ev->evt_type);
		legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
		if (legacy_evt_type != LE_ADV_INVALID) {
			/* ext_adv flag is true only for true extended PDUs */
			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
					   ev->bdaddr_type, NULL, 0, ev->rssi,
					   ev->data, ev->length,
					   !(evt_type & LE_EXT_ADV_LEGACY_PDU));

		ptr += sizeof(*ev) + ev->length;

	hci_dev_unlock(hdev);
/* Handle LE Read Remote Features Complete event: cache the remote LE
 * feature mask and, if the connection was waiting in BT_CONFIG, move it
 * to BT_CONNECTED and notify the upper layers.
 */
static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	/* Page 0 of the remote LE features (8 bytes) */
	memcpy(conn->features[0], ev->features, 8);

	if (conn->state == BT_CONFIG) {
		/* If the local controller supports peripheral-initiated
		 * features exchange, but the remote controller does
		 * not, then it is possible that the error code 0x1a
		 * for unsupported remote feature gets returned.
		 *
		 * In this specific case, allow the connection to
		 * transition into connected state and mark it as
		 * successful.
		 */
		if (!conn->out && ev->status == 0x1a &&
		    (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
		status = ev->status;

		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, status);
		/* Drop the reference taken while the connection was
		 * pending configuration.
		 */
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Handle LE Long Term Key Request event: look up a stored LTK for the peer
 * and reply with it, or send a negative reply when no usable key exists
 * (the goto-to-neg-reply lines are elided in this extraction).
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)

	/* Copy the key and zero-pad up to the 16-byte reply field, since
	 * enc_size may be shorter than sizeof(cp.ltk).
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* One-shot key: remove it so it cannot be reused */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);

	hci_dev_unlock(hdev);

	/* Negative reply path: no matching key for this request */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
/* Reject an LE Remote Connection Parameter Request with the given handle
 * and reason code.
 * NOTE(review): the 'reason' parameter line and the 'cp.reason = reason;'
 * assignment appear elided in this extraction — confirm against full file.
 */
static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
	struct hci_cp_le_conn_param_req_neg_reply cp;

	cp.handle = cpu_to_le16(handle);
	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle LE Remote Connection Parameter Request event: validate the peer's
 * proposed parameters, record them (and notify userspace) when we are
 * central, then accept with a positive reply — or reject via
 * send_conn_param_neg_reply() for unknown handles / invalid parameters.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Range/consistency validation per the spec; nonzero means invalid */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	/* As central we own the stored per-device parameters; update them
	 * with what the peripheral asked for and tell userspace.
	 */
	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;

		params = hci_conn_params_lookup(hdev, &hcon->dst,
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = timeout;

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);

	/* Accept the request echoing the peer's values (already LE order) */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle LE Direct Advertising Report event: fixed-size reports (no adv
 * data), so a single up-front length check covers the whole array.
 */
static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
	u8 num_reports = skb->data[0];
	struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];

	/* Reject empty or truncated events before touching the reports */
	if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)

	for (; num_reports; num_reports--, ev++)
		/* data == NULL / len == 0: direct adv carries no AD payload */
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, &ev->direct_addr,
				   ev->direct_addr_type, ev->rssi, NULL, 0,

	hci_dev_unlock(hdev);
/* Handle LE PHY Update Complete event: cache the negotiated TX/RX PHYs on
 * the connection (status check lines are elided in this extraction).
 */
static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));

	conn->le_tx_phy = ev->tx_phy;
	conn->le_rx_phy = ev->rx_phy;

	hci_dev_unlock(hdev);
/* Demultiplex the LE Meta event (0x3e) to the subevent-specific handler.
 * The meta header is pulled first, so each handler sees skb->data pointing
 * at its own subevent parameters.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);

	case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
		hci_le_remote_feat_complete_evt(hdev, skb);

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);

	case HCI_EV_LE_PHY_UPDATE_COMPLETE:
		hci_le_phy_update_evt(hdev, skb);

	case HCI_EV_LE_EXT_ADV_REPORT:
		hci_le_ext_adv_report_evt(hdev, skb);

	case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
		hci_le_enh_conn_complete_evt(hdev, skb);

	case HCI_EV_LE_EXT_ADV_SET_TERM:
		hci_le_ext_adv_term_evt(hdev, skb);
/* Verify that 'skb' is the Command Complete (or expected vendor 'event')
 * terminating the request for 'opcode', pulling headers so the caller's
 * req_complete_skb callback receives only the return parameters.
 * Returns true when the skb matches, false otherwise (the return-value
 * lines are elided in this extraction).
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	if (skb->len < sizeof(*hdr)) {
		bt_dev_err(hdev, "too short HCI event");

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* 'event' nonzero means the request expected a specific
	 * (e.g. vendor) event rather than Command Complete.
	 */
	if (hdr->evt != event)

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",

	if (skb->len < sizeof(*ev)) {
		bt_dev_err(hdev, "too short cmd_complete event");

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
6438 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6439 struct sk_buff *skb)
6441 struct hci_ev_le_advertising_info *adv;
6442 struct hci_ev_le_direct_adv_info *direct_adv;
6443 struct hci_ev_le_ext_adv_report *ext_adv;
6444 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6445 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6449 /* If we are currently suspended and this is the first BT event seen,
6450 * save the wake reason associated with the event.
6452 if (!hdev->suspended || hdev->wake_reason)
6455 /* Default to remote wake. Values for wake_reason are documented in the
6456 * Bluez mgmt api docs.
6458 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6460 /* Once configured for remote wakeup, we should only wake up for
6461 * reconnections. It's useful to see which device is waking us up so
6462 * keep track of the bdaddr of the connection event that woke us up.
6464 if (event == HCI_EV_CONN_REQUEST) {
6465 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6466 hdev->wake_addr_type = BDADDR_BREDR;
6467 } else if (event == HCI_EV_CONN_COMPLETE) {
6468 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6469 hdev->wake_addr_type = BDADDR_BREDR;
6470 } else if (event == HCI_EV_LE_META) {
6471 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6472 u8 subevent = le_ev->subevent;
6473 u8 *ptr = &skb->data[sizeof(*le_ev)];
6474 u8 num_reports = *ptr;
6476 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6477 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6478 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6480 adv = (void *)(ptr + 1);
6481 direct_adv = (void *)(ptr + 1);
6482 ext_adv = (void *)(ptr + 1);
6485 case HCI_EV_LE_ADVERTISING_REPORT:
6486 bacpy(&hdev->wake_addr, &adv->bdaddr);
6487 hdev->wake_addr_type = adv->bdaddr_type;
6489 case HCI_EV_LE_DIRECT_ADV_REPORT:
6490 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6491 hdev->wake_addr_type = direct_adv->bdaddr_type;
6493 case HCI_EV_LE_EXT_ADV_REPORT:
6494 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6495 hdev->wake_addr_type = ext_adv->bdaddr_type;
6500 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6504 hci_dev_unlock(hdev);
6507 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6509 struct hci_event_hdr *hdr = (void *) skb->data;
6510 hci_req_complete_t req_complete = NULL;
6511 hci_req_complete_skb_t req_complete_skb = NULL;
6512 struct sk_buff *orig_skb = NULL;
6513 u8 status = 0, event = hdr->evt, req_evt = 0;
6514 u16 opcode = HCI_OP_NOP;
6517 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6521 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6522 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6523 opcode = __le16_to_cpu(cmd_hdr->opcode);
6524 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6529 /* If it looks like we might end up having to call
6530 * req_complete_skb, store a pristine copy of the skb since the
6531 * various handlers may modify the original one through
6532 * skb_pull() calls, etc.
6534 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6535 event == HCI_EV_CMD_COMPLETE)
6536 orig_skb = skb_clone(skb, GFP_KERNEL);
6538 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6540 /* Store wake reason if we're suspended */
6541 hci_store_wake_reason(hdev, event, skb);
6544 case HCI_EV_INQUIRY_COMPLETE:
6545 hci_inquiry_complete_evt(hdev, skb);
6548 case HCI_EV_INQUIRY_RESULT:
6549 hci_inquiry_result_evt(hdev, skb);
6552 case HCI_EV_CONN_COMPLETE:
6553 hci_conn_complete_evt(hdev, skb);
6556 case HCI_EV_CONN_REQUEST:
6557 hci_conn_request_evt(hdev, skb);
6560 case HCI_EV_DISCONN_COMPLETE:
6561 hci_disconn_complete_evt(hdev, skb);
6564 case HCI_EV_AUTH_COMPLETE:
6565 hci_auth_complete_evt(hdev, skb);
6568 case HCI_EV_REMOTE_NAME:
6569 hci_remote_name_evt(hdev, skb);
6572 case HCI_EV_ENCRYPT_CHANGE:
6573 hci_encrypt_change_evt(hdev, skb);
6576 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6577 hci_change_link_key_complete_evt(hdev, skb);
6580 case HCI_EV_REMOTE_FEATURES:
6581 hci_remote_features_evt(hdev, skb);
6584 case HCI_EV_CMD_COMPLETE:
6585 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6586 &req_complete, &req_complete_skb);
6589 case HCI_EV_CMD_STATUS:
6590 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6594 case HCI_EV_HARDWARE_ERROR:
6595 hci_hardware_error_evt(hdev, skb);
6598 case HCI_EV_ROLE_CHANGE:
6599 hci_role_change_evt(hdev, skb);
6602 case HCI_EV_NUM_COMP_PKTS:
6603 hci_num_comp_pkts_evt(hdev, skb);
6606 case HCI_EV_MODE_CHANGE:
6607 hci_mode_change_evt(hdev, skb);
6610 case HCI_EV_PIN_CODE_REQ:
6611 hci_pin_code_request_evt(hdev, skb);
6614 case HCI_EV_LINK_KEY_REQ:
6615 hci_link_key_request_evt(hdev, skb);
6618 case HCI_EV_LINK_KEY_NOTIFY:
6619 hci_link_key_notify_evt(hdev, skb);
6622 case HCI_EV_CLOCK_OFFSET:
6623 hci_clock_offset_evt(hdev, skb);
6626 case HCI_EV_PKT_TYPE_CHANGE:
6627 hci_pkt_type_change_evt(hdev, skb);
6630 case HCI_EV_PSCAN_REP_MODE:
6631 hci_pscan_rep_mode_evt(hdev, skb);
6634 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6635 hci_inquiry_result_with_rssi_evt(hdev, skb);
6638 case HCI_EV_REMOTE_EXT_FEATURES:
6639 hci_remote_ext_features_evt(hdev, skb);
6642 case HCI_EV_SYNC_CONN_COMPLETE:
6643 hci_sync_conn_complete_evt(hdev, skb);
6646 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6647 hci_extended_inquiry_result_evt(hdev, skb);
6650 case HCI_EV_KEY_REFRESH_COMPLETE:
6651 hci_key_refresh_complete_evt(hdev, skb);
6654 case HCI_EV_IO_CAPA_REQUEST:
6655 hci_io_capa_request_evt(hdev, skb);
6658 case HCI_EV_IO_CAPA_REPLY:
6659 hci_io_capa_reply_evt(hdev, skb);
6662 case HCI_EV_USER_CONFIRM_REQUEST:
6663 hci_user_confirm_request_evt(hdev, skb);
6666 case HCI_EV_USER_PASSKEY_REQUEST:
6667 hci_user_passkey_request_evt(hdev, skb);
6670 case HCI_EV_USER_PASSKEY_NOTIFY:
6671 hci_user_passkey_notify_evt(hdev, skb);
6674 case HCI_EV_KEYPRESS_NOTIFY:
6675 hci_keypress_notify_evt(hdev, skb);
6678 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6679 hci_simple_pair_complete_evt(hdev, skb);
6682 case HCI_EV_REMOTE_HOST_FEATURES:
6683 hci_remote_host_features_evt(hdev, skb);
6686 case HCI_EV_LE_META:
6687 hci_le_meta_evt(hdev, skb);
6690 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6691 hci_remote_oob_data_request_evt(hdev, skb);
6694 #if IS_ENABLED(CONFIG_BT_HS)
6695 case HCI_EV_CHANNEL_SELECTED:
6696 hci_chan_selected_evt(hdev, skb);
6699 case HCI_EV_PHY_LINK_COMPLETE:
6700 hci_phy_link_complete_evt(hdev, skb);
6703 case HCI_EV_LOGICAL_LINK_COMPLETE:
6704 hci_loglink_complete_evt(hdev, skb);
6707 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6708 hci_disconn_loglink_complete_evt(hdev, skb);
6711 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6712 hci_disconn_phylink_complete_evt(hdev, skb);
6716 case HCI_EV_NUM_COMP_BLOCKS:
6717 hci_num_comp_blocks_evt(hdev, skb);
6721 case HCI_EV_VENDOR_SPECIFIC:
6722 hci_vendor_specific_evt(hdev, skb);
6726 msft_vendor_evt(hdev, skb);
6731 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6736 req_complete(hdev, status, opcode);
6737 } else if (req_complete_skb) {
6738 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6739 kfree_skb(orig_skb);
6742 req_complete_skb(hdev, status, opcode, orig_skb);
6746 kfree_skb(orig_skb);
6748 hdev->stat.evt_rx++;