2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 "\x00\x00\x00\x00\x00\x00\x00\x00"
43 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45 /* Handle HCI Event packets */
/* Command Complete handler for HCI_Inquiry_Cancel: clears the HCI_INQUIRY
 * flag, wakes waiters on it, and moves discovery to STOPPED unless an LE
 * active scan is still running.
 * NOTE(review): this listing elides some original lines (error-status guard
 * and braces) — verify against the full file before changing control flow. */
47 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%2.2x", hdev->name, status);
54 /* It is possible that we receive Inquiry Complete event right
55 * before we receive Inquiry Cancel Command Complete event, in
56 * which case the latter event should have status of Command
57 * Disallowed (0x0c). This should not be treated as error, since
58 * we actually achieve what Inquiry Cancel wants to achieve,
59 * which is to end the last Inquiry session.
61 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
62 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
73 wake_up_bit(&hdev->flags, HCI_INQUIRY);
76 /* Set discovery state to stopped if we're not doing LE active
79 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
80 hdev->le_scan_type != LE_SCAN_ACTIVE)
81 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
84 hci_conn_check_pending(hdev);
/* Command Complete handler for HCI_Periodic_Inquiry_Mode: on success
 * (guard elided in this listing) sets the HCI_PERIODIC_INQ device flag. */
87 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 __u8 status = *((__u8 *) skb->data);
91 BT_DBG("%s status 0x%2.2x", hdev->name, status);
96 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete handler for HCI_Exit_Periodic_Inquiry_Mode: clears
 * HCI_PERIODIC_INQ and kicks any pending connection attempts. */
99 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 __u8 status = *((__u8 *) skb->data);
103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
108 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 hci_conn_check_pending(hdev);
/* Command Complete handler for HCI_Remote_Name_Request_Cancel:
 * nothing to update — only emits a debug trace. */
113 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
116 BT_DBG("%s", hdev->name);
/* Command Complete handler for HCI_Role_Discovery: looks up the connection
 * by handle and records its current role (central/peripheral) under the
 * device lock (lock acquisition elided in this listing). */
119 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 struct hci_rp_role_discovery *rp = (void *) skb->data;
122 struct hci_conn *conn;
124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 conn->role = rp->role;
135 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Link_Policy_Settings: caches the
 * reported policy on the matching connection object. */
138 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 struct hci_rp_read_link_policy *rp = (void *) skb->data;
141 struct hci_conn *conn;
143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = __le16_to_cpu(rp->policy);
154 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Write_Link_Policy_Settings: mirrors the
 * policy we sent onto the connection object. If a SCO link to the same
 * peer exists and SNIFF was just enabled, immediately re-issues the command
 * with SNIFF masked out, since sniff mode breaks SCO audio. */
157 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_write_link_policy *rp = (void *) skb->data;
160 struct hci_conn *conn;
163 struct hci_cp_write_link_policy cp;
164 struct hci_conn *sco_conn;
167 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
172 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
178 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180 conn->link_policy = get_unaligned_le16(sent + 2);
183 sco_conn = hci_conn_hash_lookup_sco(hdev);
184 if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
185 conn->link_policy & HCI_LP_SNIFF) {
186 BT_ERR("SNIFF is not allowed during sco connection");
187 cp.handle = __cpu_to_le16(conn->handle);
188 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
189 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
193 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Default_Link_Policy_Settings:
 * caches the controller's default link policy in hdev. */
196 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
199 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
201 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
206 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete handler for HCI_Write_Default_Link_Policy_Settings:
 * records the policy value taken from the command we sent, not the event
 * (the event only carries a status). */
209 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
212 __u8 status = *((__u8 *) skb->data);
215 BT_DBG("%s status 0x%2.2x", hdev->name, status);
220 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
224 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete handler for HCI_Reset: clears HCI_RESET and returns all
 * volatile per-device state (flags, discovery, TX power, adv/scan-response
 * data, LE scan type, SSP debug mode, LE accept/resolving lists) to its
 * power-on defaults. */
227 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
229 __u8 status = *((__u8 *) skb->data);
231 BT_DBG("%s status 0x%2.2x", hdev->name, status);
233 clear_bit(HCI_RESET, &hdev->flags);
238 /* Reset all non-persistent flags */
239 hci_dev_clear_volatile_flags(hdev);
241 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
243 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
244 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
246 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
247 hdev->adv_data_len = 0;
249 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
250 hdev->scan_rsp_data_len = 0;
252 hdev->le_scan_type = LE_SCAN_PASSIVE;
254 hdev->ssp_debug_mode = 0;
256 hci_bdaddr_list_clear(&hdev->le_accept_list);
257 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete handler for HCI_Read_Stored_Link_Key: only caches the
 * key counts when the original command asked for all keys (read_all). */
260 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
263 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
264 struct hci_cp_read_stored_link_key *sent;
266 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
272 if (!rp->status && sent->read_all == 0x01) {
273 hdev->stored_max_keys = rp->max_keys;
274 hdev->stored_num_keys = rp->num_keys;
/* Command Complete handler for HCI_Delete_Stored_Link_Key: decrements the
 * cached stored-key count, clamping at zero so it never underflows. */
278 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
281 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
283 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
288 if (rp->num_keys <= hdev->stored_num_keys)
289 hdev->stored_num_keys -= rp->num_keys;
291 hdev->stored_num_keys = 0;
/* Command Complete handler for HCI_Write_Local_Name: notifies mgmt of the
 * result and, on success (guard elided in this listing), copies the name we
 * sent into hdev->dev_name. */
294 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
296 __u8 status = *((__u8 *) skb->data);
299 BT_DBG("%s status 0x%2.2x", hdev->name, status);
301 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
307 if (hci_dev_test_flag(hdev, HCI_MGMT))
308 mgmt_set_local_name_complete(hdev, sent, status);
310 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
312 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Local_Name: caches the name only
 * during initial setup/config, so a later user-set name is not clobbered. */
315 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
317 struct hci_rp_read_local_name *rp = (void *) skb->data;
319 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
324 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
325 hci_dev_test_flag(hdev, HCI_CONFIG))
326 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete handler for HCI_Write_Authentication_Enable: mirrors the
 * sent parameter into the HCI_AUTH flag and informs mgmt of the result. */
329 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
331 __u8 status = *((__u8 *) skb->data);
334 BT_DBG("%s status 0x%2.2x", hdev->name, status);
336 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
343 __u8 param = *((__u8 *) sent);
345 if (param == AUTH_ENABLED)
346 set_bit(HCI_AUTH, &hdev->flags);
348 clear_bit(HCI_AUTH, &hdev->flags);
351 if (hci_dev_test_flag(hdev, HCI_MGMT))
352 mgmt_auth_enable_complete(hdev, status);
354 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Write_Encryption_Mode: mirrors the sent
 * parameter into the HCI_ENCRYPT flag (the set/clear branch condition is
 * elided in this listing). */
357 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
359 __u8 status = *((__u8 *) skb->data);
363 BT_DBG("%s status 0x%2.2x", hdev->name, status);
368 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
372 param = *((__u8 *) sent);
375 set_bit(HCI_ENCRYPT, &hdev->flags);
377 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete handler for HCI_Write_Scan_Enable: mirrors the requested
 * inquiry/page scan bits into HCI_ISCAN/HCI_PSCAN, and resets the
 * discoverable timeout (the surrounding error/branch lines are elided in
 * this listing). */
380 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
382 __u8 status = *((__u8 *) skb->data);
386 BT_DBG("%s status 0x%2.2x", hdev->name, status);
388 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
392 param = *((__u8 *) sent);
397 hdev->discov_timeout = 0;
401 if (param & SCAN_INQUIRY)
402 set_bit(HCI_ISCAN, &hdev->flags);
404 clear_bit(HCI_ISCAN, &hdev->flags);
406 if (param & SCAN_PAGE)
407 set_bit(HCI_PSCAN, &hdev->flags);
409 clear_bit(HCI_PSCAN, &hdev->flags);
412 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Set_Event_Filter: tracks whether any
 * filter is configured via HCI_EVENT_FILTER_CONFIGURED (cleared when the
 * sent filter type was "clear all"). */
415 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
417 __u8 status = *((__u8 *)skb->data);
418 struct hci_cp_set_event_filter *cp;
421 BT_DBG("%s status 0x%2.2x", hdev->name, status);
426 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
430 cp = (struct hci_cp_set_event_filter *)sent;
432 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
433 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
435 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete handler for HCI_Read_Class_of_Device: caches the 3-byte
 * class of device and logs it (stored little-endian, hence the reversed
 * byte order in the debug print). */
438 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
440 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
442 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
447 memcpy(hdev->dev_class, rp->dev_class, 3);
449 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
450 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete handler for HCI_Write_Class_of_Device: on success
 * (guard elided in this listing) caches the class we sent, then notifies
 * mgmt of the outcome. */
453 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
455 __u8 status = *((__u8 *) skb->data);
458 BT_DBG("%s status 0x%2.2x", hdev->name, status);
460 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
467 memcpy(hdev->dev_class, sent, 3);
469 if (hci_dev_test_flag(hdev, HCI_MGMT))
470 mgmt_set_class_of_dev_complete(hdev, sent, status);
472 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Voice_Setting: caches the value
 * and, when it actually changed, notifies the driver via hdev->notify
 * (driver-presence check elided in this listing). */
475 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
477 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
480 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
485 setting = __le16_to_cpu(rp->voice_setting);
487 if (hdev->voice_setting == setting)
490 hdev->voice_setting = setting;
492 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
495 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for HCI_Write_Voice_Setting: mirrors the value
 * taken from the sent command into hdev and notifies the driver when it
 * changed, mirroring hci_cc_read_voice_setting(). */
498 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
501 __u8 status = *((__u8 *) skb->data);
505 BT_DBG("%s status 0x%2.2x", hdev->name, status);
510 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
514 setting = get_unaligned_le16(sent);
516 if (hdev->voice_setting == setting)
519 hdev->voice_setting = setting;
521 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
524 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for HCI_Read_Number_of_Supported_IAC: caches the
 * count of supported Inquiry Access Codes. */
527 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
530 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
532 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
537 hdev->num_iac = rp->num_iac;
539 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode: keeps the
 * LMP_HOST_SSP feature bit and the HCI_SSP_ENABLED flag in sync with the
 * mode we sent, and informs mgmt (branch conditions around the flag
 * updates are elided in this listing). */
542 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
544 __u8 status = *((__u8 *) skb->data);
545 struct hci_cp_write_ssp_mode *sent;
547 BT_DBG("%s status 0x%2.2x", hdev->name, status);
549 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
557 hdev->features[1][0] |= LMP_HOST_SSP;
559 hdev->features[1][0] &= ~LMP_HOST_SSP;
562 if (hci_dev_test_flag(hdev, HCI_MGMT))
563 mgmt_ssp_enable_complete(hdev, sent->mode, status);
566 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
568 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
571 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support:
 * keeps the LMP_HOST_SC feature bit and HCI_SC_ENABLED flag in sync with
 * the support value we sent; the flag path only runs when mgmt is not in
 * control and the command succeeded. */
574 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
576 u8 status = *((u8 *) skb->data);
577 struct hci_cp_write_sc_support *sent;
579 BT_DBG("%s status 0x%2.2x", hdev->name, status);
581 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
589 hdev->features[1][0] |= LMP_HOST_SC;
591 hdev->features[1][0] &= ~LMP_HOST_SC;
594 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
596 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
598 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
601 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Local_Version_Information: caches
 * HCI/LMP versions and manufacturer, but only during setup/config so later
 * reads cannot alter established identity data. */
604 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
606 struct hci_rp_read_local_version *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
614 hci_dev_test_flag(hdev, HCI_CONFIG)) {
615 hdev->hci_ver = rp->hci_ver;
616 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
617 hdev->lmp_ver = rp->lmp_ver;
618 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
619 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete handler for HCI_Read_Local_Supported_Commands: caches
 * the supported-command bitmap, only during setup/config. */
623 static void hci_cc_read_local_commands(struct hci_dev *hdev,
626 struct hci_rp_read_local_commands *rp = (void *) skb->data;
628 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
634 hci_dev_test_flag(hdev, HCI_CONFIG))
635 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete handler for HCI_Read_Authenticated_Payload_Timeout:
 * stores the timeout on the matching connection. */
638 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
641 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
642 struct hci_conn *conn;
644 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
653 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
655 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Write_Authenticated_Payload_Timeout:
 * mirrors the timeout we sent (bytes after the 2-byte handle) onto the
 * matching connection. */
658 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
661 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
662 struct hci_conn *conn;
665 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
670 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
676 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
678 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
680 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Local_Supported_Features: caches
 * the LMP feature page 0 and derives the supported ACL packet types and
 * (e)SCO air modes from the individual feature bits. */
683 static void hci_cc_read_local_features(struct hci_dev *hdev,
686 struct hci_rp_read_local_features *rp = (void *) skb->data;
688 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
693 memcpy(hdev->features, rp->features, 8);
695 /* Adjust default settings according to features
696 * supported by device. */
698 if (hdev->features[0][0] & LMP_3SLOT)
699 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
701 if (hdev->features[0][0] & LMP_5SLOT)
702 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
704 if (hdev->features[0][1] & LMP_HV2) {
705 hdev->pkt_type |= (HCI_HV2);
706 hdev->esco_type |= (ESCO_HV2);
709 if (hdev->features[0][1] & LMP_HV3) {
710 hdev->pkt_type |= (HCI_HV3);
711 hdev->esco_type |= (ESCO_HV3);
714 if (lmp_esco_capable(hdev))
715 hdev->esco_type |= (ESCO_EV3);
717 if (hdev->features[0][4] & LMP_EV4)
718 hdev->esco_type |= (ESCO_EV4);
720 if (hdev->features[0][4] & LMP_EV5)
721 hdev->esco_type |= (ESCO_EV5);
723 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
724 hdev->esco_type |= (ESCO_2EV3);
726 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
727 hdev->esco_type |= (ESCO_3EV3);
729 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
730 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete handler for HCI_Read_Local_Extended_Features: records
 * the highest available feature page and caches the returned page,
 * bounds-checked against HCI_MAX_PAGES. */
733 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
736 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 if (hdev->max_page < rp->max_page)
744 hdev->max_page = rp->max_page;
746 if (rp->page < HCI_MAX_PAGES)
747 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete handler for HCI_Read_Flow_Control_Mode: caches whether
 * the controller uses packet-based or block-based flow control. */
750 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
753 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
755 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
760 hdev->flow_ctl_mode = rp->mode;
/* Command Complete handler for HCI_Read_Buffer_Size: caches ACL/SCO MTUs
 * and packet counts and seeds the outstanding-packet counters; a quirk
 * hook exists for controllers that misreport these values (its body is
 * elided in this listing). */
763 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
765 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
767 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
772 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
773 hdev->sco_mtu = rp->sco_mtu;
774 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
775 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
777 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
782 hdev->acl_cnt = hdev->acl_pkts;
783 hdev->sco_cnt = hdev->sco_pkts;
785 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
786 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete handler for HCI_Read_BD_ADDR: caches the controller
 * address during init, and also as the setup address while in setup. */
789 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
791 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
793 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798 if (test_bit(HCI_INIT, &hdev->flags))
799 bacpy(&hdev->bdaddr, &rp->bdaddr);
801 if (hci_dev_test_flag(hdev, HCI_SETUP))
802 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete handler for HCI_Read_Local_Pairing_Options: caches the
 * pairing options and max encryption key size, only during setup/config. */
805 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
808 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
816 hci_dev_test_flag(hdev, HCI_CONFIG)) {
817 hdev->pairing_opts = rp->pairing_opts;
818 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete handler for HCI_Read_Page_Scan_Activity: caches page
 * scan interval/window, only during controller init. */
822 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
825 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832 if (test_bit(HCI_INIT, &hdev->flags)) {
833 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
834 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete handler for HCI_Write_Page_Scan_Activity: mirrors the
 * interval/window we sent into hdev. */
838 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
841 u8 status = *((u8 *) skb->data);
842 struct hci_cp_write_page_scan_activity *sent;
844 BT_DBG("%s status 0x%2.2x", hdev->name, status);
849 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
853 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
854 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete handler for HCI_Read_Page_Scan_Type: caches the scan
 * type, only during controller init. */
857 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
860 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
862 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
867 if (test_bit(HCI_INIT, &hdev->flags))
868 hdev->page_scan_type = rp->type;
/* Command Complete handler for HCI_Write_Page_Scan_Type: mirrors the type
 * we sent into hdev. */
871 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
874 u8 status = *((u8 *) skb->data);
877 BT_DBG("%s status 0x%2.2x", hdev->name, status);
882 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
884 hdev->page_scan_type = *type;
/* Command Complete handler for HCI_Read_Data_Block_Size (block-based flow
 * control): caches block MTU/length/count and seeds the free-block
 * counter. */
887 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
890 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
892 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
897 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
898 hdev->block_len = __le16_to_cpu(rp->block_len);
899 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
901 hdev->block_cnt = hdev->num_blocks;
903 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
904 hdev->block_cnt, hdev->block_len);
/* Command Complete handler for HCI_Read_Clock: validates the event length,
 * then stores either the local clock on hdev (which == 0x00) or the
 * piconet clock plus accuracy on the matching connection. */
907 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
909 struct hci_rp_read_clock *rp = (void *) skb->data;
910 struct hci_cp_read_clock *cp;
911 struct hci_conn *conn;
913 BT_DBG("%s", hdev->name);
915 if (skb->len < sizeof(*rp))
923 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
927 if (cp->which == 0x00) {
928 hdev->clock = le32_to_cpu(rp->clock);
932 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
934 conn->clock = le32_to_cpu(rp->clock);
935 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
939 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Local_AMP_Info: caches all AMP
 * controller capabilities (bandwidth, latency, PDU size, flush timeouts,
 * PAL capabilities, assoc size) in hdev. */
942 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
945 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
952 hdev->amp_status = rp->amp_status;
953 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
954 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
955 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
956 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
957 hdev->amp_type = rp->amp_type;
958 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
959 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
960 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
961 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete handler for HCI_Read_Inquiry_Response_TX_Power_Level:
 * caches the TX power used in inquiry responses. */
964 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
967 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
969 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
974 hdev->inq_tx_power = rp->tx_power;
/* Command Complete handler for HCI_Read_Default_Erroneous_Data_Reporting:
 * caches the reporting setting. */
977 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
980 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
987 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete handler for HCI_Write_Default_Erroneous_Data_Reporting:
 * mirrors the setting we sent into hdev. */
990 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
993 __u8 status = *((__u8 *)skb->data);
994 struct hci_cp_write_def_err_data_reporting *cp;
996 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1001 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1005 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete handler for HCI_PIN_Code_Request_Reply: notifies mgmt
 * of the outcome, then records the PIN length on the ACL connection so it
 * can later decide link-key persistence. */
1008 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
1010 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
1011 struct hci_cp_pin_code_reply *cp;
1012 struct hci_conn *conn;
1014 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1018 if (hci_dev_test_flag(hdev, HCI_MGMT))
1019 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1024 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1028 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1030 conn->pin_length = cp->pin_len;
1033 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_PIN_Code_Request_Negative_Reply:
 * forwards the result to mgmt when it is managing the adapter. */
1036 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1038 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1040 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1044 if (hci_dev_test_flag(hdev, HCI_MGMT))
1045 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1048 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Read_Buffer_Size: caches LE ACL MTU
 * and packet count and seeds the LE outstanding-packet counter. */
1051 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1052 struct sk_buff *skb)
1054 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1056 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1062 hdev->le_pkts = rp->le_max_pkt;
1064 hdev->le_cnt = hdev->le_pkts;
1066 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete handler for HCI_LE_Read_Local_Supported_Features:
 * caches the 8-byte LE feature mask. */
1069 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1070 struct sk_buff *skb)
1072 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1074 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1079 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete handler for HCI_LE_Read_Advertising_Channel_TX_Power:
 * caches the advertising TX power level. */
1082 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1083 struct sk_buff *skb)
1085 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1087 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1092 hdev->adv_tx_power = rp->tx_power;
/* Command Complete handler for HCI_User_Confirmation_Request_Reply:
 * forwards the result to mgmt for the BR/EDR (ACL) transport. */
1095 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1097 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1099 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1103 if (hci_dev_test_flag(hdev, HCI_MGMT))
1104 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1107 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_User_Confirmation_Request_Negative_Reply:
 * forwards the result to mgmt for the BR/EDR (ACL) transport. */
1110 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1111 struct sk_buff *skb)
1113 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1119 if (hci_dev_test_flag(hdev, HCI_MGMT))
1120 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1121 ACL_LINK, 0, rp->status);
1123 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_User_Passkey_Request_Reply: forwards
 * the result to mgmt (reply struct is shared with user-confirm replies). */
1126 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1128 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1134 if (hci_dev_test_flag(hdev, HCI_MGMT))
1135 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1138 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_User_Passkey_Request_Negative_Reply:
 * forwards the result to mgmt. */
1141 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1142 struct sk_buff *skb)
1144 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1146 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1150 if (hci_dev_test_flag(hdev, HCI_MGMT))
1151 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1152 ACL_LINK, 0, rp->status);
1154 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Local_OOB_Data: only a debug
 * trace is visible here; OOB data delivery happens elsewhere. */
1157 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1158 struct sk_buff *skb)
1160 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete handler for HCI_Read_Local_OOB_Extended_Data: only a
 * debug trace is visible here. */
1165 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1166 struct sk_buff *skb)
1168 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1170 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete handler for HCI_LE_Set_Random_Address: records the
 * random address we set; if it matches the current RPA, re-arms the RPA
 * expiry timer and clears the expired flag. */
1173 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1175 __u8 status = *((__u8 *) skb->data);
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1183 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1189 bacpy(&hdev->random_addr, sent);
1191 if (!bacmp(&hdev->rpa, sent)) {
1192 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1193 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1194 secs_to_jiffies(hdev->rpa_timeout));
1197 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Default_PHY: mirrors the default
 * TX/RX PHY preferences we sent into hdev. */
1200 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1202 __u8 status = *((__u8 *) skb->data);
1203 struct hci_cp_le_set_default_phy *cp;
1205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1210 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1216 hdev->le_tx_def_phys = cp->tx_phys;
1217 hdev->le_rx_def_phys = cp->rx_phys;
1219 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Advertising_Set_Random_Address:
 * records the random address on the advertising instance. Instance 0x00 is
 * skipped because it uses HCI_LE_Set_Random_Address instead (works for
 * both extended and legacy advertising). If the address is the current
 * RPA, re-arms that instance's RPA expiry timer. */
1222 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1223 struct sk_buff *skb)
1225 __u8 status = *((__u8 *) skb->data);
1226 struct hci_cp_le_set_adv_set_rand_addr *cp;
1227 struct adv_info *adv;
1232 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1233 /* Update only in case the adv instance since handle 0x00 shall be using
1234 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1235 * non-extended adverting.
1237 if (!cp || !cp->handle)
1242 adv = hci_find_adv_instance(hdev, cp->handle)
1244 bacpy(&adv->random_addr, &cp->bdaddr);
1245 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1246 adv->rpa_expired = false;
1247 queue_delayed_work(hdev->workqueue,
1248 &adv->rpa_expired_cb,
1249 secs_to_jiffies(hdev->rpa_timeout));
1253 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Read_Transmit_Power: caches the
 * supported minimum and maximum LE TX power levels. */
1256 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1257 struct sk_buff *skb)
1259 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1261 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1266 hdev->min_le_tx_power = rp->min_le_tx_power;
1267 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete handler for HCI_LE_Set_Advertising_Enable: toggles the
 * HCI_LE_ADV flag to match the value we sent. When enabling while a
 * peripheral-role connection attempt is pending, arms that connection's
 * timeout so a stalled attempt cannot hang forever. (Enable/disable branch
 * structure is partially elided in this listing.) */
1270 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1272 __u8 *sent, status = *((__u8 *) skb->data);
1274 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1279 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1285 /* If we're doing connection initiation as peripheral. Set a
1286 * timeout in case something goes wrong.
1289 struct hci_conn *conn;
1291 hci_dev_set_flag(hdev, HCI_LE_ADV);
1293 conn = hci_lookup_le_connect(hdev);
1295 queue_delayed_work(hdev->workqueue,
1296 &conn->le_conn_timeout,
1297 conn->conn_timeout);
1299 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1302 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Extended_Advertising_Enable:
 * like the legacy handler but per advertising set. On enable, marks the
 * targeted instance enabled, sets HCI_LE_ADV, and arms a pending
 * peripheral connection timeout. On disable, clears the targeted
 * instance's enabled state (or all instances when num_of_sets == 0) and
 * drops HCI_LE_ADV only once no instance remains enabled. (Several branch
 * and loop-body lines are elided in this listing.) */
1305 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1306 struct sk_buff *skb)
1308 struct hci_cp_le_set_ext_adv_enable *cp;
1309 struct hci_cp_ext_adv_set *set;
1310 __u8 status = *((__u8 *) skb->data);
1311 struct adv_info *adv = NULL, *n;
1313 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1318 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1322 set = (void *)cp->data;
1326 if (cp->num_of_sets)
1327 adv = hci_find_adv_instance(hdev, set->handle);
1330 struct hci_conn *conn;
1332 hci_dev_set_flag(hdev, HCI_LE_ADV);
1335 adv->enabled = true;
1337 conn = hci_lookup_le_connect(hdev);
1339 queue_delayed_work(hdev->workqueue,
1340 &conn->le_conn_timeout,
1341 conn->conn_timeout);
1343 if (cp->num_of_sets) {
1345 adv->enabled = false;
1347 /* If just one instance was disabled check if there are
1348 * any other instance enabled before clearing HCI_LE_ADV
1350 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1356 /* All instances shall be considered disabled */
1357 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1359 adv->enabled = false;
1362 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1366 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Scan_Parameters: records the
 * scan type (active/passive) we requested. */
1369 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1371 struct hci_cp_le_set_scan_param *cp;
1372 __u8 status = *((__u8 *) skb->data);
1374 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1379 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1385 hdev->le_scan_type = cp->type;
1387 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Extended_Scan_Parameters:
 * records the scan type from the first per-PHY parameter block that
 * follows the fixed header. */
1390 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1391 struct sk_buff *skb)
1393 struct hci_cp_le_set_ext_scan_params *cp;
1394 __u8 status = *((__u8 *) skb->data);
1395 struct hci_cp_le_scan_phy_params *phy_param;
1397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1402 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1406 phy_param = (void *)cp->data;
1410 hdev->le_scan_type = phy_param->type;
1412 hci_dev_unlock(hdev);
/* Return true when a deferred advertising report is buffered in the
 * discovery state (i.e. last_adv_addr is not BDADDR_ANY). */
1415 static bool has_pending_adv_report(struct hci_dev *hdev)
1417 struct discovery_state *d = &hdev->discovery;
1419 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report by resetting the stored address
 * to BDADDR_ANY and zeroing the stored data length. */
1422 static void clear_pending_adv_report(struct hci_dev *hdev)
1424 struct discovery_state *d = &hdev->discovery;
1426 bacpy(&d->last_adv_addr, BDADDR_ANY);
1427 d->last_adv_data_len = 0;
/* Buffer one advertising report in the discovery state so it can be
 * merged with a following scan-response before being reported to mgmt.
 * Reports longer than HCI_MAX_AD_LENGTH are rejected (bounds check for
 * the fixed last_adv_data buffer). */
1431 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1432 u8 bdaddr_type, s8 rssi, u32 flags,
1435 struct discovery_state *d = &hdev->discovery;
1437 if (len > HCI_MAX_AD_LENGTH)
1440 bacpy(&d->last_adv_addr, bdaddr);
1441 d->last_adv_addr_type = bdaddr_type;
1442 d->last_adv_rssi = rssi;
1443 d->last_adv_flags = flags;
1444 memcpy(d->last_adv_data, data, len);
1445 d->last_adv_data_len = len;
/* Shared completion logic for both legacy and extended LE scan enable.
 * On LE_SCAN_ENABLE: sets HCI_LE_SCAN and, for active scans, drops any
 * stale buffered advertising report. On LE_SCAN_DISABLE: flushes the
 * buffered report to mgmt, cancels the scan-disable timer, clears
 * HCI_LE_SCAN, and either marks discovery stopped (scan interrupted by a
 * connect request — the Tizen fork routes this through
 * hci_le_discovery_set_state, flagging the upstream call as a bug) or
 * re-enables advertising if discovery already finished. Reserved enable
 * values are logged as errors. */
1449 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1454 case LE_SCAN_ENABLE:
1455 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1456 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1457 clear_pending_adv_report(hdev);
1460 case LE_SCAN_DISABLE:
1461 /* We do this here instead of when setting DISCOVERY_STOPPED
1462 * since the latter would potentially require waiting for
1463 * inquiry to stop too.
1465 if (has_pending_adv_report(hdev)) {
1466 struct discovery_state *d = &hdev->discovery;
1468 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1469 d->last_adv_addr_type, NULL,
1470 d->last_adv_rssi, d->last_adv_flags,
1472 d->last_adv_data_len, NULL, 0);
1475 /* Cancel this timer so that we don't try to disable scanning
1476 * when it's already disabled.
1478 cancel_delayed_work(&hdev->le_scan_disable);
1480 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1482 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1483 * interrupted scanning due to a connect request. Mark
1484 * therefore discovery as stopped. If this was not
1485 * because of a connect request advertising might have
1486 * been disabled because of active scanning, so
1487 * re-enable it again if necessary.
1489 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1490 #ifndef TIZEN_BT /* The below line is kernel bug. */
1491 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1493 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1495 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1496 hdev->discovery.state == DISCOVERY_FINDING)
1497 hci_req_reenable_advertising(hdev);
1502 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1507 hci_dev_unlock(hdev);
/* Command Complete for HCI_OP_LE_SET_SCAN_ENABLE: on success, replay the
 * enable value we sent into the common scan-enable handler.
 */
1510 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1511 struct sk_buff *skb)
1513 struct hci_cp_le_set_scan_enable *cp;
1514 __u8 status = *((__u8 *) skb->data);
1516 BT_DBG("%s status 0x%2.2x", hdev->name, status);
/* Recover the parameters of the command we issued; NULL if it was
 * not the last sent command.
 */
1521 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1525 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for HCI_OP_LE_SET_EXT_SCAN_ENABLE (extended scanning):
 * shares the same completion logic as the legacy variant above.
 */
1528 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1529 struct sk_buff *skb)
1531 struct hci_cp_le_set_ext_scan_enable *cp;
1532 __u8 status = *((__u8 *) skb->data);
1534 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1539 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1543 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for LE Read Number of Supported Advertising Sets:
 * record the controller's advertising-set capacity in hdev.
 */
1546 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1547 struct sk_buff *skb)
1549 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1551 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1557 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete for LE Read Accept List Size: cache the controller's
 * filter accept list capacity.
 */
1560 static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
1561 struct sk_buff *skb)
1563 struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;
1565 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1570 hdev->le_accept_list_size = rp->size;
/* Command Complete for LE Clear Accept List: on success, mirror the
 * controller state by emptying the host's copy of the accept list.
 */
1573 static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
1574 struct sk_buff *skb)
1576 __u8 status = *((__u8 *) skb->data);
1578 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1584 hci_bdaddr_list_clear(&hdev->le_accept_list);
1585 hci_dev_unlock(hdev);
/* Command Complete for LE Add Device To Accept List: mirror the accepted
 * entry (address we sent with the command) into the host-side list.
 */
1588 static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
1589 struct sk_buff *skb)
1591 struct hci_cp_le_add_to_accept_list *sent;
1592 __u8 status = *((__u8 *) skb->data);
1594 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1599 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1604 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1606 hci_dev_unlock(hdev);
/* Command Complete for LE Remove Device From Accept List: drop the same
 * entry from the host-side mirror of the controller's list.
 */
1609 static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
1610 struct sk_buff *skb)
1612 struct hci_cp_le_del_from_accept_list *sent;
1613 __u8 status = *((__u8 *) skb->data);
1615 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1620 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1625 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1627 hci_dev_unlock(hdev);
/* Command Complete for LE Read Supported States: store the 8-byte
 * LE state bitmask reported by the controller.
 */
1630 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1631 struct sk_buff *skb)
1633 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1635 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1640 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete for LE Read Suggested Default Data Length: cache the
 * controller's default TX octets/time (wire values are little-endian).
 */
1643 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1644 struct sk_buff *skb)
1646 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1653 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1654 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete for LE Write Suggested Default Data Length: on success,
 * update the cached defaults from the values we sent (not from the reply,
 * which carries only a status).
 */
1657 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1658 struct sk_buff *skb)
1660 struct hci_cp_le_write_def_data_len *sent;
1661 __u8 status = *((__u8 *) skb->data);
1663 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1668 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1672 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1673 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete for LE Add Device To Resolving List: mirror the new
 * entry (address, type and peer IRK from the sent command) into the
 * host-side resolving list.
 */
1676 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1677 struct sk_buff *skb)
1679 struct hci_cp_le_add_to_resolv_list *sent;
1680 __u8 status = *((__u8 *) skb->data);
1682 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1687 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1692 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1693 sent->bdaddr_type, sent->peer_irk,
1695 hci_dev_unlock(hdev);
/* Command Complete for LE Remove Device From Resolving List: drop the
 * matching entry from the host-side mirror.
 */
1698 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1699 struct sk_buff *skb)
1701 struct hci_cp_le_del_from_resolv_list *sent;
1702 __u8 status = *((__u8 *) skb->data);
1704 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1709 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1714 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1716 hci_dev_unlock(hdev);
/* Command Complete for LE Clear Resolving List: on success, empty the
 * host-side mirror to match the controller.
 */
1719 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1720 struct sk_buff *skb)
1722 __u8 status = *((__u8 *) skb->data);
1724 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1730 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1731 hci_dev_unlock(hdev);
/* Command Complete for LE Read Resolving List Size: cache the controller's
 * resolving-list capacity.
 */
1734 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1735 struct sk_buff *skb)
1737 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1739 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1744 hdev->le_resolv_list_size = rp->size;
/* Command Complete for LE Set Address Resolution Enable: track whether
 * controller-based (link-layer) RPA resolution is active via the
 * HCI_LL_RPA_RESOLUTION device flag.
 */
1747 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1748 struct sk_buff *skb)
1750 __u8 *sent, status = *((__u8 *) skb->data);
1752 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1757 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1764 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1766 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1768 hci_dev_unlock(hdev);
/* Command Complete for LE Read Maximum Data Length: cache the controller's
 * maximum TX/RX octets and times (little-endian on the wire).
 */
1771 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1772 struct sk_buff *skb)
1774 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1776 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1781 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1782 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1783 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1784 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete for Write LE Host Supported: sync the host feature
 * bits (LMP_HOST_LE / LMP_HOST_LE_BREDR in features[1][0]) and the
 * HCI_LE_ENABLED flag with the values we sent. Disabling LE also clears
 * HCI_ADVERTISING since advertising cannot continue without LE.
 */
1787 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1788 struct sk_buff *skb)
1790 struct hci_cp_write_le_host_supported *sent;
1791 __u8 status = *((__u8 *) skb->data);
1793 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1798 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1805 hdev->features[1][0] |= LMP_HOST_LE;
1806 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1808 hdev->features[1][0] &= ~LMP_HOST_LE;
1809 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1810 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1814 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1816 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1818 hci_dev_unlock(hdev);
/* Command Complete for LE Set Advertising Parameters: remember which own
 * address type the controller will advertise with.
 */
1821 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1823 struct hci_cp_le_set_adv_param *cp;
1824 u8 status = *((u8 *) skb->data);
1826 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1831 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1836 hdev->adv_addr_type = cp->own_address_type;
1837 hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Advertising Parameters: record the
 * own address type and the TX power selected by the controller, both in
 * hdev (instance 0) and in the matching advertising instance, then
 * refresh the advertising data now that the TX power is known.
 */
1840 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1842 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1843 struct hci_cp_le_set_ext_adv_params *cp;
1844 struct adv_info *adv_instance;
1846 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1851 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1856 hdev->adv_addr_type = cp->own_addr_type;
1858 /* Store in hdev for instance 0 */
1859 hdev->adv_tx_power = rp->tx_power;
1861 adv_instance = hci_find_adv_instance(hdev, cp->handle);
1863 adv_instance->tx_power = rp->tx_power;
1865 /* Update adv data as tx power is known now */
1866 hci_req_update_adv_data(hdev, cp->handle);
1868 hci_dev_unlock(hdev);
/* Tizen vendor extension: Command Complete for the enable-RSSI-monitoring
 * vendor command; forward the result to the mgmt layer.
 */
1872 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1873 struct sk_buff *skb)
1875 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1877 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1878 hdev->name, rp->status, rp->le_ext_opcode);
1880 mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Tizen vendor extension: Command Complete for the get-raw-RSSI vendor
 * command; relay the connection handle and RSSI (dBm) to mgmt.
 */
1883 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1884 struct sk_buff *skb)
1886 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1888 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1889 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1891 mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Tizen vendor extension: handle an RSSI link-alert vendor event and
 * notify mgmt with the affected connection handle and alert type.
 */
1894 static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
1895 struct sk_buff *skb)
1897 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
1899 BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);
1901 mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
/* Tizen vendor extension: second-level demultiplexer for the vendor
 * "group extension" event. Strips the group header and dispatches on the
 * LE extension sub-code.
 */
1905 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
1906 struct sk_buff *skb)
1908 struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
1909 __u8 event_le_ext_sub_code;
1911 BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
1912 LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
/* NOTE(review): sub-code is read from ev AFTER skb_pull() advances
 * skb->data; ev still points at the original header, so this relies on
 * the header bytes remaining valid — confirm against the struct layout.
 */
1914 skb_pull(skb, sizeof(*ev));
1915 event_le_ext_sub_code = ev->event_le_ext_sub_code;
1917 switch (event_le_ext_sub_code) {
1918 case LE_RSSI_LINK_ALERT:
1919 hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
/* Tizen vendor extension: handle the multi-advertising state-change
 * sub-event and forward instance, reason and handle to mgmt.
 */
1927 static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
1928 struct sk_buff *skb)
1930 struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;
1932 BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");
1934 mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
1935 ev->state_change_reason,
1936 ev->connection_handle);
/* Tizen vendor extension: top-level dispatcher for vendor-specific HCI
 * events. Pulls the vendor header and routes by sub-code to the group
 * extension or multi-adv state-change handlers.
 */
1939 static void hci_vendor_specific_evt(struct hci_dev *hdev, struct sk_buff *skb)
1941 struct hci_ev_vendor_specific *ev = (void *)skb->data;
1942 __u8 event_sub_code;
1944 BT_DBG("hci_vendor_specific_evt");
/* NOTE(review): as in the group handler, ev is dereferenced after
 * skb_pull() — confirm the header bytes are still valid at that point.
 */
1946 skb_pull(skb, sizeof(*ev));
1947 event_sub_code = ev->event_sub_code;
1949 switch (event_sub_code) {
1950 case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
1951 hci_vendor_specific_group_ext_evt(hdev, skb);
1954 case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
1955 hci_vendor_multi_adv_state_change_evt(hdev, skb);
/* Command Complete for Read RSSI: store the reported RSSI on the
 * connection identified by the returned handle.
 */
1964 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1966 struct hci_rp_read_rssi *rp = (void *) skb->data;
1967 struct hci_conn *conn;
1969 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1976 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1978 conn->rssi = rp->rssi;
1980 hci_dev_unlock(hdev);
/* Command Complete for Read Transmit Power Level: store either the
 * current or the maximum TX power on the connection, depending on which
 * power type the original command requested.
 */
1983 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1985 struct hci_cp_read_tx_power *sent;
1986 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1987 struct hci_conn *conn;
1989 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1994 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2000 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* sent->type selects current vs. maximum TX power per the command. */
2004 switch (sent->type) {
2006 conn->tx_power = rp->tx_power;
2009 conn->max_tx_power = rp->tx_power;
2014 hci_dev_unlock(hdev);
/* Command Complete for Write SSP Debug Mode: cache the debug-mode value
 * we sent so later code can tell whether SSP debug keys are in use.
 */
2017 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
2019 u8 status = *((u8 *) skb->data);
2022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2027 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2029 hdev->ssp_debug_mode = *mode;
/* Command Status for Inquiry: on acceptance mark HCI_INQUIRY in
 * hdev->flags; also kick any queued connection attempts that were
 * deferred while the command was pending.
 */
2032 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2034 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2037 hci_conn_check_pending(hdev);
2041 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for Create Connection (BR/EDR ACL). On failure, close
 * the matching connection object and notify upper layers — except that a
 * "Command Disallowed" (0x0c) on an early attempt keeps the connection
 * in BT_CONNECT2 for a retry. On success with no existing object, create
 * one so the later Connection Complete event has something to bind to.
 */
2044 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2046 struct hci_cp_create_conn *cp;
2047 struct hci_conn *conn;
2049 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2051 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2057 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2059 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
2062 if (conn && conn->state == BT_CONNECT) {
/* 0x0c = Command Disallowed: retry up to 2 attempts before giving up. */
2063 if (status != 0x0c || conn->attempt > 2) {
2064 conn->state = BT_CLOSED;
2065 hci_connect_cfm(conn, status);
2068 conn->state = BT_CONNECT2;
2072 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2075 bt_dev_err(hdev, "no memory for new connection");
2079 hci_dev_unlock(hdev);
/* Command Status for Add SCO Connection. On failure, find the ACL link
 * the SCO was being attached to, close the associated SCO connection and
 * inform upper layers.
 */
2082 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2084 struct hci_cp_add_sco *cp;
2085 struct hci_conn *acl, *sco;
2088 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2093 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2097 handle = __le16_to_cpu(cp->handle);
2099 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2103 acl = hci_conn_hash_lookup_handle(hdev, handle);
2107 sco->state = BT_CLOSED;
2109 hci_connect_cfm(sco, status);
2114 hci_dev_unlock(hdev);
/* Command Status for Authentication Requested. On failure during the
 * BT_CONFIG phase, confirm the connect attempt with the error status and
 * drop the reference taken for authentication.
 */
2117 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2119 struct hci_cp_auth_requested *cp;
2120 struct hci_conn *conn;
2122 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2127 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2135 if (conn->state == BT_CONFIG) {
2136 hci_connect_cfm(conn, status);
2137 hci_conn_drop(conn);
2141 hci_dev_unlock(hdev);
/* Command Status for Set Connection Encryption. Mirrors
 * hci_cs_auth_requested(): on failure during BT_CONFIG, report the error
 * to upper layers and drop the connection reference.
 */
2144 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2146 struct hci_cp_set_conn_encrypt *cp;
2147 struct hci_conn *conn;
2149 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2154 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2160 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2162 if (conn->state == BT_CONFIG) {
2163 hci_connect_cfm(conn, status);
2164 hci_conn_drop(conn);
2168 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection in BT_CONFIG still needs
 * authentication before being reported connected. Returns non-zero when
 * authentication should be requested (the affirmative return path lies
 * outside this excerpt).
 */
2171 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2172 struct hci_conn *conn)
2174 if (conn->state != BT_CONFIG || !conn->out)
2177 if (conn->pending_sec_level == BT_SECURITY_SDP)
2180 /* Only request authentication for SSP connections or non-SSP
2181 * devices with sec_level MEDIUM or HIGH or if MITM protection
/* auth_type bit 0 encodes the MITM requirement. */
2184 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2185 conn->pending_sec_level != BT_SECURITY_FIPS &&
2186 conn->pending_sec_level != BT_SECURITY_HIGH &&
2187 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan parameters and clock offset learned during inquiry so the
 * controller can page the device faster. Returns the hci_send_cmd()
 * result (0 on success).
 */
2193 static int hci_resolve_name(struct hci_dev *hdev,
2194 struct inquiry_entry *e)
2196 struct hci_cp_remote_name_req cp;
2198 memset(&cp, 0, sizeof(cp));
2200 bacpy(&cp.bdaddr, &e->data.bdaddr);
2201 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2202 cp.pscan_mode = e->data.pscan_mode;
2203 cp.clock_offset = e->data.clock_offset;
2205 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next inquiry-cache entry that still
 * needs its name. Returns true when a request was successfully started
 * (entry moves to NAME_PENDING), false when nothing is left to resolve.
 */
2208 static bool hci_resolve_next_name(struct hci_dev *hdev)
2210 struct discovery_state *discov = &hdev->discovery;
2211 struct inquiry_entry *e;
2213 if (list_empty(&discov->resolve))
2216 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2220 if (hci_resolve_name(hdev, e) == 0) {
2221 e->name_state = NAME_PENDING;
/* Process a (possibly failed) remote-name result: update mgmt's
 * connected/name state, mark the matching inquiry-cache entry resolved,
 * then either resolve the next pending name or finish discovery.
 * A NULL name with name_len 0 indicates the name request failed.
 */
2228 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2229 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2231 struct discovery_state *discov = &hdev->discovery;
2232 struct inquiry_entry *e;
2235 /* Update the mgmt connected state if necessary. Be careful with
2236 * conn objects that exist but are not (yet) connected however.
2237 * Only those in BT_CONFIG or BT_CONNECTED states can be
2238 * considered connected.
2241 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2242 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2243 mgmt_device_connected(hdev, conn, 0, name, name_len);
2245 mgmt_device_name_update(hdev, bdaddr, name, name_len);
/* NOTE(review): this second branch calls mgmt_device_connected() with a
 * different argument list than line 2243 above — presumably the two are
 * separated by a TIZEN_BT #ifdef not visible in this excerpt; confirm.
 */
2249 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2250 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2251 mgmt_device_connected(hdev, conn, name, name_len);
2254 if (discov->state == DISCOVERY_STOPPED)
2257 if (discov->state == DISCOVERY_STOPPING)
2258 goto discov_complete;
2260 if (discov->state != DISCOVERY_RESOLVING)
2263 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING)
2264 /* If the device was not found in a list of found devices names of which
2265 * are pending. there is no need to continue resolving a next name as it
2266 * will be done upon receiving another Remote Name Request Complete
2273 e->name_state = NAME_KNOWN;
2274 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2275 e->data.rssi, name, name_len);
2277 e->name_state = NAME_NOT_KNOWN;
2280 if (hci_resolve_next_name(hdev))
2284 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for Remote Name Request. On failure, feed a NULL name
 * into the pending-name machinery (mgmt builds only). Independently of
 * the name request, start authentication on the connection if it is
 * still needed and not already pending.
 */
2287 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2289 struct hci_cp_remote_name_req *cp;
2290 struct hci_conn *conn;
2292 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2294 /* If successful wait for the name req complete event before
2295 * checking for the need to do authentication */
2299 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2305 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2307 if (hci_dev_test_flag(hdev, HCI_MGMT))
2308 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2313 if (!hci_outgoing_auth_needed(hdev, conn))
2316 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2317 struct hci_cp_auth_requested auth_cp;
2319 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2321 auth_cp.handle = __cpu_to_le16(conn->handle);
2322 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2323 sizeof(auth_cp), &auth_cp);
2327 hci_dev_unlock(hdev);
/* Command Status for Read Remote Supported Features. On failure during
 * BT_CONFIG, report the error to upper layers and drop the reference.
 */
2330 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2332 struct hci_cp_read_remote_features *cp;
2333 struct hci_conn *conn;
2335 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2340 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2346 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2348 if (conn->state == BT_CONFIG) {
2349 hci_connect_cfm(conn, status);
2350 hci_conn_drop(conn);
2354 hci_dev_unlock(hdev);
/* Command Status for Read Remote Extended Features. Same failure
 * handling as hci_cs_read_remote_features() above.
 */
2357 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2359 struct hci_cp_read_remote_ext_features *cp;
2360 struct hci_conn *conn;
2362 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2367 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2373 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2375 if (conn->state == BT_CONFIG) {
2376 hci_connect_cfm(conn, status);
2377 hci_conn_drop(conn);
2381 hci_dev_unlock(hdev);
/* Command Status for Setup Synchronous Connection (eSCO). On failure,
 * locate the parent ACL link, close the associated SCO/eSCO connection
 * and notify upper layers.
 */
2384 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2386 struct hci_cp_setup_sync_conn *cp;
2387 struct hci_conn *acl, *sco;
2390 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2395 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2399 handle = __le16_to_cpu(cp->handle);
2401 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2405 acl = hci_conn_hash_lookup_handle(hdev, handle);
2409 sco->state = BT_CLOSED;
2411 hci_connect_cfm(sco, status);
2416 hci_dev_unlock(hdev);
/* Command Status for Sniff Mode. On failure, clear the pending
 * mode-change flag and, if SCO setup was waiting on the mode change,
 * run it now with the error status.
 */
2419 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2421 struct hci_cp_sniff_mode *cp;
2422 struct hci_conn *conn;
2424 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2429 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2435 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2437 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2439 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2440 hci_sco_setup(conn, status);
2443 hci_dev_unlock(hdev);
/* Command Status for Exit Sniff Mode. Identical failure handling to
 * hci_cs_sniff_mode() above.
 */
2446 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2448 struct hci_cp_exit_sniff_mode *cp;
2449 struct hci_conn *conn;
2451 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2456 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2462 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2464 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2466 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2467 hci_sco_setup(conn, status);
2470 hci_dev_unlock(hdev);
/* Command Status for Disconnect. On failure, tell mgmt the disconnect
 * failed and, for LE links, restore advertising on the instance the
 * connection was using (the upper layers will not retry).
 */
2473 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2475 struct hci_cp_disconnect *cp;
2476 struct hci_conn *conn;
2481 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2487 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2489 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2490 conn->dst_type, status);
2492 if (conn->type == LE_LINK) {
2493 hdev->cur_adv_instance = conn->adv_instance;
2494 hci_req_reenable_advertising(hdev);
2497 /* If the disconnection failed for any reason, the upper layer
2498 * does not retry to disconnect in current implementation.
2499 * Hence, we need to do some basic cleanup here and re-enable
2500 * advertising if necessary.
2505 hci_dev_unlock(hdev);
/* Common Command Status bookkeeping for LE (extended) Create Connection:
 * record the initiator/responder address pair needed later by SMP, and
 * arm a connection timeout when the attempt is not accept-list based
 * (LE has no page-timeout equivalent).
 */
2508 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2509 u8 peer_addr_type, u8 own_address_type,
2512 struct hci_conn *conn;
2514 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2519 /* When using controller based address resolution, then the new
2520 * address types 0x02 and 0x03 are used. These types need to be
2521 * converted back into either public address or random address type
2523 if (use_ll_privacy(hdev) &&
2524 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
2525 switch (own_address_type) {
2526 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2527 own_address_type = ADDR_LE_DEV_PUBLIC;
2529 case ADDR_LE_DEV_RANDOM_RESOLVED:
2530 own_address_type = ADDR_LE_DEV_RANDOM;
2535 /* Store the initiator and responder address information which
2536 * is needed for SMP. These values will not change during the
2537 * lifetime of the connection.
2539 conn->init_addr_type = own_address_type;
2540 if (own_address_type == ADDR_LE_DEV_RANDOM)
2541 bacpy(&conn->init_addr, &hdev->random_addr);
2543 bacpy(&conn->init_addr, &hdev->bdaddr);
2545 conn->resp_addr_type = peer_addr_type;
2546 bacpy(&conn->resp_addr, peer_addr);
2548 /* We don't want the connection attempt to stick around
2549 * indefinitely since LE doesn't have a page timeout concept
2550 * like BR/EDR. Set a timer for any connection that doesn't use
2551 * the accept list for connecting.
2553 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2554 queue_delayed_work(conn->hdev->workqueue,
2555 &conn->le_conn_timeout,
2556 conn->conn_timeout);
/* Command Status for legacy LE Create Connection: on acceptance, hand
 * the sent parameters to cs_le_create_conn() for SMP address bookkeeping.
 * Failures are handled elsewhere via hci_le_conn_failed().
 */
2559 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2561 struct hci_cp_le_create_conn *cp;
2563 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2565 /* All connection failure handling is taken care of by the
2566 * hci_le_conn_failed function which is triggered by the HCI
2567 * request completion callbacks used for connecting.
2572 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2578 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2579 cp->own_address_type, cp->filter_policy);
2581 hci_dev_unlock(hdev);
/* Command Status for LE Extended Create Connection: same flow as the
 * legacy variant, only the command struct's field names differ
 * (own_addr_type vs own_address_type).
 */
2584 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2586 struct hci_cp_le_ext_create_conn *cp;
2588 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2590 /* All connection failure handling is taken care of by the
2591 * hci_le_conn_failed function which is triggered by the HCI
2592 * request completion callbacks used for connecting.
2597 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2603 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2604 cp->own_addr_type, cp->filter_policy);
2606 hci_dev_unlock(hdev);
/* Command Status for LE Read Remote Features. On failure during
 * BT_CONFIG, report the error to upper layers and drop the reference,
 * matching the BR/EDR feature-read handlers above.
 */
2609 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2611 struct hci_cp_le_read_remote_features *cp;
2612 struct hci_conn *conn;
2614 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2619 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2625 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2627 if (conn->state == BT_CONFIG) {
2628 hci_connect_cfm(conn, status);
2629 hci_conn_drop(conn);
2633 hci_dev_unlock(hdev);
/* Command Status for LE Start Encryption. If the controller rejected the
 * command on a live connection, encryption cannot proceed — tear the
 * link down with an authentication-failure reason.
 */
2636 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2638 struct hci_cp_le_start_enc *cp;
2639 struct hci_conn *conn;
2641 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2648 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2652 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2656 if (conn->state != BT_CONNECTED)
2659 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2660 hci_conn_drop(conn);
2663 hci_dev_unlock(hdev);
/* Command Status for Switch Role. On failure, clear the pending
 * role-switch flag so later requests are not blocked.
 */
2666 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2668 struct hci_cp_switch_role *cp;
2669 struct hci_conn *conn;
2671 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2676 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2682 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2684 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2686 hci_dev_unlock(hdev);
/* Inquiry Complete event: clear HCI_INQUIRY (waking any waiters), then
 * drive the mgmt discovery state machine — either start resolving cached
 * names or mark discovery stopped, taking simultaneous LE scanning into
 * account.
 */
2689 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2691 __u8 status = *((__u8 *) skb->data);
2692 struct discovery_state *discov = &hdev->discovery;
2693 struct inquiry_entry *e;
2695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2697 hci_conn_check_pending(hdev);
2699 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2702 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2703 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2705 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2710 if (discov->state != DISCOVERY_FINDING)
2713 if (list_empty(&discov->resolve)) {
2714 /* When BR/EDR inquiry is active and no LE scanning is in
2715 * progress, then change discovery state to indicate completion.
2717 * When running LE scanning and BR/EDR inquiry simultaneously
2718 * and the LE scan already finished, then change the discovery
2719 * state to indicate completion.
2721 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2722 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2723 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Names remain to resolve: start with the first NAME_NEEDED entry. */
2727 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2728 if (e && hci_resolve_name(hdev, e) == 0) {
2729 e->name_state = NAME_PENDING;
2730 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2732 /* When BR/EDR inquiry is active and no LE scanning is in
2733 * progress, then change discovery state to indicate completion.
2735 * When running LE scanning and BR/EDR inquiry simultaneously
2736 * and the LE scan already finished, then change the discovery
2737 * state to indicate completion.
2739 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2740 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2741 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2745 hci_dev_unlock(hdev);
/* Inquiry Result event (no RSSI variant): validate the response count
 * against the skb length, update the inquiry cache for each result and
 * report each device to mgmt with HCI_RSSI_INVALID.
 */
2748 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2750 struct inquiry_data data;
2751 struct inquiry_info *info = (void *) (skb->data + 1);
2752 int num_rsp = *((__u8 *) skb->data);
2754 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Bounds check: each result must actually be present in the skb. */
2756 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2759 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2764 for (; num_rsp; num_rsp--, info++) {
2767 bacpy(&data.bdaddr, &info->bdaddr);
2768 data.pscan_rep_mode = info->pscan_rep_mode;
2769 data.pscan_period_mode = info->pscan_period_mode;
2770 data.pscan_mode = info->pscan_mode;
2771 memcpy(data.dev_class, info->dev_class, 3);
2772 data.clock_offset = info->clock_offset;
2773 data.rssi = HCI_RSSI_INVALID;
2774 data.ssp_mode = 0x00;
2776 flags = hci_inquiry_cache_update(hdev, &data, false);
2778 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2779 info->dev_class, HCI_RSSI_INVALID,
2780 flags, NULL, 0, NULL, 0);
2783 hci_dev_unlock(hdev);
/* Connection Complete event (BR/EDR ACL and SCO). Binds the controller
 * handle to the matching hci_conn (creating one for auto-connected or
 * remapped SCO/ESCO links), then on success configures the link (auth /
 * encrypt flags, remote feature read, packet type, supervision timeout)
 * and on failure reports the error and closes the connection.
 */
2786 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2788 struct hci_ev_conn_complete *ev = (void *) skb->data;
2789 struct hci_conn *conn;
2791 BT_DBG("%s", hdev->name);
2795 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2797 /* Connection may not exist if auto-connected. Check the bredr
2798 * allowlist to see if this device is allowed to auto connect.
2799 * If link is an ACL type, create a connection class
2802 * Auto-connect will only occur if the event filter is
2803 * programmed with a given address. Right now, event filter is
2804 * only used during suspend.
2806 if (ev->link_type == ACL_LINK &&
2807 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
2810 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2813 bt_dev_err(hdev, "no memory for new conn")
2817 if (ev->link_type != SCO_LINK)
/* Some controllers report SCO where ESCO was requested; remap the
 * pending ESCO conn object to SCO so the handle binds correctly.
 */
2820 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
2825 conn->type = SCO_LINK;
2830 conn->handle = __le16_to_cpu(ev->handle);
2832 if (conn->type == ACL_LINK) {
2833 conn->state = BT_CONFIG;
2834 hci_conn_hold(conn);
/* Keep incoming legacy (non-SSP) links without a stored key alive
 * long enough for pairing to complete.
 */
2836 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2837 !hci_find_link_key(hdev, &ev->bdaddr))
2838 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2840 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2842 conn->state = BT_CONNECTED;
2844 hci_debugfs_create_conn(conn);
2845 hci_conn_add_sysfs(conn);
2847 if (test_bit(HCI_AUTH, &hdev->flags))
2848 set_bit(HCI_CONN_AUTH, &conn->flags);
2850 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2851 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2853 /* Get remote features */
2854 if (conn->type == ACL_LINK) {
2855 struct hci_cp_read_remote_features cp;
2856 cp.handle = ev->handle;
2857 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2860 hci_req_update_scan(hdev);
2863 /* Set packet type for incoming connection */
2864 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2865 struct hci_cp_change_conn_ptype cp;
2866 cp.handle = ev->handle;
2867 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2868 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Tizen addition (not in upstream): enforce a fixed supervision
 * timeout when we are the central. NOTE(review): confirm intent.
 */
2872 if (get_link_mode(conn) & HCI_LM_MASTER)
2873 hci_conn_change_supervision_timeout(conn,
2874 LINK_SUPERVISION_TIMEOUT);
2877 conn->state = BT_CLOSED;
2878 if (conn->type == ACL_LINK)
2879 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2880 conn->dst_type, ev->status);
2883 if (conn->type == ACL_LINK)
2884 hci_sco_setup(conn, ev->status);
2887 hci_connect_cfm(conn, ev->status);
2889 } else if (ev->link_type == SCO_LINK) {
2890 switch (conn->setting & SCO_AIRMODE_MASK) {
2891 case SCO_AIRMODE_CVSD:
2893 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
2897 hci_connect_cfm(conn, ev->status);
2901 hci_dev_unlock(hdev);
2903 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address with
 * reason "unacceptable BD_ADDR".
 */
2906 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2908 struct hci_cp_reject_conn_req cp;
2910 bacpy(&cp.bdaddr, bdaddr);
2911 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2912 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event: apply the acceptance policy (protocol veto,
 * reject list, mgmt connectable/accept-list rules, single-SCO limit),
 * then either accept immediately (ACL, or SCO when nothing defers),
 * accept as eSCO with default parameters, or defer to the profile layer
 * via hci_connect_cfm().
 */
2915 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2917 struct hci_ev_conn_request *ev = (void *) skb->data;
2918 int mask = hdev->link_mode;
2919 struct inquiry_entry *ie;
2920 struct hci_conn *conn;
2923 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Let L2CAP/SCO protocol handlers veto or defer the connection. */
2926 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2929 if (!(mask & HCI_LM_ACCEPT)) {
2930 hci_reject_conn(hdev, &ev->bdaddr);
2936 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
2938 hci_reject_conn(hdev, &ev->bdaddr);
2942 /* Require HCI_CONNECTABLE or an accept list entry to accept the
2943 * connection. These features are only touched through mgmt so
2944 * only do the checks if HCI_MGMT is set.
2946 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2947 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2948 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
2950 hci_reject_conn(hdev, &ev->bdaddr);
2954 /* Connection accepted */
2956 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2958 memcpy(ie->data.dev_class, ev->dev_class, 3);
/* Tizen addition: only one (e)SCO link at a time — reject further
 * sync connection requests with "limited resources".
 */
2961 if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
2962 hci_conn_hash_lookup_sco(hdev)) {
2963 struct hci_cp_reject_conn_req cp;
2965 bacpy(&cp.bdaddr, &ev->bdaddr);
2966 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2967 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
2969 hci_dev_unlock(hdev);
2974 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2977 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2980 bt_dev_err(hdev, "no memory for new connection");
2985 memcpy(conn->dev_class, ev->dev_class, 3);
2987 hci_dev_unlock(hdev);
2989 if (ev->link_type == ACL_LINK ||
2990 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2991 struct hci_cp_accept_conn_req cp;
2992 conn->state = BT_CONNECT;
2994 bacpy(&cp.bdaddr, &ev->bdaddr);
2996 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2997 cp.role = 0x00; /* Become central */
2999 cp.role = 0x01; /* Remain peripheral */
3001 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3002 } else if (!(flags & HCI_PROTO_DEFER)) {
3003 struct hci_cp_accept_sync_conn_req cp;
3004 conn->state = BT_CONNECT;
3006 bacpy(&cp.bdaddr, &ev->bdaddr);
3007 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Default eSCO parameters: 8 kHz (0x1f40 = 8000) bandwidth both
 * directions, no latency/retransmission constraints.
 */
3009 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
3010 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
3011 cp.max_latency = cpu_to_le16(0xffff);
3012 cp.content_format = cpu_to_le16(hdev->voice_setting);
3013 cp.retrans_effort = 0xff;
3015 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3018 conn->state = BT_CONNECT2;
3019 hci_connect_cfm(conn, 0);
3024 hci_dev_unlock(hdev);
/* Translate an HCI disconnect error code into the MGMT disconnect
 * reason that is reported to user space via mgmt_device_disconnected().
 * Any HCI error not explicitly listed maps to MGMT_DEV_DISCONN_UNKNOWN.
 * NOTE(review): interior lines (the switch header/braces) are elided in
 * this chunk view.
 */
3027 static u8 hci_to_mgmt_reason(u8 err)
3030 case HCI_ERROR_CONNECTION_TIMEOUT:
3031 return MGMT_DEV_DISCONN_TIMEOUT;
/* All three remote-initiated terminations collapse to one MGMT reason. */
3032 case HCI_ERROR_REMOTE_USER_TERM:
3033 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3034 case HCI_ERROR_REMOTE_POWER_OFF:
3035 return MGMT_DEV_DISCONN_REMOTE;
3036 case HCI_ERROR_LOCAL_HOST_TERM:
3037 return MGMT_DEV_DISCONN_LOCAL_HOST;
3039 return MGMT_DEV_DISCONN_UNKNOWN;
/* Handle the HCI Disconnection Complete event: close the hci_conn,
 * notify mgmt, clean up link keys / background-scan state for ACL
 * links, re-queue LE auto-connect parameters, wake the suspend
 * notifier, and re-enable advertising for LE links.
 */
3043 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3045 struct hci_ev_disconn_complete *ev = (void *) skb->data;
3047 struct hci_conn_params *params;
3048 struct hci_conn *conn;
3049 bool mgmt_connected;
3051 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3055 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* A non-zero event status means the disconnect itself failed. */
3060 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3061 conn->dst_type, ev->status);
3065 conn->state = BT_CLOSED;
3067 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
/* An earlier auth failure overrides the HCI-supplied reason. */
3069 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3070 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3072 reason = hci_to_mgmt_reason(ev->reason);
3074 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3075 reason, mgmt_connected);
3077 if (conn->type == ACL_LINK) {
3078 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3079 hci_remove_link_key(hdev, &conn->dst);
3081 hci_req_update_scan(hdev);
3084 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3086 switch (params->auto_connect) {
3087 case HCI_AUTO_CONN_LINK_LOSS:
/* Only re-arm auto-connect on a genuine link loss (timeout). */
3088 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
/* Move params back onto the pending-LE-connections list so the
 * device is reconnected by background scanning.
 * NOTE(review): "¶ms" below looks like a mis-encoding of
 * "&params" (HTML entity &para;) introduced by extraction —
 * confirm against the pristine file.
 */
3092 case HCI_AUTO_CONN_DIRECT:
3093 case HCI_AUTO_CONN_ALWAYS:
3094 list_del_init(¶ms->action);
3095 list_add(¶ms->action, &hdev->pend_le_conns);
3096 hci_update_background_scan(hdev);
3104 hci_disconn_cfm(conn, ev->reason);
3106 /* The suspend notifier is waiting for all devices to disconnect so
3107 * clear the bit from pending tasks and inform the wait queue.
3109 if (list_empty(&hdev->conn_hash.list) &&
3110 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
3111 wake_up(&hdev->suspend_wait_q);
3114 /* Re-enable advertising if necessary, since it might
3115 * have been disabled by the connection. From the
3116 * HCI_LE_Set_Advertise_Enable command description in
3117 * the core specification (v4.0):
3118 * "The Controller shall continue advertising until the Host
3119 * issues an LE_Set_Advertise_Enable command with
3120 * Advertising_Enable set to 0x00 (Advertising is disabled)
3121 * or until a connection is created or until the Advertising
3122 * is timed out due to Directed Advertising."
3124 if (conn->type == LE_LINK) {
3125 hdev->cur_adv_instance = conn->adv_instance;
3126 hci_req_reenable_advertising(hdev);
/* Vendor addition: with no ACL links left and both inquiry and page
 * scan off, force page scan back on so the device stays connectable.
 * NOTE(review): "type", "iscan" and "pscan" declarations are in the
 * elided lines of this chunk.
 */
3132 if (type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
3136 iscan = test_bit(HCI_ISCAN, &hdev->flags);
3137 pscan = test_bit(HCI_PSCAN, &hdev->flags);
3138 if (!iscan && !pscan) {
3139 u8 scan_enable = SCAN_PAGE;
3141 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3142 sizeof(scan_enable), &scan_enable);
3148 hci_dev_unlock(hdev);
/* Handle the HCI Authentication Complete event: update the connection's
 * auth flags and security level, report failures to mgmt, and — when the
 * connection is still in BT_CONFIG — kick off link encryption for SSP
 * connections before declaring the connection established.
 */
3151 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3153 struct hci_ev_auth_complete *ev = (void *) skb->data;
3154 struct hci_conn *conn;
3156 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3160 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3165 /* PIN or Key Missing patch */
3166 BT_DBG("remote_auth %x, remote_cap %x, auth_type %x, io_capability %x",
3167 conn->remote_auth, conn->remote_cap,
3168 conn->auth_type, conn->io_capability);
/* Vendor patch: status 0x06 (PIN or Key Missing) on an SSP link means
 * our stored key is stale — drop it and retry authentication.
 */
3170 if (ev->status == 0x06 && hci_conn_ssp_enabled(conn)) {
3171 struct hci_cp_auth_requested cp;
3173 BT_DBG("Pin or key missing");
3174 hci_remove_link_key(hdev, &conn->dst);
3175 cp.handle = cpu_to_le16(conn->handle);
3176 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
3183 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
/* Legacy (non-SSP) devices cannot be re-authenticated; treat the
 * pending re-auth as satisfied rather than failing it.
 */
3185 if (!hci_conn_ssp_enabled(conn) &&
3186 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3187 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3189 set_bit(HCI_CONN_AUTH, &conn->flags);
3190 conn->sec_level = conn->pending_sec_level;
3193 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3194 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3196 mgmt_auth_failed(conn, ev->status);
3199 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3200 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3202 if (conn->state == BT_CONFIG) {
/* SSP: encryption is mandatory, so request it before BT_CONNECTED. */
3203 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3204 struct hci_cp_set_conn_encrypt cp;
3205 cp.handle = ev->handle;
3207 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3210 conn->state = BT_CONNECTED;
3211 hci_connect_cfm(conn, ev->status);
3212 hci_conn_drop(conn);
3215 hci_auth_cfm(conn, ev->status);
/* Hold then drop with the disconnect timeout refreshed, keeping the
 * connection alive briefly after auth completes.
 */
3217 hci_conn_hold(conn);
3218 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3219 hci_conn_drop(conn);
/* Encryption was requested while authentication was in flight. */
3222 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3224 struct hci_cp_set_conn_encrypt cp;
3225 cp.handle = ev->handle;
3227 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3230 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3231 hci_encrypt_cfm(conn, ev->status);
3236 hci_dev_unlock(hdev);
/* Handle the HCI Remote Name Request Complete event: resolve any pending
 * mgmt name lookup, then start authentication if the outgoing connection
 * still requires it.
 */
3239 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3241 struct hci_ev_remote_name *ev = (void *) skb->data;
3242 struct hci_conn *conn;
3244 BT_DBG("%s", hdev->name);
3246 hci_conn_check_pending(hdev);
3250 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Name resolution results are only interesting to the mgmt layer. */
3252 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3255 if (ev->status == 0)
3256 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3257 strnlen(ev->name, HCI_MAX_NAME_LENGTH))
3259 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3265 if (!hci_outgoing_auth_needed(hdev, conn))
/* Request authentication exactly once; AUTH_PEND guards re-entry and
 * AUTH_INITIATOR records that we started the procedure.
 */
3268 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3269 struct hci_cp_auth_requested cp;
3271 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3273 cp.handle = __cpu_to_le16(conn->handle);
3274 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3278 hci_dev_unlock(hdev);
/* Completion callback for HCI_Read_Encryption_Key_Size: record the
 * negotiated key size on the connection (0 on failure, which forces
 * upper layers to treat the link as insufficiently encrypted) and then
 * deliver the deferred encrypt confirmation.
 */
3281 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3282 u16 opcode, struct sk_buff *skb)
3284 const struct hci_rp_read_enc_key_size *rp;
3285 struct hci_conn *conn;
3288 BT_DBG("%s status 0x%02x", hdev->name, status);
/* Validate the response before touching rp fields. */
3290 if (!skb || skb->len < sizeof(*rp)) {
3291 bt_dev_err(hdev, "invalid read key size response");
3295 rp = (void *)skb->data;
3296 handle = le16_to_cpu(rp->handle);
3300 conn = hci_conn_hash_lookup_handle(hdev, handle);
3304 /* While unexpected, the read_enc_key_size command may fail. The most
3305 * secure approach is to then assume the key size is 0 to force a
3309 bt_dev_err(hdev, "failed to read key size for handle %u",
3311 conn->enc_key_size = 0;
3313 conn->enc_key_size = rp->key_size;
3316 hci_encrypt_cfm(conn, 0);
3319 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Change event: update the connection's
 * AUTH/ENCRYPT/FIPS/AES-CCM flags, enforce link-security requirements,
 * read the encryption key size for ACL links, optionally program the
 * authenticated payload timeout, and confirm encryption to upper layers.
 */
3322 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3324 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3325 struct hci_conn *conn;
3327 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3331 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3337 /* Encryption implies authentication */
3338 set_bit(HCI_CONN_AUTH, &conn->flags);
3339 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3340 conn->sec_level = conn->pending_sec_level;
3342 /* P-256 authentication key implies FIPS */
3343 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3344 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 on BR/EDR means AES-CCM; LE links always use it. */
3346 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3347 conn->type == LE_LINK)
3348 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3350 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3351 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3355 /* We should disregard the current RPA and generate a new one
3356 * whenever the encryption procedure fails.
3358 if (ev->status && conn->type == LE_LINK) {
3359 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3360 hci_adv_instances_set_rpa_expired(hdev, true);
3363 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3365 /* Check link security requirements are met */
3366 if (!hci_conn_check_link_mode(conn))
3367 ev->status = HCI_ERROR_AUTH_FAILURE;
/* Encryption failed on an established link: notify, then tear it down. */
3369 if (ev->status && conn->state == BT_CONNECTED) {
3370 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3371 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3373 /* Notify upper layers so they can cleanup before
3376 hci_encrypt_cfm(conn, ev->status);
3377 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3378 hci_conn_drop(conn);
3382 /* Try reading the encryption key size for encrypted ACL links */
3383 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3384 struct hci_cp_read_enc_key_size cp;
3385 struct hci_request req;
3387 /* Only send HCI_Read_Encryption_Key_Size if the
3388 * controller really supports it. If it doesn't, assume
3389 * the default size (16).
3391 if (!(hdev->commands[20] & 0x10)) {
3392 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3396 hci_req_init(&req, hdev);
3398 cp.handle = cpu_to_le16(conn->handle);
3399 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
/* On send failure fall back to the default key size rather than
 * leaving enc_key_size unset.
 */
3401 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3402 bt_dev_err(hdev, "sending read key size failed");
3403 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3410 /* Set the default Authenticated Payload Timeout after
3411 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3412 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3413 * sent when the link is active and Encryption is enabled, the conn
3414 * type can be either LE or ACL and controller must support LMP Ping.
3415 * Ensure for AES-CCM encryption as well.
3417 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3418 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3419 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3420 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3421 struct hci_cp_write_auth_payload_to cp;
3423 cp.handle = cpu_to_le16(conn->handle);
3424 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3425 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3430 hci_encrypt_cfm(conn, ev->status);
3433 hci_dev_unlock(hdev);
/* Handle the HCI Change Connection Link Key Complete event: mark the
 * connection as using a secure (changed) link key, clear the pending
 * auth flag and notify upper layers of the key change.
 */
3436 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3437 struct sk_buff *skb)
3439 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3440 struct hci_conn *conn;
3442 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3446 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3449 set_bit(HCI_CONN_SECURE, &conn->flags);
3451 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3453 hci_key_change_cfm(conn, ev->status);
3456 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Supported Features Complete event: store
 * page 0 of the remote feature mask, chase extended features when both
 * sides support them, otherwise proceed with name resolution / mgmt
 * connected notification and finish connection setup.
 */
3459 static void hci_remote_features_evt(struct hci_dev *hdev,
3460 struct sk_buff *skb)
3462 struct hci_ev_remote_features *ev = (void *) skb->data;
3463 struct hci_conn *conn;
3465 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3469 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3474 memcpy(conn->features[0], ev->features, 8);
/* Remaining setup only applies while the connection is being configured. */
3476 if (conn->state != BT_CONFIG)
/* Both controller and remote support extended features: read them next. */
3479 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3480 lmp_ext_feat_capable(conn)) {
3481 struct hci_cp_read_remote_ext_features cp;
3482 cp.handle = ev->handle;
3484 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Resolve the remote name before announcing the device to mgmt. */
3489 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3490 struct hci_cp_remote_name_req cp;
3491 memset(&cp, 0, sizeof(cp));
3492 bacpy(&cp.bdaddr, &conn->dst);
3493 cp.pscan_rep_mode = 0x02;
3494 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3495 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3496 mgmt_device_connected(hdev, conn, NULL, 0);
3498 if (!hci_outgoing_auth_needed(hdev, conn)) {
3499 conn->state = BT_CONNECTED;
3500 hci_connect_cfm(conn, ev->status);
3501 hci_conn_drop(conn);
3505 hci_dev_unlock(hdev);
/* Common bookkeeping for Command Complete/Status events: stop the
 * command timeout and, outside of a reset, refresh the controller's
 * command-credit count and the ncmd watchdog timer.
 * NOTE(review): the ncmd branch condition is in lines elided from this
 * chunk; the visible code handles the non-HCI_RESET path.
 */
3508 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3510 cancel_delayed_work(&hdev->cmd_timer);
3512 if (!test_bit(HCI_RESET, &hdev->flags)) {
3514 cancel_delayed_work(&hdev->ncmd_timer);
3515 atomic_set(&hdev->cmd_cnt, 1);
3517 schedule_delayed_work(&hdev->ncmd_timer,
/* Handle the HCI Command Complete event: extract the completed opcode
 * and status, dispatch to the matching hci_cc_* handler, update the
 * command-credit accounting, resolve any hci_request waiting on this
 * opcode, and re-kick the command queue if credits remain.
 *
 * Returns (via out-parameters): *opcode, *status, and the request
 * completion callbacks for the caller to invoke.
 */
3523 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3524 u16 *opcode, u8 *status,
3525 hci_req_complete_t *req_complete,
3526 hci_req_complete_skb_t *req_complete_skb)
3528 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3530 *opcode = __le16_to_cpu(ev->opcode);
/* The command status byte immediately follows the event header. */
3531 *status = skb->data[sizeof(*ev)];
3533 skb_pull(skb, sizeof(*ev));
/* One case per supported command opcode; each hci_cc_* handler parses
 * the command-specific return parameters from skb.
 */
3536 case HCI_OP_INQUIRY_CANCEL:
3537 hci_cc_inquiry_cancel(hdev, skb, status);
3540 case HCI_OP_PERIODIC_INQ:
3541 hci_cc_periodic_inq(hdev, skb);
3544 case HCI_OP_EXIT_PERIODIC_INQ:
3545 hci_cc_exit_periodic_inq(hdev, skb);
3548 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3549 hci_cc_remote_name_req_cancel(hdev, skb);
3552 case HCI_OP_ROLE_DISCOVERY:
3553 hci_cc_role_discovery(hdev, skb);
3556 case HCI_OP_READ_LINK_POLICY:
3557 hci_cc_read_link_policy(hdev, skb);
3560 case HCI_OP_WRITE_LINK_POLICY:
3561 hci_cc_write_link_policy(hdev, skb);
3564 case HCI_OP_READ_DEF_LINK_POLICY:
3565 hci_cc_read_def_link_policy(hdev, skb);
3568 case HCI_OP_WRITE_DEF_LINK_POLICY:
3569 hci_cc_write_def_link_policy(hdev, skb);
3573 hci_cc_reset(hdev, skb);
3576 case HCI_OP_READ_STORED_LINK_KEY:
3577 hci_cc_read_stored_link_key(hdev, skb);
3580 case HCI_OP_DELETE_STORED_LINK_KEY:
3581 hci_cc_delete_stored_link_key(hdev, skb);
3584 case HCI_OP_WRITE_LOCAL_NAME:
3585 hci_cc_write_local_name(hdev, skb);
3588 case HCI_OP_READ_LOCAL_NAME:
3589 hci_cc_read_local_name(hdev, skb);
3592 case HCI_OP_WRITE_AUTH_ENABLE:
3593 hci_cc_write_auth_enable(hdev, skb);
3596 case HCI_OP_WRITE_ENCRYPT_MODE:
3597 hci_cc_write_encrypt_mode(hdev, skb);
3600 case HCI_OP_WRITE_SCAN_ENABLE:
3601 hci_cc_write_scan_enable(hdev, skb);
3604 case HCI_OP_SET_EVENT_FLT:
3605 hci_cc_set_event_filter(hdev, skb);
3608 case HCI_OP_READ_CLASS_OF_DEV:
3609 hci_cc_read_class_of_dev(hdev, skb);
3612 case HCI_OP_WRITE_CLASS_OF_DEV:
3613 hci_cc_write_class_of_dev(hdev, skb);
3616 case HCI_OP_READ_VOICE_SETTING:
3617 hci_cc_read_voice_setting(hdev, skb);
3620 case HCI_OP_WRITE_VOICE_SETTING:
3621 hci_cc_write_voice_setting(hdev, skb);
3624 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3625 hci_cc_read_num_supported_iac(hdev, skb);
3628 case HCI_OP_WRITE_SSP_MODE:
3629 hci_cc_write_ssp_mode(hdev, skb);
3632 case HCI_OP_WRITE_SC_SUPPORT:
3633 hci_cc_write_sc_support(hdev, skb);
3636 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3637 hci_cc_read_auth_payload_timeout(hdev, skb);
3640 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3641 hci_cc_write_auth_payload_timeout(hdev, skb);
3644 case HCI_OP_READ_LOCAL_VERSION:
3645 hci_cc_read_local_version(hdev, skb);
3648 case HCI_OP_READ_LOCAL_COMMANDS:
3649 hci_cc_read_local_commands(hdev, skb);
3652 case HCI_OP_READ_LOCAL_FEATURES:
3653 hci_cc_read_local_features(hdev, skb);
3656 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3657 hci_cc_read_local_ext_features(hdev, skb);
3660 case HCI_OP_READ_BUFFER_SIZE:
3661 hci_cc_read_buffer_size(hdev, skb);
3664 case HCI_OP_READ_BD_ADDR:
3665 hci_cc_read_bd_addr(hdev, skb);
3668 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3669 hci_cc_read_local_pairing_opts(hdev, skb);
3672 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3673 hci_cc_read_page_scan_activity(hdev, skb);
3676 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3677 hci_cc_write_page_scan_activity(hdev, skb);
3680 case HCI_OP_READ_PAGE_SCAN_TYPE:
3681 hci_cc_read_page_scan_type(hdev, skb);
3684 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3685 hci_cc_write_page_scan_type(hdev, skb);
3688 case HCI_OP_READ_DATA_BLOCK_SIZE:
3689 hci_cc_read_data_block_size(hdev, skb);
3692 case HCI_OP_READ_FLOW_CONTROL_MODE:
3693 hci_cc_read_flow_control_mode(hdev, skb);
3696 case HCI_OP_READ_LOCAL_AMP_INFO:
3697 hci_cc_read_local_amp_info(hdev, skb);
3700 case HCI_OP_READ_CLOCK:
3701 hci_cc_read_clock(hdev, skb);
3704 case HCI_OP_READ_INQ_RSP_TX_POWER:
3705 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3708 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3709 hci_cc_read_def_err_data_reporting(hdev, skb);
3712 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3713 hci_cc_write_def_err_data_reporting(hdev, skb);
3716 case HCI_OP_PIN_CODE_REPLY:
3717 hci_cc_pin_code_reply(hdev, skb);
3720 case HCI_OP_PIN_CODE_NEG_REPLY:
3721 hci_cc_pin_code_neg_reply(hdev, skb);
3724 case HCI_OP_READ_LOCAL_OOB_DATA:
3725 hci_cc_read_local_oob_data(hdev, skb);
3728 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3729 hci_cc_read_local_oob_ext_data(hdev, skb);
3732 case HCI_OP_LE_READ_BUFFER_SIZE:
3733 hci_cc_le_read_buffer_size(hdev, skb);
3736 case HCI_OP_LE_READ_LOCAL_FEATURES:
3737 hci_cc_le_read_local_features(hdev, skb);
3740 case HCI_OP_LE_READ_ADV_TX_POWER:
3741 hci_cc_le_read_adv_tx_power(hdev, skb);
3744 case HCI_OP_USER_CONFIRM_REPLY:
3745 hci_cc_user_confirm_reply(hdev, skb);
3748 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3749 hci_cc_user_confirm_neg_reply(hdev, skb);
3752 case HCI_OP_USER_PASSKEY_REPLY:
3753 hci_cc_user_passkey_reply(hdev, skb);
3756 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3757 hci_cc_user_passkey_neg_reply(hdev, skb);
3760 case HCI_OP_LE_SET_RANDOM_ADDR:
3761 hci_cc_le_set_random_addr(hdev, skb);
3764 case HCI_OP_LE_SET_ADV_ENABLE:
3765 hci_cc_le_set_adv_enable(hdev, skb);
3768 case HCI_OP_LE_SET_SCAN_PARAM:
3769 hci_cc_le_set_scan_param(hdev, skb);
3772 case HCI_OP_LE_SET_SCAN_ENABLE:
3773 hci_cc_le_set_scan_enable(hdev, skb);
3776 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3777 hci_cc_le_read_accept_list_size(hdev, skb);
3780 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3781 hci_cc_le_clear_accept_list(hdev, skb);
3784 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3785 hci_cc_le_add_to_accept_list(hdev, skb);
3788 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3789 hci_cc_le_del_from_accept_list(hdev, skb);
3792 case HCI_OP_LE_READ_SUPPORTED_STATES:
3793 hci_cc_le_read_supported_states(hdev, skb);
3796 case HCI_OP_LE_READ_DEF_DATA_LEN:
3797 hci_cc_le_read_def_data_len(hdev, skb);
3800 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3801 hci_cc_le_write_def_data_len(hdev, skb);
3804 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3805 hci_cc_le_add_to_resolv_list(hdev, skb);
3808 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3809 hci_cc_le_del_from_resolv_list(hdev, skb);
3812 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3813 hci_cc_le_clear_resolv_list(hdev, skb);
3816 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3817 hci_cc_le_read_resolv_list_size(hdev, skb);
3820 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3821 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3824 case HCI_OP_LE_READ_MAX_DATA_LEN:
3825 hci_cc_le_read_max_data_len(hdev, skb);
3828 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3829 hci_cc_write_le_host_supported(hdev, skb);
3832 case HCI_OP_LE_SET_ADV_PARAM:
3833 hci_cc_set_adv_param(hdev, skb);
3836 case HCI_OP_READ_RSSI:
3837 hci_cc_read_rssi(hdev, skb);
3840 case HCI_OP_READ_TX_POWER:
3841 hci_cc_read_tx_power(hdev, skb);
3844 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3845 hci_cc_write_ssp_debug_mode(hdev, skb);
3848 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3849 hci_cc_le_set_ext_scan_param(hdev, skb);
3852 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3853 hci_cc_le_set_ext_scan_enable(hdev, skb);
3856 case HCI_OP_LE_SET_DEFAULT_PHY:
3857 hci_cc_le_set_default_phy(hdev, skb);
3860 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3861 hci_cc_le_read_num_adv_sets(hdev, skb);
3864 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3865 hci_cc_set_ext_adv_param(hdev, skb);
3868 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3869 hci_cc_le_set_ext_adv_enable(hdev, skb);
3872 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3873 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3876 case HCI_OP_LE_READ_TRANSMIT_POWER:
3877 hci_cc_le_read_transmit_power(hdev, skb);
/* Vendor-specific commands (presumably out-of-tree RSSI monitoring). */
3880 case HCI_OP_ENABLE_RSSI:
3881 hci_cc_enable_rssi(hdev, skb);
3884 case HCI_OP_GET_RAW_RSSI:
3885 hci_cc_get_raw_rssi(hdev, skb);
/* Unhandled opcodes are just logged. */
3889 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3893 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3895 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3898 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3900 "unexpected event for opcode 0x%4.4x", *opcode);
/* Resume sending queued commands if the controller has credits. */
3904 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3905 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event: extract opcode/status, dispatch
 * to the matching hci_cs_* handler (commands that complete asynchronously
 * via later events), update command-credit accounting, flag the owning
 * request as complete where appropriate, and re-kick the command queue.
 */
3908 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3909 u16 *opcode, u8 *status,
3910 hci_req_complete_t *req_complete,
3911 hci_req_complete_skb_t *req_complete_skb)
3913 struct hci_ev_cmd_status *ev = (void *) skb->data;
3915 skb_pull(skb, sizeof(*ev));
3917 *opcode = __le16_to_cpu(ev->opcode);
3918 *status = ev->status;
/* One case per command whose progress is tracked by a hci_cs_* helper. */
3921 case HCI_OP_INQUIRY:
3922 hci_cs_inquiry(hdev, ev->status);
3925 case HCI_OP_CREATE_CONN:
3926 hci_cs_create_conn(hdev, ev->status);
3929 case HCI_OP_DISCONNECT:
3930 hci_cs_disconnect(hdev, ev->status);
3933 case HCI_OP_ADD_SCO:
3934 hci_cs_add_sco(hdev, ev->status);
3937 case HCI_OP_AUTH_REQUESTED:
3938 hci_cs_auth_requested(hdev, ev->status);
3941 case HCI_OP_SET_CONN_ENCRYPT:
3942 hci_cs_set_conn_encrypt(hdev, ev->status);
3945 case HCI_OP_REMOTE_NAME_REQ:
3946 hci_cs_remote_name_req(hdev, ev->status);
3949 case HCI_OP_READ_REMOTE_FEATURES:
3950 hci_cs_read_remote_features(hdev, ev->status);
3953 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3954 hci_cs_read_remote_ext_features(hdev, ev->status);
3957 case HCI_OP_SETUP_SYNC_CONN:
3958 hci_cs_setup_sync_conn(hdev, ev->status);
3961 case HCI_OP_SNIFF_MODE:
3962 hci_cs_sniff_mode(hdev, ev->status);
3965 case HCI_OP_EXIT_SNIFF_MODE:
3966 hci_cs_exit_sniff_mode(hdev, ev->status);
3969 case HCI_OP_SWITCH_ROLE:
3970 hci_cs_switch_role(hdev, ev->status);
3973 case HCI_OP_LE_CREATE_CONN:
3974 hci_cs_le_create_conn(hdev, ev->status);
3977 case HCI_OP_LE_READ_REMOTE_FEATURES:
3978 hci_cs_le_read_remote_features(hdev, ev->status);
3981 case HCI_OP_LE_START_ENC:
3982 hci_cs_le_start_enc(hdev, ev->status);
3985 case HCI_OP_LE_EXT_CREATE_CONN:
3986 hci_cs_le_ext_create_conn(hdev, ev->status);
3990 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3994 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3996 /* Indicate request completion if the command failed. Also, if
3997 * we're not waiting for a special event and we get a success
3998 * command status we should try to flag the request as completed
3999 * (since for this kind of commands there will not be a command
4003 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
4004 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4007 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4009 "unexpected event for opcode 0x%4.4x", *opcode);
/* Resume sending queued commands if the controller has credits. */
4013 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4014 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: report the vendor error code to
 * mgmt, record it on the hdev, and schedule the error-reset work to
 * recover the controller.
 */
4017 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
4019 struct hci_ev_hardware_error *ev = (void *) skb->data;
4023 mgmt_hardware_error(hdev, ev->code);
4024 hci_dev_unlock(hdev);
4026 hdev->hw_error_code = ev->code;
4028 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: update the connection's role on
 * success, clear the pending role-switch flag, confirm the switch to
 * upper layers, and refresh the supervision timeout when we become
 * central.
 */
4031 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4033 struct hci_ev_role_change *ev = (void *) skb->data;
4034 struct hci_conn *conn;
4036 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4040 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4043 conn->role = ev->role;
4045 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4047 hci_role_switch_cfm(conn, ev->status, ev->role);
/* Vendor addition: as the new central, re-apply the default link
 * supervision timeout.
 */
4049 if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
4050 hci_conn_change_supervision_timeout(conn,
4051 LINK_SUPERVISION_TIMEOUT);
4055 hci_dev_unlock(hdev);
/* Handle the HCI Number Of Completed Packets event (packet-based flow
 * control): for each reported handle, credit the completed packets back
 * to the per-type transmit budget (ACL / LE / SCO, clamped to the
 * controller-advertised maximum) and reschedule the TX work.
 */
4058 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
4060 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow control mode. */
4063 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4064 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the claimed handle count against the actual skb length. */
4068 if (skb->len < sizeof(*ev) ||
4069 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4070 BT_DBG("%s bad parameters", hdev->name);
4074 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
4076 for (i = 0; i < ev->num_hndl; i++) {
4077 struct hci_comp_pkts_info *info = &ev->handles[i];
4078 struct hci_conn *conn;
4079 __u16 handle, count;
4081 handle = __le16_to_cpu(info->handle);
4082 count = __le16_to_cpu(info->count);
4084 conn = hci_conn_hash_lookup_handle(hdev, handle);
4088 conn->sent -= count;
4090 switch (conn->type) {
4092 hdev->acl_cnt += count;
4093 if (hdev->acl_cnt > hdev->acl_pkts)
4094 hdev->acl_cnt = hdev->acl_pkts;
/* LE shares the ACL budget when the controller advertises no
 * dedicated LE buffers (le_pkts == 0).
 */
4098 if (hdev->le_pkts) {
4099 hdev->le_cnt += count;
4100 if (hdev->le_cnt > hdev->le_pkts)
4101 hdev->le_cnt = hdev->le_pkts;
4103 hdev->acl_cnt += count;
4104 if (hdev->acl_cnt > hdev->acl_pkts)
4105 hdev->acl_cnt = hdev->acl_pkts;
4110 hdev->sco_cnt += count;
4111 if (hdev->sco_cnt > hdev->sco_pkts)
4112 hdev->sco_cnt = hdev->sco_pkts;
4116 bt_dev_err(hdev, "unknown type %d conn %p",
4122 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a hci_conn according to the device type: a plain
 * connection-hash lookup for BR/EDR controllers, or a channel lookup
 * (returning the channel's connection) for AMP controllers.
 * NOTE(review): the HCI_PRIMARY/HCI_AMP case labels and the chan->conn
 * return are in lines elided from this chunk.
 */
4125 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4128 struct hci_chan *chan;
4130 switch (hdev->dev_type) {
4132 return hci_conn_hash_lookup_handle(hdev, handle);
4134 chan = hci_chan_lookup_handle(hdev, handle);
4139 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle the HCI Number Of Completed Data Blocks event (block-based
 * flow control): credit completed blocks back to the shared block
 * budget, clamped to num_blocks, and reschedule the TX work.
 */
4146 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
4148 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only valid in block-based flow control mode. */
4151 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4152 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the claimed handle count against the actual skb length. */
4156 if (skb->len < sizeof(*ev) ||
4157 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4158 BT_DBG("%s bad parameters", hdev->name);
4162 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
4165 for (i = 0; i < ev->num_hndl; i++) {
4166 struct hci_comp_blocks_info *info = &ev->handles[i];
4167 struct hci_conn *conn = NULL;
4168 __u16 handle, block_count;
4170 handle = __le16_to_cpu(info->handle);
4171 block_count = __le16_to_cpu(info->blocks);
/* AMP-aware lookup: handle may name a channel rather than a conn. */
4173 conn = __hci_conn_lookup_handle(hdev, handle);
4177 conn->sent -= block_count;
4179 switch (conn->type) {
4182 hdev->block_cnt += block_count;
4183 if (hdev->block_cnt > hdev->num_blocks)
4184 hdev->block_cnt = hdev->num_blocks;
4188 bt_dev_err(hdev, "unknown type %d conn %p",
4194 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event (active/sniff): record the new mode,
 * maintain the POWER_SAVE flag for unsolicited changes, and complete a
 * deferred SCO setup if one was waiting on the mode change.
 */
4197 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4199 struct hci_ev_mode_change *ev = (void *) skb->data;
4200 struct hci_conn *conn;
4202 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4206 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4208 conn->mode = ev->mode;
/* Only track POWER_SAVE for mode changes we did not initiate. */
4210 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4212 if (conn->mode == HCI_CM_ACTIVE)
4213 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4215 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4218 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4219 hci_sco_setup(conn, ev->status);
4222 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event: refresh the pairing timeout,
 * auto-reject when the device is not bondable and we did not initiate
 * authentication, otherwise forward the request to mgmt (flagging
 * whether a secure 16-digit PIN is required).
 */
4225 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4227 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4228 struct hci_conn *conn;
4230 BT_DBG("%s", hdev->name);
4234 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold/drop with the pairing timeout set keeps the link up while the
 * user enters a PIN.
 */
4238 if (conn->state == BT_CONNECTED) {
4239 hci_conn_hold(conn);
4240 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4241 hci_conn_drop(conn);
4244 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4245 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4246 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4247 sizeof(ev->bdaddr), &ev->bdaddr);
4248 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* NOTE(review): the "secure" variable declaration/assignment is in
 * lines elided from this chunk.
 */
4251 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4256 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4260 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level the key can satisfy: unauthenticated
 * combination keys cap at MEDIUM, authenticated P-192 gives HIGH, and
 * authenticated P-256 gives FIPS. Changed-combination keys are skipped
 * here (handled by the early return).
 */
4263 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4265 if (key_type == HCI_LK_CHANGED_COMBINATION)
4268 conn->pin_length = pin_len;
4269 conn->key_type = key_type;
4272 case HCI_LK_LOCAL_UNIT:
4273 case HCI_LK_REMOTE_UNIT:
4274 case HCI_LK_DEBUG_COMBINATION:
/* Legacy combination key: HIGH only with a sufficiently long PIN
 * (the length test is in lines elided from this chunk).
 */
4276 case HCI_LK_COMBINATION:
4278 conn->pending_sec_level = BT_SECURITY_HIGH;
4280 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4282 case HCI_LK_UNAUTH_COMBINATION_P192:
4283 case HCI_LK_UNAUTH_COMBINATION_P256:
4284 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4286 case HCI_LK_AUTH_COMBINATION_P192:
4287 conn->pending_sec_level = BT_SECURITY_HIGH;
4289 case HCI_LK_AUTH_COMBINATION_P256:
4290 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event: look up the stored key for the
 * remote address and reply with it, refusing keys that are too weak for
 * the connection's required security level; send a negative reply when
 * no acceptable key exists.
 */
4295 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4297 struct hci_ev_link_key_req *ev = (void *) skb->data;
4298 struct hci_cp_link_key_reply cp;
4299 struct hci_conn *conn;
4300 struct link_key *key;
4302 BT_DBG("%s", hdev->name);
/* Stored keys are only managed when mgmt controls the device. */
4304 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4309 key = hci_find_link_key(hdev, &ev->bdaddr);
4311 BT_DBG("%s link key not found for %pMR", hdev->name,
4316 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4319 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4321 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Refuse an unauthenticated key when MITM protection was requested
 * (auth_type bit 0).
 */
4323 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4324 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4325 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4326 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Refuse a short-PIN combination key for HIGH/FIPS security. */
4330 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4331 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4332 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4333 BT_DBG("%s ignoring key unauthenticated for high security",
4338 conn_set_key(conn, key->type, key->pin_len);
4341 bacpy(&cp.bdaddr, &ev->bdaddr);
4342 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4344 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4346 hci_dev_unlock(hdev);
/* No usable key: tell the controller to proceed without one. */
4351 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4352 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event: store the new key on the
 * connection and in the key list, notify mgmt, discard debug keys unless
 * HCI_KEEP_DEBUG_KEYS is set, and record whether the controller should
 * flush the key on disconnect based on its persistence.
 */
4355 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4357 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4358 struct hci_conn *conn;
4359 struct link_key *key;
4363 BT_DBG("%s", hdev->name);
4367 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold/drop with the disconnect timeout refreshed keeps the link up
 * briefly after key delivery.
 */
4371 hci_conn_hold(conn);
4372 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4373 hci_conn_drop(conn);
4375 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4376 conn_set_key(conn, ev->key_type, conn->pin_length);
4378 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* NOTE(review): pin_len's declaration/assignment is in lines elided
 * from this chunk.
 */
4381 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4382 ev->key_type, pin_len, &persistent);
4386 /* Update connection information since adding the key will have
4387 * fixed up the type in the case of changed combination keys.
4389 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4390 conn_set_key(conn, key->type, key->pin_len);
4392 mgmt_new_link_key(hdev, key, persistent);
4394 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4395 * is set. If it's not set simply remove the key from the kernel
4396 * list (we've still notified user space about it but with
4397 * store_hint being 0).
4399 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4400 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4401 list_del_rcu(&key->list);
4402 kfree_rcu(key, rcu);
/* Persistent keys must survive disconnect; non-persistent ones are
 * flushed by the controller.
 */
4407 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4409 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4412 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: cache the remote
 * clock offset in the inquiry cache entry (used to speed up later
 * paging of this device).
 */
4415 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4417 struct hci_ev_clock_offset *ev = (void *) skb->data;
4418 struct hci_conn *conn;
4420 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4424 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4425 if (conn && !ev->status) {
4426 struct inquiry_entry *ie;
4428 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4430 ie->data.clock_offset = ev->clock_offset;
4431 ie->timestamp = jiffies;
4435 hci_dev_unlock(hdev);
/* Handle HCI Connection Packet Type Changed event: update the connection's
 * negotiated packet type mask on success.
 */
4438 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4440 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4441 struct hci_conn *conn;
4443 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4447 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4448 if (conn && !ev->status)
4449 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4451 hci_dev_unlock(hdev);
/* Handle HCI Page Scan Repetition Mode Change event: refresh the cached
 * page-scan repetition mode (and timestamp) for the peer in the inquiry
 * cache.
 */
4454 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4456 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4457 struct inquiry_entry *ie;
4459 BT_DBG("%s", hdev->name);
4463 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4465 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4466 ie->timestamp = jiffies;
4469 hci_dev_unlock(hdev);
/* Handle HCI Inquiry Result with RSSI event. Two wire formats exist for
 * this event; they are distinguished by dividing the payload size by the
 * response count: responses either include a pscan_mode field or not.
 * Each response is folded into the inquiry cache and reported via mgmt.
 */
4472 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4473 struct sk_buff *skb)
4475 struct inquiry_data data;
4476 int num_rsp = *((__u8 *) skb->data);
4478 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results during periodic inquiry are not forwarded */
4483 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Variant with the extra pscan_mode byte per response */
4488 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4489 struct inquiry_info_with_rssi_and_pscan_mode *info;
4490 info = (void *) (skb->data + 1);
/* Bounds check: reject truncated event payloads */
4492 if (skb->len < num_rsp * sizeof(*info) + 1)
4495 for (; num_rsp; num_rsp--, info++) {
4498 bacpy(&data.bdaddr, &info->bdaddr);
4499 data.pscan_rep_mode = info->pscan_rep_mode;
4500 data.pscan_period_mode = info->pscan_period_mode;
4501 data.pscan_mode = info->pscan_mode;
4502 memcpy(data.dev_class, info->dev_class, 3);
4503 data.clock_offset = info->clock_offset;
4504 data.rssi = info->rssi;
4505 data.ssp_mode = 0x00;
4507 flags = hci_inquiry_cache_update(hdev, &data, false);
4509 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4510 info->dev_class, info->rssi,
4511 flags, NULL, 0, NULL, 0);
/* Standard variant without pscan_mode */
4514 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4516 if (skb->len < num_rsp * sizeof(*info) + 1)
4519 for (; num_rsp; num_rsp--, info++) {
4522 bacpy(&data.bdaddr, &info->bdaddr);
4523 data.pscan_rep_mode = info->pscan_rep_mode;
4524 data.pscan_period_mode = info->pscan_period_mode;
4525 data.pscan_mode = 0x00;
4526 memcpy(data.dev_class, info->dev_class, 3);
4527 data.clock_offset = info->clock_offset;
4528 data.rssi = info->rssi;
4529 data.ssp_mode = 0x00;
4531 flags = hci_inquiry_cache_update(hdev, &data, false);
4533 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4534 info->dev_class, info->rssi,
4535 flags, NULL, 0, NULL, 0);
4540 hci_dev_unlock(hdev);
/* Handle HCI Read Remote Extended Features Complete event: store the
 * remote feature page, derive SSP/SC flags from host features (page 1),
 * and continue connection setup (remote name request or connect
 * confirmation) while the connection is still in BT_CONFIG.
 */
4543 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4544 struct sk_buff *skb)
4546 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4547 struct hci_conn *conn;
4549 BT_DBG("%s", hdev->name);
4553 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4557 if (ev->page < HCI_MAX_PAGES)
4558 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries host features: SSP and Secure Connections support */
4560 if (!ev->status && ev->page == 0x01) {
4561 struct inquiry_entry *ie;
4563 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4565 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4567 if (ev->features[0] & LMP_HOST_SSP) {
4568 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4570 /* It is mandatory by the Bluetooth specification that
4571 * Extended Inquiry Results are only used when Secure
4572 * Simple Pairing is enabled, but some devices violate
4575 * To make these devices work, the internal SSP
4576 * enabled flag needs to be cleared if the remote host
4577 * features do not indicate SSP support */
4578 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4581 if (ev->features[0] & LMP_HOST_SC)
4582 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Remaining setup only applies while still configuring the link */
4585 if (conn->state != BT_CONFIG)
4588 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4589 struct hci_cp_remote_name_req cp;
4590 memset(&cp, 0, sizeof(cp));
4591 bacpy(&cp.bdaddr, &conn->dst);
4592 cp.pscan_rep_mode = 0x02;
4593 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4594 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4595 mgmt_device_connected(hdev, conn, NULL, 0);
/* If no outgoing authentication is pending, finish the connection */
4597 if (!hci_outgoing_auth_needed(hdev, conn)) {
4598 conn->state = BT_CONNECTED;
4599 hci_connect_cfm(conn, ev->status);
4600 hci_conn_drop(conn);
4604 hci_dev_unlock(hdev);
/* Handle HCI Synchronous Connection Complete event (SCO/eSCO). Validates
 * the link type, guards against duplicate complete events for the same
 * connection, retries eSCO setup with a reduced packet type on selected
 * failure codes, and notifies the driver of the negotiated air mode.
 */
4607 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4608 struct sk_buff *skb)
4610 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4611 struct hci_conn *conn;
4613 switch (ev->link_type) {
4618 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4619 * for HCI_Synchronous_Connection_Complete is limited to
4620 * either SCO or eSCO
4622 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4626 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4630 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4632 if (ev->link_type == ESCO_LINK)
4635 /* When the link type in the event indicates SCO connection
4636 * and lookup of the connection object fails, then check
4637 * if an eSCO connection object exists.
4639 * The core limits the synchronous connections to either
4640 * SCO or eSCO. The eSCO connection is preferred and tried
4641 * to be setup first and until successfully established,
4642 * the link type will be hinted as eSCO.
4644 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4649 switch (ev->status) {
4651 /* The synchronous connection complete event should only be
4652 * sent once per new connection. Receiving a successful
4653 * complete event when the connection status is already
4654 * BT_CONNECTED means that the device is misbehaving and sent
4655 * multiple complete event packets for the same new connection.
4657 * Registering the device more than once can corrupt kernel
4658 * memory, hence upon detecting this invalid event, we report
4659 * an error and ignore the packet.
4661 if (conn->state == BT_CONNECTED) {
4662 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4666 conn->handle = __le16_to_cpu(ev->handle);
4667 conn->state = BT_CONNECTED;
4668 conn->type = ev->link_type;
4670 hci_debugfs_create_conn(conn);
4671 hci_conn_add_sysfs(conn);
/* Failure codes that warrant a retry with a fallback packet type */
4674 case 0x10: /* Connection Accept Timeout */
4675 case 0x0d: /* Connection Rejected due to Limited Resources */
4676 case 0x11: /* Unsupported Feature or Parameter Value */
4677 case 0x1c: /* SCO interval rejected */
4678 case 0x1a: /* Unsupported Remote Feature */
4679 case 0x1e: /* Invalid LMP Parameters */
4680 case 0x1f: /* Unspecified error */
4681 case 0x20: /* Unsupported LMP Parameter value */
4683 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4684 (hdev->esco_type & EDR_ESCO_MASK);
4685 if (hci_setup_sync(conn, conn->link->handle))
4691 conn->state = BT_CLOSED;
4695 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Tell the driver which SCO routing/codec mode became active */
4697 switch (ev->air_mode) {
4700 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4704 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4708 hci_connect_cfm(conn, ev->status);
4713 hci_dev_unlock(hdev);
/* Return the number of bytes of meaningful EIR data in the buffer by
 * walking the length-prefixed fields (each field is 1 length byte plus
 * field_len bytes of payload).
 */
4716 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4720 while (parsed < eir_len) {
4721 u8 field_len = eir[0];
4726 parsed += field_len + 1;
4727 eir += field_len + 1;
/* Handle HCI Extended Inquiry Result event: update the inquiry cache for
 * each response and forward the device plus its EIR data to mgmt. SSP
 * mode is implied (ssp_mode = 0x01) since EIR requires SSP.
 */
4733 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4734 struct sk_buff *skb)
4736 struct inquiry_data data;
4737 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4738 int num_rsp = *((__u8 *) skb->data);
4741 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Bounds check: zero responses or truncated payload */
4743 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4746 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4751 for (; num_rsp; num_rsp--, info++) {
4755 bacpy(&data.bdaddr, &info->bdaddr);
4756 data.pscan_rep_mode = info->pscan_rep_mode;
4757 data.pscan_period_mode = info->pscan_period_mode;
4758 data.pscan_mode = 0x00;
4759 memcpy(data.dev_class, info->dev_class, 3);
4760 data.clock_offset = info->clock_offset;
4761 data.rssi = info->rssi;
4762 data.ssp_mode = 0x01;
/* With mgmt active, only treat the name as known if the EIR
 * carries a complete name field.
 */
4764 if (hci_dev_test_flag(hdev, HCI_MGMT))
4765 name_known = eir_get_data(info->data,
4767 EIR_NAME_COMPLETE, NULL);
4771 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4773 eir_len = eir_get_length(info->data, sizeof(info->data));
4775 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4776 info->dev_class, info->rssi,
4777 flags, info->data, eir_len, NULL, 0);
4780 hci_dev_unlock(hdev);
/* Handle HCI Encryption Key Refresh Complete event. Only acted on for LE
 * links (BR/EDR handles this via the auth_complete event): promote the
 * pending security level, or disconnect on failure.
 */
4783 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4784 struct sk_buff *skb)
4786 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4787 struct hci_conn *conn;
4789 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4790 __le16_to_cpu(ev->handle));
4794 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4798 /* For BR/EDR the necessary steps are taken through the
4799 * auth_complete event.
4801 if (conn->type != LE_LINK)
4805 conn->sec_level = conn->pending_sec_level;
4807 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed refresh on an established link is treated as an auth
 * failure and the link is torn down.
 */
4809 if (ev->status && conn->state == BT_CONNECTED) {
4810 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4811 hci_conn_drop(conn);
4815 if (conn->state == BT_CONFIG) {
4817 conn->state = BT_CONNECTED;
4819 hci_connect_cfm(conn, ev->status);
4820 hci_conn_drop(conn);
4822 hci_auth_cfm(conn, ev->status);
4824 hci_conn_hold(conn);
4825 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4826 hci_conn_drop(conn);
4830 hci_dev_unlock(hdev);
/* Derive the authentication requirements to use for an IO Capability
 * Reply, combining the remote requirements with local IO capabilities.
 * Bit 0 of the auth value is the MITM-protection flag.
 */
4833 static u8 hci_get_auth_req(struct hci_conn *conn)
/* Remote asks for general bonding with MITM: honor it only if both
 * sides have IO capabilities that can support MITM.
 */
4836 if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
4837 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4838 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4839 return HCI_AT_GENERAL_BONDING_MITM;
4843 /* If remote requests no-bonding follow that lead */
4844 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4845 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4846 return conn->remote_auth | (conn->auth_type & 0x01);
4848 /* If both remote and local have enough IO capabilities, require
4851 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4852 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4853 return conn->remote_auth | 0x01;
4855 /* No MITM protection possible so ignore remote requirement */
4856 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Report whether usable remote OOB data is stored for this BR/EDR
 * connection, honoring Secure Connections and SC-Only mode: SC-Only
 * requires valid (non-zero) P-256 values, while non-SC controllers only
 * need P-192 values.
 */
4859 static u8 bredr_oob_data_present(struct hci_conn *conn)
4861 struct hci_dev *hdev = conn->hdev;
4862 struct oob_data *data;
4864 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4868 if (bredr_sc_enabled(hdev)) {
4869 /* When Secure Connections is enabled, then just
4870 * return the present value stored with the OOB
4871 * data. The stored value contains the right present
4872 * information. However it can only be trusted when
4873 * not in Secure Connection Only mode.
4875 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4876 return data->present;
4878 /* When Secure Connections Only mode is enabled, then
4879 * the P-256 values are required. If they are not
4880 * available, then do not declare that OOB data is
4883 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4884 !memcmp(data->hash256, ZERO_KEY, 16))
4890 /* When Secure Connections is not enabled or actually
4891 * not supported by the hardware, then check that if
4892 * P-192 data values are present.
4894 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4895 !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle HCI IO Capability Request event: reply with our IO capability
 * and authentication requirements when pairing is allowed (bondable, we
 * initiated, or remote asks for no-bonding); otherwise send a negative
 * reply with "pairing not allowed".
 */
4901 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4903 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4904 struct hci_conn *conn;
4906 BT_DBG("%s", hdev->name);
4910 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4914 hci_conn_hold(conn);
4916 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4919 /* Allow pairing if we're pairable, the initiators of the
4920 * pairing or if the remote is not requesting bonding.
4922 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4923 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4924 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4925 struct hci_cp_io_capability_reply cp;
4927 bacpy(&cp.bdaddr, &ev->bdaddr);
4928 /* Change the IO capability from KeyboardDisplay
4929 * to DisplayYesNo as it is not supported by BT spec. */
4930 cp.capability = (conn->io_capability == 0x04) ?
4931 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4933 /* If we are initiators, there is no remote information yet */
4934 if (conn->remote_auth == 0xff) {
4935 /* Request MITM protection if our IO caps allow it
4936 * except for the no-bonding case.
4938 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4939 conn->auth_type != HCI_AT_NO_BONDING)
4940 conn->auth_type |= 0x01;
4942 conn->auth_type = hci_get_auth_req(conn);
4945 /* If we're not bondable, force one of the non-bondable
4946 * authentication requirement values.
4948 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4949 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4951 cp.authentication = conn->auth_type;
4952 cp.oob_data = bredr_oob_data_present(conn);
4954 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4957 struct hci_cp_io_capability_neg_reply cp;
4959 bacpy(&cp.bdaddr, &ev->bdaddr);
4960 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4962 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4967 hci_dev_unlock(hdev);
/* Handle HCI IO Capability Response event: record the remote device's IO
 * capability and authentication requirements on the connection for later
 * pairing decisions.
 */
4970 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4972 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4973 struct hci_conn *conn;
4975 BT_DBG("%s", hdev->name);
4979 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4983 conn->remote_cap = ev->capability;
4984 conn->remote_auth = ev->authentication;
4987 hci_dev_unlock(hdev);
/* Handle HCI User Confirmation Request event (SSP numeric comparison).
 * Rejects when MITM is required but the remote cannot provide it,
 * auto-accepts (optionally after a configured delay) when neither side
 * needs MITM, and otherwise forwards the request to user space via mgmt.
 */
4990 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4991 struct sk_buff *skb)
4993 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4994 int loc_mitm, rem_mitm, confirm_hint = 0;
4995 struct hci_conn *conn;
4997 BT_DBG("%s", hdev->name);
5001 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5004 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag on each side */
5008 loc_mitm = (conn->auth_type & 0x01);
5009 rem_mitm = (conn->remote_auth & 0x01);
5011 /* If we require MITM but the remote device can't provide that
5012 * (it has NoInputNoOutput) then reject the confirmation
5013 * request. We check the security level here since it doesn't
5014 * necessarily match conn->auth_type.
5016 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5017 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5018 BT_DBG("Rejecting request: remote device can't provide MITM");
5019 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5020 sizeof(ev->bdaddr), &ev->bdaddr);
5024 /* If no side requires MITM protection; auto-accept */
5025 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5026 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5028 /* If we're not the initiators request authorization to
5029 * proceed from user space (mgmt_user_confirm with
5030 * confirm_hint set to 1). The exception is if neither
5031 * side had MITM or if the local IO capability is
5032 * NoInputNoOutput, in which case we do auto-accept
5034 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5035 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5036 (loc_mitm || rem_mitm)) {
5037 BT_DBG("Confirming auto-accept as acceptor");
5042 /* If there already exists link key in local host, leave the
5043 * decision to user space since the remote device could be
5044 * legitimate or malicious.
5046 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5047 bt_dev_dbg(hdev, "Local host already has link key");
5052 BT_DBG("Auto-accept of user confirmation with %ums delay",
5053 hdev->auto_accept_delay);
/* Deferred accept: schedule the auto-accept work instead of
 * replying immediately when a delay is configured.
 */
5055 if (hdev->auto_accept_delay > 0) {
5056 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5057 queue_delayed_work(conn->hdev->workqueue,
5058 &conn->auto_accept_work, delay);
5062 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5063 sizeof(ev->bdaddr), &ev->bdaddr);
5068 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5069 le32_to_cpu(ev->passkey), confirm_hint);
5072 hci_dev_unlock(hdev);
/* Handle HCI User Passkey Request event: forward the request to user
 * space via mgmt when the management interface is active.
 */
5075 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
5076 struct sk_buff *skb)
5078 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
5080 BT_DBG("%s", hdev->name);
5082 if (hci_dev_test_flag(hdev, HCI_MGMT))
5083 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle HCI User Passkey Notification event: store the passkey on the
 * connection, reset the entered-digit counter, and notify user space so
 * it can display the passkey.
 */
5086 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
5087 struct sk_buff *skb)
5089 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
5090 struct hci_conn *conn;
5092 BT_DBG("%s", hdev->name);
5094 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5098 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5099 conn->passkey_entered = 0;
5101 if (hci_dev_test_flag(hdev, HCI_MGMT))
5102 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5103 conn->dst_type, conn->passkey_notify,
5104 conn->passkey_entered);
/* Handle HCI Keypress Notification event: track the number of passkey
 * digits entered/erased on the remote keyboard and relay the updated
 * count to user space so the UI can mirror progress.
 */
5107 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
5109 struct hci_ev_keypress_notify *ev = (void *) skb->data;
5110 struct hci_conn *conn;
5112 BT_DBG("%s", hdev->name);
5114 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5119 case HCI_KEYPRESS_STARTED:
5120 conn->passkey_entered = 0;
5123 case HCI_KEYPRESS_ENTERED:
5124 conn->passkey_entered++;
5127 case HCI_KEYPRESS_ERASED:
5128 conn->passkey_entered--;
5131 case HCI_KEYPRESS_CLEARED:
5132 conn->passkey_entered = 0;
5135 case HCI_KEYPRESS_COMPLETED:
5139 if (hci_dev_test_flag(hdev, HCI_MGMT))
5140 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5141 conn->dst_type, conn->passkey_notify,
5142 conn->passkey_entered);
/* Handle HCI Simple Pairing Complete event: reset the cached remote auth
 * requirement and report pairing failure to mgmt when we were not the
 * authentication initiator (the initiator path reports failures via the
 * auth_complete event instead).
 */
5145 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
5146 struct sk_buff *skb)
5148 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
5149 struct hci_conn *conn;
5151 BT_DBG("%s", hdev->name);
5155 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5159 /* Reset the authentication requirement to unknown */
5160 conn->remote_auth = 0xff;
5162 /* To avoid duplicate auth_failed events to user space we check
5163 * the HCI_CONN_AUTH_PEND flag which will be set if we
5164 * initiated the authentication. A traditional auth_complete
5165 * event gets always produced as initiator and is also mapped to
5166 * the mgmt_auth_failed event */
5167 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5168 mgmt_auth_failed(conn, ev->status);
5170 hci_conn_drop(conn);
5173 hci_dev_unlock(hdev);
/* Handle HCI Remote Host Supported Features Notification event: cache the
 * remote host feature page on any existing connection and record the
 * remote's SSP support in the inquiry cache.
 */
5176 static void hci_remote_host_features_evt(struct hci_dev *hdev,
5177 struct sk_buff *skb)
5179 struct hci_ev_remote_host_features *ev = (void *) skb->data;
5180 struct inquiry_entry *ie;
5181 struct hci_conn *conn;
5183 BT_DBG("%s", hdev->name);
5187 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5189 memcpy(conn->features[1], ev->features, 8);
5191 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5193 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5195 hci_dev_unlock(hdev);
/* Handle HCI Remote OOB Data Request event: reply with stored OOB hash
 * and randomizer values for the peer (extended P-192+P-256 form when
 * Secure Connections is enabled, P-192 only otherwise), or send a
 * negative reply when no OOB data is stored.
 */
5198 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5199 struct sk_buff *skb)
5201 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5202 struct oob_data *data;
5204 BT_DBG("%s", hdev->name);
5208 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5211 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5213 struct hci_cp_remote_oob_data_neg_reply cp;
5215 bacpy(&cp.bdaddr, &ev->bdaddr);
5216 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5221 if (bredr_sc_enabled(hdev)) {
5222 struct hci_cp_remote_oob_ext_data_reply cp;
5224 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the legacy P-192 values are zeroed out so only
 * the P-256 values are used.
 */
5225 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5226 memset(cp.hash192, 0, sizeof(cp.hash192));
5227 memset(cp.rand192, 0, sizeof(cp.rand192));
5229 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5230 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5232 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5233 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5235 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5238 struct hci_cp_remote_oob_data_reply cp;
5240 bacpy(&cp.bdaddr, &ev->bdaddr);
5241 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5242 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5244 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5249 hci_dev_unlock(hdev);
5252 #if IS_ENABLED(CONFIG_BT_HS)
/* Handle AMP Channel Selected event (CONFIG_BT_HS): look up the physical
 * link by handle and kick off reading the final local AMP association
 * data.
 */
5253 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5255 struct hci_ev_channel_selected *ev = (void *)skb->data;
5256 struct hci_conn *hcon;
5258 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5260 skb_pull(skb, sizeof(*ev));
5262 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5266 amp_read_loc_assoc_final_data(hdev, hcon);
/* Handle AMP Physical Link Complete event (CONFIG_BT_HS): mark the AMP
 * physical link as connected, copy the destination address from the
 * underlying BR/EDR connection, and confirm the physical link to the
 * AMP manager.
 */
5269 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5270 struct sk_buff *skb)
5272 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5273 struct hci_conn *hcon, *bredr_hcon;
5275 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5280 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5292 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5294 hcon->state = BT_CONNECTED;
5295 bacpy(&hcon->dst, &bredr_hcon->dst);
5297 hci_conn_hold(hcon);
5298 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5299 hci_conn_drop(hcon);
5301 hci_debugfs_create_conn(hcon);
5302 hci_conn_add_sysfs(hcon);
5304 amp_physical_cfm(bredr_hcon, hcon);
5307 hci_dev_unlock(hdev);
/* Handle AMP Logical Link Complete event (CONFIG_BT_HS): create an AMP
 * hci_chan for the logical link and, when a BR/EDR L2CAP channel is
 * bridged to this AMP manager, confirm the logical link to L2CAP with
 * the AMP block MTU.
 */
5310 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5312 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5313 struct hci_conn *hcon;
5314 struct hci_chan *hchan;
5315 struct amp_mgr *mgr;
5317 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5318 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5321 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5325 /* Create AMP hchan */
5326 hchan = hci_chan_create(hcon);
5330 hchan->handle = le16_to_cpu(ev->handle);
5333 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5335 mgr = hcon->amp_mgr;
5336 if (mgr && mgr->bredr_chan) {
5337 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5339 l2cap_chan_lock(bredr_chan);
5341 bredr_chan->conn->mtu = hdev->block_mtu;
5342 l2cap_logical_cfm(bredr_chan, hchan, 0);
5343 hci_conn_hold(hcon);
5345 l2cap_chan_unlock(bredr_chan);
/* Handle AMP Disconnection Logical Link Complete event (CONFIG_BT_HS):
 * find the AMP channel by logical handle and tear it down.
 */
5349 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5350 struct sk_buff *skb)
5352 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5353 struct hci_chan *hchan;
5355 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5356 le16_to_cpu(ev->handle), ev->status);
5363 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are handled here */
5364 if (!hchan || !hchan->amp)
5367 amp_destroy_logical_link(hchan, ev->reason);
5370 hci_dev_unlock(hdev);
/* Handle AMP Disconnection Physical Link Complete event (CONFIG_BT_HS):
 * close the matching AMP connection and notify the disconnect upwards.
 */
5373 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5374 struct sk_buff *skb)
5376 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5377 struct hci_conn *hcon;
5379 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5386 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5387 if (hcon && hcon->type == AMP_LINK) {
5388 hcon->state = BT_CLOSED;
5389 hci_disconn_cfm(hcon, ev->reason);
5393 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address information on an LE
 * connection. The peer's role decides which side we were: one branch
 * treats the peer address as the responder and derives our initiator
 * address (preferring a controller-provided local RPA, then hdev->rpa
 * under HCI_PRIVACY, else the identity address); the other branch treats
 * the peer as initiator and derives our responder (advertising) address.
 */
5397 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5398 u8 bdaddr_type, bdaddr_t *local_rpa)
5401 conn->dst_type = bdaddr_type;
5402 conn->resp_addr_type = bdaddr_type;
5403 bacpy(&conn->resp_addr, bdaddr);
5405 /* Check if the controller has set a Local RPA then it must be
5406 * used instead or hdev->rpa.
5408 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5409 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5410 bacpy(&conn->init_addr, local_rpa);
5411 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5412 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5413 bacpy(&conn->init_addr, &conn->hdev->rpa);
5415 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5416 &conn->init_addr_type);
5419 conn->resp_addr_type = conn->hdev->adv_addr_type;
5420 /* Check if the controller has set a Local RPA then it must be
5421 * used instead or hdev->rpa.
5423 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5424 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5425 bacpy(&conn->resp_addr, local_rpa);
5426 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5427 /* In case of ext adv, resp_addr will be updated in
5428 * Adv Terminated event.
5430 if (!ext_adv_capable(conn->hdev))
5431 bacpy(&conn->resp_addr,
5432 &conn->hdev->random_addr);
5434 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5437 conn->init_addr_type = bdaddr_type;
5438 bacpy(&conn->init_addr, bdaddr);
5440 /* For incoming connections, set the default minimum
5441 * and maximum connection interval. They will be used
5442 * to check if the parameters are in range and if not
5443 * trigger the connection update procedure.
5445 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5446 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events. Finds or creates the hci_conn, resolves the peer's
 * identity (IRK) and address type, rejects blocked devices, records the
 * connection parameters, and either requests remote features (central
 * role, or peripheral when the controller supports peripheral-initiated
 * feature exchange) or transitions straight to BT_CONNECTED.
 */
5450 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5451 bdaddr_t *bdaddr, u8 bdaddr_type,
5452 bdaddr_t *local_rpa, u8 role, u16 handle,
5453 u16 interval, u16 latency,
5454 u16 supervision_timeout)
5456 struct hci_conn_params *params;
5457 struct hci_conn *conn;
5458 struct smp_irk *irk;
5463 /* All controllers implicitly stop advertising in the event of a
5464 * connection, so ensure that the state bit is cleared.
5466 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5468 conn = hci_lookup_le_connect(hdev);
/* No pending connect object: this is a new (e.g. incoming) connection */
5470 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5472 bt_dev_err(hdev, "no memory for new connection");
5476 conn->dst_type = bdaddr_type;
5478 /* If we didn't have a hci_conn object previously
5479 * but we're in central role this must be something
5480 * initiated using an accept list. Since accept list based
5481 * connections are not "first class citizens" we don't
5482 * have full tracking of them. Therefore, we go ahead
5483 * with a "best effort" approach of determining the
5484 * initiator address based on the HCI_PRIVACY flag.
5487 conn->resp_addr_type = bdaddr_type;
5488 bacpy(&conn->resp_addr, bdaddr);
5489 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5490 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5491 bacpy(&conn->init_addr, &hdev->rpa);
5493 hci_copy_identity_address(hdev,
5495 &conn->init_addr_type);
5500 /* LE auto connect */
5501 bacpy(&conn->dst, bdaddr);
/* Connection attempt finished, stop its timeout timer */
5503 cancel_delayed_work(&conn->le_conn_timeout);
5506 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5508 /* Lookup the identity address from the stored connection
5509 * address and address type.
5511 * When establishing connections to an identity address, the
5512 * connection procedure will store the resolvable random
5513 * address first. Now if it can be converted back into the
5514 * identity address, start using the identity address from
5517 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5519 bacpy(&conn->dst, &irk->bdaddr);
5520 conn->dst_type = irk->addr_type;
5523 /* When using controller based address resolution, then the new
5524 * address types 0x02 and 0x03 are used. These types need to be
5525 * converted back into either public address or random address type
5527 if (use_ll_privacy(hdev) &&
5528 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5529 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5530 switch (conn->dst_type) {
5531 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5532 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5534 case ADDR_LE_DEV_RANDOM_RESOLVED:
5535 conn->dst_type = ADDR_LE_DEV_RANDOM;
/* Non-zero status: report the failed connection and bail out */
5541 hci_le_conn_failed(conn, status);
5545 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5546 addr_type = BDADDR_LE_PUBLIC;
5548 addr_type = BDADDR_LE_RANDOM;
5550 /* Drop the connection if the device is blocked */
5551 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5552 hci_conn_drop(conn);
5556 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5557 mgmt_device_connected(hdev, conn, NULL, 0);
5559 conn->sec_level = BT_SECURITY_LOW;
5560 conn->handle = handle;
5561 conn->state = BT_CONFIG;
5563 /* Store current advertising instance as connection advertising instance
5564 * when sotfware rotation is in use so it can be re-enabled when
5567 if (!ext_adv_capable(hdev))
5568 conn->adv_instance = hdev->cur_adv_instance;
5570 conn->le_conn_interval = interval;
5571 conn->le_conn_latency = latency;
5572 conn->le_supv_timeout = supervision_timeout;
5574 hci_debugfs_create_conn(conn);
5575 hci_conn_add_sysfs(conn);
5577 /* The remote features procedure is defined for central
5578 * role only. So only in case of an initiated connection
5579 * request the remote features.
5581 * If the local controller supports peripheral-initiated features
5582 * exchange, then requesting the remote features in peripheral
5583 * role is possible. Otherwise just transition into the
5584 * connected state without requesting the remote features.
5587 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5588 struct hci_cp_le_read_remote_features cp;
5590 cp.handle = __cpu_to_le16(conn->handle);
5592 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5595 hci_conn_hold(conn);
5597 conn->state = BT_CONNECTED;
5598 hci_connect_cfm(conn, status);
/* A pending LE connection action for this peer is now satisfied */
5601 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5604 list_del_init(&params->action);
5606 hci_conn_drop(params->conn);
5607 hci_conn_put(params->conn);
5608 params->conn = NULL;
5613 hci_update_background_scan(hdev);
5614 hci_dev_unlock(hdev);
/* Handle HCI LE Connection Complete event: thin wrapper that forwards
 * the event fields to le_conn_complete_evt (no local RPA in the legacy
 * event, hence NULL).
 */
5617 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5619 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5621 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5623 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5624 NULL, ev->role, le16_to_cpu(ev->handle),
5625 le16_to_cpu(ev->interval),
5626 le16_to_cpu(ev->latency),
5627 le16_to_cpu(ev->supervision_timeout));
/* Handle HCI LE Enhanced Connection Complete event: like the legacy
 * variant but carries the controller-resolved local RPA. When LL privacy
 * is active, address resolution is disabled afterwards via a request.
 */
5630 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5631 struct sk_buff *skb)
5633 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5635 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5637 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5638 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5639 le16_to_cpu(ev->interval),
5640 le16_to_cpu(ev->latency),
5641 le16_to_cpu(ev->supervision_timeout));
5643 if (use_ll_privacy(hdev) &&
5644 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5645 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5646 hci_req_disable_address_resolution(hdev);
/* Handle HCI LE Advertising Set Terminated event: remove or disable the
 * advertising instance and, when the set terminated because a connection
 * was created, record the advertising instance and the responder address
 * on the resulting connection.
 */
5649 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5651 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5652 struct hci_conn *conn;
5653 struct adv_info *adv;
5655 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5657 adv = hci_find_adv_instance(hdev, ev->handle);
5663 /* Remove advertising as it has been terminated */
5664 hci_remove_adv_instance(hdev, ev->handle);
5665 mgmt_advertising_removed(NULL, hdev, ev->handle);
5671 adv->enabled = false;
5673 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5675 /* Store handle in the connection so the correct advertising
5676 * instance can be re-enabled when disconnected.
5678 conn->adv_instance = ev->handle;
/* Nothing to fix up unless we advertise with a random address and
 * the responder address was left unset.
 */
5680 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5681 bacmp(&conn->resp_addr, BDADDR_ANY))
5685 bacpy(&conn->resp_addr, &hdev->random_addr);
5690 bacpy(&conn->resp_addr, &adv->random_addr);
/* Handle HCI LE Connection Update Complete event: on failure report it
 * to mgmt; on success store the new interval/latency/supervision timeout
 * on the connection and notify mgmt of the updated parameters.
 */
5694 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5695 struct sk_buff *skb)
5697 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5698 struct hci_conn *conn;
5700 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5707 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Failure path: unlock before reporting to mgmt */
5711 hci_dev_unlock(hdev);
5712 mgmt_le_conn_update_failed(hdev, &conn->dst,
5713 conn->type, conn->dst_type, ev->status);
5717 conn->le_conn_interval = le16_to_cpu(ev->interval);
5718 conn->le_conn_latency = le16_to_cpu(ev->latency);
5719 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5722 hci_dev_unlock(hdev);
5725 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
5726 conn->dst_type, conn->le_conn_interval,
5727 conn->le_conn_latency, conn->le_supv_timeout);
5731 /* This function requires the caller holds hdev->lock */
/* Decide, based on an incoming advertising report, whether we should create
 * an outgoing LE connection to the advertiser. Returns the hci_conn created
 * by hci_connect_le() on success; otherwise NULL / nothing (early-return
 * lines are not visible in this extraction — TODO confirm return paths).
 */
5732 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5734 u8 addr_type, u8 adv_type,
5735 bdaddr_t *direct_rpa)
5737 struct hci_conn *conn;
5738 struct hci_conn_params *params;
5740 /* If the event is not connectable don't proceed further */
5741 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5744 /* Ignore if the device is blocked */
5745 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5748 /* Most controller will fail if we try to create new connections
5749 * while we have an existing one in peripheral role.
/* Skip unless the controller explicitly declares valid LE states
 * (HCI_QUIRK_VALID_LE_STATES) or the relevant le_states bit is set. */
5751 if (hdev->conn_hash.le_num_peripheral > 0 &&
5752 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5753 !(hdev->le_states[3] & 0x10)))
5756 /* If we're not connectable only connect devices that we have in
5757 * our pend_le_conns list.
5759 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5764 if (!params->explicit_connect) {
5765 switch (params->auto_connect) {
5766 case HCI_AUTO_CONN_DIRECT:
5767 /* Only devices advertising with ADV_DIRECT_IND are
5768 * triggering a connection attempt. This is allowing
5769 * incoming connections from peripheral devices.
5771 if (adv_type != LE_ADV_DIRECT_IND)
5774 case HCI_AUTO_CONN_ALWAYS:
5775 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5776 * are triggering a connection attempt. This means
5777 * that incoming connections from peripheral device are
5778 * accepted and also outgoing connections to peripheral
5779 * devices are established when found.
/* Attempt the connection as central, low security, using the default
 * autoconnect timeout. direct_rpa carries the directed-advert RPA. */
5787 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5788 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5790 if (!IS_ERR(conn)) {
5791 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5792 * by higher layer that tried to connect, if no then
5793 * store the pointer since we don't really have any
5794 * other owner of the object besides the params that
5795 * triggered it. This way we can abort the connection if
5796 * the parameters get removed and keep the reference
5797 * count consistent once the connection is established.
5800 if (!params->explicit_connect)
5801 params->conn = hci_conn_get(conn);
5806 switch (PTR_ERR(conn)) {
5808 /* If hci_connect() returns -EBUSY it means there is already
5809 * an LE connection attempt going on. Since controllers don't
5810 * support more than one connection attempt at the time, we
5811 * don't consider this an error case.
5815 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising-report processor shared by the legacy, extended and
 * directed report handlers. Validates the report, resolves RPAs via IRKs,
 * triggers pending connections, and forwards device-found events to mgmt,
 * merging ADV_IND/ADV_SCAN_IND reports with their later SCAN_RSP.
 * @direct_addr: non-NULL only for LE Direct Advertising Reports.
 * @ext_adv:     true when the report came from an extended advertising event
 *               (no 31-byte legacy length limit, no scan-rsp merging).
 * NOTE(review): this extraction drops brace/return lines throughout
 * (embedded numbering has gaps) — statement grouping must be confirmed
 * against the original before any behavioral change.
 */
5822 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5823 u8 bdaddr_type, bdaddr_t *direct_addr,
5824 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5828 struct discovery_state *d = &hdev->discovery;
5830 struct smp_irk *irk;
5831 struct hci_conn *conn;
/* Only the known legacy PDU types are accepted; anything else is logged
 * (rate-limited) and dropped. */
5840 case LE_ADV_DIRECT_IND:
5841 case LE_ADV_SCAN_IND:
5842 case LE_ADV_NONCONN_IND:
5843 case LE_ADV_SCAN_RSP:
5846 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5847 "type: 0x%02x", type);
/* Legacy advertising data is capped at 31 bytes (HCI_MAX_AD_LENGTH). */
5851 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5852 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5856 /* Find the end of the data in case the report contains padded zero
5857 * bytes at the end causing an invalid length value.
5859 * When data is NULL, len is 0 so there is no need for extra ptr
5860 * check as 'ptr < data + 0' is already false in such case.
5862 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5863 if (ptr + 1 + *ptr > data + len)
5867 /* Adjust for actual length. This handles the case when remote
5868 * device is advertising with incorrect data length.
5872 /* If the direct address is present, then this report is from
5873 * a LE Direct Advertising Report event. In that case it is
5874 * important to see if the address is matching the local
5875 * controller address.
5878 /* Only resolvable random addresses are valid for these
5879 * kind of reports and others can be ignored.
5881 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5884 /* If the controller is not using resolvable random
5885 * addresses, then this report can be ignored.
5887 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5890 /* If the local IRK of the controller does not match
5891 * with the resolvable random address provided, then
5892 * this report can be ignored.
5894 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5898 /* Check if we need to convert to identity address */
5899 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5901 bdaddr = &irk->bdaddr;
5902 bdaddr_type = irk->addr_type;
5905 /* Check if we have been requested to connect to this device.
5907 * direct_addr is set only for directed advertising reports (it is NULL
5908 * for advertising reports) and is already verified to be RPA above.
5910 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
/* A connectable legacy ADV_IND that triggered a connection: stash the
 * advertising data so mgmt_device_connected can include it later. */
5912 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5913 /* Store report for later inclusion by
5914 * mgmt_device_connected
5916 memcpy(conn->le_adv_data, data, len);
5917 conn->le_adv_data_len = len;
5920 /* Passive scanning shouldn't trigger any device found events,
5921 * except for devices marked as CONN_REPORT for which we do send
5922 * device found events, or advertisement monitoring requested.
5924 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5925 if (type == LE_ADV_DIRECT_IND)
5929 /* Handle all adv packet in platform */
5930 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5931 bdaddr, bdaddr_type) &&
5932 idr_is_empty(&hdev->adv_monitors_idr))
5936 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5937 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
/* NOTE(review): both an LE-specific and a generic device-found call
 * appear here; the selecting condition is not visible in this
 * extraction — confirm which path applies. */
5941 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5942 rssi, flags, data, len, NULL, 0, type);
5944 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5945 rssi, flags, data, len, NULL, 0);
5950 /* When receiving non-connectable or scannable undirected
5951 * advertising reports, this means that the remote device is
5952 * not connectable and then clearly indicate this in the
5953 * device found event.
5955 * When receiving a scan response, then there is no way to
5956 * know if the remote device is connectable or not. However
5957 * since scan responses are merged with a previously seen
5958 * advertising report, the flags field from that report
5961 * In the really unlikely case that a controller get confused
5962 * and just sends a scan response event, then it is marked as
5963 * not connectable as well.
5965 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5966 type == LE_ADV_SCAN_RSP)
5967 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5972 /* Disable adv ind and scan rsp merging */
5973 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5974 rssi, flags, data, len, NULL, 0, type);
5976 /* If there's nothing pending either store the data from this
5977 * event or send an immediate device found event if the data
5978 * should not be stored for later.
5980 if (!ext_adv && !has_pending_adv_report(hdev)) {
5981 /* If the report will trigger a SCAN_REQ store it for
5984 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5985 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5986 rssi, flags, data, len);
5990 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5991 rssi, flags, data, len, NULL, 0);
5995 /* Check if the pending report is for the same device as the new one */
5996 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5997 bdaddr_type == d->last_adv_addr_type);
5999 /* If the pending data doesn't match this report or this isn't a
6000 * scan response (e.g. we got a duplicate ADV_IND) then force
6001 * sending of the pending data.
6003 if (type != LE_ADV_SCAN_RSP || !match) {
6004 /* Send out whatever is in the cache, but skip duplicates */
6006 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6007 d->last_adv_addr_type, NULL,
6008 d->last_adv_rssi, d->last_adv_flags,
6010 d->last_adv_data_len, NULL, 0);
6012 /* If the new report will trigger a SCAN_REQ store it for
6015 if (!ext_adv && (type == LE_ADV_IND ||
6016 type == LE_ADV_SCAN_IND)) {
6017 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6018 rssi, flags, data, len);
6022 /* The advertising reports cannot be merged, so clear
6023 * the pending report and send out a device found event.
6025 clear_pending_adv_report(hdev);
6026 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6027 rssi, flags, data, len, NULL, 0);
6031 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6032 * the new event is a SCAN_RSP. We can therefore proceed with
6033 * sending a merged device found event.
6035 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6036 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6037 d->last_adv_data, d->last_adv_data_len, data, len);
6038 clear_pending_adv_report(hdev);
/* Handle the LE Advertising Report meta event: walk the variable-length
 * array of reports, bounds-checking each against the skb tail before
 * handing it to process_adv_report(). The RSSI byte trails each report's
 * data, hence the "+ 1" in the stride.
 */
6042 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
6044 u8 num_reports = skb->data[0];
6045 void *ptr = &skb->data[1];
6049 while (num_reports--) {
6050 struct hci_ev_le_advertising_info *ev = ptr;
/* Reject reports whose fixed header would already overrun the skb —
 * defends against a malformed/malicious num_reports. */
6053 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
6054 bt_dev_err(hdev, "Malicious advertising data.");
/* Accept only if the declared data length is within the legacy 31-byte
 * limit and fully contained in the skb; ev->data[ev->length] is the
 * trailing RSSI byte. */
6058 if (ev->length <= HCI_MAX_AD_LENGTH &&
6059 ev->data + ev->length <= skb_tail_pointer(skb)) {
6060 rssi = ev->data[ev->length];
6061 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6062 ev->bdaddr_type, NULL, 0, rssi,
6063 ev->data, ev->length, false);
6065 bt_dev_err(hdev, "Dropping invalid advertising data");
6068 ptr += sizeof(*ev) + ev->length + 1;
6071 hci_dev_unlock(hdev);
/* Map an extended advertising event type (bitfield) to the closest legacy
 * PDU type so extended reports can be fed through process_adv_report().
 * Returns LE_ADV_INVALID for combinations that have no legacy equivalent.
 * NOTE(review): the switch header for the legacy-PDU branch is not visible
 * in this extraction (numbering jumps 6076->6078) — confirm upstream.
 */
6074 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6076 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6078 case LE_LEGACY_ADV_IND:
6080 case LE_LEGACY_ADV_DIRECT_IND:
6081 return LE_ADV_DIRECT_IND;
6082 case LE_LEGACY_ADV_SCAN_IND:
6083 return LE_ADV_SCAN_IND;
6084 case LE_LEGACY_NONCONN_IND:
6085 return LE_ADV_NONCONN_IND;
6086 case LE_LEGACY_SCAN_RSP_ADV:
6087 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6088 return LE_ADV_SCAN_RSP;
/* Non-legacy PDUs: derive the legacy type from the individual property
 * bits, most specific first. */
6094 if (evt_type & LE_EXT_ADV_CONN_IND) {
6095 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6096 return LE_ADV_DIRECT_IND;
6101 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6102 return LE_ADV_SCAN_RSP;
6104 if (evt_type & LE_EXT_ADV_SCAN_IND)
6105 return LE_ADV_SCAN_IND;
6107 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6108 evt_type & LE_EXT_ADV_DIRECT_IND)
6109 return LE_ADV_NONCONN_IND;
6112 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6115 return LE_ADV_INVALID;
/* Handle the LE Extended Advertising Report meta event: convert each
 * report's extended event type to its legacy equivalent and forward it to
 * process_adv_report(), flagging it as ext_adv when the PDU was not a
 * legacy one. Reports with no legacy mapping are skipped.
 */
6118 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
6120 u8 num_reports = skb->data[0];
6121 void *ptr = &skb->data[1];
6125 while (num_reports--) {
6126 struct hci_ev_le_ext_adv_report *ev = ptr;
6130 evt_type = __le16_to_cpu(ev->evt_type);
6131 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6132 if (legacy_evt_type != LE_ADV_INVALID) {
6133 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
6134 ev->bdaddr_type, NULL, 0, ev->rssi,
6135 ev->data, ev->length,
6136 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
/* Unlike the legacy handler, RSSI is a struct field here, so the stride
 * is header + data only (no trailing RSSI byte). */
6139 ptr += sizeof(*ev) + ev->length;
6142 hci_dev_unlock(hdev);
/* Handle LE Read Remote Features Complete: store the remote LE feature
 * mask and, for connections still in BT_CONFIG, complete the connection
 * setup — tolerating error 0x1a (Unsupported Remote Feature) from remotes
 * that lack peripheral-initiated features exchange.
 */
6145 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
6146 struct sk_buff *skb)
6148 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
6149 struct hci_conn *conn;
6151 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6155 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6158 memcpy(conn->features[0], ev->features, 8);
6160 if (conn->state == BT_CONFIG) {
6163 /* If the local controller supports peripheral-initiated
6164 * features exchange, but the remote controller does
6165 * not, then it is possible that the error code 0x1a
6166 * for unsupported remote feature gets returned.
6168 * In this specific case, allow the connection to
6169 * transition into connected state and mark it as
6172 if (!conn->out && ev->status == 0x1a &&
6173 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6176 status = ev->status;
6178 conn->state = BT_CONNECTED;
6179 hci_connect_cfm(conn, status);
/* Drop the reference taken for the feature-read phase of setup. */
6180 hci_conn_drop(conn);
6184 hci_dev_unlock(hdev);
/* Handle LE Long Term Key Request: look up the stored LTK for the peer,
 * validate EDiv/Rand (zero for SC keys, matching values otherwise), and
 * reply with the key — or send a negative reply when no valid key exists.
 * STK-type keys are single-use and removed after the reply.
 * NOTE(review): the "goto not_found" lines and the not_found label are
 * missing from this extraction — the final neg-reply lines (6246-6248)
 * are that error path.
 */
6187 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
6189 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
6190 struct hci_cp_le_ltk_reply cp;
6191 struct hci_cp_le_ltk_neg_reply neg;
6192 struct hci_conn *conn;
6193 struct smp_ltk *ltk;
6195 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
6199 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6203 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6207 if (smp_ltk_is_sc(ltk)) {
6208 /* With SC both EDiv and Rand are set to zero */
6209 if (ev->ediv || ev->rand)
6212 /* For non-SC keys check that EDiv and Rand match */
6213 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key, zero-padding beyond the negotiated encryption key size. */
6217 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6218 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6219 cp.handle = cpu_to_le16(conn->handle);
6221 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6223 conn->enc_key_size = ltk->enc_size;
6225 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6227 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6228 * temporary key used to encrypt a connection following
6229 * pairing. It is used during the Encrypted Session Setup to
6230 * distribute the keys. Later, security can be re-established
6231 * using a distributed LTK.
6233 if (ltk->type == SMP_STK) {
6234 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6235 list_del_rcu(&ltk->list);
6236 kfree_rcu(ltk, rcu);
6238 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6241 hci_dev_unlock(hdev);
/* Error path: no matching/valid key — send a negative LTK reply. */
6246 neg.handle = ev->handle;
6247 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6248 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for the
 * given handle with the supplied rejection reason.
 * NOTE(review): the reason parameter line and "cp.reason = reason;" are
 * missing from this extraction (numbering jumps 6251->6254->6256->6259) —
 * confirm against the original.
 */
6251 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6254 struct hci_cp_le_conn_param_req_neg_reply cp;
6256 cp.handle = cpu_to_le16(handle);
6259 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle LE Remote Connection Parameter Request: reject if the handle is
 * unknown/not connected or the parameters are out of range; otherwise (as
 * central) remember the requested parameters, inform mgmt, and accept the
 * request with a positive reply echoing the peer's values.
 */
6263 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
6264 struct sk_buff *skb)
6266 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
6267 struct hci_cp_le_conn_param_req_reply cp;
6268 struct hci_conn *hcon;
6269 u16 handle, min, max, latency, timeout;
6271 handle = le16_to_cpu(ev->handle);
6272 min = le16_to_cpu(ev->interval_min);
6273 max = le16_to_cpu(ev->interval_max);
6274 latency = le16_to_cpu(ev->latency);
6275 timeout = le16_to_cpu(ev->timeout);
6277 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6278 if (!hcon || hcon->state != BT_CONNECTED)
6279 return send_conn_param_neg_reply(hdev, handle,
6280 HCI_ERROR_UNKNOWN_CONN_ID);
/* hci_check_conn_params() returns non-zero for spec-invalid combinations. */
6282 if (hci_check_conn_params(min, max, latency, timeout))
6283 return send_conn_param_neg_reply(hdev, handle,
6284 HCI_ERROR_INVALID_LL_PARAMS);
6286 if (hcon->role == HCI_ROLE_MASTER) {
6287 struct hci_conn_params *params;
/* As central, persist the peer's requested parameters so future
 * connections use them, then tell mgmt about the new values. */
6292 params = hci_conn_params_lookup(hdev, &hcon->dst,
6295 params->conn_min_interval = min;
6296 params->conn_max_interval = max;
6297 params->conn_latency = latency;
6298 params->supervision_timeout = timeout;
6304 hci_dev_unlock(hdev);
6306 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6307 store_hint, min, max, latency, timeout);
/* Accept: echo the requested parameters back (still little-endian,
 * copied straight from the event). */
6310 cp.handle = ev->handle;
6311 cp.interval_min = ev->interval_min;
6312 cp.interval_max = ev->interval_max;
6313 cp.latency = ev->latency;
6314 cp.timeout = ev->timeout;
6318 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle the LE Direct Advertising Report meta event: after validating
 * that the skb actually holds num_reports fixed-size entries, forward each
 * to process_adv_report() with its direct (target) address so RPA matching
 * against the local IRK can be performed.
 */
6321 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
6322 struct sk_buff *skb)
6324 u8 num_reports = skb->data[0];
6325 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
/* Bounds check: reports are fixed-size, so validate the whole array once. */
6327 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6332 for (; num_reports; num_reports--, ev++)
6333 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6334 ev->bdaddr_type, &ev->direct_addr,
6335 ev->direct_addr_type, ev->rssi, NULL, 0,
6338 hci_dev_unlock(hdev);
/* Handle LE PHY Update Complete: cache the negotiated TX/RX PHYs on the
 * connection. NOTE(review): the status and NULL-conn guard lines are not
 * visible in this extraction (numbering jumps 6346->6353->6357).
 */
6341 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6343 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6344 struct hci_conn *conn;
6346 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6353 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6357 conn->le_tx_phy = ev->tx_phy;
6358 conn->le_rx_phy = ev->rx_phy;
6361 hci_dev_unlock(hdev);
/* Dispatch an HCI LE Meta event: strip the meta header and route the skb
 * to the subevent-specific handler. Unknown subevents are silently ignored
 * (default case not visible in this extraction).
 */
6364 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
6366 struct hci_ev_le_meta *le_ev = (void *) skb->data;
/* Advance past the meta header so handlers see their own event struct. */
6368 skb_pull(skb, sizeof(*le_ev));
6370 switch (le_ev->subevent) {
6371 case HCI_EV_LE_CONN_COMPLETE:
6372 hci_le_conn_complete_evt(hdev, skb);
6375 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6376 hci_le_conn_update_complete_evt(hdev, skb);
6379 case HCI_EV_LE_ADVERTISING_REPORT:
6380 hci_le_adv_report_evt(hdev, skb);
6383 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6384 hci_le_remote_feat_complete_evt(hdev, skb);
6387 case HCI_EV_LE_LTK_REQ:
6388 hci_le_ltk_request_evt(hdev, skb);
6391 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6392 hci_le_remote_conn_param_req_evt(hdev, skb);
6395 case HCI_EV_LE_DIRECT_ADV_REPORT:
6396 hci_le_direct_adv_report_evt(hdev, skb);
6399 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6400 hci_le_phy_update_evt(hdev, skb);
6403 case HCI_EV_LE_EXT_ADV_REPORT:
6404 hci_le_ext_adv_report_evt(hdev, skb);
6407 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6408 hci_le_enh_conn_complete_evt(hdev, skb);
6411 case HCI_EV_LE_EXT_ADV_SET_TERM:
6412 hci_le_ext_adv_term_evt(hdev, skb);
/* Check whether skb is the Command Complete event terminating the request
 * identified by (opcode, event), pulling headers off skb as it validates.
 * Used before handing a pristine skb copy to a req_complete_skb callback.
 * Return-value lines are not all visible in this extraction; based on the
 * visible logic it reports whether the skb matches — TODO confirm exact
 * true/false paths upstream.
 */
6420 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6421 u8 event, struct sk_buff *skb)
6423 struct hci_ev_cmd_complete *ev;
6424 struct hci_event_hdr *hdr;
6429 if (skb->len < sizeof(*hdr)) {
6430 bt_dev_err(hdev, "too short HCI event");
6434 hdr = (void *) skb->data;
6435 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* When a specific completion event was requested, it must match. */
6438 if (hdr->evt != event)
6443 /* Check if request ended in Command Status - no way to retrieve
6444 * any extra parameters in this case.
6446 if (hdr->evt == HCI_EV_CMD_STATUS)
6449 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6450 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6455 if (skb->len < sizeof(*ev)) {
6456 bt_dev_err(hdev, "too short cmd_complete event");
6460 ev = (void *) skb->data;
6461 skb_pull(skb, sizeof(*ev));
6463 if (opcode != __le16_to_cpu(ev->opcode)) {
6464 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6465 __le16_to_cpu(ev->opcode));
/* Record why the host was woken while suspended: the first BT event seen
 * after suspend sets hdev->wake_reason and, where the event identifies a
 * remote device (connection request/complete or an LE advertising report),
 * the waking device's address and address type.
 *
 * Fix: the conn_request/conn_complete pointers were used on the wrong
 * events — HCI_EV_CONN_REQUEST copied conn_complete->bdaddr and
 * HCI_EV_CONN_COMPLETE copied conn_request->bdaddr. The two event structs
 * place bdaddr at different offsets (hci_ev_conn_complete has status and
 * handle before it; hci_ev_conn_request starts with it), so the recorded
 * wake address was garbage. Each event now uses its own struct.
 */
6472 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6473 struct sk_buff *skb)
6475 struct hci_ev_le_advertising_info *adv;
6476 struct hci_ev_le_direct_adv_info *direct_adv;
6477 struct hci_ev_le_ext_adv_report *ext_adv;
6478 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6479 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6483 /* If we are currently suspended and this is the first BT event seen,
6484 * save the wake reason associated with the event.
6486 if (!hdev->suspended || hdev->wake_reason)
6489 /* Default to remote wake. Values for wake_reason are documented in the
6490 * Bluez mgmt api docs.
6492 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6494 /* Once configured for remote wakeup, we should only wake up for
6495 * reconnections. It's useful to see which device is waking us up so
6496 * keep track of the bdaddr of the connection event that woke us up.
6498 if (event == HCI_EV_CONN_REQUEST) {
6499 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6500 hdev->wake_addr_type = BDADDR_BREDR;
6501 } else if (event == HCI_EV_CONN_COMPLETE) {
6502 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6503 hdev->wake_addr_type = BDADDR_BREDR;
6504 } else if (event == HCI_EV_LE_META) {
6505 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6506 u8 subevent = le_ev->subevent;
6507 u8 *ptr = &skb->data[sizeof(*le_ev)];
6508 u8 num_reports = *ptr;
/* For LE advertising-style subevents, take the address from the first
 * report in the event (ptr + 1 skips the num_reports byte). */
6510 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6511 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6512 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6514 adv = (void *)(ptr + 1);
6515 direct_adv = (void *)(ptr + 1);
6516 ext_adv = (void *)(ptr + 1);
6519 case HCI_EV_LE_ADVERTISING_REPORT:
6520 bacpy(&hdev->wake_addr, &adv->bdaddr);
6521 hdev->wake_addr_type = adv->bdaddr_type;
6523 case HCI_EV_LE_DIRECT_ADV_REPORT:
6524 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6525 hdev->wake_addr_type = direct_adv->bdaddr_type;
6527 case HCI_EV_LE_EXT_ADV_REPORT:
6528 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6529 hdev->wake_addr_type = ext_adv->bdaddr_type;
/* Any other event while suspended is unexpected; record it as such. */
6534 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6538 hci_dev_unlock(hdev);
6541 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6543 struct hci_event_hdr *hdr = (void *) skb->data;
6544 hci_req_complete_t req_complete = NULL;
6545 hci_req_complete_skb_t req_complete_skb = NULL;
6546 struct sk_buff *orig_skb = NULL;
6547 u8 status = 0, event = hdr->evt, req_evt = 0;
6548 u16 opcode = HCI_OP_NOP;
6551 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6555 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6556 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6557 opcode = __le16_to_cpu(cmd_hdr->opcode);
6558 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6563 /* If it looks like we might end up having to call
6564 * req_complete_skb, store a pristine copy of the skb since the
6565 * various handlers may modify the original one through
6566 * skb_pull() calls, etc.
6568 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6569 event == HCI_EV_CMD_COMPLETE)
6570 orig_skb = skb_clone(skb, GFP_KERNEL);
6572 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6574 /* Store wake reason if we're suspended */
6575 hci_store_wake_reason(hdev, event, skb);
6578 case HCI_EV_INQUIRY_COMPLETE:
6579 hci_inquiry_complete_evt(hdev, skb);
6582 case HCI_EV_INQUIRY_RESULT:
6583 hci_inquiry_result_evt(hdev, skb);
6586 case HCI_EV_CONN_COMPLETE:
6587 hci_conn_complete_evt(hdev, skb);
6590 case HCI_EV_CONN_REQUEST:
6591 hci_conn_request_evt(hdev, skb);
6594 case HCI_EV_DISCONN_COMPLETE:
6595 hci_disconn_complete_evt(hdev, skb);
6598 case HCI_EV_AUTH_COMPLETE:
6599 hci_auth_complete_evt(hdev, skb);
6602 case HCI_EV_REMOTE_NAME:
6603 hci_remote_name_evt(hdev, skb);
6606 case HCI_EV_ENCRYPT_CHANGE:
6607 hci_encrypt_change_evt(hdev, skb);
6610 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6611 hci_change_link_key_complete_evt(hdev, skb);
6614 case HCI_EV_REMOTE_FEATURES:
6615 hci_remote_features_evt(hdev, skb);
6618 case HCI_EV_CMD_COMPLETE:
6619 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6620 &req_complete, &req_complete_skb);
6623 case HCI_EV_CMD_STATUS:
6624 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6628 case HCI_EV_HARDWARE_ERROR:
6629 hci_hardware_error_evt(hdev, skb);
6632 case HCI_EV_ROLE_CHANGE:
6633 hci_role_change_evt(hdev, skb);
6636 case HCI_EV_NUM_COMP_PKTS:
6637 hci_num_comp_pkts_evt(hdev, skb);
6640 case HCI_EV_MODE_CHANGE:
6641 hci_mode_change_evt(hdev, skb);
6644 case HCI_EV_PIN_CODE_REQ:
6645 hci_pin_code_request_evt(hdev, skb);
6648 case HCI_EV_LINK_KEY_REQ:
6649 hci_link_key_request_evt(hdev, skb);
6652 case HCI_EV_LINK_KEY_NOTIFY:
6653 hci_link_key_notify_evt(hdev, skb);
6656 case HCI_EV_CLOCK_OFFSET:
6657 hci_clock_offset_evt(hdev, skb);
6660 case HCI_EV_PKT_TYPE_CHANGE:
6661 hci_pkt_type_change_evt(hdev, skb);
6664 case HCI_EV_PSCAN_REP_MODE:
6665 hci_pscan_rep_mode_evt(hdev, skb);
6668 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6669 hci_inquiry_result_with_rssi_evt(hdev, skb);
6672 case HCI_EV_REMOTE_EXT_FEATURES:
6673 hci_remote_ext_features_evt(hdev, skb);
6676 case HCI_EV_SYNC_CONN_COMPLETE:
6677 hci_sync_conn_complete_evt(hdev, skb);
6680 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6681 hci_extended_inquiry_result_evt(hdev, skb);
6684 case HCI_EV_KEY_REFRESH_COMPLETE:
6685 hci_key_refresh_complete_evt(hdev, skb);
6688 case HCI_EV_IO_CAPA_REQUEST:
6689 hci_io_capa_request_evt(hdev, skb);
6692 case HCI_EV_IO_CAPA_REPLY:
6693 hci_io_capa_reply_evt(hdev, skb);
6696 case HCI_EV_USER_CONFIRM_REQUEST:
6697 hci_user_confirm_request_evt(hdev, skb);
6700 case HCI_EV_USER_PASSKEY_REQUEST:
6701 hci_user_passkey_request_evt(hdev, skb);
6704 case HCI_EV_USER_PASSKEY_NOTIFY:
6705 hci_user_passkey_notify_evt(hdev, skb);
6708 case HCI_EV_KEYPRESS_NOTIFY:
6709 hci_keypress_notify_evt(hdev, skb);
6712 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6713 hci_simple_pair_complete_evt(hdev, skb);
6716 case HCI_EV_REMOTE_HOST_FEATURES:
6717 hci_remote_host_features_evt(hdev, skb);
6720 case HCI_EV_LE_META:
6721 hci_le_meta_evt(hdev, skb);
6724 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6725 hci_remote_oob_data_request_evt(hdev, skb);
6728 #if IS_ENABLED(CONFIG_BT_HS)
6729 case HCI_EV_CHANNEL_SELECTED:
6730 hci_chan_selected_evt(hdev, skb);
6733 case HCI_EV_PHY_LINK_COMPLETE:
6734 hci_phy_link_complete_evt(hdev, skb);
6737 case HCI_EV_LOGICAL_LINK_COMPLETE:
6738 hci_loglink_complete_evt(hdev, skb);
6741 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6742 hci_disconn_loglink_complete_evt(hdev, skb);
6745 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6746 hci_disconn_phylink_complete_evt(hdev, skb);
6750 case HCI_EV_NUM_COMP_BLOCKS:
6751 hci_num_comp_blocks_evt(hdev, skb);
6755 case HCI_EV_VENDOR_SPECIFIC:
6756 hci_vendor_specific_evt(hdev, skb);
6760 msft_vendor_evt(hdev, skb);
6765 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6770 req_complete(hdev, status, opcode);
6771 } else if (req_complete_skb) {
6772 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6773 kfree_skb(orig_skb);
6776 req_complete_skb(hdev, status, opcode, orig_skb);
6780 kfree_skb(orig_skb);
6782 hdev->stat.evt_rx++;