2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
/* 16 zero bytes used as an all-zero link-key sentinel. */
40 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
41 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* Whole-second convenience wrapper around msecs_to_jiffies(). */
43 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
45 /* Handle HCI Event packets */
/* NOTE(review): this chunk is a lossy extraction -- the leading number on
 * each line is the original file's line number, and interior lines
 * (braces, status guards, hci_dev_lock()/unlock pairs) were dropped.
 * Code is kept byte-identical; only comments are added.
 */
/* Command Complete handler for HCI_Inquiry_Cancel: clears the local
 * inquiry state and wakes any waiter blocked on the HCI_INQUIRY bit.
 */
47 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%2.2x", hdev->name, status);
54 /* It is possible that we receive Inquiry Complete event right
55 * before we receive Inquiry Cancel Command Complete event, in
56 * which case the latter event should have status of Command
57 * Disallowed (0x0c). This should not be treated as error, since
58 * we actually achieve what Inquiry Cancel wants to achieve,
59 * which is to end the last Inquiry session.
61 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
62 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
71 clear_bit(HCI_INQUIRY, &hdev->flags);
/* Barrier between clearing the bit and waking waiters, as the
 * wake_up_bit() documentation recommends.
 */
72 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
73 wake_up_bit(&hdev->flags, HCI_INQUIRY);
/* Leave discovery state alone while an LE active scan is still running. */
76 /* Set discovery state to stopped if we're not doing LE active
79 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
80 hdev->le_scan_type != LE_SCAN_ACTIVE)
81 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
84 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Periodic_Inquiry_Mode: marks periodic inquiry
 * active.  A status guard presumably precedes the flag set (original
 * lines 92-95 were dropped by the extraction) -- TODO confirm.
 */
87 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 __u8 status = *((__u8 *) skb->data);
91 BT_DBG("%s status 0x%2.2x", hdev->name, status);
96 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete for HCI_Exit_Periodic_Inquiry_Mode: clears the
 * periodic-inquiry flag and re-checks pending connection requests.
 */
99 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 __u8 status = *((__u8 *) skb->data);
103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
108 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Remote_Name_Request_Cancel: debug log only. */
113 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
116 BT_DBG("%s", hdev->name);
/* Command Complete for HCI_Role_Discovery: records the reported role
 * (central/peripheral) on the matching ACL connection.
 */
119 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 struct hci_rp_role_discovery *rp = (void *) skb->data;
122 struct hci_conn *conn;
124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 conn->role = rp->role;
135 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Link_Policy_Settings: caches the
 * controller-reported policy on the connection.
 */
138 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 struct hci_rp_read_link_policy *rp = (void *) skb->data;
141 struct hci_conn *conn;
143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = __le16_to_cpu(rp->policy);
154 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Link_Policy_Settings: re-reads the
 * policy we sent (from the pending command payload) and stores it on the
 * connection.  The SCO check below rewrites the policy without SNIFF
 * while an eSCO/SCO link to the same peer exists; this looks like a
 * vendor (Tizen) addition rather than upstream behavior -- TODO confirm.
 */
157 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_write_link_policy *rp = (void *) skb->data;
160 struct hci_conn *conn;
163 struct hci_cp_write_link_policy cp;
164 struct hci_conn *sco_conn;
167 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
172 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
178 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* sent + 2 skips the 2-byte handle field of the command payload. */
180 conn->link_policy = get_unaligned_le16(sent + 2);
183 sco_conn = hci_conn_hash_lookup_sco(hdev);
184 if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
185 conn->link_policy & HCI_LP_SNIFF) {
186 BT_ERR("SNIFF is not allowed during sco connection");
187 cp.handle = __cpu_to_le16(conn->handle);
188 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
189 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
193 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Default_Link_Policy_Settings. */
196 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
199 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
201 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
206 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for HCI_Write_Default_Link_Policy_Settings: caches
 * the value taken from the command we sent, not from the reply.
 */
209 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
212 __u8 status = *((__u8 *) skb->data);
215 BT_DBG("%s status 0x%2.2x", hdev->name, status);
220 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
224 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: drops all volatile controller state
 * back to defaults (flags, advertising/scan-response data, TX power,
 * LE accept/resolving lists) and stops any discovery in progress.
 */
227 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
229 __u8 status = *((__u8 *) skb->data);
231 BT_DBG("%s status 0x%2.2x", hdev->name, status);
233 clear_bit(HCI_RESET, &hdev->flags);
238 /* Reset all non-persistent flags */
239 hci_dev_clear_volatile_flags(hdev);
241 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
243 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
244 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
246 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
247 hdev->adv_data_len = 0;
249 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
250 hdev->scan_rsp_data_len = 0;
252 hdev->le_scan_type = LE_SCAN_PASSIVE;
254 hdev->ssp_debug_mode = 0;
256 hci_bdaddr_list_clear(&hdev->le_accept_list);
257 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for HCI_Read_Stored_Link_Key: only a "read all"
 * request (read_all == 0x01) updates the cached key counters.
 */
260 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
263 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
264 struct hci_cp_read_stored_link_key *sent;
266 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
272 if (!rp->status && sent->read_all == 0x01) {
273 hdev->stored_max_keys = rp->max_keys;
274 hdev->stored_num_keys = rp->num_keys;
/* Command Complete for HCI_Delete_Stored_Link_Key: decrements the
 * cached key count, clamping at zero.
 */
278 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
281 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
283 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
288 if (rp->num_keys <= hdev->stored_num_keys)
289 hdev->stored_num_keys -= rp->num_keys;
291 hdev->stored_num_keys = 0;
/* Command Complete for HCI_Write_Local_Name: notifies mgmt and caches
 * the name taken from the command we sent.
 */
294 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
296 __u8 status = *((__u8 *) skb->data);
299 BT_DBG("%s status 0x%2.2x", hdev->name, status);
301 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
307 if (hci_dev_test_flag(hdev, HCI_MGMT))
308 mgmt_set_local_name_complete(hdev, sent, status);
310 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
312 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Name: cache the controller name,
 * but only during setup/config so a user-set name is not overwritten.
 */
315 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
317 struct hci_rp_read_local_name *rp = (void *) skb->data;
319 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
324 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
325 hci_dev_test_flag(hdev, HCI_CONFIG))
326 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for HCI_Write_Authentication_Enable: mirrors the
 * parameter we sent into the HCI_AUTH flag and notifies mgmt.
 */
329 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
331 __u8 status = *((__u8 *) skb->data);
334 BT_DBG("%s status 0x%2.2x", hdev->name, status);
336 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
343 __u8 param = *((__u8 *) sent);
345 if (param == AUTH_ENABLED)
346 set_bit(HCI_AUTH, &hdev->flags);
348 clear_bit(HCI_AUTH, &hdev->flags);
351 if (hci_dev_test_flag(hdev, HCI_MGMT))
352 mgmt_auth_enable_complete(hdev, status);
354 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Encryption_Mode: mirrors the sent
 * parameter into the HCI_ENCRYPT flag.
 */
357 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
359 __u8 status = *((__u8 *) skb->data);
363 BT_DBG("%s status 0x%2.2x", hdev->name, status);
368 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
372 param = *((__u8 *) sent);
375 set_bit(HCI_ENCRYPT, &hdev->flags);
377 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete for HCI_Write_Scan_Enable: mirrors the sent scan
 * mask into the HCI_ISCAN/HCI_PSCAN flags; the discoverable timeout is
 * reset on the (dropped) error/guard path above it.
 */
380 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
382 __u8 status = *((__u8 *) skb->data);
386 BT_DBG("%s status 0x%2.2x", hdev->name, status);
388 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
392 param = *((__u8 *) sent);
397 hdev->discov_timeout = 0;
401 if (param & SCAN_INQUIRY)
402 set_bit(HCI_ISCAN, &hdev->flags);
404 clear_bit(HCI_ISCAN, &hdev->flags);
406 if (param & SCAN_PAGE)
407 set_bit(HCI_PSCAN, &hdev->flags);
409 clear_bit(HCI_PSCAN, &hdev->flags);
412 hci_dev_unlock(hdev);
/* Command Complete for HCI_Set_Event_Filter: tracks whether a filter
 * is currently configured via HCI_EVENT_FILTER_CONFIGURED.
 */
415 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
417 __u8 status = *((__u8 *)skb->data);
418 struct hci_cp_set_event_filter *cp;
421 BT_DBG("%s status 0x%2.2x", hdev->name, status);
426 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
430 cp = (struct hci_cp_set_event_filter *)sent;
432 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
433 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
435 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete for HCI_Read_Class_of_Device: caches the 3-byte CoD. */
438 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
440 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
442 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
447 memcpy(hdev->dev_class, rp->dev_class, 3);
449 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
450 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for HCI_Write_Class_of_Device: caches what we sent
 * and notifies mgmt.
 */
453 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
455 __u8 status = *((__u8 *) skb->data);
458 BT_DBG("%s status 0x%2.2x", hdev->name, status);
460 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
467 memcpy(hdev->dev_class, sent, 3);
469 if (hci_dev_test_flag(hdev, HCI_MGMT))
470 mgmt_set_class_of_dev_complete(hdev, sent, status);
472 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Voice_Setting: caches the value and,
 * when it changed, notifies the driver via hdev->notify().
 */
475 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
477 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
480 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
485 setting = __le16_to_cpu(rp->voice_setting);
487 if (hdev->voice_setting == setting)
490 hdev->voice_setting = setting;
492 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
495 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Write_Voice_Setting: same as the read path,
 * but the new value comes from the command we sent.
 */
498 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
501 __u8 status = *((__u8 *) skb->data);
505 BT_DBG("%s status 0x%2.2x", hdev->name, status);
510 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
514 setting = get_unaligned_le16(sent);
516 if (hdev->voice_setting == setting)
519 hdev->voice_setting = setting;
521 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
524 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Read_Number_Of_Supported_IAC. */
527 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
530 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
532 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
537 hdev->num_iac = rp->num_iac;
539 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete for HCI_Write_Simple_Pairing_Mode: updates the host
 * feature bit, the HCI_SSP_ENABLED flag, and notifies mgmt.
 */
542 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
544 __u8 status = *((__u8 *) skb->data);
545 struct hci_cp_write_ssp_mode *sent;
547 BT_DBG("%s status 0x%2.2x", hdev->name, status);
549 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
557 hdev->features[1][0] |= LMP_HOST_SSP;
559 hdev->features[1][0] &= ~LMP_HOST_SSP;
562 if (hci_dev_test_flag(hdev, HCI_MGMT))
563 mgmt_ssp_enable_complete(hdev, sent->mode, status);
566 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
568 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
571 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Secure_Connections_Host_Support:
 * updates the host feature bit; HCI_SC_ENABLED is only toggled here
 * when mgmt is not managing the device.
 */
574 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
576 u8 status = *((u8 *) skb->data);
577 struct hci_cp_write_sc_support *sent;
579 BT_DBG("%s status 0x%2.2x", hdev->name, status);
581 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
589 hdev->features[1][0] |= LMP_HOST_SC;
591 hdev->features[1][0] &= ~LMP_HOST_SC;
594 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
596 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
598 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
601 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Version_Information: cached only
 * during setup/config so runtime replies cannot clobber it.
 */
604 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
606 struct hci_rp_read_local_version *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
614 hci_dev_test_flag(hdev, HCI_CONFIG)) {
615 hdev->hci_ver = rp->hci_ver;
616 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
617 hdev->lmp_ver = rp->lmp_ver;
618 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
619 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for HCI_Read_Local_Supported_Commands: cached only
 * during setup/config, same rationale as the version read above.
 */
623 static void hci_cc_read_local_commands(struct hci_dev *hdev,
626 struct hci_rp_read_local_commands *rp = (void *) skb->data;
628 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
634 hci_dev_test_flag(hdev, HCI_CONFIG))
635 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for HCI_Read_Authenticated_Payload_Timeout: stores
 * the timeout on the matching connection.
 */
638 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
641 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
642 struct hci_conn *conn;
644 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
653 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
655 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Authenticated_Payload_Timeout: the
 * new timeout comes from the command payload (sent + 2 skips the
 * 2-byte handle field).
 */
658 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
661 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
662 struct hci_conn *conn;
665 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
670 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
676 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
678 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
680 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Supported_Features: caches page 0
 * of the LMP feature mask, then widens the default ACL packet types and
 * (e)SCO air modes to everything the controller advertises.
 */
683 static void hci_cc_read_local_features(struct hci_dev *hdev,
686 struct hci_rp_read_local_features *rp = (void *) skb->data;
688 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
693 memcpy(hdev->features, rp->features, 8);
695 /* Adjust default settings according to features
696 * supported by device. */
698 if (hdev->features[0][0] & LMP_3SLOT)
699 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
701 if (hdev->features[0][0] & LMP_5SLOT)
702 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
704 if (hdev->features[0][1] & LMP_HV2) {
705 hdev->pkt_type |= (HCI_HV2);
706 hdev->esco_type |= (ESCO_HV2);
709 if (hdev->features[0][1] & LMP_HV3) {
710 hdev->pkt_type |= (HCI_HV3);
711 hdev->esco_type |= (ESCO_HV3);
714 if (lmp_esco_capable(hdev))
715 hdev->esco_type |= (ESCO_EV3);
717 if (hdev->features[0][4] & LMP_EV4)
718 hdev->esco_type |= (ESCO_EV4);
720 if (hdev->features[0][4] & LMP_EV5)
721 hdev->esco_type |= (ESCO_EV5);
723 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
724 hdev->esco_type |= (ESCO_2EV3);
726 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
727 hdev->esco_type |= (ESCO_3EV3);
729 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
730 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for HCI_Read_Local_Extended_Features: tracks the
 * highest feature page seen and caches pages within HCI_MAX_PAGES.
 */
733 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
736 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 if (hdev->max_page < rp->max_page)
744 hdev->max_page = rp->max_page;
746 if (rp->page < HCI_MAX_PAGES)
747 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for HCI_Read_Flow_Control_Mode. */
750 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
753 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
755 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
760 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for HCI_Read_Buffer_Size: caches ACL/SCO MTUs and
 * packet counts and primes the flow-control credit counters.
 */
763 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
765 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
767 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
772 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
773 hdev->sco_mtu = rp->sco_mtu;
774 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
775 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
/* Quirk adjustment for broken controllers (body dropped by extraction). */
777 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
782 hdev->acl_cnt = hdev->acl_pkts;
783 hdev->sco_cnt = hdev->sco_pkts;
785 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
786 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for HCI_Read_BD_ADDR: public address is only taken
 * during init; setup_addr additionally recorded while in setup.
 */
789 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
791 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
793 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798 if (test_bit(HCI_INIT, &hdev->flags))
799 bacpy(&hdev->bdaddr, &rp->bdaddr);
801 if (hci_dev_test_flag(hdev, HCI_SETUP))
802 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for HCI_Read_Local_Pairing_Options (setup/config
 * only): pairing options and max encryption key size.
 */
805 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
808 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
816 hci_dev_test_flag(hdev, HCI_CONFIG)) {
817 hdev->pairing_opts = rp->pairing_opts;
818 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete for HCI_Read_Page_Scan_Activity: cached during init
 * only.
 */
822 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
825 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832 if (test_bit(HCI_INIT, &hdev->flags)) {
833 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
834 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for HCI_Write_Page_Scan_Activity: caches the
 * interval/window from the command we sent.
 */
838 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
841 u8 status = *((u8 *) skb->data);
842 struct hci_cp_write_page_scan_activity *sent;
844 BT_DBG("%s status 0x%2.2x", hdev->name, status);
849 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
853 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
854 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for HCI_Read_Page_Scan_Type: cached during init only. */
857 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
860 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
862 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
867 if (test_bit(HCI_INIT, &hdev->flags))
868 hdev->page_scan_type = rp->type;
/* Command Complete for HCI_Write_Page_Scan_Type: the single-byte type
 * comes from the command payload.
 */
871 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
874 u8 status = *((u8 *) skb->data);
877 BT_DBG("%s status 0x%2.2x", hdev->name, status);
882 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
884 hdev->page_scan_type = *type;
/* Command Complete for HCI_Read_Data_Block_Size (block-based flow
 * control): caches block MTU/length/count and primes the credit count.
 */
887 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
890 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
892 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
897 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
898 hdev->block_len = __le16_to_cpu(rp->block_len);
899 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
901 hdev->block_cnt = hdev->num_blocks;
903 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
904 hdev->block_cnt, hdev->block_len);
/* Command Complete for HCI_Read_Clock: "which" 0x00 is the local clock
 * (stored on hdev); otherwise the piconet clock of the connection named
 * by the handle.  Guards against a short reply before touching rp.
 */
907 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
909 struct hci_rp_read_clock *rp = (void *) skb->data;
910 struct hci_cp_read_clock *cp;
911 struct hci_conn *conn;
913 BT_DBG("%s", hdev->name);
915 if (skb->len < sizeof(*rp))
923 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
927 if (cp->which == 0x00) {
928 hdev->clock = le32_to_cpu(rp->clock);
932 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
934 conn->clock = le32_to_cpu(rp->clock);
935 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
939 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_AMP_Info: caches the AMP
 * controller capabilities wholesale.
 */
942 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
945 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
952 hdev->amp_status = rp->amp_status;
953 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
954 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
955 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
956 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
957 hdev->amp_type = rp->amp_type;
958 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
959 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
960 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
961 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete for HCI_Read_Inquiry_Response_Transmit_Power_Level. */
964 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
967 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
969 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
974 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for HCI_Read_Default_Erroneous_Data_Reporting. */
977 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
980 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
987 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete for HCI_Write_Default_Erroneous_Data_Reporting:
 * caches the value from the command we sent.
 */
990 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
993 __u8 status = *((__u8 *)skb->data);
994 struct hci_cp_write_def_err_data_reporting *cp;
996 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1001 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1005 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete for HCI_PIN_Code_Request_Reply: notifies mgmt and
 * records the PIN length on the matching ACL connection.
 */
1008 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
1010 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
1011 struct hci_cp_pin_code_reply *cp;
1012 struct hci_conn *conn;
1014 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1018 if (hci_dev_test_flag(hdev, HCI_MGMT))
1019 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1024 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1028 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1030 conn->pin_length = cp->pin_len;
1033 hci_dev_unlock(hdev);
/* Command Complete for HCI_PIN_Code_Request_Negative_Reply: mgmt
 * notification only.
 */
1036 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1038 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1040 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1044 if (hci_dev_test_flag(hdev, HCI_MGMT))
1045 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1048 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Buffer_Size: caches the LE ACL MTU
 * and packet count and primes the LE flow-control credits.
 */
1051 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1052 struct sk_buff *skb)
1054 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1056 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1062 hdev->le_pkts = rp->le_max_pkt;
1064 hdev->le_cnt = hdev->le_pkts;
1066 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for HCI_LE_Read_Local_Supported_Features. */
1069 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1070 struct sk_buff *skb)
1072 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1074 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1079 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for HCI_LE_Read_Advertising_Channel_TX_Power. */
1082 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1083 struct sk_buff *skb)
1085 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1087 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1092 hdev->adv_tx_power = rp->tx_power;
/* Command Complete for HCI_User_Confirmation_Request_Reply: forwards
 * the result to mgmt.
 */
1095 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1097 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1099 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1103 if (hci_dev_test_flag(hdev, HCI_MGMT))
1104 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1107 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Confirmation_Request_Negative_Reply. */
1110 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1111 struct sk_buff *skb)
1113 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1119 if (hci_dev_test_flag(hdev, HCI_MGMT))
1120 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1121 ACL_LINK, 0, rp->status);
1123 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Reply. */
1126 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1128 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1134 if (hci_dev_test_flag(hdev, HCI_MGMT))
1135 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1138 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Negative_Reply. */
1141 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1142 struct sk_buff *skb)
1144 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1146 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1150 if (hci_dev_test_flag(hdev, HCI_MGMT))
1151 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1152 ACL_LINK, 0, rp->status);
1154 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_OOB_Data: visible portion only
 * logs; the mgmt hand-off (if any) was dropped by the extraction.
 */
1157 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1158 struct sk_buff *skb)
1160 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_Read_Local_OOB_Extended_Data: same shape as
 * the non-extended variant above.
 */
1165 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1166 struct sk_buff *skb)
1168 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1170 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_LE_Set_Random_Address: caches the address
 * and, if it is our current RPA, restarts the RPA expiry timer.
 */
1173 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1175 __u8 status = *((__u8 *) skb->data);
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1183 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1189 bacpy(&hdev->random_addr, sent);
1191 if (!bacmp(&hdev->rpa, sent)) {
1192 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1193 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1194 secs_to_jiffies(hdev->rpa_timeout));
1197 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Default_PHY: caches the preferred
 * TX/RX PHY masks from the command we sent.
 */
1200 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1202 __u8 status = *((__u8 *) skb->data);
1203 struct hci_cp_le_set_default_phy *cp;
1205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1210 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1216 hdev->le_tx_def_phys = cp->tx_phys;
1217 hdev->le_rx_def_phys = cp->rx_phys;
1219 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertising_Set_Random_Address:
 * stores the address on the advertising instance and restarts its
 * RPA expiry timer when the address is our RPA.
 */
1222 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1223 struct sk_buff *skb)
1225 __u8 status = *((__u8 *) skb->data);
1226 struct hci_cp_le_set_adv_set_rand_addr *cp;
1227 struct adv_info *adv;
1232 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
/* Instance 0x00 goes through HCI_OP_LE_SET_RANDOM_ADDR instead, so it
 * is deliberately skipped here.
 */
1233 /* Update only in case the adv instance since handle 0x00 shall be using
1234 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1235 * non-extended adverting.
1237 if (!cp || !cp->handle)
1242 adv = hci_find_adv_instance(hdev, cp->handle)
1244 bacpy(&adv->random_addr, &cp->bdaddr);
1245 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1246 adv->rpa_expired = false;
1247 queue_delayed_work(hdev->workqueue,
1248 &adv->rpa_expired_cb,
1249 secs_to_jiffies(hdev->rpa_timeout));
1253 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Transmit_Power: caches the
 * controller's min/max LE TX power range.
 */
1256 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1257 struct sk_buff *skb)
1259 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1261 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1266 hdev->min_le_tx_power = rp->min_le_tx_power;
1267 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete for HCI_LE_Set_Advertising_Enable: toggles
 * HCI_LE_ADV and, when enabling while a peripheral-role connection is
 * being initiated, arms the connection timeout.
 */
1270 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1272 __u8 *sent, status = *((__u8 *) skb->data);
1274 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1279 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1285 /* If we're doing connection initiation as peripheral. Set a
1286 * timeout in case something goes wrong.
1289 struct hci_conn *conn;
1291 hci_dev_set_flag(hdev, HCI_LE_ADV);
1293 conn = hci_lookup_le_connect(hdev);
1295 queue_delayed_work(hdev->workqueue,
1296 &conn->le_conn_timeout,
1297 conn->conn_timeout);
1299 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1302 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Advertising_Enable: extended
 * variant of the handler above.  Tracks per-instance enabled state; on
 * disable of a single set it checks the remaining instances before
 * clearing HCI_LE_ADV, and on "disable all" marks every instance off.
 * NOTE(review): several guard/brace lines are missing from this span,
 * so the enable/disable branch boundaries cannot be fully confirmed.
 */
1305 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1306 struct sk_buff *skb)
1308 struct hci_cp_le_set_ext_adv_enable *cp;
1309 struct hci_cp_ext_adv_set *set;
1310 __u8 status = *((__u8 *) skb->data);
1311 struct adv_info *adv = NULL, *n;
1313 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1318 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
/* First adv set descriptor follows the fixed command header. */
1322 set = (void *)cp->data;
1326 if (cp->num_of_sets)
1327 adv = hci_find_adv_instance(hdev, set->handle);
1330 struct hci_conn *conn;
1332 hci_dev_set_flag(hdev, HCI_LE_ADV);
1335 adv->enabled = true;
1337 conn = hci_lookup_le_connect(hdev);
1339 queue_delayed_work(hdev->workqueue,
1340 &conn->le_conn_timeout,
1341 conn->conn_timeout);
1343 if (cp->num_of_sets) {
1345 adv->enabled = false;
1347 /* If just one instance was disabled check if there are
1348 * any other instance enabled before clearing HCI_LE_ADV
1350 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1356 /* All instances shall be considered disabled */
1357 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1359 adv->enabled = false;
1362 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1366 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Parameters: remembers whether
 * the configured scan is active or passive.
 */
1369 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1371 struct hci_cp_le_set_scan_param *cp;
1372 __u8 status = *((__u8 *) skb->data);
1374 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1379 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1385 hdev->le_scan_type = cp->type;
1387 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Scan_Parameters: reads the
 * scan type from the first per-PHY parameter block.
 */
1390 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1391 struct sk_buff *skb)
1393 struct hci_cp_le_set_ext_scan_params *cp;
1394 __u8 status = *((__u8 *) skb->data);
1395 struct hci_cp_le_scan_phy_params *phy_param;
1397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1402 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1406 phy_param = (void *)cp->data;
1410 hdev->le_scan_type = phy_param->type;
1412 hci_dev_unlock(hdev);
/* True when a deferred advertising report is buffered in the discovery
 * state (last_adv_addr set to something other than BDADDR_ANY).
 */
1415 static bool has_pending_adv_report(struct hci_dev *hdev)
1417 struct discovery_state *d = &hdev->discovery;
1419 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drops any buffered advertising report. */
1422 static void clear_pending_adv_report(struct hci_dev *hdev)
1424 struct discovery_state *d = &hdev->discovery;
1426 bacpy(&d->last_adv_addr, BDADDR_ANY);
1427 d->last_adv_data_len = 0;
/* Buffers one advertising report (address, type, RSSI, flags, AD data)
 * so it can be merged with a later scan response; oversized AD payloads
 * are rejected up front.
 */
1431 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1432 u8 bdaddr_type, s8 rssi, u32 flags,
1435 struct discovery_state *d = &hdev->discovery;
1437 if (len > HCI_MAX_AD_LENGTH)
1440 bacpy(&d->last_adv_addr, bdaddr);
1441 d->last_adv_addr_type = bdaddr_type;
1442 d->last_adv_rssi = rssi;
1443 d->last_adv_flags = flags;
1444 memcpy(d->last_adv_data, data, len);
1445 d->last_adv_data_len = len;
/* Shared tail for the legacy and extended LE scan-enable handlers:
 * updates HCI_LE_SCAN, flushes any pending advertising report on
 * disable, and reconciles the discovery state machine.
 */
1449 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1454 case LE_SCAN_ENABLE:
1455 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1456 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1457 clear_pending_adv_report(hdev);
1460 case LE_SCAN_DISABLE:
1461 /* We do this here instead of when setting DISCOVERY_STOPPED
1462 * since the latter would potentially require waiting for
1463 * inquiry to stop too.
1465 if (has_pending_adv_report(hdev)) {
1466 struct discovery_state *d = &hdev->discovery;
1468 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1469 d->last_adv_addr_type, NULL,
1470 d->last_adv_rssi, d->last_adv_flags,
1472 d->last_adv_data_len, NULL, 0);
1475 /* Cancel this timer so that we don't try to disable scanning
1476 * when it's already disabled.
1478 cancel_delayed_work(&hdev->le_scan_disable);
1480 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1482 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1483 * interrupted scanning due to a connect request. Mark
1484 * therefore discovery as stopped. If this was not
1485 * because of a connect request advertising might have
1486 * been disabled because of active scanning, so
1487 * re-enable it again if necessary.
1489 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
/* Tizen vendor branch: routes the state change through the LE-specific
 * discovery state machine instead of the shared one.
 */
1490 #ifndef TIZEN_BT /* The below line is kernel bug. */
1491 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1493 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1495 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1496 hdev->discovery.state == DISCOVERY_FINDING)
1497 hci_req_reenable_advertising(hdev);
1502 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1507 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE (legacy scan). */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cp_le_set_scan_enable *cp;
	/* First byte of a Command Complete payload is the status code. */
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the parameters we originally sent with the command. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);

	le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE (extended
 * scan). Shares completion logic with the legacy variant.
 */
static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cp_le_set_ext_scan_enable *cp;
	/* First byte of a Command Complete payload is the status code. */
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the parameters we originally sent with the command. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);

	le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for LE Read Number of Supported Advertising
 * Sets: record the controller's advertising-set capacity.
 */
static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,

	hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete handler for LE Read Accept (filter) List Size:
 * record how many entries the controller's accept list can hold.
 */
static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	hdev->le_accept_list_size = rp->size;
/* Command Complete handler for LE Clear Accept List: mirror the
 * controller-side clear in the host's shadow copy of the list.
 */
static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
struct sk_buff *skb)
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_bdaddr_list_clear(&hdev->le_accept_list);
	hci_dev_unlock(hdev);
/* Command Complete handler for LE Add Device To Accept List: on success,
 * add the same address to the host's shadow copy of the list.
 */
static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reply carries no address; recover it from the sent command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
	hci_dev_unlock(hdev);
/* Command Complete handler for LE Remove Device From Accept List: on
 * success, remove the same address from the host's shadow copy.
 */
static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reply carries no address; recover it from the sent command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
	hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Supported States: cache the
 * 8-byte LE states bitmask reported by the controller.
 */
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete handler for LE Read Suggested Default Data Length:
 * cache the default TX octets/time and notify mgmt (Tizen extension).
 */
static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);

	/* Tizen-specific mgmt notification of the read result. */
	mgmt_le_read_host_suggested_data_length_complete(hdev, rp->status);

	hci_dev_unlock(hdev);
/* Command Complete handler for LE Write Suggested Default Data Length:
 * on success, mirror the values we sent into hdev and notify mgmt
 * (Tizen extension).
 */
static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reply has no payload; recover the values from the command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);

	/* Tizen-specific mgmt notification of the write result. */
	mgmt_le_write_host_suggested_data_length_complete(hdev, status);
/* Command Complete handler for LE Add Device To Resolving List: on
 * success, add the address plus IRK to the host's shadow copy.
 */
static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reply carries no payload; recover it from the sent command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
	hci_dev_unlock(hdev);
/* Command Complete handler for LE Remove Device From Resolving List:
 * on success, remove the entry from the host's shadow copy.
 */
static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reply carries no payload; recover it from the sent command. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
	hci_dev_unlock(hdev);
/* Command Complete handler for LE Clear Resolving List: mirror the
 * controller-side clear in the host's shadow copy.
 */
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
struct sk_buff *skb)
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
	hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Resolving List Size: record how
 * many entries the controller's resolving list can hold.
 */
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	hdev->le_resolv_list_size = rp->size;
/* Command Complete handler for LE Set Address Resolution Enable: track
 * whether the controller performs RPA resolution (link-layer privacy)
 * via the HCI_LL_RPA_RESOLUTION flag.
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
struct sk_buff *skb)
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The sent parameter is a single enable/disable byte. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);

		hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
		hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Maximum Data Length: cache the
 * controller's TX/RX octet and time limits and notify mgmt (Tizen
 * extension).
 */
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

	/* Tizen-specific mgmt notification of the read result. */
	mgmt_le_read_maximum_data_length_complete(hdev, rp->status);
	hci_dev_unlock(hdev);
/* Command Complete handler for Write LE Host Supported: keep the local
 * extended-features copy (features[1][0]) and the HCI_LE_ENABLED /
 * HCI_ADVERTISING flags in sync with what was written.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reply carries no payload; recover the written values. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);

		/* LE Supported (Host) enabled. */
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
		/* LE Supported (Host) disabled: advertising cannot stay on. */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Simultaneous LE and BR/EDR to Same Device Capable (Host). */
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Parameters (legacy):
 * remember the own-address type used for advertising.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reply carries no payload; recover it from the sent command. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);

	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Advertising Parameters:
 * record the own-address type and the TX power selected by the
 * controller, then refresh the instance's advertising data since the
 * TX power may be included in it.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);

	hdev->adv_addr_type = cp->own_addr_type;

		/* Store in hdev for instance 0 */
		hdev->adv_tx_power = rp->tx_power;

		adv_instance = hci_find_adv_instance(hdev, cp->handle);
			adv_instance->tx_power = rp->tx_power;

	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
/* Tizen vendor extension: Command Complete handler for the vendor
 * "enable RSSI monitoring" command; forwards the result to mgmt.
 */
static void hci_cc_enable_rssi(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;

	BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
	       hdev->name, rp->status, rp->le_ext_opcode);

	mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Tizen vendor extension: Command Complete handler for the vendor
 * "get raw RSSI" command; forwards connection handle and RSSI (dBm)
 * to mgmt.
 */
static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;

	BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
	       hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);

	mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Tizen vendor extension: handle an RSSI link-alert vendor event and
 * forward it to mgmt.
 */
static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;

	BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);

	mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
/* Tizen vendor extension: demultiplex the LE vendor-specific group
 * event by its extended sub-code.
 */
static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
	__u8 event_le_ext_sub_code;

	BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
	       LE_META_VENDOR_SPECIFIC_GROUP_EVENT);

	/* Advance past the group-event header so the sub-handlers see
	 * their own payload at skb->data. ev still points at the
	 * pulled header, which remains valid within this skb.
	 */
	skb_pull(skb, sizeof(*ev));
	event_le_ext_sub_code = ev->event_le_ext_sub_code;

	switch (event_le_ext_sub_code) {
	case LE_RSSI_LINK_ALERT:
		hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
/* Tizen vendor extension: handle a multi-advertising state-change
 * vendor sub-event and forward instance, reason and handle to mgmt.
 */
static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;

	BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");

	mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
					ev->state_change_reason,
					ev->connection_handle);
/* Tizen vendor extension: top-level dispatcher for HCI vendor-specific
 * events, keyed on the first-level sub-code.
 */
static void hci_vendor_specific_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_vendor_specific *ev = (void *)skb->data;
	__u8 event_sub_code;

	BT_DBG("hci_vendor_specific_evt");

	/* Strip the outer vendor-event header before dispatching. */
	skb_pull(skb, sizeof(*ev));
	event_sub_code = ev->event_sub_code;

	switch (event_sub_code) {
	case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
		hci_vendor_specific_group_ext_evt(hdev, skb);

	case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
		hci_vendor_multi_adv_state_change_evt(hdev, skb);
/* LE Data Length Change meta event: store the new per-connection
 * TX/RX octet and time limits and notify mgmt (Tizen extension).
 */
static void hci_le_data_length_changed_complete_evt(struct hci_dev *hdev,
struct sk_buff *skb)
	struct hci_ev_le_data_len_change *ev = (void *)skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status", hdev->name);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
		conn->tx_len = le16_to_cpu(ev->tx_len);
		conn->tx_time = le16_to_cpu(ev->tx_time);
		conn->rx_len = le16_to_cpu(ev->rx_len);
		conn->rx_time = le16_to_cpu(ev->rx_time);

		/* Tizen-specific mgmt notification with the new limits. */
		mgmt_le_data_length_change_complete(hdev, &conn->dst,
						    conn->tx_len, conn->tx_time,
						    conn->rx_len, conn->rx_time);

	hci_dev_unlock(hdev);
/* Command Complete handler for Read RSSI: store the reported RSSI on
 * the matching connection.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
/* Command Complete handler for Read Transmit Power Level: store either
 * the current or the maximum TX power on the connection, depending on
 * which type was requested in the sent command.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	/* The reply does not echo the requested type; recover it. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));

	switch (sent->type) {
		/* Type 0x00: current transmit power level. */
		conn->tx_power = rp->tx_power;
		/* Type 0x01: maximum transmit power level. */
		conn->max_tx_power = rp->tx_power;

	hci_dev_unlock(hdev);
/* Command Complete handler for Write Simple Pairing Debug Mode: mirror
 * the written mode byte into hdev->ssp_debug_mode.
 */
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The sent parameter is a single mode byte. */
	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
		hdev->ssp_debug_mode = *mode;
/* Command Status handler for HCI Inquiry: on success mark inquiry as
 * active; on failure re-check any pending connection attempts.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for Create Connection (BR/EDR ACL). On
 * failure, close the pending connection unless the error was "command
 * disallowed" on an early attempt (page-timeout retry path); on
 * success, ensure a connection object exists for the address.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: keep the attempt alive
			 * for up to two retries before giving up.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				conn->state = BT_CONNECT2;
			/* No conn object yet (e.g. raw command): create one
			 * so the later Connection Complete can be matched.
			 */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
				bt_dev_err(hdev, "no memory for new connection");

	hci_dev_unlock(hdev);
/* Command Status handler for Add SCO Connection: on failure, close the
 * SCO link hanging off the parent ACL and notify its users.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);

	/* The command addresses the ACL link the SCO rides on. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);

	hci_dev_unlock(hdev);
/* Command Status handler for Authentication Requested: on failure
 * during connection setup (BT_CONFIG), report the failure and drop the
 * reference taken for the request.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for Set Connection Encryption: on failure
 * during connection setup (BT_CONFIG), report the failure and drop the
 * reference taken for the request.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Decide whether an outgoing BR/EDR connection still needs an explicit
 * authentication request before it can be declared up. Returns non-zero
 * when authentication should be initiated (outgoing BT_CONFIG link with
 * a security level that requires it), 0 otherwise.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
struct hci_conn *conn)
	if (conn->state != BT_CONFIG || !conn->out)

	/* SDP-only security never requires authentication. */
	if (conn->pending_sec_level == BT_SECURITY_SDP)

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan parameters learned during inquiry. Returns the result of
 * hci_send_cmd() (0 on successful queueing).
 */
static int hci_resolve_name(struct hci_dev *hdev,
struct inquiry_entry *e)
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	/* Page-scan parameters from inquiry speed up the name request. */
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next cache entry that still needs
 * its name. Returns true when a request was successfully started.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);

	if (hci_resolve_name(hdev, e) == 0) {
		/* Mark in-flight so it isn't picked again. */
		e->name_state = NAME_PENDING;
/* Process a resolved (or failed) remote name: update mgmt connected /
 * name state, record the outcome in the inquiry cache, and continue or
 * finish the discovery name-resolution phase.
 * NOTE(review): two differing mgmt_device_connected() call forms appear
 * below — presumably alternate TIZEN_BT preprocessor arms; confirm
 * against the full source.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
bdaddr_t *bdaddr, u8 *name, u8 name_len)
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
		if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, conn, 0, name, name_len);
			mgmt_device_name_update(hdev, bdaddr, name, name_len);
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of
	 * which are pending, there is no need to continue resolving a next
	 * name as it will be done upon receiving another Remote Name Request
	 * Complete
	 */
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
		/* Name request failed; remember so we don't retry forever. */
		e->name_state = NAME_NOT_KNOWN;

	if (hci_resolve_next_name(hdev))

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for Remote Name Request. On failure, resume
 * pending-name bookkeeping and, if the connection needs it, start
 * authentication directly since no name-complete event will arrive.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn))

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		/* We are the side asking for authentication. */
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);

	hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Supported Features: on
 * failure during connection setup, report it and drop the reference.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for Read Remote Extended Features: on failure
 * during connection setup, report it and drop the reference.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for Setup Synchronous Connection: on failure,
 * close the (e)SCO link hanging off the parent ACL and notify users.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);

	/* The command addresses the ACL link the (e)SCO rides on. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);

	hci_dev_unlock(hdev);
/* Command Status handler for Sniff Mode: on failure, clear the pending
 * mode-change flag and let any queued SCO setup proceed.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		/* SCO setup may have been deferred behind the mode change. */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);

	hci_dev_unlock(hdev);
/* Command Status handler for Exit Sniff Mode: mirror image of the
 * sniff-mode handler — clear pending state and resume SCO setup.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		/* SCO setup may have been deferred behind the mode change. */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);

	hci_dev_unlock(hdev);
/* Command Status handler for Disconnect: on failure, inform mgmt that
 * the disconnect did not happen and, for LE links, re-enable
 * advertising on the instance the connection was using.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

		if (conn->type == LE_LINK) {
			hdev->cur_adv_instance = conn->adv_instance;
			hci_req_reenable_advertising(hdev);

	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */

	hci_dev_unlock(hdev);
/* Shared Command Status bookkeeping for LE Create Connection (legacy
 * and extended): record initiator/responder addresses for SMP and arm
 * the connection-attempt timeout when not connecting via the accept
 * list.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
u8 peer_addr_type, u8 own_address_type,
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,

	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		switch (own_address_type) {
		case ADDR_LE_DEV_PUBLIC_RESOLVED:
			own_address_type = ADDR_LE_DEV_PUBLIC;
		case ADDR_LE_DEV_RANDOM_RESOLVED:
			own_address_type = ADDR_LE_DEV_RANDOM;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the accept list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
/* Command Status handler for LE Create Connection (legacy): delegate
 * the common bookkeeping to cs_le_create_conn().
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
/* Command Status handler for LE Extended Create Connection: delegate
 * the common bookkeeping to cs_le_create_conn().
 */
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_ext_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
/* Command Status handler for LE Read Remote Features: on failure
 * during connection setup, report it and drop the reference.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
		if (conn->state == BT_CONFIG) {
			hci_connect_cfm(conn, status);
			hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for LE Start Encryption: if the command was
 * rejected on a live connection, treat it as an authentication failure
 * and tear the link down.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));

	/* Encryption can only be started on an established link. */
	if (conn->state != BT_CONNECTED)

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status handler for Switch Role: on failure, clear the
 * pending role-switch flag on the connection.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
/* Inquiry Complete event: clear the inquiry flag, wake any waiters,
 * and advance mgmt discovery — either into name resolution or, when
 * nothing is left (and no simultaneous LE scan is running), to
 * DISCOVERY_STOPPED.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* The rest is mgmt discovery bookkeeping only. */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	if (discov->state != DISCOVERY_FINDING)

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
/* Inquiry Result event (no RSSI variant): add each reported device to
 * the inquiry cache and forward it to mgmt as a found device.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct inquiry_data data;
	/* Payload: one count byte followed by num_rsp inquiry_info entries. */
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	/* Validate the claimed count against the actual skb length. */
	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)

	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))

	for (; num_rsp; num_rsp--, info++) {

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* This event variant carries no RSSI or SSP information. */
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);

	hci_dev_unlock(hdev);
/* Connection Complete event (BR/EDR ACL and SCO). Matches or creates
 * the hci_conn object, then on success finishes link setup (handle,
 * state, auth/encrypt flags, remote-feature read, packet type); on
 * failure closes the connection and notifies mgmt.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);

	/* Connection may not exist if auto-connected. Check the bredr
	 * allowlist to see if this device is allowed to auto connect.
	 * If link is an ACL type, create a connection class
	 *
	 * Auto-connect will only occur if the event filter is
	 * programmed with a given address. Right now, event filter is
	 * only used during suspend.
	 */
	if (ev->link_type == ACL_LINK &&
	    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
			bt_dev_err(hdev, "no memory for new conn");

		if (ev->link_type != SCO_LINK)

		/* A SCO request may have been upgraded to eSCO and then
		 * fall back; match the eSCO object and retype it.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,

		conn->type = SCO_LINK;

	conn->handle = __le16_to_cpu(ev->handle);

	if (conn->type == ACL_LINK) {
		conn->state = BT_CONFIG;
		hci_conn_hold(conn);

		/* Keep the link alive longer when pairing is expected
		 * (no SSP and no stored link key).
		 */
		if (!conn->out && !hci_conn_ssp_enabled(conn) &&
		    !hci_find_link_key(hdev, &ev->bdaddr))
			conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		conn->state = BT_CONNECTED;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	/* Inherit adapter-wide auth/encrypt policy onto the link. */
	if (test_bit(HCI_AUTH, &hdev->flags))
		set_bit(HCI_CONN_AUTH, &conn->flags);

	if (test_bit(HCI_ENCRYPT, &hdev->flags))
		set_bit(HCI_CONN_ENCRYPT, &conn->flags);

	/* Get remote features */
	if (conn->type == ACL_LINK) {
		struct hci_cp_read_remote_features cp;
		cp.handle = ev->handle;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,

		hci_req_update_scan(hdev);

	/* Set packet type for incoming connection */
	if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
		struct hci_cp_change_conn_ptype cp;
		cp.handle = ev->handle;
		cp.pkt_type = cpu_to_le16(conn->pkt_type);
		hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),

	/* Tizen addition: enforce a supervision timeout when central —
	 * TODO confirm against full source.
	 */
	if (get_link_mode(conn) & HCI_LM_MASTER)
		hci_conn_change_supervision_timeout(conn,
						    LINK_SUPERVISION_TIMEOUT);

		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

		hci_connect_cfm(conn, ev->status);

	} else if (ev->link_type == SCO_LINK) {
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
				hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);

		hci_connect_cfm(conn, ev->status);

	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address with
 * reason "unacceptable BD_ADDR".
 */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event: apply reject-list / connectable policy,
 * record the device class in the inquiry cache, and either accept the
 * connection (ACL or sync), defer it to the L2CAP/SCO layer, or
 * reject it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,

	/* Let the protocol layers veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
		hci_reject_conn(hdev, &ev->bdaddr);

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
		hci_reject_conn(hdev, &ev->bdaddr);

	/* Connection accepted */

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	/* Tizen addition: only one SCO/eSCO link at a time — reject a
	 * second sync connection with "limited resources". TODO confirm
	 * against full source.
	 */
	if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
	    hci_conn_hash_lookup_sco(hdev)) {
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
		hci_dev_unlock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
			bt_dev_err(hdev, "no memory for new connection");

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		/* Try to take the central role if role switching works. */
		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default sync parameters: 8000 B/s each way, no latency
		 * or retransmission constraints.
		 */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
		/* Deferred: let the protocol layer decide later. */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);

	hci_dev_unlock(hdev);
3084 static u8 hci_to_mgmt_reason(u8 err)
3087 case HCI_ERROR_CONNECTION_TIMEOUT:
3088 return MGMT_DEV_DISCONN_TIMEOUT;
3089 case HCI_ERROR_REMOTE_USER_TERM:
3090 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3091 case HCI_ERROR_REMOTE_POWER_OFF:
3092 return MGMT_DEV_DISCONN_REMOTE;
3093 case HCI_ERROR_LOCAL_HOST_TERM:
3094 return MGMT_DEV_DISCONN_LOCAL_HOST;
3096 return MGMT_DEV_DISCONN_UNKNOWN;
3100 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3102 struct hci_ev_disconn_complete *ev = (void *) skb->data;
3104 struct hci_conn_params *params;
3105 struct hci_conn *conn;
3106 bool mgmt_connected;
3108 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3112 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3117 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3118 conn->dst_type, ev->status);
3122 conn->state = BT_CLOSED;
3124 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3126 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3127 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3129 reason = hci_to_mgmt_reason(ev->reason);
3131 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3132 reason, mgmt_connected);
3134 if (conn->type == ACL_LINK) {
3135 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3136 hci_remove_link_key(hdev, &conn->dst);
3138 hci_req_update_scan(hdev);
3141 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3143 switch (params->auto_connect) {
3144 case HCI_AUTO_CONN_LINK_LOSS:
3145 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3149 case HCI_AUTO_CONN_DIRECT:
3150 case HCI_AUTO_CONN_ALWAYS:
3151 list_del_init(¶ms->action);
3152 list_add(¶ms->action, &hdev->pend_le_conns);
3153 hci_update_background_scan(hdev);
3161 hci_disconn_cfm(conn, ev->reason);
3163 /* The suspend notifier is waiting for all devices to disconnect so
3164 * clear the bit from pending tasks and inform the wait queue.
3166 if (list_empty(&hdev->conn_hash.list) &&
3167 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
3168 wake_up(&hdev->suspend_wait_q);
3171 /* Re-enable advertising if necessary, since it might
3172 * have been disabled by the connection. From the
3173 * HCI_LE_Set_Advertise_Enable command description in
3174 * the core specification (v4.0):
3175 * "The Controller shall continue advertising until the Host
3176 * issues an LE_Set_Advertise_Enable command with
3177 * Advertising_Enable set to 0x00 (Advertising is disabled)
3178 * or until a connection is created or until the Advertising
3179 * is timed out due to Directed Advertising."
3181 if (conn->type == LE_LINK) {
3182 hdev->cur_adv_instance = conn->adv_instance;
3183 hci_req_reenable_advertising(hdev);
3189 if (type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
3193 iscan = test_bit(HCI_ISCAN, &hdev->flags);
3194 pscan = test_bit(HCI_PSCAN, &hdev->flags);
3195 if (!iscan && !pscan) {
3196 u8 scan_enable = SCAN_PAGE;
3198 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3199 sizeof(scan_enable), &scan_enable);
3205 hci_dev_unlock(hdev);
3208 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3210 struct hci_ev_auth_complete *ev = (void *) skb->data;
3211 struct hci_conn *conn;
3213 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3217 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3222 /* PIN or Key Missing patch */
3223 BT_DBG("remote_auth %x, remote_cap %x, auth_type %x, io_capability %x",
3224 conn->remote_auth, conn->remote_cap,
3225 conn->auth_type, conn->io_capability);
3227 if (ev->status == 0x06 && hci_conn_ssp_enabled(conn)) {
3228 struct hci_cp_auth_requested cp;
3230 BT_DBG("Pin or key missing");
3231 hci_remove_link_key(hdev, &conn->dst);
3232 cp.handle = cpu_to_le16(conn->handle);
3233 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
3240 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3242 if (!hci_conn_ssp_enabled(conn) &&
3243 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3244 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3246 set_bit(HCI_CONN_AUTH, &conn->flags);
3247 conn->sec_level = conn->pending_sec_level;
3250 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3251 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3253 mgmt_auth_failed(conn, ev->status);
3256 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3257 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3259 if (conn->state == BT_CONFIG) {
3260 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3261 struct hci_cp_set_conn_encrypt cp;
3262 cp.handle = ev->handle;
3264 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3267 conn->state = BT_CONNECTED;
3268 hci_connect_cfm(conn, ev->status);
3269 hci_conn_drop(conn);
3272 hci_auth_cfm(conn, ev->status);
3274 hci_conn_hold(conn);
3275 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3276 hci_conn_drop(conn);
3279 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3281 struct hci_cp_set_conn_encrypt cp;
3282 cp.handle = ev->handle;
3284 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3287 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3288 hci_encrypt_cfm(conn, ev->status);
3293 hci_dev_unlock(hdev);
3296 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3298 struct hci_ev_remote_name *ev = (void *) skb->data;
3299 struct hci_conn *conn;
3301 BT_DBG("%s", hdev->name);
3303 hci_conn_check_pending(hdev);
3307 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3309 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3312 if (ev->status == 0)
3313 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3314 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3316 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3322 if (!hci_outgoing_auth_needed(hdev, conn))
3325 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3326 struct hci_cp_auth_requested cp;
3328 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3330 cp.handle = __cpu_to_le16(conn->handle);
3331 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3335 hci_dev_unlock(hdev);
3338 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3339 u16 opcode, struct sk_buff *skb)
3341 const struct hci_rp_read_enc_key_size *rp;
3342 struct hci_conn *conn;
3345 BT_DBG("%s status 0x%02x", hdev->name, status);
3347 if (!skb || skb->len < sizeof(*rp)) {
3348 bt_dev_err(hdev, "invalid read key size response");
3352 rp = (void *)skb->data;
3353 handle = le16_to_cpu(rp->handle);
3357 conn = hci_conn_hash_lookup_handle(hdev, handle);
3361 /* While unexpected, the read_enc_key_size command may fail. The most
3362 * secure approach is to then assume the key size is 0 to force a
3366 bt_dev_err(hdev, "failed to read key size for handle %u",
3368 conn->enc_key_size = 0;
3370 conn->enc_key_size = rp->key_size;
3373 hci_encrypt_cfm(conn, 0);
3376 hci_dev_unlock(hdev);
3379 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3381 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3382 struct hci_conn *conn;
3384 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3388 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3394 /* Encryption implies authentication */
3395 set_bit(HCI_CONN_AUTH, &conn->flags);
3396 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3397 conn->sec_level = conn->pending_sec_level;
3399 /* P-256 authentication key implies FIPS */
3400 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3401 set_bit(HCI_CONN_FIPS, &conn->flags);
3403 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3404 conn->type == LE_LINK)
3405 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3407 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3408 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3412 /* We should disregard the current RPA and generate a new one
3413 * whenever the encryption procedure fails.
3415 if (ev->status && conn->type == LE_LINK) {
3416 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3417 hci_adv_instances_set_rpa_expired(hdev, true);
3420 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3422 /* Check link security requirements are met */
3423 if (!hci_conn_check_link_mode(conn))
3424 ev->status = HCI_ERROR_AUTH_FAILURE;
3426 if (ev->status && conn->state == BT_CONNECTED) {
3427 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3428 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3430 /* Notify upper layers so they can cleanup before
3433 hci_encrypt_cfm(conn, ev->status);
3434 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3435 hci_conn_drop(conn);
3439 /* Try reading the encryption key size for encrypted ACL links */
3440 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3441 struct hci_cp_read_enc_key_size cp;
3442 struct hci_request req;
3444 /* Only send HCI_Read_Encryption_Key_Size if the
3445 * controller really supports it. If it doesn't, assume
3446 * the default size (16).
3448 if (!(hdev->commands[20] & 0x10)) {
3449 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3453 hci_req_init(&req, hdev);
3455 cp.handle = cpu_to_le16(conn->handle);
3456 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
3458 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3459 bt_dev_err(hdev, "sending read key size failed");
3460 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3467 /* Set the default Authenticated Payload Timeout after
3468 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3469 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3470 * sent when the link is active and Encryption is enabled, the conn
3471 * type can be either LE or ACL and controller must support LMP Ping.
3472 * Ensure for AES-CCM encryption as well.
3474 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3475 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3476 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3477 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3478 struct hci_cp_write_auth_payload_to cp;
3480 cp.handle = cpu_to_le16(conn->handle);
3481 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3482 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3487 hci_encrypt_cfm(conn, ev->status);
3490 hci_dev_unlock(hdev);
3493 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3494 struct sk_buff *skb)
3496 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3497 struct hci_conn *conn;
3499 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3503 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3506 set_bit(HCI_CONN_SECURE, &conn->flags);
3508 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3510 hci_key_change_cfm(conn, ev->status);
3513 hci_dev_unlock(hdev);
3516 static void hci_remote_features_evt(struct hci_dev *hdev,
3517 struct sk_buff *skb)
3519 struct hci_ev_remote_features *ev = (void *) skb->data;
3520 struct hci_conn *conn;
3522 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3526 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3531 memcpy(conn->features[0], ev->features, 8);
3533 if (conn->state != BT_CONFIG)
3536 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3537 lmp_ext_feat_capable(conn)) {
3538 struct hci_cp_read_remote_ext_features cp;
3539 cp.handle = ev->handle;
3541 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3546 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3547 struct hci_cp_remote_name_req cp;
3548 memset(&cp, 0, sizeof(cp));
3549 bacpy(&cp.bdaddr, &conn->dst);
3550 cp.pscan_rep_mode = 0x02;
3551 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3552 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3553 mgmt_device_connected(hdev, conn, NULL, 0);
3555 if (!hci_outgoing_auth_needed(hdev, conn)) {
3556 conn->state = BT_CONNECTED;
3557 hci_connect_cfm(conn, ev->status);
3558 hci_conn_drop(conn);
3562 hci_dev_unlock(hdev);
3565 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3567 cancel_delayed_work(&hdev->cmd_timer);
3569 if (!test_bit(HCI_RESET, &hdev->flags)) {
3571 cancel_delayed_work(&hdev->ncmd_timer);
3572 atomic_set(&hdev->cmd_cnt, 1);
3574 schedule_delayed_work(&hdev->ncmd_timer,
3580 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3581 u16 *opcode, u8 *status,
3582 hci_req_complete_t *req_complete,
3583 hci_req_complete_skb_t *req_complete_skb)
3585 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3587 *opcode = __le16_to_cpu(ev->opcode);
3588 *status = skb->data[sizeof(*ev)];
3590 skb_pull(skb, sizeof(*ev));
3593 case HCI_OP_INQUIRY_CANCEL:
3594 hci_cc_inquiry_cancel(hdev, skb, status);
3597 case HCI_OP_PERIODIC_INQ:
3598 hci_cc_periodic_inq(hdev, skb);
3601 case HCI_OP_EXIT_PERIODIC_INQ:
3602 hci_cc_exit_periodic_inq(hdev, skb);
3605 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3606 hci_cc_remote_name_req_cancel(hdev, skb);
3609 case HCI_OP_ROLE_DISCOVERY:
3610 hci_cc_role_discovery(hdev, skb);
3613 case HCI_OP_READ_LINK_POLICY:
3614 hci_cc_read_link_policy(hdev, skb);
3617 case HCI_OP_WRITE_LINK_POLICY:
3618 hci_cc_write_link_policy(hdev, skb);
3621 case HCI_OP_READ_DEF_LINK_POLICY:
3622 hci_cc_read_def_link_policy(hdev, skb);
3625 case HCI_OP_WRITE_DEF_LINK_POLICY:
3626 hci_cc_write_def_link_policy(hdev, skb);
3630 hci_cc_reset(hdev, skb);
3633 case HCI_OP_READ_STORED_LINK_KEY:
3634 hci_cc_read_stored_link_key(hdev, skb);
3637 case HCI_OP_DELETE_STORED_LINK_KEY:
3638 hci_cc_delete_stored_link_key(hdev, skb);
3641 case HCI_OP_WRITE_LOCAL_NAME:
3642 hci_cc_write_local_name(hdev, skb);
3645 case HCI_OP_READ_LOCAL_NAME:
3646 hci_cc_read_local_name(hdev, skb);
3649 case HCI_OP_WRITE_AUTH_ENABLE:
3650 hci_cc_write_auth_enable(hdev, skb);
3653 case HCI_OP_WRITE_ENCRYPT_MODE:
3654 hci_cc_write_encrypt_mode(hdev, skb);
3657 case HCI_OP_WRITE_SCAN_ENABLE:
3658 hci_cc_write_scan_enable(hdev, skb);
3661 case HCI_OP_SET_EVENT_FLT:
3662 hci_cc_set_event_filter(hdev, skb);
3665 case HCI_OP_READ_CLASS_OF_DEV:
3666 hci_cc_read_class_of_dev(hdev, skb);
3669 case HCI_OP_WRITE_CLASS_OF_DEV:
3670 hci_cc_write_class_of_dev(hdev, skb);
3673 case HCI_OP_READ_VOICE_SETTING:
3674 hci_cc_read_voice_setting(hdev, skb);
3677 case HCI_OP_WRITE_VOICE_SETTING:
3678 hci_cc_write_voice_setting(hdev, skb);
3681 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3682 hci_cc_read_num_supported_iac(hdev, skb);
3685 case HCI_OP_WRITE_SSP_MODE:
3686 hci_cc_write_ssp_mode(hdev, skb);
3689 case HCI_OP_WRITE_SC_SUPPORT:
3690 hci_cc_write_sc_support(hdev, skb);
3693 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3694 hci_cc_read_auth_payload_timeout(hdev, skb);
3697 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3698 hci_cc_write_auth_payload_timeout(hdev, skb);
3701 case HCI_OP_READ_LOCAL_VERSION:
3702 hci_cc_read_local_version(hdev, skb);
3705 case HCI_OP_READ_LOCAL_COMMANDS:
3706 hci_cc_read_local_commands(hdev, skb);
3709 case HCI_OP_READ_LOCAL_FEATURES:
3710 hci_cc_read_local_features(hdev, skb);
3713 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3714 hci_cc_read_local_ext_features(hdev, skb);
3717 case HCI_OP_READ_BUFFER_SIZE:
3718 hci_cc_read_buffer_size(hdev, skb);
3721 case HCI_OP_READ_BD_ADDR:
3722 hci_cc_read_bd_addr(hdev, skb);
3725 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3726 hci_cc_read_local_pairing_opts(hdev, skb);
3729 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3730 hci_cc_read_page_scan_activity(hdev, skb);
3733 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3734 hci_cc_write_page_scan_activity(hdev, skb);
3737 case HCI_OP_READ_PAGE_SCAN_TYPE:
3738 hci_cc_read_page_scan_type(hdev, skb);
3741 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3742 hci_cc_write_page_scan_type(hdev, skb);
3745 case HCI_OP_READ_DATA_BLOCK_SIZE:
3746 hci_cc_read_data_block_size(hdev, skb);
3749 case HCI_OP_READ_FLOW_CONTROL_MODE:
3750 hci_cc_read_flow_control_mode(hdev, skb);
3753 case HCI_OP_READ_LOCAL_AMP_INFO:
3754 hci_cc_read_local_amp_info(hdev, skb);
3757 case HCI_OP_READ_CLOCK:
3758 hci_cc_read_clock(hdev, skb);
3761 case HCI_OP_READ_INQ_RSP_TX_POWER:
3762 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3765 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3766 hci_cc_read_def_err_data_reporting(hdev, skb);
3769 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3770 hci_cc_write_def_err_data_reporting(hdev, skb);
3773 case HCI_OP_PIN_CODE_REPLY:
3774 hci_cc_pin_code_reply(hdev, skb);
3777 case HCI_OP_PIN_CODE_NEG_REPLY:
3778 hci_cc_pin_code_neg_reply(hdev, skb);
3781 case HCI_OP_READ_LOCAL_OOB_DATA:
3782 hci_cc_read_local_oob_data(hdev, skb);
3785 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3786 hci_cc_read_local_oob_ext_data(hdev, skb);
3789 case HCI_OP_LE_READ_BUFFER_SIZE:
3790 hci_cc_le_read_buffer_size(hdev, skb);
3793 case HCI_OP_LE_READ_LOCAL_FEATURES:
3794 hci_cc_le_read_local_features(hdev, skb);
3797 case HCI_OP_LE_READ_ADV_TX_POWER:
3798 hci_cc_le_read_adv_tx_power(hdev, skb);
3801 case HCI_OP_USER_CONFIRM_REPLY:
3802 hci_cc_user_confirm_reply(hdev, skb);
3805 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3806 hci_cc_user_confirm_neg_reply(hdev, skb);
3809 case HCI_OP_USER_PASSKEY_REPLY:
3810 hci_cc_user_passkey_reply(hdev, skb);
3813 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3814 hci_cc_user_passkey_neg_reply(hdev, skb);
3817 case HCI_OP_LE_SET_RANDOM_ADDR:
3818 hci_cc_le_set_random_addr(hdev, skb);
3821 case HCI_OP_LE_SET_ADV_ENABLE:
3822 hci_cc_le_set_adv_enable(hdev, skb);
3825 case HCI_OP_LE_SET_SCAN_PARAM:
3826 hci_cc_le_set_scan_param(hdev, skb);
3829 case HCI_OP_LE_SET_SCAN_ENABLE:
3830 hci_cc_le_set_scan_enable(hdev, skb);
3833 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3834 hci_cc_le_read_accept_list_size(hdev, skb);
3837 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3838 hci_cc_le_clear_accept_list(hdev, skb);
3841 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3842 hci_cc_le_add_to_accept_list(hdev, skb);
3845 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3846 hci_cc_le_del_from_accept_list(hdev, skb);
3849 case HCI_OP_LE_READ_SUPPORTED_STATES:
3850 hci_cc_le_read_supported_states(hdev, skb);
3853 case HCI_OP_LE_READ_DEF_DATA_LEN:
3854 hci_cc_le_read_def_data_len(hdev, skb);
3857 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3858 hci_cc_le_write_def_data_len(hdev, skb);
3861 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3862 hci_cc_le_add_to_resolv_list(hdev, skb);
3865 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3866 hci_cc_le_del_from_resolv_list(hdev, skb);
3869 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3870 hci_cc_le_clear_resolv_list(hdev, skb);
3873 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3874 hci_cc_le_read_resolv_list_size(hdev, skb);
3877 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3878 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3881 case HCI_OP_LE_READ_MAX_DATA_LEN:
3882 hci_cc_le_read_max_data_len(hdev, skb);
3885 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3886 hci_cc_write_le_host_supported(hdev, skb);
3889 case HCI_OP_LE_SET_ADV_PARAM:
3890 hci_cc_set_adv_param(hdev, skb);
3893 case HCI_OP_READ_RSSI:
3894 hci_cc_read_rssi(hdev, skb);
3897 case HCI_OP_READ_TX_POWER:
3898 hci_cc_read_tx_power(hdev, skb);
3901 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3902 hci_cc_write_ssp_debug_mode(hdev, skb);
3905 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3906 hci_cc_le_set_ext_scan_param(hdev, skb);
3909 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3910 hci_cc_le_set_ext_scan_enable(hdev, skb);
3913 case HCI_OP_LE_SET_DEFAULT_PHY:
3914 hci_cc_le_set_default_phy(hdev, skb);
3917 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3918 hci_cc_le_read_num_adv_sets(hdev, skb);
3921 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3922 hci_cc_set_ext_adv_param(hdev, skb);
3925 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3926 hci_cc_le_set_ext_adv_enable(hdev, skb);
3929 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3930 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3933 case HCI_OP_LE_READ_TRANSMIT_POWER:
3934 hci_cc_le_read_transmit_power(hdev, skb);
3937 case HCI_OP_ENABLE_RSSI:
3938 hci_cc_enable_rssi(hdev, skb);
3941 case HCI_OP_GET_RAW_RSSI:
3942 hci_cc_get_raw_rssi(hdev, skb);
3946 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3950 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3952 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3955 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3957 "unexpected event for opcode 0x%4.4x", *opcode);
3961 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3962 queue_work(hdev->workqueue, &hdev->cmd_work);
3965 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3966 u16 *opcode, u8 *status,
3967 hci_req_complete_t *req_complete,
3968 hci_req_complete_skb_t *req_complete_skb)
3970 struct hci_ev_cmd_status *ev = (void *) skb->data;
3972 skb_pull(skb, sizeof(*ev));
3974 *opcode = __le16_to_cpu(ev->opcode);
3975 *status = ev->status;
3978 case HCI_OP_INQUIRY:
3979 hci_cs_inquiry(hdev, ev->status);
3982 case HCI_OP_CREATE_CONN:
3983 hci_cs_create_conn(hdev, ev->status);
3986 case HCI_OP_DISCONNECT:
3987 hci_cs_disconnect(hdev, ev->status);
3990 case HCI_OP_ADD_SCO:
3991 hci_cs_add_sco(hdev, ev->status);
3994 case HCI_OP_AUTH_REQUESTED:
3995 hci_cs_auth_requested(hdev, ev->status);
3998 case HCI_OP_SET_CONN_ENCRYPT:
3999 hci_cs_set_conn_encrypt(hdev, ev->status);
4002 case HCI_OP_REMOTE_NAME_REQ:
4003 hci_cs_remote_name_req(hdev, ev->status);
4006 case HCI_OP_READ_REMOTE_FEATURES:
4007 hci_cs_read_remote_features(hdev, ev->status);
4010 case HCI_OP_READ_REMOTE_EXT_FEATURES:
4011 hci_cs_read_remote_ext_features(hdev, ev->status);
4014 case HCI_OP_SETUP_SYNC_CONN:
4015 hci_cs_setup_sync_conn(hdev, ev->status);
4018 case HCI_OP_SNIFF_MODE:
4019 hci_cs_sniff_mode(hdev, ev->status);
4022 case HCI_OP_EXIT_SNIFF_MODE:
4023 hci_cs_exit_sniff_mode(hdev, ev->status);
4026 case HCI_OP_SWITCH_ROLE:
4027 hci_cs_switch_role(hdev, ev->status);
4030 case HCI_OP_LE_CREATE_CONN:
4031 hci_cs_le_create_conn(hdev, ev->status);
4034 case HCI_OP_LE_READ_REMOTE_FEATURES:
4035 hci_cs_le_read_remote_features(hdev, ev->status);
4038 case HCI_OP_LE_START_ENC:
4039 hci_cs_le_start_enc(hdev, ev->status);
4042 case HCI_OP_LE_EXT_CREATE_CONN:
4043 hci_cs_le_ext_create_conn(hdev, ev->status);
4047 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
4051 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4053 /* Indicate request completion if the command failed. Also, if
4054 * we're not waiting for a special event and we get a success
4055 * command status we should try to flag the request as completed
4056 * (since for this kind of commands there will not be a command
4060 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
4061 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4064 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4066 "unexpected event for opcode 0x%4.4x", *opcode);
4070 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4071 queue_work(hdev->workqueue, &hdev->cmd_work);
4074 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
4076 struct hci_ev_hardware_error *ev = (void *) skb->data;
4080 mgmt_hardware_error(hdev, ev->code);
4081 hci_dev_unlock(hdev);
4083 hdev->hw_error_code = ev->code;
4085 queue_work(hdev->req_workqueue, &hdev->error_reset);
4088 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4090 struct hci_ev_role_change *ev = (void *) skb->data;
4091 struct hci_conn *conn;
4093 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4097 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4100 conn->role = ev->role;
4102 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4104 hci_role_switch_cfm(conn, ev->status, ev->role);
4106 if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
4107 hci_conn_change_supervision_timeout(conn,
4108 LINK_SUPERVISION_TIMEOUT);
4112 hci_dev_unlock(hdev);
4115 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
4117 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
4120 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4121 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4125 if (skb->len < sizeof(*ev) ||
4126 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4127 BT_DBG("%s bad parameters", hdev->name);
4131 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
4133 for (i = 0; i < ev->num_hndl; i++) {
4134 struct hci_comp_pkts_info *info = &ev->handles[i];
4135 struct hci_conn *conn;
4136 __u16 handle, count;
4138 handle = __le16_to_cpu(info->handle);
4139 count = __le16_to_cpu(info->count);
4141 conn = hci_conn_hash_lookup_handle(hdev, handle);
4145 conn->sent -= count;
4147 switch (conn->type) {
4149 hdev->acl_cnt += count;
4150 if (hdev->acl_cnt > hdev->acl_pkts)
4151 hdev->acl_cnt = hdev->acl_pkts;
4155 if (hdev->le_pkts) {
4156 hdev->le_cnt += count;
4157 if (hdev->le_cnt > hdev->le_pkts)
4158 hdev->le_cnt = hdev->le_pkts;
4160 hdev->acl_cnt += count;
4161 if (hdev->acl_cnt > hdev->acl_pkts)
4162 hdev->acl_cnt = hdev->acl_pkts;
4167 hdev->sco_cnt += count;
4168 if (hdev->sco_cnt > hdev->sco_pkts)
4169 hdev->sco_cnt = hdev->sco_pkts;
4173 bt_dev_err(hdev, "unknown type %d conn %p",
4179 queue_work(hdev->workqueue, &hdev->tx_work);
4182 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4185 struct hci_chan *chan;
4187 switch (hdev->dev_type) {
4189 return hci_conn_hash_lookup_handle(hdev, handle);
4191 chan = hci_chan_lookup_handle(hdev, handle);
4196 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4203 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
4205 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
4208 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4209 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4213 if (skb->len < sizeof(*ev) ||
4214 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4215 BT_DBG("%s bad parameters", hdev->name);
4219 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
4222 for (i = 0; i < ev->num_hndl; i++) {
4223 struct hci_comp_blocks_info *info = &ev->handles[i];
4224 struct hci_conn *conn = NULL;
4225 __u16 handle, block_count;
4227 handle = __le16_to_cpu(info->handle);
4228 block_count = __le16_to_cpu(info->blocks);
4230 conn = __hci_conn_lookup_handle(hdev, handle);
4234 conn->sent -= block_count;
4236 switch (conn->type) {
4239 hdev->block_cnt += block_count;
4240 if (hdev->block_cnt > hdev->num_blocks)
4241 hdev->block_cnt = hdev->num_blocks;
4245 bt_dev_err(hdev, "unknown type %d conn %p",
4251 queue_work(hdev->workqueue, &hdev->tx_work);
4254 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4256 struct hci_ev_mode_change *ev = (void *) skb->data;
4257 struct hci_conn *conn;
4259 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4263 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4265 conn->mode = ev->mode;
4267 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4269 if (conn->mode == HCI_CM_ACTIVE)
4270 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4272 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4275 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4276 hci_sco_setup(conn, ev->status);
4279 hci_dev_unlock(hdev);
4282 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4284 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4285 struct hci_conn *conn;
4287 BT_DBG("%s", hdev->name);
4291 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4295 if (conn->state == BT_CONNECTED) {
4296 hci_conn_hold(conn);
4297 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4298 hci_conn_drop(conn);
4301 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4302 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4303 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4304 sizeof(ev->bdaddr), &ev->bdaddr);
4305 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4308 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4313 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4317 hci_dev_unlock(hdev);
4320 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4322 if (key_type == HCI_LK_CHANGED_COMBINATION)
4325 conn->pin_length = pin_len;
4326 conn->key_type = key_type;
4329 case HCI_LK_LOCAL_UNIT:
4330 case HCI_LK_REMOTE_UNIT:
4331 case HCI_LK_DEBUG_COMBINATION:
4333 case HCI_LK_COMBINATION:
4335 conn->pending_sec_level = BT_SECURITY_HIGH;
4337 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4339 case HCI_LK_UNAUTH_COMBINATION_P192:
4340 case HCI_LK_UNAUTH_COMBINATION_P256:
4341 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4343 case HCI_LK_AUTH_COMBINATION_P192:
4344 conn->pending_sec_level = BT_SECURITY_HIGH;
4346 case HCI_LK_AUTH_COMBINATION_P256:
4347 conn->pending_sec_level = BT_SECURITY_FIPS;
4352 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4354 struct hci_ev_link_key_req *ev = (void *) skb->data;
4355 struct hci_cp_link_key_reply cp;
4356 struct hci_conn *conn;
4357 struct link_key *key;
4359 BT_DBG("%s", hdev->name);
4361 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4366 key = hci_find_link_key(hdev, &ev->bdaddr);
4368 BT_DBG("%s link key not found for %pMR", hdev->name,
4373 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4376 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4378 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4380 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4381 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4382 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4383 BT_DBG("%s ignoring unauthenticated key", hdev->name);
4387 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4388 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4389 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4390 BT_DBG("%s ignoring key unauthenticated for high security",
4395 conn_set_key(conn, key->type, key->pin_len);
4398 bacpy(&cp.bdaddr, &ev->bdaddr);
4399 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4401 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4403 hci_dev_unlock(hdev);
4408 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4409 hci_dev_unlock(hdev);
/* HCI Link Key Notification event: the controller generated or changed a
 * link key for the ACL link to ev->bdaddr.  Updates per-connection key
 * state, stores the key when the mgmt interface is active and notifies
 * user space.  NOTE(review): this chunk is an elided extract -- locking,
 * NULL checks and some braces from the original are not visible here.
 */
4412 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4414 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4415 struct hci_conn *conn;
4416 struct link_key *key;
4420 BT_DBG("%s", hdev->name);
4424 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Hold/drop pair refreshes the disconnect timeout on the connection */
4428 hci_conn_hold(conn);
4429 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4430 hci_conn_drop(conn);
4432 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4433 conn_set_key(conn, ev->key_type, conn->pin_length);
/* Without mgmt there is no user-space key storage to update */
4435 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4438 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4439 ev->key_type, pin_len, &persistent);
4443 /* Update connection information since adding the key will have
4444 * fixed up the type in the case of changed combination keys.
4446 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4447 conn_set_key(conn, key->type, key->pin_len);
4449 mgmt_new_link_key(hdev, key, persistent);
4451 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4452 * is set. If it's not set simply remove the key from the kernel
4453 * list (we've still notified user space about it but with
4454 * store_hint being 0).
4456 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4457 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4458 list_del_rcu(&key->list);
4459 kfree_rcu(key, rcu);
/* Mirror the persistence decision into the FLUSH_KEY connection flag */
4464 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4466 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4469 hci_dev_unlock(hdev);
/* HCI Read Clock Offset Complete event: on success, cache the reported
 * clock offset (and refresh the timestamp) in the inquiry cache entry of
 * the remote device.  (Elided extract; lock/brace lines not all visible.)
 */
4472 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4474 struct hci_ev_clock_offset *ev = (void *) skb->data;
4475 struct hci_conn *conn;
4477 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4481 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4482 if (conn && !ev->status) {
4483 struct inquiry_entry *ie;
4485 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
/* Offset is kept little-endian, matching the inquiry cache format */
4487 ie->data.clock_offset = ev->clock_offset;
4488 ie->timestamp = jiffies;
4492 hci_dev_unlock(hdev);
/* HCI Connection Packet Type Changed event: record the newly negotiated
 * packet types on the affected connection (success case only).
 */
4495 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4497 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4498 struct hci_conn *conn;
4500 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4504 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4505 if (conn && !ev->status)
4506 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4508 hci_dev_unlock(hdev);
/* HCI Page Scan Repetition Mode Change event: refresh the cached page
 * scan repetition mode and timestamp in the device's inquiry cache entry.
 */
4511 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4513 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4514 struct inquiry_entry *ie;
4516 BT_DBG("%s", hdev->name);
4520 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4522 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4523 ie->timestamp = jiffies;
4526 hci_dev_unlock(hdev);
/* HCI Inquiry Result with RSSI event: parse one or more inquiry responses,
 * update the inquiry cache and report each found device to mgmt.  Two wire
 * formats exist -- with and without the pscan_mode byte -- distinguished
 * below by dividing the payload length by the response count.  Results are
 * ignored while a periodic inquiry is active.  (Elided extract; length
 * checks/braces from the original are not all visible here.)
 */
4529 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4530 struct sk_buff *skb)
4532 struct inquiry_data data;
4533 int num_rsp = *((__u8 *) skb->data);
4535 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4540 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Variant carrying the extra pscan_mode field per response */
4545 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4546 struct inquiry_info_with_rssi_and_pscan_mode *info;
4547 info = (void *) (skb->data + 1);
4549 if (skb->len < num_rsp * sizeof(*info) + 1)
4552 for (; num_rsp; num_rsp--, info++) {
4555 bacpy(&data.bdaddr, &info->bdaddr);
4556 data.pscan_rep_mode = info->pscan_rep_mode;
4557 data.pscan_period_mode = info->pscan_period_mode;
4558 data.pscan_mode = info->pscan_mode;
4559 memcpy(data.dev_class, info->dev_class, 3);
4560 data.clock_offset = info->clock_offset;
4561 data.rssi = info->rssi;
4562 data.ssp_mode = 0x00;
4564 flags = hci_inquiry_cache_update(hdev, &data, false);
4566 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4567 info->dev_class, info->rssi,
4568 flags, NULL, 0, NULL, 0);
/* Standard variant without pscan_mode; it is reported as 0x00 */
4571 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4573 if (skb->len < num_rsp * sizeof(*info) + 1)
4576 for (; num_rsp; num_rsp--, info++) {
4579 bacpy(&data.bdaddr, &info->bdaddr);
4580 data.pscan_rep_mode = info->pscan_rep_mode;
4581 data.pscan_period_mode = info->pscan_period_mode;
4582 data.pscan_mode = 0x00;
4583 memcpy(data.dev_class, info->dev_class, 3);
4584 data.clock_offset = info->clock_offset;
4585 data.rssi = info->rssi;
4586 data.ssp_mode = 0x00;
4588 flags = hci_inquiry_cache_update(hdev, &data, false);
4590 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4591 info->dev_class, info->rssi,
4592 flags, NULL, 0, NULL, 0);
4597 hci_dev_unlock(hdev);
/* HCI Read Remote Extended Features Complete event: store the reported
 * feature page on the connection, derive SSP/SC state from host features
 * page 1, and -- while still in BT_CONFIG -- continue connection setup
 * (remote name request or mgmt connected notification, then possible
 * transition to BT_CONNECTED).  (Elided extract; some lock/branch lines
 * from the original are not visible here.)
 */
4600 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4601 struct sk_buff *skb)
4603 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4604 struct hci_conn *conn;
4606 BT_DBG("%s", hdev->name);
4610 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4614 if (ev->page < HCI_MAX_PAGES)
4615 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote host's SSP/SC support bits */
4617 if (!ev->status && ev->page == 0x01) {
4618 struct inquiry_entry *ie;
4620 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4622 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4624 if (ev->features[0] & LMP_HOST_SSP) {
4625 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4627 /* It is mandatory by the Bluetooth specification that
4628 * Extended Inquiry Results are only used when Secure
4629 * Simple Pairing is enabled, but some devices violate
4632 * To make these devices work, the internal SSP
4633 * enabled flag needs to be cleared if the remote host
4634 * features do not indicate SSP support */
4635 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4638 if (ev->features[0] & LMP_HOST_SC)
4639 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
/* Setup continuation below only applies during initial configuration */
4642 if (conn->state != BT_CONFIG)
4645 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4646 struct hci_cp_remote_name_req cp;
4647 memset(&cp, 0, sizeof(cp));
4648 bacpy(&cp.bdaddr, &conn->dst);
4649 cp.pscan_rep_mode = 0x02;
4650 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4651 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4652 mgmt_device_connected(hdev, conn, NULL, 0);
4654 if (!hci_outgoing_auth_needed(hdev, conn)) {
4655 conn->state = BT_CONNECTED;
4656 hci_connect_cfm(conn, ev->status);
4657 hci_conn_drop(conn);
4661 hci_dev_unlock(hdev);
/* HCI Synchronous Connection Complete event (SCO/eSCO).  Validates the
 * link type, finds the matching connection object (falling back from SCO
 * to eSCO lookup), and on success registers the connection; on a set of
 * retryable errors it downgrades the packet types and retries the setup.
 * Duplicate success events for an already-connected handle are rejected
 * to avoid double registration.  (Elided extract; some goto/brace lines
 * from the original are not visible here.)
 */
4664 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4665 struct sk_buff *skb)
4667 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4668 struct hci_conn *conn;
4670 switch (ev->link_type) {
4675 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4676 * for HCI_Synchronous_Connection_Complete is limited to
4677 * either SCO or eSCO
4679 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4683 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4687 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4689 if (ev->link_type == ESCO_LINK)
4692 /* When the link type in the event indicates SCO connection
4693 * and lookup of the connection object fails, then check
4694 * if an eSCO connection object exists.
4696 * The core limits the synchronous connections to either
4697 * SCO or eSCO. The eSCO connection is preferred and tried
4698 * to be setup first and until successfully established,
4699 * the link type will be hinted as eSCO.
4701 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4706 switch (ev->status) {
4708 /* The synchronous connection complete event should only be
4709 * sent once per new connection. Receiving a successful
4710 * complete event when the connection status is already
4711 * BT_CONNECTED means that the device is misbehaving and sent
4712 * multiple complete event packets for the same new connection.
4714 * Registering the device more than once can corrupt kernel
4715 * memory, hence upon detecting this invalid event, we report
4716 * an error and ignore the packet.
4718 if (conn->state == BT_CONNECTED) {
4719 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4723 conn->handle = __le16_to_cpu(ev->handle);
4724 conn->state = BT_CONNECTED;
4725 conn->type = ev->link_type;
4727 hci_debugfs_create_conn(conn);
4728 hci_conn_add_sysfs(conn);
/* Failure codes below trigger a retry with reduced packet types */
4731 case 0x10: /* Connection Accept Timeout */
4732 case 0x0d: /* Connection Rejected due to Limited Resources */
4733 case 0x11: /* Unsupported Feature or Parameter Value */
4734 case 0x1c: /* SCO interval rejected */
4735 case 0x1a: /* Unsupported Remote Feature */
4736 case 0x1e: /* Invalid LMP Parameters */
4737 case 0x1f: /* Unspecified error */
4738 case 0x20: /* Unsupported LMP Parameter value */
4740 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4741 (hdev->esco_type & EDR_ESCO_MASK);
4742 if (hci_setup_sync(conn, conn->link->handle))
4748 conn->state = BT_CLOSED;
4752 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Let the driver adapt its audio routing to the negotiated air mode */
4754 switch (ev->air_mode) {
4757 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4761 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4765 hci_connect_cfm(conn, ev->status);
4770 hci_dev_unlock(hdev);
/* Walk the TLV-style EIR (Extended Inquiry Response) data and return the
 * number of meaningful bytes: each field is one length byte followed by
 * field_len bytes of payload.  (Elided extract: the zero-length-field
 * terminator check and return statements of the original are not visible
 * here.)
 */
4773 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4777 while (parsed < eir_len) {
4778 u8 field_len = eir[0];
4783 parsed += field_len + 1;
4784 eir += field_len + 1;
/* HCI Extended Inquiry Result event: like the RSSI variant but each
 * response also carries EIR data.  Updates the inquiry cache (using the
 * presence of a complete name in the EIR to decide name_known) and
 * forwards device-found events, including the EIR payload, to mgmt.
 * Skipped during periodic inquiry.  (Elided extract.)
 */
4790 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4791 struct sk_buff *skb)
4793 struct inquiry_data data;
4794 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4795 int num_rsp = *((__u8 *) skb->data);
4798 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4800 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4803 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4808 for (; num_rsp; num_rsp--, info++) {
4812 bacpy(&data.bdaddr, &info->bdaddr);
4813 data.pscan_rep_mode = info->pscan_rep_mode;
4814 data.pscan_period_mode = info->pscan_period_mode;
4815 data.pscan_mode = 0x00;
4816 memcpy(data.dev_class, info->dev_class, 3);
4817 data.clock_offset = info->clock_offset;
4818 data.rssi = info->rssi;
/* EIR implies Secure Simple Pairing support on the remote */
4819 data.ssp_mode = 0x01;
4821 if (hci_dev_test_flag(hdev, HCI_MGMT))
4822 name_known = eir_get_data(info->data,
4824 EIR_NAME_COMPLETE, NULL);
4828 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4830 eir_len = eir_get_length(info->data, sizeof(info->data));
4832 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4833 info->dev_class, info->rssi,
4834 flags, info->data, eir_len, NULL, 0);
4837 hci_dev_unlock(hdev);
/* HCI Encryption Key Refresh Complete event.  Only LE links are handled
 * here (BR/EDR takes the auth_complete path).  On failure of an
 * established connection the link is disconnected; otherwise the pending
 * security level is promoted and connect/auth confirmations are issued.
 * (Elided extract; some goto/brace lines not visible.)
 */
4840 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4841 struct sk_buff *skb)
4843 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4844 struct hci_conn *conn;
4846 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4847 __le16_to_cpu(ev->handle));
4851 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4855 /* For BR/EDR the necessary steps are taken through the
4856 * auth_complete event.
4858 if (conn->type != LE_LINK)
4862 conn->sec_level = conn->pending_sec_level;
4864 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* A failed refresh on a live connection forces a disconnect */
4866 if (ev->status && conn->state == BT_CONNECTED) {
4867 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4868 hci_conn_drop(conn);
4872 if (conn->state == BT_CONFIG) {
4874 conn->state = BT_CONNECTED;
4876 hci_connect_cfm(conn, ev->status);
4877 hci_conn_drop(conn);
4879 hci_auth_cfm(conn, ev->status);
4881 hci_conn_hold(conn);
4882 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4883 hci_conn_drop(conn);
4887 hci_dev_unlock(hdev);
/* Compute the authentication requirements to send in an IO Capability
 * Reply, reconciling the remote's requested requirements with our own IO
 * capability.  The low bit of the auth value encodes the MITM flag; MITM
 * is only kept when both sides have usable IO capabilities.
 */
4890 static u8 hci_get_auth_req(struct hci_conn *conn)
4893 if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
4894 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4895 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4896 return HCI_AT_GENERAL_BONDING_MITM;
4900 /* If remote requests no-bonding follow that lead */
4901 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4902 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4903 return conn->remote_auth | (conn->auth_type & 0x01);
4905 /* If both remote and local have enough IO capabilities, require
4908 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4909 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4910 return conn->remote_auth | 0x01;
4912 /* No MITM protection possible so ignore remote requirement */
4913 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Return the OOB-data-present value for the IO Capability Reply of a
 * BR/EDR pairing: checks whether stored remote OOB data exists and, with
 * Secure Connections (Only) enabled, whether the required P-256 values
 * are non-zero; otherwise the P-192 values are checked.  (Elided
 * extract; the early-return and final return lines are not all visible.)
 */
4916 static u8 bredr_oob_data_present(struct hci_conn *conn)
4918 struct hci_dev *hdev = conn->hdev;
4919 struct oob_data *data;
4921 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4925 if (bredr_sc_enabled(hdev)) {
4926 /* When Secure Connections is enabled, then just
4927 * return the present value stored with the OOB
4928 * data. The stored value contains the right present
4929 * information. However it can only be trusted when
4930 * not in Secure Connection Only mode.
4932 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4933 return data->present;
4935 /* When Secure Connections Only mode is enabled, then
4936 * the P-256 values are required. If they are not
4937 * available, then do not declare that OOB data is
4940 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4941 !memcmp(data->hash256, ZERO_KEY, 16))
4947 /* When Secure Connections is not enabled or actually
4948 * not supported by the hardware, then check that if
4949 * P-192 data values are present.
4951 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4952 !memcmp(data->hash192, ZERO_KEY, 16))
/* HCI IO Capability Request event: the controller asks for our IO
 * capabilities during SSP pairing.  If pairing is allowed (bondable, we
 * initiated, or remote wants no bonding) reply with our capability, the
 * computed auth requirement and OOB presence; otherwise send a negative
 * reply with "pairing not allowed".  (Elided extract; lock/goto lines
 * are not all visible.)
 */
4958 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4960 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4961 struct hci_conn *conn;
4963 BT_DBG("%s", hdev->name);
4967 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4971 hci_conn_hold(conn);
4973 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4976 /* Allow pairing if we're pairable, the initiators of the
4977 * pairing or if the remote is not requesting bonding.
4979 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4980 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4981 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4982 struct hci_cp_io_capability_reply cp;
4984 bacpy(&cp.bdaddr, &ev->bdaddr);
4985 /* Change the IO capability from KeyboardDisplay
4986 * to DisplayYesNo as it is not supported by BT spec. */
4987 cp.capability = (conn->io_capability == 0x04) ?
4988 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4990 /* If we are initiators, there is no remote information yet */
4991 if (conn->remote_auth == 0xff) {
4992 /* Request MITM protection if our IO caps allow it
4993 * except for the no-bonding case.
4995 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4996 conn->auth_type != HCI_AT_NO_BONDING)
4997 conn->auth_type |= 0x01;
4999 conn->auth_type = hci_get_auth_req(conn);
5002 /* If we're not bondable, force one of the non-bondable
5003 * authentication requirement values.
5005 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5006 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5008 cp.authentication = conn->auth_type;
5009 cp.oob_data = bredr_oob_data_present(conn);
5011 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5014 struct hci_cp_io_capability_neg_reply cp;
5016 bacpy(&cp.bdaddr, &ev->bdaddr);
5017 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5019 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5024 hci_dev_unlock(hdev);
/* HCI IO Capability Response event: cache the remote side's advertised
 * IO capability and authentication requirements on the connection for
 * later pairing decisions.
 */
5027 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
5029 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
5030 struct hci_conn *conn;
5032 BT_DBG("%s", hdev->name);
5036 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5040 conn->remote_cap = ev->capability;
5041 conn->remote_auth = ev->authentication;
5044 hci_dev_unlock(hdev);
/* HCI User Confirmation Request event (SSP numeric comparison).  Decides
 * between rejecting (MITM required but remote can't provide it),
 * auto-accepting (no MITM needed, optionally after a configurable delay),
 * or punting to user space via mgmt with an appropriate confirm_hint.
 * (Elided extract; some goto/brace lines are not visible here.)
 */
5047 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
5048 struct sk_buff *skb)
5050 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
5051 int loc_mitm, rem_mitm, confirm_hint = 0;
5052 struct hci_conn *conn;
5054 BT_DBG("%s", hdev->name);
5058 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5061 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* MITM requirement is encoded in the low bit of each auth value */
5065 loc_mitm = (conn->auth_type & 0x01);
5066 rem_mitm = (conn->remote_auth & 0x01);
5068 /* If we require MITM but the remote device can't provide that
5069 * (it has NoInputNoOutput) then reject the confirmation
5070 * request. We check the security level here since it doesn't
5071 * necessarily match conn->auth_type.
5073 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5074 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5075 BT_DBG("Rejecting request: remote device can't provide MITM");
5076 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5077 sizeof(ev->bdaddr), &ev->bdaddr);
5081 /* If no side requires MITM protection; auto-accept */
5082 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5083 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5085 /* If we're not the initiators request authorization to
5086 * proceed from user space (mgmt_user_confirm with
5087 * confirm_hint set to 1). The exception is if neither
5088 * side had MITM or if the local IO capability is
5089 * NoInputNoOutput, in which case we do auto-accept
5091 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5092 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5093 (loc_mitm || rem_mitm)) {
5094 BT_DBG("Confirming auto-accept as acceptor");
5099 /* If there already exists link key in local host, leave the
5100 * decision to user space since the remote device could be
5101 * legitimate or malicious.
5103 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5104 bt_dev_dbg(hdev, "Local host already has link key");
5109 BT_DBG("Auto-accept of user confirmation with %ums delay",
5110 hdev->auto_accept_delay);
/* Delayed auto-accept runs from the device workqueue */
5112 if (hdev->auto_accept_delay > 0) {
5113 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5114 queue_delayed_work(conn->hdev->workqueue,
5115 &conn->auto_accept_work, delay);
5119 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5120 sizeof(ev->bdaddr), &ev->bdaddr);
5125 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5126 le32_to_cpu(ev->passkey), confirm_hint);
5129 hci_dev_unlock(hdev);
/* HCI User Passkey Request event: forward the request straight to user
 * space via mgmt when the mgmt interface is in use.
 */
5132 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
5133 struct sk_buff *skb)
5135 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
5137 BT_DBG("%s", hdev->name);
5139 if (hci_dev_test_flag(hdev, HCI_MGMT))
5140 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* HCI User Passkey Notification event: record the passkey the controller
 * generated (to be displayed locally), reset the entered-digit counter,
 * and notify user space via mgmt.
 */
5143 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
5144 struct sk_buff *skb)
5146 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
5147 struct hci_conn *conn;
5149 BT_DBG("%s", hdev->name);
5151 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5155 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5156 conn->passkey_entered = 0;
5158 if (hci_dev_test_flag(hdev, HCI_MGMT))
5159 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5160 conn->dst_type, conn->passkey_notify,
5161 conn->passkey_entered);
/* HCI Keypress Notification event: track how many passkey digits the
 * remote has entered/erased/cleared and relay the progress to user space
 * via mgmt.  (Elided extract; the switch statement head and break lines
 * are not all visible here.)
 */
5164 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
5166 struct hci_ev_keypress_notify *ev = (void *) skb->data;
5167 struct hci_conn *conn;
5169 BT_DBG("%s", hdev->name);
5171 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5176 case HCI_KEYPRESS_STARTED:
5177 conn->passkey_entered = 0;
5180 case HCI_KEYPRESS_ENTERED:
5181 conn->passkey_entered++;
5184 case HCI_KEYPRESS_ERASED:
5185 conn->passkey_entered--;
5188 case HCI_KEYPRESS_CLEARED:
5189 conn->passkey_entered = 0;
5192 case HCI_KEYPRESS_COMPLETED:
5196 if (hci_dev_test_flag(hdev, HCI_MGMT))
5197 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5198 conn->dst_type, conn->passkey_notify,
5199 conn->passkey_entered);
/* HCI Simple Pairing Complete event: reset cached remote auth state and,
 * for failures of pairings we did not initiate, report auth failure to
 * user space (initiator failures are reported via auth_complete instead).
 */
5202 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
5203 struct sk_buff *skb)
5205 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
5206 struct hci_conn *conn;
5208 BT_DBG("%s", hdev->name);
5212 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5216 /* Reset the authentication requirement to unknown */
5217 conn->remote_auth = 0xff;
5219 /* To avoid duplicate auth_failed events to user space we check
5220 * the HCI_CONN_AUTH_PEND flag which will be set if we
5221 * initiated the authentication. A traditional auth_complete
5222 * event gets always produced as initiator and is also mapped to
5223 * the mgmt_auth_failed event */
5224 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5225 mgmt_auth_failed(conn, ev->status);
/* Drop the reference taken when the IO capability request arrived */
5227 hci_conn_drop(conn);
5230 hci_dev_unlock(hdev);
/* HCI Remote Host Supported Features Notification event: cache the
 * remote host feature page on the connection (if any) and mirror the SSP
 * support bit into the device's inquiry cache entry.
 */
5233 static void hci_remote_host_features_evt(struct hci_dev *hdev,
5234 struct sk_buff *skb)
5236 struct hci_ev_remote_host_features *ev = (void *) skb->data;
5237 struct inquiry_entry *ie;
5238 struct hci_conn *conn;
5240 BT_DBG("%s", hdev->name);
5244 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Host features are stored as feature page 1 */
5246 memcpy(conn->features[1], ev->features, 8);
5248 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5250 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5252 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event: answer with stored remote OOB data.
 * Without stored data a negative reply is sent.  With Secure Connections
 * support the extended (P-192 + P-256) reply is used, zeroing the P-192
 * values in SC-only mode; otherwise the legacy P-192 reply is sent.
 * (Elided extract; goto/brace lines not all visible.)
 */
5255 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5256 struct sk_buff *skb)
5258 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5259 struct oob_data *data;
5261 BT_DBG("%s", hdev->name);
5265 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5268 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5270 struct hci_cp_remote_oob_data_neg_reply cp;
5272 bacpy(&cp.bdaddr, &ev->bdaddr);
5273 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5278 if (bredr_sc_enabled(hdev)) {
5279 struct hci_cp_remote_oob_ext_data_reply cp;
5281 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-only mode P-192 values must not be exposed */
5282 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5283 memset(cp.hash192, 0, sizeof(cp.hash192));
5284 memset(cp.rand192, 0, sizeof(cp.rand192));
5286 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5287 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5289 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5290 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5292 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5295 struct hci_cp_remote_oob_data_reply cp;
5297 bacpy(&cp.bdaddr, &ev->bdaddr);
5298 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5299 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5301 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5306 hci_dev_unlock(hdev);
5309 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (HS/AMP support only): look up the physical
 * link by handle and kick off reading the final local AMP assoc data.
 */
5310 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5312 struct hci_ev_channel_selected *ev = (void *)skb->data;
5313 struct hci_conn *hcon;
5315 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5317 skb_pull(skb, sizeof(*ev));
5319 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5323 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: mark the AMP physical link connected,
 * inherit the peer address from the underlying BR/EDR connection, register
 * the connection and confirm to the AMP manager.  (Elided extract; status
 * check and error paths from the original are not visible here.)
 */
5326 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5327 struct sk_buff *skb)
5329 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5330 struct hci_conn *hcon, *bredr_hcon;
5332 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5337 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5349 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5351 hcon->state = BT_CONNECTED;
5352 bacpy(&hcon->dst, &bredr_hcon->dst);
/* Hold/drop pair just refreshes the disconnect timeout */
5354 hci_conn_hold(hcon);
5355 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5356 hci_conn_drop(hcon);
5358 hci_debugfs_create_conn(hcon);
5359 hci_conn_add_sysfs(hcon);
5361 amp_physical_cfm(bredr_hcon, hcon);
5364 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create an hci_chan for the new logical
 * link on the physical connection and, when an L2CAP bridge channel is
 * pending on the AMP manager, confirm logical-link setup to L2CAP.
 * (Elided extract; NULL checks/unlock lines are not all visible here.)
 */
5367 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5369 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5370 struct hci_conn *hcon;
5371 struct hci_chan *hchan;
5372 struct amp_mgr *mgr;
5374 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5375 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5378 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5382 /* Create AMP hchan */
5383 hchan = hci_chan_create(hcon);
5387 hchan->handle = le16_to_cpu(ev->handle);
5390 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5392 mgr = hcon->amp_mgr;
5393 if (mgr && mgr->bredr_chan) {
5394 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5396 l2cap_chan_lock(bredr_chan);
/* AMP data path uses the controller's block MTU */
5398 bredr_chan->conn->mtu = hdev->block_mtu;
5399 l2cap_logical_cfm(bredr_chan, hchan, 0);
5400 hci_conn_hold(hcon);
5402 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event: find the AMP channel by
 * logical-link handle and tear it down with the given reason.
 */
5406 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5407 struct sk_buff *skb)
5409 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5410 struct hci_chan *hchan;
5412 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5413 le16_to_cpu(ev->handle), ev->status);
5420 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5421 if (!hchan || !hchan->amp)
5424 amp_destroy_logical_link(hchan, ev->reason);
5427 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: close the matching
 * AMP-type connection and notify upper layers of the disconnect reason.
 */
5430 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5431 struct sk_buff *skb)
5433 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5434 struct hci_conn *hcon;
5436 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5443 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5444 if (hcon && hcon->type == AMP_LINK) {
5445 hcon->state = BT_CLOSED;
5446 hci_disconn_cfm(hcon, ev->reason);
5450 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address pairs on a new LE connection.
 * The branch structure depends on our role: as initiator we are the
 * "init" side and the peer the "resp" side; as responder it is reversed
 * and default connection-interval bounds are seeded for the parameter
 * update check.  A controller-provided Local RPA overrides hdev->rpa.
 * (Elided extract: the role test and some braces are not visible here.)
 */
5454 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5455 u8 bdaddr_type, bdaddr_t *local_rpa)
5458 conn->dst_type = bdaddr_type;
5459 conn->resp_addr_type = bdaddr_type;
5460 bacpy(&conn->resp_addr, bdaddr);
5462 /* Check if the controller has set a Local RPA then it must be
5463 * used instead or hdev->rpa.
5465 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5466 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5467 bacpy(&conn->init_addr, local_rpa);
5468 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5469 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5470 bacpy(&conn->init_addr, &conn->hdev->rpa);
5472 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5473 &conn->init_addr_type);
/* Responder role: our own advertising address is the resp side */
5476 conn->resp_addr_type = conn->hdev->adv_addr_type;
5477 /* Check if the controller has set a Local RPA then it must be
5478 * used instead or hdev->rpa.
5480 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5481 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5482 bacpy(&conn->resp_addr, local_rpa);
5483 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5484 /* In case of ext adv, resp_addr will be updated in
5485 * Adv Terminated event.
5487 if (!ext_adv_capable(conn->hdev))
5488 bacpy(&conn->resp_addr,
5489 &conn->hdev->random_addr);
5491 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5494 conn->init_addr_type = bdaddr_type;
5495 bacpy(&conn->init_addr, bdaddr);
5497 /* For incoming connections, set the default minimum
5498 * and maximum connection interval. They will be used
5499 * to check if the parameters are in range and if not
5500 * trigger the connection update procedure.
5502 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5503 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE (Enhanced) Connection Complete: resolve or create
 * the hci_conn, fix up initiator/responder addresses, map the connection
 * address back to an identity address via the IRK store, handle
 * controller-based address-resolution types, check the reject list,
 * store the connection parameters and either request remote features or
 * go straight to BT_CONNECTED.  Also clears any now-satisfied pending
 * connection params and re-evaluates background scanning.  (Elided
 * extract: status checks, role test and several braces/goto targets from
 * the original are not visible here.)
 */
5507 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5508 bdaddr_t *bdaddr, u8 bdaddr_type,
5509 bdaddr_t *local_rpa, u8 role, u16 handle,
5510 u16 interval, u16 latency,
5511 u16 supervision_timeout)
5513 struct hci_conn_params *params;
5514 struct hci_conn *conn;
5515 struct smp_irk *irk;
5520 /* All controllers implicitly stop advertising in the event of a
5521 * connection, so ensure that the state bit is cleared.
5523 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5525 conn = hci_lookup_le_connect(hdev);
5527 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5529 bt_dev_err(hdev, "no memory for new connection");
5533 conn->dst_type = bdaddr_type;
5535 /* If we didn't have a hci_conn object previously
5536 * but we're in central role this must be something
5537 * initiated using an accept list. Since accept list based
5538 * connections are not "first class citizens" we don't
5539 * have full tracking of them. Therefore, we go ahead
5540 * with a "best effort" approach of determining the
5541 * initiator address based on the HCI_PRIVACY flag.
5544 conn->resp_addr_type = bdaddr_type;
5545 bacpy(&conn->resp_addr, bdaddr);
5546 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5547 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5548 bacpy(&conn->init_addr, &hdev->rpa);
5550 hci_copy_identity_address(hdev,
5552 &conn->init_addr_type);
5557 /* LE auto connect */
5558 bacpy(&conn->dst, bdaddr);
/* Stop the connection-attempt timeout now that the event arrived */
5560 cancel_delayed_work(&conn->le_conn_timeout);
5563 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5565 /* Lookup the identity address from the stored connection
5566 * address and address type.
5568 * When establishing connections to an identity address, the
5569 * connection procedure will store the resolvable random
5570 * address first. Now if it can be converted back into the
5571 * identity address, start using the identity address from
5574 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5576 bacpy(&conn->dst, &irk->bdaddr);
5577 conn->dst_type = irk->addr_type;
5580 /* When using controller based address resolution, then the new
5581 * address types 0x02 and 0x03 are used. These types need to be
5582 * converted back into either public address or random address type
5584 if (use_ll_privacy(hdev) &&
5585 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5586 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5587 switch (conn->dst_type) {
5588 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5589 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5591 case ADDR_LE_DEV_RANDOM_RESOLVED:
5592 conn->dst_type = ADDR_LE_DEV_RANDOM;
/* Non-zero status: the connection attempt failed */
5598 hci_le_conn_failed(conn, status);
5602 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5603 addr_type = BDADDR_LE_PUBLIC;
5605 addr_type = BDADDR_LE_RANDOM;
5607 /* Drop the connection if the device is blocked */
5608 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5609 hci_conn_drop(conn);
5613 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5614 mgmt_device_connected(hdev, conn, NULL, 0);
5616 conn->sec_level = BT_SECURITY_LOW;
5617 conn->handle = handle;
5618 conn->state = BT_CONFIG;
5620 /* Store current advertising instance as connection advertising instance
5621 * when sotfware rotation is in use so it can be re-enabled when
5624 if (!ext_adv_capable(hdev))
5625 conn->adv_instance = hdev->cur_adv_instance;
5627 conn->le_conn_interval = interval;
5628 conn->le_conn_latency = latency;
5629 conn->le_supv_timeout = supervision_timeout;
5631 hci_debugfs_create_conn(conn);
5632 hci_conn_add_sysfs(conn);
5634 /* The remote features procedure is defined for central
5635 * role only. So only in case of an initiated connection
5636 * request the remote features.
5638 * If the local controller supports peripheral-initiated features
5639 * exchange, then requesting the remote features in peripheral
5640 * role is possible. Otherwise just transition into the
5641 * connected state without requesting the remote features.
5644 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5645 struct hci_cp_le_read_remote_features cp;
5647 cp.handle = __cpu_to_le16(conn->handle);
5649 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5652 hci_conn_hold(conn);
5654 conn->state = BT_CONNECTED;
5655 hci_connect_cfm(conn, status);
/* This connection may satisfy a pending auto-connect entry */
5658 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5661 list_del_init(&params->action);
5663 hci_conn_drop(params->conn);
5664 hci_conn_put(params->conn);
5665 params->conn = NULL;
5670 hci_update_background_scan(hdev);
5671 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event: thin wrapper that unpacks the
 * little-endian event fields and delegates to le_conn_complete_evt()
 * (no Local RPA in the legacy event, hence NULL).
 */
5674 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5676 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5678 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5680 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5681 NULL, ev->role, le16_to_cpu(ev->handle),
5682 le16_to_cpu(ev->interval),
5683 le16_to_cpu(ev->latency),
5684 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Enhanced Connection Complete event: same as the legacy variant
 * but passes the controller-reported Local RPA through, and disables
 * controller address resolution afterwards when LL privacy is active.
 */
5687 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5688 struct sk_buff *skb)
5690 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5692 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5694 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5695 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5696 le16_to_cpu(ev->interval),
5697 le16_to_cpu(ev->latency),
5698 le16_to_cpu(ev->supervision_timeout));
5700 if (use_ll_privacy(hdev) &&
5701 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5702 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5703 hci_req_disable_address_resolution(hdev);
/* HCI LE Advertising Set Terminated event: on error remove the finished
 * advertising instance; on success (terminated by a connection) mark the
 * instance disabled, remember it on the resulting connection for later
 * re-enable, and fix up conn->resp_addr with the set's random address
 * when needed.  (Elided extract; status/NULL checks not all visible.)
 */
5706 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5708 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5709 struct hci_conn *conn;
5710 struct adv_info *adv;
5712 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5714 adv = hci_find_adv_instance(hdev, ev->handle);
5720 /* Remove advertising as it has been terminated */
5721 hci_remove_adv_instance(hdev, ev->handle);
5722 mgmt_advertising_removed(NULL, hdev, ev->handle);
5728 adv->enabled = false;
5730 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5732 /* Store handle in the connection so the correct advertising
5733 * instance can be re-enabled when disconnected.
5735 conn->adv_instance = ev->handle;
/* resp_addr only needs fixing when still unset and we advertise
 * with a random address */
5737 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5738 bacmp(&conn->resp_addr, BDADDR_ANY))
5742 bacpy(&conn->resp_addr, &hdev->random_addr);
5747 bacpy(&conn->resp_addr, &adv->random_addr);
5751 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5752 struct sk_buff *skb)
5754 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5755 struct hci_conn *conn;
5757 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5764 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5768 hci_dev_unlock(hdev);
5769 mgmt_le_conn_update_failed(hdev, &conn->dst,
5770 conn->type, conn->dst_type, ev->status);
5774 conn->le_conn_interval = le16_to_cpu(ev->interval);
5775 conn->le_conn_latency = le16_to_cpu(ev->latency);
5776 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5779 hci_dev_unlock(hdev);
5782 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
5783 conn->dst_type, conn->le_conn_interval,
5784 conn->le_conn_latency, conn->le_supv_timeout);
5788 /* This function requires the caller holds hdev->lock */
5789 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5791 u8 addr_type, u8 adv_type,
5792 bdaddr_t *direct_rpa)
5794 struct hci_conn *conn;
5795 struct hci_conn_params *params;
5797 /* If the event is not connectable don't proceed further */
5798 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5801 /* Ignore if the device is blocked */
5802 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5805 /* Most controller will fail if we try to create new connections
5806 * while we have an existing one in peripheral role.
5808 if (hdev->conn_hash.le_num_peripheral > 0 &&
5809 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5810 !(hdev->le_states[3] & 0x10)))
5813 /* If we're not connectable only connect devices that we have in
5814 * our pend_le_conns list.
5816 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5821 if (!params->explicit_connect) {
5822 switch (params->auto_connect) {
5823 case HCI_AUTO_CONN_DIRECT:
5824 /* Only devices advertising with ADV_DIRECT_IND are
5825 * triggering a connection attempt. This is allowing
5826 * incoming connections from peripheral devices.
5828 if (adv_type != LE_ADV_DIRECT_IND)
5831 case HCI_AUTO_CONN_ALWAYS:
5832 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5833 * are triggering a connection attempt. This means
5834 * that incoming connections from peripheral device are
5835 * accepted and also outgoing connections to peripheral
5836 * devices are established when found.
5844 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5845 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5847 if (!IS_ERR(conn)) {
5848 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5849 * by higher layer that tried to connect, if no then
5850 * store the pointer since we don't really have any
5851 * other owner of the object besides the params that
5852 * triggered it. This way we can abort the connection if
5853 * the parameters get removed and keep the reference
5854 * count consistent once the connection is established.
5857 if (!params->explicit_connect)
5858 params->conn = hci_conn_get(conn);
5863 switch (PTR_ERR(conn)) {
5865 /* If hci_connect() returns -EBUSY it means there is already
5866 * an LE connection attempt going on. Since controllers don't
5867 * support more than one connection attempt at the time, we
5868 * don't consider this an error case.
5872 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
5879 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5880 u8 bdaddr_type, bdaddr_t *direct_addr,
5881 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5885 struct discovery_state *d = &hdev->discovery;
5887 struct smp_irk *irk;
5888 struct hci_conn *conn;
5897 case LE_ADV_DIRECT_IND:
5898 case LE_ADV_SCAN_IND:
5899 case LE_ADV_NONCONN_IND:
5900 case LE_ADV_SCAN_RSP:
5903 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5904 "type: 0x%02x", type);
5908 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5909 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
5913 /* Find the end of the data in case the report contains padded zero
5914 * bytes at the end causing an invalid length value.
5916 * When data is NULL, len is 0 so there is no need for extra ptr
5917 * check as 'ptr < data + 0' is already false in such case.
5919 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5920 if (ptr + 1 + *ptr > data + len)
5924 /* Adjust for actual length. This handles the case when remote
5925 * device is advertising with incorrect data length.
5929 /* If the direct address is present, then this report is from
5930 * a LE Direct Advertising Report event. In that case it is
5931 * important to see if the address is matching the local
5932 * controller address.
5935 /* Only resolvable random addresses are valid for these
5936 * kind of reports and others can be ignored.
5938 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5941 /* If the controller is not using resolvable random
5942 * addresses, then this report can be ignored.
5944 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5947 /* If the local IRK of the controller does not match
5948 * with the resolvable random address provided, then
5949 * this report can be ignored.
5951 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5955 /* Check if we need to convert to identity address */
5956 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5958 bdaddr = &irk->bdaddr;
5959 bdaddr_type = irk->addr_type;
5962 /* Check if we have been requested to connect to this device.
5964 * direct_addr is set only for directed advertising reports (it is NULL
5965 * for advertising reports) and is already verified to be RPA above.
5967 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5969 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5970 /* Store report for later inclusion by
5971 * mgmt_device_connected
5973 memcpy(conn->le_adv_data, data, len);
5974 conn->le_adv_data_len = len;
5977 /* Passive scanning shouldn't trigger any device found events,
5978 * except for devices marked as CONN_REPORT for which we do send
5979 * device found events, or advertisement monitoring requested.
5981 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5982 if (type == LE_ADV_DIRECT_IND)
5986 /* Handle all adv packet in platform */
5987 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5988 bdaddr, bdaddr_type) &&
5989 idr_is_empty(&hdev->adv_monitors_idr))
5993 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5994 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5998 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5999 rssi, flags, data, len, NULL, 0, type);
6001 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6002 rssi, flags, data, len, NULL, 0);
6007 /* When receiving non-connectable or scannable undirected
6008 * advertising reports, this means that the remote device is
6009 * not connectable and then clearly indicate this in the
6010 * device found event.
6012 * When receiving a scan response, then there is no way to
6013 * know if the remote device is connectable or not. However
6014 * since scan responses are merged with a previously seen
6015 * advertising report, the flags field from that report
6018 * In the really unlikely case that a controller get confused
6019 * and just sends a scan response event, then it is marked as
6020 * not connectable as well.
6022 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
6023 type == LE_ADV_SCAN_RSP)
6024 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6029 /* Disable adv ind and scan rsp merging */
6030 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6031 rssi, flags, data, len, NULL, 0, type);
6033 /* If there's nothing pending either store the data from this
6034 * event or send an immediate device found event if the data
6035 * should not be stored for later.
6037 if (!ext_adv && !has_pending_adv_report(hdev)) {
6038 /* If the report will trigger a SCAN_REQ store it for
6041 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6042 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6043 rssi, flags, data, len);
6047 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6048 rssi, flags, data, len, NULL, 0);
6052 /* Check if the pending report is for the same device as the new one */
6053 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6054 bdaddr_type == d->last_adv_addr_type);
6056 /* If the pending data doesn't match this report or this isn't a
6057 * scan response (e.g. we got a duplicate ADV_IND) then force
6058 * sending of the pending data.
6060 if (type != LE_ADV_SCAN_RSP || !match) {
6061 /* Send out whatever is in the cache, but skip duplicates */
6063 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6064 d->last_adv_addr_type, NULL,
6065 d->last_adv_rssi, d->last_adv_flags,
6067 d->last_adv_data_len, NULL, 0);
6069 /* If the new report will trigger a SCAN_REQ store it for
6072 if (!ext_adv && (type == LE_ADV_IND ||
6073 type == LE_ADV_SCAN_IND)) {
6074 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6075 rssi, flags, data, len);
6079 /* The advertising reports cannot be merged, so clear
6080 * the pending report and send out a device found event.
6082 clear_pending_adv_report(hdev);
6083 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6084 rssi, flags, data, len, NULL, 0);
6088 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6089 * the new event is a SCAN_RSP. We can therefore proceed with
6090 * sending a merged device found event.
6092 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6093 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6094 d->last_adv_data, d->last_adv_data_len, data, len);
6095 clear_pending_adv_report(hdev);
6099 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
6101 u8 num_reports = skb->data[0];
6102 void *ptr = &skb->data[1];
6106 while (num_reports--) {
6107 struct hci_ev_le_advertising_info *ev = ptr;
6110 if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
6111 bt_dev_err(hdev, "Malicious advertising data.");
6115 if (ev->length <= HCI_MAX_AD_LENGTH &&
6116 ev->data + ev->length <= skb_tail_pointer(skb)) {
6117 rssi = ev->data[ev->length];
6118 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6119 ev->bdaddr_type, NULL, 0, rssi,
6120 ev->data, ev->length, false);
6122 bt_dev_err(hdev, "Dropping invalid advertising data");
6125 ptr += sizeof(*ev) + ev->length + 1;
6128 hci_dev_unlock(hdev);
6131 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6133 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6135 case LE_LEGACY_ADV_IND:
6137 case LE_LEGACY_ADV_DIRECT_IND:
6138 return LE_ADV_DIRECT_IND;
6139 case LE_LEGACY_ADV_SCAN_IND:
6140 return LE_ADV_SCAN_IND;
6141 case LE_LEGACY_NONCONN_IND:
6142 return LE_ADV_NONCONN_IND;
6143 case LE_LEGACY_SCAN_RSP_ADV:
6144 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6145 return LE_ADV_SCAN_RSP;
6151 if (evt_type & LE_EXT_ADV_CONN_IND) {
6152 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6153 return LE_ADV_DIRECT_IND;
6158 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6159 return LE_ADV_SCAN_RSP;
6161 if (evt_type & LE_EXT_ADV_SCAN_IND)
6162 return LE_ADV_SCAN_IND;
6164 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6165 evt_type & LE_EXT_ADV_DIRECT_IND)
6166 return LE_ADV_NONCONN_IND;
6169 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6172 return LE_ADV_INVALID;
6175 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
6177 u8 num_reports = skb->data[0];
6178 void *ptr = &skb->data[1];
6182 while (num_reports--) {
6183 struct hci_ev_le_ext_adv_report *ev = ptr;
6187 evt_type = __le16_to_cpu(ev->evt_type);
6188 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6189 if (legacy_evt_type != LE_ADV_INVALID) {
6190 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
6191 ev->bdaddr_type, NULL, 0, ev->rssi,
6192 ev->data, ev->length,
6193 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
6196 ptr += sizeof(*ev) + ev->length;
6199 hci_dev_unlock(hdev);
6202 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
6203 struct sk_buff *skb)
6205 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
6206 struct hci_conn *conn;
6208 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6212 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6215 memcpy(conn->features[0], ev->features, 8);
6217 if (conn->state == BT_CONFIG) {
6220 /* If the local controller supports peripheral-initiated
6221 * features exchange, but the remote controller does
6222 * not, then it is possible that the error code 0x1a
6223 * for unsupported remote feature gets returned.
6225 * In this specific case, allow the connection to
6226 * transition into connected state and mark it as
6229 if (!conn->out && ev->status == 0x1a &&
6230 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6233 status = ev->status;
6235 conn->state = BT_CONNECTED;
6236 hci_connect_cfm(conn, status);
6237 hci_conn_drop(conn);
6241 hci_dev_unlock(hdev);
6244 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
6246 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
6247 struct hci_cp_le_ltk_reply cp;
6248 struct hci_cp_le_ltk_neg_reply neg;
6249 struct hci_conn *conn;
6250 struct smp_ltk *ltk;
6252 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
6256 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6260 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6264 if (smp_ltk_is_sc(ltk)) {
6265 /* With SC both EDiv and Rand are set to zero */
6266 if (ev->ediv || ev->rand)
6269 /* For non-SC keys check that EDiv and Rand match */
6270 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6274 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6275 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6276 cp.handle = cpu_to_le16(conn->handle);
6278 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6280 conn->enc_key_size = ltk->enc_size;
6282 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6284 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6285 * temporary key used to encrypt a connection following
6286 * pairing. It is used during the Encrypted Session Setup to
6287 * distribute the keys. Later, security can be re-established
6288 * using a distributed LTK.
6290 if (ltk->type == SMP_STK) {
6291 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6292 list_del_rcu(<k->list);
6293 kfree_rcu(ltk, rcu);
6295 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6298 hci_dev_unlock(hdev);
6303 neg.handle = ev->handle;
6304 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6305 hci_dev_unlock(hdev);
6308 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6311 struct hci_cp_le_conn_param_req_neg_reply cp;
6313 cp.handle = cpu_to_le16(handle);
6316 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6320 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
6321 struct sk_buff *skb)
6323 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
6324 struct hci_cp_le_conn_param_req_reply cp;
6325 struct hci_conn *hcon;
6326 u16 handle, min, max, latency, timeout;
6328 handle = le16_to_cpu(ev->handle);
6329 min = le16_to_cpu(ev->interval_min);
6330 max = le16_to_cpu(ev->interval_max);
6331 latency = le16_to_cpu(ev->latency);
6332 timeout = le16_to_cpu(ev->timeout);
6334 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6335 if (!hcon || hcon->state != BT_CONNECTED)
6336 return send_conn_param_neg_reply(hdev, handle,
6337 HCI_ERROR_UNKNOWN_CONN_ID);
6339 if (hci_check_conn_params(min, max, latency, timeout))
6340 return send_conn_param_neg_reply(hdev, handle,
6341 HCI_ERROR_INVALID_LL_PARAMS);
6343 if (hcon->role == HCI_ROLE_MASTER) {
6344 struct hci_conn_params *params;
6349 params = hci_conn_params_lookup(hdev, &hcon->dst,
6352 params->conn_min_interval = min;
6353 params->conn_max_interval = max;
6354 params->conn_latency = latency;
6355 params->supervision_timeout = timeout;
6361 hci_dev_unlock(hdev);
6363 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6364 store_hint, min, max, latency, timeout);
6367 cp.handle = ev->handle;
6368 cp.interval_min = ev->interval_min;
6369 cp.interval_max = ev->interval_max;
6370 cp.latency = ev->latency;
6371 cp.timeout = ev->timeout;
6375 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6378 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
6379 struct sk_buff *skb)
6381 u8 num_reports = skb->data[0];
6382 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
6384 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6389 for (; num_reports; num_reports--, ev++)
6390 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6391 ev->bdaddr_type, &ev->direct_addr,
6392 ev->direct_addr_type, ev->rssi, NULL, 0,
6395 hci_dev_unlock(hdev);
6398 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6400 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6401 struct hci_conn *conn;
6403 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6410 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6414 conn->le_tx_phy = ev->tx_phy;
6415 conn->le_rx_phy = ev->rx_phy;
6418 hci_dev_unlock(hdev);
6421 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
6423 struct hci_ev_le_meta *le_ev = (void *) skb->data;
6425 skb_pull(skb, sizeof(*le_ev));
6427 switch (le_ev->subevent) {
6428 case HCI_EV_LE_CONN_COMPLETE:
6429 hci_le_conn_complete_evt(hdev, skb);
6432 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6433 hci_le_conn_update_complete_evt(hdev, skb);
6436 case HCI_EV_LE_ADVERTISING_REPORT:
6437 hci_le_adv_report_evt(hdev, skb);
6440 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6441 hci_le_remote_feat_complete_evt(hdev, skb);
6444 case HCI_EV_LE_LTK_REQ:
6445 hci_le_ltk_request_evt(hdev, skb);
6448 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6449 hci_le_remote_conn_param_req_evt(hdev, skb);
6452 case HCI_EV_LE_DIRECT_ADV_REPORT:
6453 hci_le_direct_adv_report_evt(hdev, skb);
6456 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6457 hci_le_phy_update_evt(hdev, skb);
6460 case HCI_EV_LE_EXT_ADV_REPORT:
6461 hci_le_ext_adv_report_evt(hdev, skb);
6464 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6465 hci_le_enh_conn_complete_evt(hdev, skb);
6468 case HCI_EV_LE_EXT_ADV_SET_TERM:
6469 hci_le_ext_adv_term_evt(hdev, skb);
6472 case HCI_EV_LE_DATA_LEN_CHANGE:
6473 hci_le_data_length_changed_complete_evt(hdev, skb);
6482 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6483 u8 event, struct sk_buff *skb)
6485 struct hci_ev_cmd_complete *ev;
6486 struct hci_event_hdr *hdr;
6491 if (skb->len < sizeof(*hdr)) {
6492 bt_dev_err(hdev, "too short HCI event");
6496 hdr = (void *) skb->data;
6497 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6500 if (hdr->evt != event)
6505 /* Check if request ended in Command Status - no way to retrieve
6506 * any extra parameters in this case.
6508 if (hdr->evt == HCI_EV_CMD_STATUS)
6511 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6512 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6517 if (skb->len < sizeof(*ev)) {
6518 bt_dev_err(hdev, "too short cmd_complete event");
6522 ev = (void *) skb->data;
6523 skb_pull(skb, sizeof(*ev));
6525 if (opcode != __le16_to_cpu(ev->opcode)) {
6526 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6527 __le16_to_cpu(ev->opcode));
6534 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6535 struct sk_buff *skb)
6537 struct hci_ev_le_advertising_info *adv;
6538 struct hci_ev_le_direct_adv_info *direct_adv;
6539 struct hci_ev_le_ext_adv_report *ext_adv;
6540 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6541 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6545 /* If we are currently suspended and this is the first BT event seen,
6546 * save the wake reason associated with the event.
6548 if (!hdev->suspended || hdev->wake_reason)
6551 /* Default to remote wake. Values for wake_reason are documented in the
6552 * Bluez mgmt api docs.
6554 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6556 /* Once configured for remote wakeup, we should only wake up for
6557 * reconnections. It's useful to see which device is waking us up so
6558 * keep track of the bdaddr of the connection event that woke us up.
6560 if (event == HCI_EV_CONN_REQUEST) {
6561 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6562 hdev->wake_addr_type = BDADDR_BREDR;
6563 } else if (event == HCI_EV_CONN_COMPLETE) {
6564 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6565 hdev->wake_addr_type = BDADDR_BREDR;
6566 } else if (event == HCI_EV_LE_META) {
6567 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6568 u8 subevent = le_ev->subevent;
6569 u8 *ptr = &skb->data[sizeof(*le_ev)];
6570 u8 num_reports = *ptr;
6572 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6573 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6574 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6576 adv = (void *)(ptr + 1);
6577 direct_adv = (void *)(ptr + 1);
6578 ext_adv = (void *)(ptr + 1);
6581 case HCI_EV_LE_ADVERTISING_REPORT:
6582 bacpy(&hdev->wake_addr, &adv->bdaddr);
6583 hdev->wake_addr_type = adv->bdaddr_type;
6585 case HCI_EV_LE_DIRECT_ADV_REPORT:
6586 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6587 hdev->wake_addr_type = direct_adv->bdaddr_type;
6589 case HCI_EV_LE_EXT_ADV_REPORT:
6590 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6591 hdev->wake_addr_type = ext_adv->bdaddr_type;
6596 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6600 hci_dev_unlock(hdev);
6603 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6605 struct hci_event_hdr *hdr = (void *) skb->data;
6606 hci_req_complete_t req_complete = NULL;
6607 hci_req_complete_skb_t req_complete_skb = NULL;
6608 struct sk_buff *orig_skb = NULL;
6609 u8 status = 0, event = hdr->evt, req_evt = 0;
6610 u16 opcode = HCI_OP_NOP;
6613 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6617 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6618 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6619 opcode = __le16_to_cpu(cmd_hdr->opcode);
6620 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6625 /* If it looks like we might end up having to call
6626 * req_complete_skb, store a pristine copy of the skb since the
6627 * various handlers may modify the original one through
6628 * skb_pull() calls, etc.
6630 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6631 event == HCI_EV_CMD_COMPLETE)
6632 orig_skb = skb_clone(skb, GFP_KERNEL);
6634 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6636 /* Store wake reason if we're suspended */
6637 hci_store_wake_reason(hdev, event, skb);
6640 case HCI_EV_INQUIRY_COMPLETE:
6641 hci_inquiry_complete_evt(hdev, skb);
6644 case HCI_EV_INQUIRY_RESULT:
6645 hci_inquiry_result_evt(hdev, skb);
6648 case HCI_EV_CONN_COMPLETE:
6649 hci_conn_complete_evt(hdev, skb);
6652 case HCI_EV_CONN_REQUEST:
6653 hci_conn_request_evt(hdev, skb);
6656 case HCI_EV_DISCONN_COMPLETE:
6657 hci_disconn_complete_evt(hdev, skb);
6660 case HCI_EV_AUTH_COMPLETE:
6661 hci_auth_complete_evt(hdev, skb);
6664 case HCI_EV_REMOTE_NAME:
6665 hci_remote_name_evt(hdev, skb);
6668 case HCI_EV_ENCRYPT_CHANGE:
6669 hci_encrypt_change_evt(hdev, skb);
6672 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6673 hci_change_link_key_complete_evt(hdev, skb);
6676 case HCI_EV_REMOTE_FEATURES:
6677 hci_remote_features_evt(hdev, skb);
6680 case HCI_EV_CMD_COMPLETE:
6681 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6682 &req_complete, &req_complete_skb);
6685 case HCI_EV_CMD_STATUS:
6686 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6690 case HCI_EV_HARDWARE_ERROR:
6691 hci_hardware_error_evt(hdev, skb);
6694 case HCI_EV_ROLE_CHANGE:
6695 hci_role_change_evt(hdev, skb);
6698 case HCI_EV_NUM_COMP_PKTS:
6699 hci_num_comp_pkts_evt(hdev, skb);
6702 case HCI_EV_MODE_CHANGE:
6703 hci_mode_change_evt(hdev, skb);
6706 case HCI_EV_PIN_CODE_REQ:
6707 hci_pin_code_request_evt(hdev, skb);
6710 case HCI_EV_LINK_KEY_REQ:
6711 hci_link_key_request_evt(hdev, skb);
6714 case HCI_EV_LINK_KEY_NOTIFY:
6715 hci_link_key_notify_evt(hdev, skb);
6718 case HCI_EV_CLOCK_OFFSET:
6719 hci_clock_offset_evt(hdev, skb);
6722 case HCI_EV_PKT_TYPE_CHANGE:
6723 hci_pkt_type_change_evt(hdev, skb);
6726 case HCI_EV_PSCAN_REP_MODE:
6727 hci_pscan_rep_mode_evt(hdev, skb);
6730 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6731 hci_inquiry_result_with_rssi_evt(hdev, skb);
6734 case HCI_EV_REMOTE_EXT_FEATURES:
6735 hci_remote_ext_features_evt(hdev, skb);
6738 case HCI_EV_SYNC_CONN_COMPLETE:
6739 hci_sync_conn_complete_evt(hdev, skb);
6742 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6743 hci_extended_inquiry_result_evt(hdev, skb);
6746 case HCI_EV_KEY_REFRESH_COMPLETE:
6747 hci_key_refresh_complete_evt(hdev, skb);
6750 case HCI_EV_IO_CAPA_REQUEST:
6751 hci_io_capa_request_evt(hdev, skb);
6754 case HCI_EV_IO_CAPA_REPLY:
6755 hci_io_capa_reply_evt(hdev, skb);
6758 case HCI_EV_USER_CONFIRM_REQUEST:
6759 hci_user_confirm_request_evt(hdev, skb);
6762 case HCI_EV_USER_PASSKEY_REQUEST:
6763 hci_user_passkey_request_evt(hdev, skb);
6766 case HCI_EV_USER_PASSKEY_NOTIFY:
6767 hci_user_passkey_notify_evt(hdev, skb);
6770 case HCI_EV_KEYPRESS_NOTIFY:
6771 hci_keypress_notify_evt(hdev, skb);
6774 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6775 hci_simple_pair_complete_evt(hdev, skb);
6778 case HCI_EV_REMOTE_HOST_FEATURES:
6779 hci_remote_host_features_evt(hdev, skb);
6782 case HCI_EV_LE_META:
6783 hci_le_meta_evt(hdev, skb);
6786 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6787 hci_remote_oob_data_request_evt(hdev, skb);
6790 #if IS_ENABLED(CONFIG_BT_HS)
6791 case HCI_EV_CHANNEL_SELECTED:
6792 hci_chan_selected_evt(hdev, skb);
6795 case HCI_EV_PHY_LINK_COMPLETE:
6796 hci_phy_link_complete_evt(hdev, skb);
6799 case HCI_EV_LOGICAL_LINK_COMPLETE:
6800 hci_loglink_complete_evt(hdev, skb);
6803 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6804 hci_disconn_loglink_complete_evt(hdev, skb);
6807 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6808 hci_disconn_phylink_complete_evt(hdev, skb);
6812 case HCI_EV_NUM_COMP_BLOCKS:
6813 hci_num_comp_blocks_evt(hdev, skb);
6817 case HCI_EV_VENDOR_SPECIFIC:
6818 hci_vendor_specific_evt(hdev, skb);
6822 msft_vendor_evt(hdev, skb);
6827 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6832 req_complete(hdev, status, opcode);
6833 } else if (req_complete_skb) {
6834 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6835 kfree_skb(orig_skb);
6838 req_complete_skb(hdev, status, opcode, orig_skb);
6842 kfree_skb(orig_skb);
6844 hdev->stat.evt_rx++;