2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
/* 128-bit all-zero link key, used to detect/reject blank keys. */
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

/* Convert whole seconds to jiffies for delayed-work scheduling. */
#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */
47 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
50 __u8 status = *((__u8 *) skb->data);
52 BT_DBG("%s status 0x%2.2x", hdev->name, status);
54 /* It is possible that we receive Inquiry Complete event right
55 * before we receive Inquiry Cancel Command Complete event, in
56 * which case the latter event should have status of Command
57 * Disallowed (0x0c). This should not be treated as error, since
58 * we actually achieve what Inquiry Cancel wants to achieve,
59 * which is to end the last Inquiry session.
61 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
62 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
71 clear_bit(HCI_INQUIRY, &hdev->flags);
72 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
73 wake_up_bit(&hdev->flags, HCI_INQUIRY);
76 /* Set discovery state to stopped if we're not doing LE active
79 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
80 hdev->le_scan_type != LE_SCAN_ACTIVE)
81 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
84 hci_conn_check_pending(hdev);
87 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
89 __u8 status = *((__u8 *) skb->data);
91 BT_DBG("%s status 0x%2.2x", hdev->name, status);
96 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
99 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
101 __u8 status = *((__u8 *) skb->data);
103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
108 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
110 hci_conn_check_pending(hdev);
113 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
116 BT_DBG("%s", hdev->name);
119 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
121 struct hci_rp_role_discovery *rp = (void *) skb->data;
122 struct hci_conn *conn;
124 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
131 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
133 conn->role = rp->role;
135 hci_dev_unlock(hdev);
138 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
140 struct hci_rp_read_link_policy *rp = (void *) skb->data;
141 struct hci_conn *conn;
143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
152 conn->link_policy = __le16_to_cpu(rp->policy);
154 hci_dev_unlock(hdev);
157 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
159 struct hci_rp_write_link_policy *rp = (void *) skb->data;
160 struct hci_conn *conn;
163 struct hci_cp_write_link_policy cp;
164 struct hci_conn *sco_conn;
167 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
172 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
178 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
180 conn->link_policy = get_unaligned_le16(sent + 2);
183 sco_conn = hci_conn_hash_lookup_sco(hdev);
184 if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
185 conn->link_policy & HCI_LP_SNIFF) {
186 BT_ERR("SNIFF is not allowed during sco connection");
187 cp.handle = __cpu_to_le16(conn->handle);
188 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
189 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
193 hci_dev_unlock(hdev);
196 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
199 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
201 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
206 hdev->link_policy = __le16_to_cpu(rp->policy);
209 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
212 __u8 status = *((__u8 *) skb->data);
215 BT_DBG("%s status 0x%2.2x", hdev->name, status);
220 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
224 hdev->link_policy = get_unaligned_le16(sent);
227 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
229 __u8 status = *((__u8 *) skb->data);
231 BT_DBG("%s status 0x%2.2x", hdev->name, status);
233 clear_bit(HCI_RESET, &hdev->flags);
238 /* Reset all non-persistent flags */
239 hci_dev_clear_volatile_flags(hdev);
241 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
243 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
244 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
246 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
247 hdev->adv_data_len = 0;
249 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
250 hdev->scan_rsp_data_len = 0;
252 hdev->le_scan_type = LE_SCAN_PASSIVE;
254 hdev->ssp_debug_mode = 0;
256 hci_bdaddr_list_clear(&hdev->le_accept_list);
257 hci_bdaddr_list_clear(&hdev->le_resolv_list);
260 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
263 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
264 struct hci_cp_read_stored_link_key *sent;
266 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
272 if (!rp->status && sent->read_all == 0x01) {
273 hdev->stored_max_keys = rp->max_keys;
274 hdev->stored_num_keys = rp->num_keys;
278 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
281 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
283 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
288 if (rp->num_keys <= hdev->stored_num_keys)
289 hdev->stored_num_keys -= rp->num_keys;
291 hdev->stored_num_keys = 0;
294 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
296 __u8 status = *((__u8 *) skb->data);
299 BT_DBG("%s status 0x%2.2x", hdev->name, status);
301 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
307 if (hci_dev_test_flag(hdev, HCI_MGMT))
308 mgmt_set_local_name_complete(hdev, sent, status);
310 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
312 hci_dev_unlock(hdev);
315 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
317 struct hci_rp_read_local_name *rp = (void *) skb->data;
319 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
324 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
325 hci_dev_test_flag(hdev, HCI_CONFIG))
326 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
329 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
331 __u8 status = *((__u8 *) skb->data);
334 BT_DBG("%s status 0x%2.2x", hdev->name, status);
336 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
343 __u8 param = *((__u8 *) sent);
345 if (param == AUTH_ENABLED)
346 set_bit(HCI_AUTH, &hdev->flags);
348 clear_bit(HCI_AUTH, &hdev->flags);
351 if (hci_dev_test_flag(hdev, HCI_MGMT))
352 mgmt_auth_enable_complete(hdev, status);
354 hci_dev_unlock(hdev);
357 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
359 __u8 status = *((__u8 *) skb->data);
363 BT_DBG("%s status 0x%2.2x", hdev->name, status);
368 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
372 param = *((__u8 *) sent);
375 set_bit(HCI_ENCRYPT, &hdev->flags);
377 clear_bit(HCI_ENCRYPT, &hdev->flags);
380 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
382 __u8 status = *((__u8 *) skb->data);
386 BT_DBG("%s status 0x%2.2x", hdev->name, status);
388 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
392 param = *((__u8 *) sent);
397 hdev->discov_timeout = 0;
401 if (param & SCAN_INQUIRY)
402 set_bit(HCI_ISCAN, &hdev->flags);
404 clear_bit(HCI_ISCAN, &hdev->flags);
406 if (param & SCAN_PAGE)
407 set_bit(HCI_PSCAN, &hdev->flags);
409 clear_bit(HCI_PSCAN, &hdev->flags);
412 hci_dev_unlock(hdev);
415 static void hci_cc_set_event_filter(struct hci_dev *hdev, struct sk_buff *skb)
417 __u8 status = *((__u8 *)skb->data);
418 struct hci_cp_set_event_filter *cp;
421 BT_DBG("%s status 0x%2.2x", hdev->name, status);
426 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
430 cp = (struct hci_cp_set_event_filter *)sent;
432 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
433 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
435 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
438 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
440 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
442 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
447 memcpy(hdev->dev_class, rp->dev_class, 3);
449 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
450 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
453 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
455 __u8 status = *((__u8 *) skb->data);
458 BT_DBG("%s status 0x%2.2x", hdev->name, status);
460 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
467 memcpy(hdev->dev_class, sent, 3);
469 if (hci_dev_test_flag(hdev, HCI_MGMT))
470 mgmt_set_class_of_dev_complete(hdev, sent, status);
472 hci_dev_unlock(hdev);
475 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
477 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
480 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
485 setting = __le16_to_cpu(rp->voice_setting);
487 if (hdev->voice_setting == setting)
490 hdev->voice_setting = setting;
492 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
495 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
498 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
501 __u8 status = *((__u8 *) skb->data);
505 BT_DBG("%s status 0x%2.2x", hdev->name, status);
510 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
514 setting = get_unaligned_le16(sent);
516 if (hdev->voice_setting == setting)
519 hdev->voice_setting = setting;
521 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
524 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
527 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
530 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
532 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
537 hdev->num_iac = rp->num_iac;
539 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
542 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
544 __u8 status = *((__u8 *) skb->data);
545 struct hci_cp_write_ssp_mode *sent;
547 BT_DBG("%s status 0x%2.2x", hdev->name, status);
549 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
557 hdev->features[1][0] |= LMP_HOST_SSP;
559 hdev->features[1][0] &= ~LMP_HOST_SSP;
562 if (hci_dev_test_flag(hdev, HCI_MGMT))
563 mgmt_ssp_enable_complete(hdev, sent->mode, status);
566 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
568 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
571 hci_dev_unlock(hdev);
574 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
576 u8 status = *((u8 *) skb->data);
577 struct hci_cp_write_sc_support *sent;
579 BT_DBG("%s status 0x%2.2x", hdev->name, status);
581 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
589 hdev->features[1][0] |= LMP_HOST_SC;
591 hdev->features[1][0] &= ~LMP_HOST_SC;
594 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
596 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
598 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
601 hci_dev_unlock(hdev);
604 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
606 struct hci_rp_read_local_version *rp = (void *) skb->data;
608 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
614 hci_dev_test_flag(hdev, HCI_CONFIG)) {
615 hdev->hci_ver = rp->hci_ver;
616 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
617 hdev->lmp_ver = rp->lmp_ver;
618 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
619 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
623 static void hci_cc_read_local_commands(struct hci_dev *hdev,
626 struct hci_rp_read_local_commands *rp = (void *) skb->data;
628 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
634 hci_dev_test_flag(hdev, HCI_CONFIG))
635 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
638 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
641 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
642 struct hci_conn *conn;
644 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
653 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
655 hci_dev_unlock(hdev);
658 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
661 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
662 struct hci_conn *conn;
665 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
670 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
676 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
678 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
680 hci_dev_unlock(hdev);
683 static void hci_cc_read_local_features(struct hci_dev *hdev,
686 struct hci_rp_read_local_features *rp = (void *) skb->data;
688 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
693 memcpy(hdev->features, rp->features, 8);
695 /* Adjust default settings according to features
696 * supported by device. */
698 if (hdev->features[0][0] & LMP_3SLOT)
699 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
701 if (hdev->features[0][0] & LMP_5SLOT)
702 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
704 if (hdev->features[0][1] & LMP_HV2) {
705 hdev->pkt_type |= (HCI_HV2);
706 hdev->esco_type |= (ESCO_HV2);
709 if (hdev->features[0][1] & LMP_HV3) {
710 hdev->pkt_type |= (HCI_HV3);
711 hdev->esco_type |= (ESCO_HV3);
714 if (lmp_esco_capable(hdev))
715 hdev->esco_type |= (ESCO_EV3);
717 if (hdev->features[0][4] & LMP_EV4)
718 hdev->esco_type |= (ESCO_EV4);
720 if (hdev->features[0][4] & LMP_EV5)
721 hdev->esco_type |= (ESCO_EV5);
723 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
724 hdev->esco_type |= (ESCO_2EV3);
726 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
727 hdev->esco_type |= (ESCO_3EV3);
729 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
730 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
733 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
736 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
743 if (hdev->max_page < rp->max_page)
744 hdev->max_page = rp->max_page;
746 if (rp->page < HCI_MAX_PAGES)
747 memcpy(hdev->features[rp->page], rp->features, 8);
750 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
753 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
755 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
760 hdev->flow_ctl_mode = rp->mode;
763 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
765 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
767 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
772 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
773 hdev->sco_mtu = rp->sco_mtu;
774 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
775 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
777 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
782 hdev->acl_cnt = hdev->acl_pkts;
783 hdev->sco_cnt = hdev->sco_pkts;
785 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
786 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
789 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
791 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
793 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798 if (test_bit(HCI_INIT, &hdev->flags))
799 bacpy(&hdev->bdaddr, &rp->bdaddr);
801 if (hci_dev_test_flag(hdev, HCI_SETUP))
802 bacpy(&hdev->setup_addr, &rp->bdaddr);
805 static void hci_cc_read_local_pairing_opts(struct hci_dev *hdev,
808 struct hci_rp_read_local_pairing_opts *rp = (void *) skb->data;
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
816 hci_dev_test_flag(hdev, HCI_CONFIG)) {
817 hdev->pairing_opts = rp->pairing_opts;
818 hdev->max_enc_key_size = rp->max_key_size;
822 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
825 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832 if (test_bit(HCI_INIT, &hdev->flags)) {
833 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
834 hdev->page_scan_window = __le16_to_cpu(rp->window);
838 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
841 u8 status = *((u8 *) skb->data);
842 struct hci_cp_write_page_scan_activity *sent;
844 BT_DBG("%s status 0x%2.2x", hdev->name, status);
849 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
853 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
854 hdev->page_scan_window = __le16_to_cpu(sent->window);
857 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
860 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
862 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
867 if (test_bit(HCI_INIT, &hdev->flags))
868 hdev->page_scan_type = rp->type;
871 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
874 u8 status = *((u8 *) skb->data);
877 BT_DBG("%s status 0x%2.2x", hdev->name, status);
882 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
884 hdev->page_scan_type = *type;
887 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
890 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
892 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
897 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
898 hdev->block_len = __le16_to_cpu(rp->block_len);
899 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
901 hdev->block_cnt = hdev->num_blocks;
903 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
904 hdev->block_cnt, hdev->block_len);
907 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
909 struct hci_rp_read_clock *rp = (void *) skb->data;
910 struct hci_cp_read_clock *cp;
911 struct hci_conn *conn;
913 BT_DBG("%s", hdev->name);
915 if (skb->len < sizeof(*rp))
923 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
927 if (cp->which == 0x00) {
928 hdev->clock = le32_to_cpu(rp->clock);
932 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
934 conn->clock = le32_to_cpu(rp->clock);
935 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
939 hci_dev_unlock(hdev);
942 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
945 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
952 hdev->amp_status = rp->amp_status;
953 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
954 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
955 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
956 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
957 hdev->amp_type = rp->amp_type;
958 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
959 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
960 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
961 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
964 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
967 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
969 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
974 hdev->inq_tx_power = rp->tx_power;
977 static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
980 struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
987 hdev->err_data_reporting = rp->err_data_reporting;
990 static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
993 __u8 status = *((__u8 *)skb->data);
994 struct hci_cp_write_def_err_data_reporting *cp;
996 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1001 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1005 hdev->err_data_reporting = cp->err_data_reporting;
1008 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
1010 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
1011 struct hci_cp_pin_code_reply *cp;
1012 struct hci_conn *conn;
1014 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1018 if (hci_dev_test_flag(hdev, HCI_MGMT))
1019 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1024 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1028 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1030 conn->pin_length = cp->pin_len;
1033 hci_dev_unlock(hdev);
1036 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1038 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1040 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1044 if (hci_dev_test_flag(hdev, HCI_MGMT))
1045 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1048 hci_dev_unlock(hdev);
1051 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1052 struct sk_buff *skb)
1054 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1056 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1062 hdev->le_pkts = rp->le_max_pkt;
1064 hdev->le_cnt = hdev->le_pkts;
1066 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1069 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
1070 struct sk_buff *skb)
1072 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
1074 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1079 memcpy(hdev->le_features, rp->features, 8);
1082 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1083 struct sk_buff *skb)
1085 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1087 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1092 hdev->adv_tx_power = rp->tx_power;
1095 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1097 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1099 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1103 if (hci_dev_test_flag(hdev, HCI_MGMT))
1104 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1107 hci_dev_unlock(hdev);
1110 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1111 struct sk_buff *skb)
1113 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1119 if (hci_dev_test_flag(hdev, HCI_MGMT))
1120 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1121 ACL_LINK, 0, rp->status);
1123 hci_dev_unlock(hdev);
1126 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1128 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1134 if (hci_dev_test_flag(hdev, HCI_MGMT))
1135 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1138 hci_dev_unlock(hdev);
1141 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1142 struct sk_buff *skb)
1144 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1146 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1150 if (hci_dev_test_flag(hdev, HCI_MGMT))
1151 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1152 ACL_LINK, 0, rp->status);
1154 hci_dev_unlock(hdev);
1157 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1158 struct sk_buff *skb)
1160 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1165 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1166 struct sk_buff *skb)
1168 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1170 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1173 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1175 __u8 status = *((__u8 *) skb->data);
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1183 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1189 bacpy(&hdev->random_addr, sent);
1191 if (!bacmp(&hdev->rpa, sent)) {
1192 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1193 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1194 secs_to_jiffies(hdev->rpa_timeout));
1197 hci_dev_unlock(hdev);
1200 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1202 __u8 status = *((__u8 *) skb->data);
1203 struct hci_cp_le_set_default_phy *cp;
1205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1210 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1216 hdev->le_tx_def_phys = cp->tx_phys;
1217 hdev->le_rx_def_phys = cp->rx_phys;
1219 hci_dev_unlock(hdev);
1222 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1223 struct sk_buff *skb)
1225 __u8 status = *((__u8 *) skb->data);
1226 struct hci_cp_le_set_adv_set_rand_addr *cp;
1227 struct adv_info *adv;
1232 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1233 /* Update only in case the adv instance since handle 0x00 shall be using
1234 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1235 * non-extended adverting.
1237 if (!cp || !cp->handle)
1242 adv = hci_find_adv_instance(hdev, cp->handle);
1244 bacpy(&adv->random_addr, &cp->bdaddr);
1245 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1246 adv->rpa_expired = false;
1247 queue_delayed_work(hdev->workqueue,
1248 &adv->rpa_expired_cb,
1249 secs_to_jiffies(hdev->rpa_timeout));
1253 hci_dev_unlock(hdev);
1256 static void hci_cc_le_read_transmit_power(struct hci_dev *hdev,
1257 struct sk_buff *skb)
1259 struct hci_rp_le_read_transmit_power *rp = (void *)skb->data;
1261 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1266 hdev->min_le_tx_power = rp->min_le_tx_power;
1267 hdev->max_le_tx_power = rp->max_le_tx_power;
1270 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1272 __u8 *sent, status = *((__u8 *) skb->data);
1274 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1279 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1285 /* If we're doing connection initiation as peripheral. Set a
1286 * timeout in case something goes wrong.
1289 struct hci_conn *conn;
1291 hci_dev_set_flag(hdev, HCI_LE_ADV);
1293 conn = hci_lookup_le_connect(hdev);
1295 queue_delayed_work(hdev->workqueue,
1296 &conn->le_conn_timeout,
1297 conn->conn_timeout);
1299 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1302 hci_dev_unlock(hdev);
1305 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1306 struct sk_buff *skb)
1308 struct hci_cp_le_set_ext_adv_enable *cp;
1309 struct hci_cp_ext_adv_set *set;
1310 __u8 status = *((__u8 *) skb->data);
1311 struct adv_info *adv = NULL, *n;
1313 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1318 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1322 set = (void *)cp->data;
1326 if (cp->num_of_sets)
1327 adv = hci_find_adv_instance(hdev, set->handle);
1330 struct hci_conn *conn;
1332 hci_dev_set_flag(hdev, HCI_LE_ADV);
1335 adv->enabled = true;
1337 conn = hci_lookup_le_connect(hdev);
1339 queue_delayed_work(hdev->workqueue,
1340 &conn->le_conn_timeout,
1341 conn->conn_timeout);
1344 adv->enabled = false;
1345 /* If just one instance was disabled check if there are
1346 * any other instance enabled before clearing HCI_LE_ADV
1348 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1354 /* All instances shall be considered disabled */
1355 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1357 adv->enabled = false;
1360 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1364 hci_dev_unlock(hdev);
1367 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1369 struct hci_cp_le_set_scan_param *cp;
1370 __u8 status = *((__u8 *) skb->data);
1372 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1377 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1383 hdev->le_scan_type = cp->type;
1385 hci_dev_unlock(hdev);
1388 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1389 struct sk_buff *skb)
1391 struct hci_cp_le_set_ext_scan_params *cp;
1392 __u8 status = *((__u8 *) skb->data);
1393 struct hci_cp_le_scan_phy_params *phy_param;
1395 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1400 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1404 phy_param = (void *)cp->data;
1408 hdev->le_scan_type = phy_param->type;
1410 hci_dev_unlock(hdev);
1413 static bool has_pending_adv_report(struct hci_dev *hdev)
1415 struct discovery_state *d = &hdev->discovery;
1417 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1420 static void clear_pending_adv_report(struct hci_dev *hdev)
1422 struct discovery_state *d = &hdev->discovery;
1424 bacpy(&d->last_adv_addr, BDADDR_ANY);
1425 d->last_adv_data_len = 0;
1429 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1430 u8 bdaddr_type, s8 rssi, u32 flags,
1433 struct discovery_state *d = &hdev->discovery;
1435 if (len > HCI_MAX_AD_LENGTH)
1438 bacpy(&d->last_adv_addr, bdaddr);
1439 d->last_adv_addr_type = bdaddr_type;
1440 d->last_adv_rssi = rssi;
1441 d->last_adv_flags = flags;
1442 memcpy(d->last_adv_data, data, len);
1443 d->last_adv_data_len = len;
/* Apply a completed LE Set Scan Enable command: track HCI_LE_SCAN and,
 * on disable, flush any cached advertising report to mgmt and update
 * discovery state.
 *
 * NOTE(review): the switch head, several braces/break statements and
 * the bt_dev_err argument are elided in this extract; the TIZEN_BT
 * #ifdef around the discovery-state call is also missing its
 * #else/#endif lines — confirm against the full source.
 */
static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scanning accumulates reports; start from a
		 * clean slate.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data_len, NULL, 0);

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
#ifndef TIZEN_BT /* The below line is kernel bug. */
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			hci_req_reenable_advertising(hdev);

		bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",

	hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE: fetch the
 * parameters we sent and apply the new scan state.
 * NOTE(review): the status and NULL-cp guards are elided in this
 * extract — confirm against the full source.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);

	le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE: the
 * extended-scan variant of hci_cc_le_set_scan_enable; both funnel into
 * le_set_scan_enable_complete.
 * NOTE(review): status/NULL-cp guards are elided in this extract.
 */
static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
					  struct sk_buff *skb)
	struct hci_cp_le_set_ext_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);

	le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for LE Read Number of Supported Advertising Sets:
 * cache the controller's advertising-set capacity in hdev.
 * NOTE(review): the status guard and part of the BT_DBG argument list
 * are elided in this extract.
 */
static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
					struct sk_buff *skb)
	struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,

	hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete for LE Read Accept (Filter) List Size: cache the
 * controller's accept-list capacity.
 * NOTE(review): the status guard is elided in this extract.
 */
static void hci_cc_le_read_accept_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
	struct hci_rp_le_read_accept_list_size *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	hdev->le_accept_list_size = rp->size;
/* Command Complete for LE Clear Accept List: mirror the controller's
 * cleared list in our host-side copy.
 * NOTE(review): the status guard is elided in this extract.
 */
static void hci_cc_le_clear_accept_list(struct hci_dev *hdev,
					struct sk_buff *skb)
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_bdaddr_list_clear(&hdev->le_accept_list);
/* Command Complete for LE Add To Accept List: add the address we sent
 * to the host-side copy of the accept list.
 * NOTE(review): the status/NULL-sent guards and the bdaddr_type
 * argument of the list add are elided in this extract.
 */
static void hci_cc_le_add_to_accept_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
	struct hci_cp_le_add_to_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);

	hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
/* Command Complete for LE Remove From Accept List: drop the address we
 * sent from the host-side copy of the accept list.
 * NOTE(review): the status/NULL-sent guards and the bdaddr_type
 * argument of the list delete are elided in this extract.
 */
static void hci_cc_le_del_from_accept_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
	struct hci_cp_le_del_from_accept_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);

	hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
/* Command Complete for LE Read Supported States: cache the 8-byte LE
 * state-combination bitmap reported by the controller.
 * NOTE(review): the status guard is elided in this extract.
 */
static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
					    struct sk_buff *skb)
	struct hci_rp_le_read_supported_states *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete for LE Read Default Data Length: cache the default
 * suggested TX octets/time for the data length extension.
 * NOTE(review): the status guard is elided in this extract.
 */
static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
	struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete for LE Write Default Data Length: on success, store
 * the values we sent as the new host-side defaults.
 * NOTE(review): the status/NULL-sent guards are elided in this
 * extract.
 */
static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
					 struct sk_buff *skb)
	struct hci_cp_le_write_def_data_len *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);

	hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
	hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete for LE Add Device To Resolving List: mirror the
 * entry (address plus IRKs) in the host-side resolving list.
 * NOTE(review): the status/NULL-sent guards and the local_irk argument
 * are elided in this extract.
 */
static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
					 struct sk_buff *skb)
	struct hci_cp_le_add_to_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);

	hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
				     sent->bdaddr_type, sent->peer_irk,
/* Command Complete for LE Remove Device From Resolving List: drop the
 * matching entry from the host-side resolving list.
 * NOTE(review): the status/NULL-sent guards and the bdaddr_type
 * argument are elided in this extract.
 */
static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
					   struct sk_buff *skb)
	struct hci_cp_le_del_from_resolv_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);

	hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
/* Command Complete for LE Clear Resolving List: mirror the cleared
 * controller list in the host-side copy.
 * NOTE(review): the status guard is elided in this extract.
 */
static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
					struct sk_buff *skb)
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for LE Read Resolving List Size: cache the
 * controller's resolving-list capacity.
 * NOTE(review): the status guard is elided in this extract.
 */
static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
					    struct sk_buff *skb)
	struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	hdev->le_resolv_list_size = rp->size;
/* Command Complete for LE Set Address Resolution Enable: track whether
 * controller-based RPA resolution is active via HCI_LL_RPA_RESOLUTION.
 * NOTE(review): the status/NULL-sent guards, hci_dev_lock() and the
 * if/else around the flag set/clear are elided in this extract.
 */
static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
						 struct sk_buff *skb)
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);

	hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
	hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);

	hci_dev_unlock(hdev);
/* Command Complete for LE Read Maximum Data Length: cache the
 * controller's TX/RX octet and time maxima for the data length
 * extension.
 * NOTE(review): the status guard is elided in this extract.
 */
static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
					struct sk_buff *skb)
	struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
	hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
	hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
	hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete for Write LE Host Supported: mirror the LE and
 * simultaneous-LE/BR-EDR host feature bits and the HCI_LE_ENABLED /
 * HCI_ADVERTISING flags according to the parameters we sent.
 * NOTE(review): the status/NULL-sent guards, hci_dev_lock() and the
 * if/else conditions on sent->le and sent->simul are elided in this
 * extract.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);

	hdev->features[1][0] |= LMP_HOST_LE;
	hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	hdev->features[1][0] &= ~LMP_HOST_LE;
	hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
/* Command Complete for LE Set Advertising Parameters: remember which
 * own-address type advertising was configured with.
 * NOTE(review): the status/NULL-cp guards and hci_dev_lock() are
 * elided in this extract.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);

	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
/* Command Complete for LE Set Extended Advertising Parameters: record
 * the own-address type and the TX power selected by the controller,
 * both in hdev (instance 0) and in the matching adv instance, then
 * refresh the advertising data now that TX power is known.
 * NOTE(review): the status/NULL-cp guards, hci_dev_lock() and the
 * NULL check on adv_instance are elided in this extract.
 */
static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
	struct hci_cp_le_set_ext_adv_params *cp;
	struct adv_info *adv_instance;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);

	hdev->adv_addr_type = cp->own_addr_type;

	/* Store in hdev for instance 0 */
	hdev->adv_tx_power = rp->tx_power;

	adv_instance = hci_find_adv_instance(hdev, cp->handle);
	adv_instance->tx_power = rp->tx_power;

	/* Update adv data as tx power is known now */
	hci_req_update_adv_data(hdev, cp->handle);

	hci_dev_unlock(hdev);
/* Tizen vendor extension: Command Complete for the vendor "enable
 * RSSI monitoring" command; forward the response to mgmt.
 */
static void hci_cc_enable_rssi(struct hci_dev *hdev,
			       struct sk_buff *skb)
	struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;

	BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
	       hdev->name, rp->status, rp->le_ext_opcode);

	mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Tizen vendor extension: Command Complete for the vendor "get raw
 * RSSI" command; forward connection handle and dBm value to mgmt.
 */
static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
				struct sk_buff *skb)
	struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;

	BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
	       hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);

	mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Tizen vendor extension: handle an RSSI link-alert vendor event and
 * forward it to mgmt.
 * NOTE(review): the trailing argument(s) of mgmt_rssi_alert_evt are
 * elided in this extract.
 */
static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
					       struct sk_buff *skb)
	struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;

	BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);

	mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
/* Tizen vendor extension: demultiplex the "vendor specific group"
 * meta event by its LE extended sub-code. Currently only the RSSI
 * link-alert sub-event is dispatched.
 * NOTE(review): the switch braces/break and default case are elided
 * in this extract. ev is read after skb_pull(); the header is still
 * addressable since skb_pull only advances skb->data.
 */
static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
					      struct sk_buff *skb)
	struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
	__u8 event_le_ext_sub_code;

	BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
	       LE_META_VENDOR_SPECIFIC_GROUP_EVENT);

	/* Advance past the group-event header before dispatching. */
	skb_pull(skb, sizeof(*ev));
	event_le_ext_sub_code = ev->event_le_ext_sub_code;

	switch (event_le_ext_sub_code) {
	case LE_RSSI_LINK_ALERT:
		hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
/* Tizen vendor extension: handle a multi-advertising state-change
 * vendor sub-event and forward instance, reason and handle to mgmt.
 */
static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
						  struct sk_buff *skb)
	struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;

	BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");

	mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
					ev->state_change_reason,
					ev->connection_handle);
/* Tizen vendor extension: top-level dispatcher for HCI vendor-specific
 * events, switching on the first-level sub-code.
 * NOTE(review): switch braces/break statements and the default case
 * are elided in this extract.
 */
static void hci_vendor_specific_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_vendor_specific *ev = (void *)skb->data;
	__u8 event_sub_code;

	BT_DBG("hci_vendor_specific_evt");

	/* Strip the vendor event header before handing off. */
	skb_pull(skb, sizeof(*ev));
	event_sub_code = ev->event_sub_code;

	switch (event_sub_code) {
	case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
		hci_vendor_specific_group_ext_evt(hdev, skb);

	case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
		hci_vendor_multi_adv_state_change_evt(hdev, skb);
/* Command Complete for Read RSSI: store the reported RSSI on the
 * connection identified by the response handle.
 * NOTE(review): the status guard, hci_dev_lock() and the NULL check on
 * conn are elided in this extract.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
/* Command Complete for Read Transmit Power Level: store either the
 * current or the maximum TX power on the connection, depending on the
 * type field of the command we sent.
 * NOTE(review): the status/NULL guards, hci_dev_lock(), the switch
 * head, case labels and break statements are elided in this extract.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));

	switch (sent->type) {
		conn->tx_power = rp->tx_power;
		conn->max_tx_power = rp->tx_power;

	hci_dev_unlock(hdev);
/* Command Complete for Write SSP Debug Mode: remember the debug-mode
 * value we sent.
 * NOTE(review): the declaration of 'mode', the status guard and the
 * NULL check on mode are elided in this extract.
 */
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
	hdev->ssp_debug_mode = *mode;
/* Command Status for Inquiry: on success mark the adapter as actively
 * inquiring; on failure re-check pending connections.
 * NOTE(review): the status branch around hci_conn_check_pending() /
 * set_bit() is elided in this extract.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for Create Connection (BR/EDR ACL): on failure close
 * the pending connection and notify callers, unless the failure was
 * 0x0c (Command Disallowed) on an early attempt, in which case the
 * connection is parked in BT_CONNECT2 for a retry. On success with no
 * existing hci_conn, create one for the outgoing attempt.
 * NOTE(review): the NULL-cp guard, hci_dev_lock(), the status==0
 * branch structure and closing braces are elided in this extract.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (conn && conn->state == BT_CONNECT) {
		/* 0x0c = Command Disallowed: give the controller up to
		 * two more attempts before declaring failure.
		 */
		if (status != 0x0c || conn->attempt > 2) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			conn->state = BT_CONNECT2;

	conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
		bt_dev_err(hdev, "no memory for new connection");

	hci_dev_unlock(hdev);
/* Command Status for Add SCO Connection: on failure, find the ACL the
 * SCO was being set up on, close its pending SCO link and notify.
 * NOTE(review): the status==0 early return, NULL-cp guard,
 * hci_dev_lock(), the 'handle' declaration and the acl/sco NULL checks
 * are elided in this extract.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	sco->state = BT_CLOSED;

	hci_connect_cfm(sco, status);

	hci_dev_unlock(hdev);
/* Command Status for Authentication Requested: on failure during
 * connection setup (BT_CONFIG), report the failure and drop the
 * reference taken for the auth attempt.
 * NOTE(review): the status==0 early return, NULL guards and
 * hci_dev_lock() are elided in this extract.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status for Set Connection Encryption: mirror of
 * hci_cs_auth_requested — on failure during BT_CONFIG, report and
 * drop the connection reference.
 * NOTE(review): the status==0 early return, NULL guards and
 * hci_dev_lock() are elided in this extract.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs an explicit
 * Authentication Requested command while in BT_CONFIG.
 * NOTE(review): the return statements (0/1) after each condition are
 * elided in this extract — confirm against the full source.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
	/* Only outgoing connections still in configuration qualify. */
	if (conn->state != BT_CONFIG || !conn->out)

	/* SDP-only security never needs authentication. */
	if (conn->pending_sec_level == BT_SECURITY_SDP)

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan parameters and clock offset learned during inquiry so the
 * controller can page the device faster.
 * Returns the result of hci_send_cmd() (0 on success / negative errno).
 */
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next cache entry that still needs
 * a name. Returns true if a Remote Name Request was started.
 * NOTE(review): the early 'return false' paths and the 'return true'
 * / 'return false' tail are elided in this extract.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);

	if (hci_resolve_name(hdev, e) == 0) {
		/* Mark the entry so it isn't picked again while the
		 * request is in flight.
		 */
		e->name_state = NAME_PENDING;
/* Process a (possibly failed) remote-name result: propagate the name
 * to mgmt, update the matching inquiry-cache entry, and continue or
 * finish discovery-phase name resolution.
 *
 * NOTE(review): two variants of the mgmt connected-update logic appear
 * back to back below — presumably an #ifdef TIZEN_BT alternation whose
 * preprocessor lines were elided; confirm against the full source.
 * Several goto targets, braces and the NULL check on 'e' are also
 * elided.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
		if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, conn, 0, name, name_len);
			mgmt_device_name_update(hdev, bdaddr, name, name_len);

	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 */
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
		e->name_state = NAME_NOT_KNOWN;

	if (hci_resolve_next_name(hdev))

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for Remote Name Request. On failure, let mgmt-driven
 * discovery clean up via hci_check_pending_name(); otherwise, if the
 * outgoing connection still needs authentication, initiate it now
 * rather than waiting for the name response.
 * NOTE(review): the status==0 early return, NULL guards, hci_dev_lock()
 * and surrounding braces are elided in this extract.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn))

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		/* We started the authentication, not the remote side. */
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);

	hci_dev_unlock(hdev);
/* Command Status for Read Remote Supported Features: on failure during
 * connection setup (BT_CONFIG), report and drop the reference.
 * NOTE(review): the status==0 early return, NULL guards and
 * hci_dev_lock() are elided in this extract.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status for Read Remote Extended Features: same failure
 * handling as hci_cs_read_remote_features.
 * NOTE(review): the status==0 early return, NULL guards and
 * hci_dev_lock() are elided in this extract.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status for Setup Synchronous Connection (eSCO): on failure,
 * locate the parent ACL by handle, close the pending SCO link hanging
 * off it and notify — analogous to hci_cs_add_sco.
 * NOTE(review): the status==0 early return, NULL-cp guard,
 * hci_dev_lock(), the 'handle' declaration and the acl/sco NULL checks
 * are elided in this extract.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	sco->state = BT_CLOSED;

	hci_connect_cfm(sco, status);

	hci_dev_unlock(hdev);
/* Command Status for Sniff Mode: on failure, clear the pending
 * mode-change flag and, if a SCO setup was waiting on the mode change,
 * run it now with the error status.
 * NOTE(review): the status==0 early return, NULL guards and
 * hci_dev_lock() are elided in this extract.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

	hci_dev_unlock(hdev);
/* Command Status for Exit Sniff Mode: identical failure handling to
 * hci_cs_sniff_mode (clear pending mode change, run deferred SCO
 * setup).
 * NOTE(review): the status==0 early return, NULL guards and
 * hci_dev_lock() are elided in this extract.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
		hci_sco_setup(conn, status);

	hci_dev_unlock(hdev);
/* Command Status for Disconnect: only interesting on failure — tell
 * mgmt the disconnect failed and, for LE links, re-enable advertising
 * (which may have been suppressed while the link was up).
 * NOTE(review): the status==0 early return, NULL guards, hci_dev_lock()
 * and several braces are elided in this extract; the trailing comment
 * block appears displaced from the code it describes.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
			       conn->dst_type, status);

	if (conn->type == LE_LINK) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_req_reenable_advertising(hdev);

	/* If the disconnection failed for any reason, the upper layer
	 * does not retry to disconnect in current implementation.
	 * Hence, we need to do some basic cleanup here and re-enable
	 * advertising if necessary.
	 */

	hci_dev_unlock(hdev);
/* Shared Command Status helper for LE Create Connection and LE
 * Extended Create Connection: record initiator/responder address info
 * on the pending hci_conn (needed later by SMP) and arm the connection
 * timeout for directed connects.
 * NOTE(review): the filter_policy parameter, the NULL check on conn,
 * break statements and closing braces are elided in this extract.
 */
static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
			      u8 peer_addr_type, u8 own_address_type,
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, peer_addr,

	/* When using controller based address resolution, then the new
	 * address types 0x02 and 0x03 are used. These types need to be
	 * converted back into either public address or random address type
	 */
	if (use_ll_privacy(hdev) &&
	    hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
		switch (own_address_type) {
		case ADDR_LE_DEV_PUBLIC_RESOLVED:
			own_address_type = ADDR_LE_DEV_PUBLIC;
		case ADDR_LE_DEV_RANDOM_RESOLVED:
			own_address_type = ADDR_LE_DEV_RANDOM;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = own_address_type;
	if (own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = peer_addr_type;
	bacpy(&conn->resp_addr, peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the accept list for connecting.
	 */
	if (filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);
/* Command Status for LE Create Connection: on success, hand the sent
 * parameters to cs_le_create_conn() to finish host-side bookkeeping.
 * NOTE(review): the status guard, NULL-cp guard and hci_dev_lock() are
 * elided in this extract.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_address_type, cp->filter_policy);

	hci_dev_unlock(hdev);
/* Command Status for LE Extended Create Connection: extended-command
 * twin of hci_cs_le_create_conn (note the field is own_addr_type here,
 * not own_address_type).
 * NOTE(review): the status guard, NULL-cp guard and hci_dev_lock() are
 * elided in this extract.
 */
static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_ext_create_conn *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);

	cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
			  cp->own_addr_type, cp->filter_policy);

	hci_dev_unlock(hdev);
/* Command Status for LE Read Remote Features: on failure during
 * connection setup (BT_CONFIG), report and drop the reference — the LE
 * counterpart of hci_cs_read_remote_features.
 * NOTE(review): the status==0 early return, NULL guards and
 * hci_dev_lock() are elided in this extract.
 */
static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn->state == BT_CONFIG) {
		hci_connect_cfm(conn, status);
		hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status for LE Start Encryption: a failure here means
 * encryption could not even be initiated, so tear the link down with
 * an authentication-failure reason rather than leaving it unencrypted.
 * NOTE(review): the status==0 early return, NULL guards and
 * hci_dev_lock() are elided in this extract.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));

	/* Only an established link can be torn down here. */
	if (conn->state != BT_CONNECTED)

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

	hci_dev_unlock(hdev);
/* Command Status for Switch Role: on failure, clear the pending
 * role-switch flag so later requests are not blocked.
 * NOTE(review): the status==0 early return, NULL guards and
 * hci_dev_lock() are elided in this extract.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
/* Inquiry Complete event: clear HCI_INQUIRY (waking any waiters), then
 * for mgmt-driven discovery either move on to name resolution or mark
 * discovery stopped — taking simultaneous LE scanning into account.
 * NOTE(review): hci_dev_lock(), goto targets/labels and several braces
 * are elided in this extract.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))

	if (discov->state != DISCOVERY_FINDING)

	if (list_empty(&discov->resolve)) {
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
		/* When BR/EDR inquiry is active and no LE scanning is in
		 * progress, then change discovery state to indicate completion.
		 *
		 * When running LE scanning and BR/EDR inquiry simultaneously
		 * and the LE scan already finished, then change the discovery
		 * state to indicate completion.
		 */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
		    !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hci_dev_unlock(hdev);
/* Inquiry Result event: validate the response count against the skb
 * length, then for each entry update the inquiry cache and report the
 * device to mgmt with HCI_RSSI_INVALID (this event carries no RSSI).
 * NOTE(review): hci_dev_lock(), the 'flags' declaration and several
 * braces are elided in this extract.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	/* Guard against a malformed event claiming more entries than
	 * the packet actually contains.
	 */
	if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)

	/* Results of periodic inquiry are not reported to userspace. */
	if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))

	for (; num_rsp; num_rsp--, info++) {

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);

	hci_dev_unlock(hdev);
/* Connection Complete event (BR/EDR). Locates (or, for filter-based
 * auto-connects and eSCO fallbacks, creates) the hci_conn, then on
 * success records the handle, sets up state/timeouts, kicks off remote
 * feature discovery and notifies callers; on failure reports the error
 * and cleans up.
 * NOTE(review): many guard conditions, goto labels, braces, the status
 * checks and the ev->status==0 branch head are elided in this extract —
 * read alongside the full source.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);

	/* Connection may not exist if auto-connected. Check the bredr
	 * allowlist to see if this device is allowed to auto connect.
	 * If link is an ACL type, create a connection class
	 *
	 * Auto-connect will only occur if the event filter is
	 * programmed with a given address. Right now, event filter is
	 * only used during suspend.
	 */
	if (ev->link_type == ACL_LINK &&
	    hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,

		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
			bt_dev_err(hdev, "no memory for new conn");

		/* A SCO request may have been upgraded to eSCO on our
		 * side; fall back to the SCO entry.
		 */
		if (ev->link_type != SCO_LINK)

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
		conn->type = SCO_LINK;

	conn->handle = __le16_to_cpu(ev->handle);

	if (conn->type == ACL_LINK) {
		conn->state = BT_CONFIG;
		hci_conn_hold(conn);

		/* Without SSP or a stored link key the connection may
		 * still be pairing — keep it alive longer.
		 */
		if (!conn->out && !hci_conn_ssp_enabled(conn) &&
		    !hci_find_link_key(hdev, &ev->bdaddr))
			conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		conn->state = BT_CONNECTED;

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (test_bit(HCI_AUTH, &hdev->flags))
		set_bit(HCI_CONN_AUTH, &conn->flags);

	if (test_bit(HCI_ENCRYPT, &hdev->flags))
		set_bit(HCI_CONN_ENCRYPT, &conn->flags);

	/* Get remote features */
	if (conn->type == ACL_LINK) {
		struct hci_cp_read_remote_features cp;
		cp.handle = ev->handle;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,

		hci_req_update_scan(hdev);

	/* Set packet type for incoming connection */
	if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
		struct hci_cp_change_conn_ptype cp;
		cp.handle = ev->handle;
		cp.pkt_type = cpu_to_le16(conn->pkt_type);
		hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),

	if (get_link_mode(conn) & HCI_LM_MASTER)
		hci_conn_change_supervision_timeout(conn,
						    LINK_SUPERVISION_TIMEOUT);

	conn->state = BT_CLOSED;
	if (conn->type == ACL_LINK)
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	hci_connect_cfm(conn, ev->status);

	} else if (ev->link_type == SCO_LINK) {
		/* Tell the driver about CVSD audio so it can route it. */
		switch (conn->setting & SCO_AIRMODE_MASK) {
		case SCO_AIRMODE_CVSD:
			hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);

		hci_connect_cfm(conn, ev->status);

	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address with
 * reason "unacceptable BD_ADDR".
 */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event (incoming BR/EDR ACL/SCO/eSCO). Applies the
 * accept/reject policy (reject list, HCI_CONNECTABLE/accept list under
 * mgmt, one-SCO-at-a-time), updates the inquiry cache with the device
 * class, then either accepts the connection directly or defers the
 * decision to the protocol (HCI_PROTO_DEFER -> BT_CONNECT2).
 * NOTE(review): the 'flags' declaration, several guard conditions,
 * return statements and braces are elided in this extract.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,

	/* Let L2CAP/SCO layers veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
		hci_reject_conn(hdev, &ev->bdaddr);

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
		hci_reject_conn(hdev, &ev->bdaddr);

	/* Connection accepted */

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	memcpy(ie->data.dev_class, ev->dev_class, 3);

	/* Only one (e)SCO link per ACL is allowed at a time. */
	if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
	    hci_conn_hash_lookup_sco(hdev)) {
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
		hci_dev_unlock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
			bt_dev_err(hdev, "no memory for new connection");
			hci_dev_unlock(hdev);

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* 8 kHz * 16-bit = 16000 byte/s bandwidth. */
		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
/* Map an HCI disconnect reason code onto the MGMT disconnect reason
 * reported to userspace; anything unrecognized becomes "unknown".
 */
3010 static u8 hci_to_mgmt_reason(u8 err)
3013 case HCI_ERROR_CONNECTION_TIMEOUT:
3014 return MGMT_DEV_DISCONN_TIMEOUT;
3015 case HCI_ERROR_REMOTE_USER_TERM:
3016 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3017 case HCI_ERROR_REMOTE_POWER_OFF:
3018 return MGMT_DEV_DISCONN_REMOTE;
3019 case HCI_ERROR_LOCAL_HOST_TERM:
3020 return MGMT_DEV_DISCONN_LOCAL_HOST;
3022 return MGMT_DEV_DISCONN_UNKNOWN;
/* Handle HCI Disconnection Complete: close the connection object,
 * notify MGMT with a translated reason, drop/keep the link key for
 * ACL links, re-queue auto-connect LE params, and re-enable
 * advertising for LE links if it was stopped by the connection.
 */
3026 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3028 struct hci_ev_disconn_complete *ev = (void *) skb->data;
3030 struct hci_conn_params *params;
3031 struct hci_conn *conn;
3032 bool mgmt_connected;
3034 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3038 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	/* Non-zero status means the disconnect itself failed */
3043 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3044 conn->dst_type, ev->status);
3048 conn->state = BT_CLOSED;
3050 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	/* Auth failure takes precedence over the controller-reported reason */
3052 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3053 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3055 reason = hci_to_mgmt_reason(ev->reason);
3057 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3058 reason, mgmt_connected);
3060 if (conn->type == ACL_LINK) {
3061 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3062 hci_remove_link_key(hdev, &conn->dst);
3064 hci_req_update_scan(hdev);
3067 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3069 switch (params->auto_connect) {
3070 case HCI_AUTO_CONN_LINK_LOSS:
	/* Only reconnect on an actual link loss (supervision timeout) */
3071 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3075 case HCI_AUTO_CONN_DIRECT:
3076 case HCI_AUTO_CONN_ALWAYS:
	/* NOTE(review): "¶ms" on the two lines below looks like a
	 * mangled "&params" (HTML-entity corruption) -- confirm against
	 * the upstream source before building.
	 */
3077 list_del_init(¶ms->action);
3078 list_add(¶ms->action, &hdev->pend_le_conns);
3079 hci_update_background_scan(hdev);
3087 hci_disconn_cfm(conn, ev->reason);
3089 /* The suspend notifier is waiting for all devices to disconnect so
3090 * clear the bit from pending tasks and inform the wait queue.
3092 if (list_empty(&hdev->conn_hash.list) &&
3093 test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
3094 wake_up(&hdev->suspend_wait_q);
3097 /* Re-enable advertising if necessary, since it might
3098 * have been disabled by the connection. From the
3099 * HCI_LE_Set_Advertise_Enable command description in
3100 * the core specification (v4.0):
3101 * "The Controller shall continue advertising until the Host
3102 * issues an LE_Set_Advertise_Enable command with
3103 * Advertising_Enable set to 0x00 (Advertising is disabled)
3104 * or until a connection is created or until the Advertising
3105 * is timed out due to Directed Advertising."
3107 if (conn->type == LE_LINK) {
3108 hdev->cur_adv_instance = conn->adv_instance;
3109 hci_req_reenable_advertising(hdev);
	/* Re-enable page scan when the last ACL link goes away and
	 * neither inquiry nor page scan is currently active.
	 */
3115 if (type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
3119 iscan = test_bit(HCI_ISCAN, &hdev->flags);
3120 pscan = test_bit(HCI_PSCAN, &hdev->flags);
3121 if (!iscan && !pscan) {
3122 u8 scan_enable = SCAN_PAGE;
3124 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3125 sizeof(scan_enable), &scan_enable);
3131 hci_dev_unlock(hdev);
/* Handle HCI Authentication Complete: record success/failure on the
 * connection, notify MGMT on failure, and either kick off encryption
 * (BT_CONFIG with SSP) or confirm the connection/auth to upper layers.
 */
3134 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3136 struct hci_ev_auth_complete *ev = (void *) skb->data;
3137 struct hci_conn *conn;
3139 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3143 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3148 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
	/* Legacy (pre-SSP) devices cannot be re-authenticated */
3150 if (!hci_conn_ssp_enabled(conn) &&
3151 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3152 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3154 set_bit(HCI_CONN_AUTH, &conn->flags);
3155 conn->sec_level = conn->pending_sec_level;
	/* Missing PIN/key is remembered so a later disconnect reports
	 * MGMT_DEV_DISCONN_AUTH_FAILURE.
	 */
3158 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3159 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3161 mgmt_auth_failed(conn, ev->status);
3164 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3165 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3167 if (conn->state == BT_CONFIG) {
	/* With SSP, successful auth is followed by enabling encryption */
3168 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3169 struct hci_cp_set_conn_encrypt cp;
3170 cp.handle = ev->handle;
3172 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3175 conn->state = BT_CONNECTED;
3176 hci_connect_cfm(conn, ev->status);
3177 hci_conn_drop(conn);
3180 hci_auth_cfm(conn, ev->status);
3182 hci_conn_hold(conn);
3183 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3184 hci_conn_drop(conn);
	/* An encryption change may have been requested while auth was
	 * still pending; issue or cancel it now.
	 */
3187 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3189 struct hci_cp_set_conn_encrypt cp;
3190 cp.handle = ev->handle;
3192 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3195 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3196 hci_encrypt_cfm(conn, ev->status);
3201 hci_dev_unlock(hdev);
/* Handle HCI Remote Name Request Complete: forward the resolved name
 * (or failure) to the pending-name machinery when MGMT is active, and
 * start authentication if the outgoing connection still needs it.
 */
3204 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
3206 struct hci_ev_remote_name *ev = (void *) skb->data;
3207 struct hci_conn *conn;
3209 BT_DBG("%s", hdev->name);
3211 hci_conn_check_pending(hdev);
3215 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3217 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3220 if (ev->status == 0)
	/* Name is not guaranteed NUL-terminated; bound with strnlen() */
3221 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3222 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3224 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3230 if (!hci_outgoing_auth_needed(hdev, conn))
3233 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3234 struct hci_cp_auth_requested cp;
	/* We initiated the authentication request */
3236 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3238 cp.handle = __cpu_to_le16(conn->handle);
3239 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3243 hci_dev_unlock(hdev);
/* Completion callback for HCI_Read_Encryption_Key_Size: store the
 * reported key size on the connection (0 on failure, forcing upper
 * layers to treat the link as unencrypted) and confirm encryption.
 */
3246 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
3247 u16 opcode, struct sk_buff *skb)
3249 const struct hci_rp_read_enc_key_size *rp;
3250 struct hci_conn *conn;
3253 BT_DBG("%s status 0x%02x", hdev->name, status);
	/* Validate the response buffer before touching rp fields */
3255 if (!skb || skb->len < sizeof(*rp)) {
3256 bt_dev_err(hdev, "invalid read key size response");
3260 rp = (void *)skb->data;
3261 handle = le16_to_cpu(rp->handle);
3265 conn = hci_conn_hash_lookup_handle(hdev, handle);
3269 /* While unexpected, the read_enc_key_size command may fail. The most
3270 * secure approach is to then assume the key size is 0 to force a
3274 bt_dev_err(hdev, "failed to read key size for handle %u",
3276 conn->enc_key_size = 0;
3278 conn->enc_key_size = rp->key_size;
3281 hci_encrypt_cfm(conn, 0);
3284 hci_dev_unlock(hdev);
/* Handle HCI Encryption Change: update auth/encrypt/AES-CCM flags,
 * enforce link-security requirements, read the encryption key size for
 * ACL links, set the authenticated payload timeout where supported,
 * and confirm the new encryption state to upper layers.
 */
3287 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3289 struct hci_ev_encrypt_change *ev = (void *) skb->data;
3290 struct hci_conn *conn;
3292 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3296 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3302 /* Encryption implies authentication */
3303 set_bit(HCI_CONN_AUTH, &conn->flags);
3304 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3305 conn->sec_level = conn->pending_sec_level;
3307 /* P-256 authentication key implies FIPS */
3308 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3309 set_bit(HCI_CONN_FIPS, &conn->flags);
	/* encrypt == 0x02 means AES-CCM on BR/EDR; LE always uses AES-CCM */
3311 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3312 conn->type == LE_LINK)
3313 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3315 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3316 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3320 /* We should disregard the current RPA and generate a new one
3321 * whenever the encryption procedure fails.
3323 if (ev->status && conn->type == LE_LINK) {
3324 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3325 hci_adv_instances_set_rpa_expired(hdev, true);
3328 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3330 /* Check link security requirements are met */
3331 if (!hci_conn_check_link_mode(conn))
3332 ev->status = HCI_ERROR_AUTH_FAILURE;
3334 if (ev->status && conn->state == BT_CONNECTED) {
3335 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3336 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3338 /* Notify upper layers so they can cleanup before
3341 hci_encrypt_cfm(conn, ev->status);
3342 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3343 hci_conn_drop(conn);
3347 /* Try reading the encryption key size for encrypted ACL links */
3348 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3349 struct hci_cp_read_enc_key_size cp;
3350 struct hci_request req;
3352 /* Only send HCI_Read_Encryption_Key_Size if the
3353 * controller really supports it. If it doesn't, assume
3354 * the default size (16).
	 * (commands[20] bit 0x10 advertises that command.)
3356 if (!(hdev->commands[20] & 0x10)) {
3357 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3361 hci_req_init(&req, hdev);
3363 cp.handle = cpu_to_le16(conn->handle);
3364 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
	/* Non-zero return means the request could not be queued/run */
3366 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3367 bt_dev_err(hdev, "sending read key size failed");
3368 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3375 /* Set the default Authenticated Payload Timeout after
3376 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3377 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3378 * sent when the link is active and Encryption is enabled, the conn
3379 * type can be either LE or ACL and controller must support LMP Ping.
3380 * Ensure for AES-CCM encryption as well.
3382 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3383 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3384 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3385 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3386 struct hci_cp_write_auth_payload_to cp;
3388 cp.handle = cpu_to_le16(conn->handle);
3389 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3390 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3395 hci_encrypt_cfm(conn, ev->status);
3398 hci_dev_unlock(hdev);
/* Handle HCI Change Connection Link Key Complete: mark the link secure
 * on success, clear the pending-auth flag and notify upper layers.
 */
3401 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3402 struct sk_buff *skb)
3404 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3405 struct hci_conn *conn;
3407 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3411 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3414 set_bit(HCI_CONN_SECURE, &conn->flags);
3416 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3418 hci_key_change_cfm(conn, ev->status);
3421 hci_dev_unlock(hdev);
/* Handle HCI Read Remote Supported Features Complete: store page 0 of
 * the remote feature mask, chase extended features when both sides
 * support them, request the remote name for MGMT, and finish the
 * connection setup when no further authentication is needed.
 */
3424 static void hci_remote_features_evt(struct hci_dev *hdev,
3425 struct sk_buff *skb)
3427 struct hci_ev_remote_features *ev = (void *) skb->data;
3428 struct hci_conn *conn;
3430 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3434 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3439 memcpy(conn->features[0], ev->features, 8);
3441 if (conn->state != BT_CONFIG)
	/* Both controller and remote must be extended-feature capable */
3444 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3445 lmp_ext_feat_capable(conn)) {
3446 struct hci_cp_read_remote_ext_features cp;
3447 cp.handle = ev->handle;
3449 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3454 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3455 struct hci_cp_remote_name_req cp;
3456 memset(&cp, 0, sizeof(cp));
3457 bacpy(&cp.bdaddr, &conn->dst);
3458 cp.pscan_rep_mode = 0x02;
3459 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3460 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3461 mgmt_device_connected(hdev, conn, NULL, 0);
3463 if (!hci_outgoing_auth_needed(hdev, conn)) {
3464 conn->state = BT_CONNECTED;
3465 hci_connect_cfm(conn, ev->status);
3466 hci_conn_drop(conn);
3470 hci_dev_unlock(hdev);
/* Maintain the outstanding-command accounting after a command
 * complete/status event: cancel the command timeout and, outside of a
 * reset, either refresh cmd_cnt from the controller's ncmd or arm the
 * ncmd watchdog timer when the controller reports zero credits.
 */
3473 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3475 cancel_delayed_work(&hdev->cmd_timer);
3477 if (!test_bit(HCI_RESET, &hdev->flags)) {
3479 cancel_delayed_work(&hdev->ncmd_timer);
3480 atomic_set(&hdev->cmd_cnt, 1);
3482 schedule_delayed_work(&hdev->ncmd_timer,
/* Handle HCI Command Complete: extract the opcode and status, dispatch
 * to the per-opcode hci_cc_* handler, refresh the command-credit
 * accounting, complete any matching pending request and kick the
 * command queue. Output parameters opcode/status and the two
 * completion callbacks are filled in for the caller.
 */
3488 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3489 u16 *opcode, u8 *status,
3490 hci_req_complete_t *req_complete,
3491 hci_req_complete_skb_t *req_complete_skb)
3493 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3495 *opcode = __le16_to_cpu(ev->opcode);
	/* First byte of the return parameters is the command status */
3496 *status = skb->data[sizeof(*ev)];
3498 skb_pull(skb, sizeof(*ev));
	/* Per-opcode completion handlers */
3501 case HCI_OP_INQUIRY_CANCEL:
3502 hci_cc_inquiry_cancel(hdev, skb, status);
3505 case HCI_OP_PERIODIC_INQ:
3506 hci_cc_periodic_inq(hdev, skb);
3509 case HCI_OP_EXIT_PERIODIC_INQ:
3510 hci_cc_exit_periodic_inq(hdev, skb);
3513 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3514 hci_cc_remote_name_req_cancel(hdev, skb);
3517 case HCI_OP_ROLE_DISCOVERY:
3518 hci_cc_role_discovery(hdev, skb);
3521 case HCI_OP_READ_LINK_POLICY:
3522 hci_cc_read_link_policy(hdev, skb);
3525 case HCI_OP_WRITE_LINK_POLICY:
3526 hci_cc_write_link_policy(hdev, skb);
3529 case HCI_OP_READ_DEF_LINK_POLICY:
3530 hci_cc_read_def_link_policy(hdev, skb);
3533 case HCI_OP_WRITE_DEF_LINK_POLICY:
3534 hci_cc_write_def_link_policy(hdev, skb);
3538 hci_cc_reset(hdev, skb);
3541 case HCI_OP_READ_STORED_LINK_KEY:
3542 hci_cc_read_stored_link_key(hdev, skb);
3545 case HCI_OP_DELETE_STORED_LINK_KEY:
3546 hci_cc_delete_stored_link_key(hdev, skb);
3549 case HCI_OP_WRITE_LOCAL_NAME:
3550 hci_cc_write_local_name(hdev, skb);
3553 case HCI_OP_READ_LOCAL_NAME:
3554 hci_cc_read_local_name(hdev, skb);
3557 case HCI_OP_WRITE_AUTH_ENABLE:
3558 hci_cc_write_auth_enable(hdev, skb);
3561 case HCI_OP_WRITE_ENCRYPT_MODE:
3562 hci_cc_write_encrypt_mode(hdev, skb);
3565 case HCI_OP_WRITE_SCAN_ENABLE:
3566 hci_cc_write_scan_enable(hdev, skb);
3569 case HCI_OP_SET_EVENT_FLT:
3570 hci_cc_set_event_filter(hdev, skb);
3573 case HCI_OP_READ_CLASS_OF_DEV:
3574 hci_cc_read_class_of_dev(hdev, skb);
3577 case HCI_OP_WRITE_CLASS_OF_DEV:
3578 hci_cc_write_class_of_dev(hdev, skb);
3581 case HCI_OP_READ_VOICE_SETTING:
3582 hci_cc_read_voice_setting(hdev, skb);
3585 case HCI_OP_WRITE_VOICE_SETTING:
3586 hci_cc_write_voice_setting(hdev, skb);
3589 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3590 hci_cc_read_num_supported_iac(hdev, skb);
3593 case HCI_OP_WRITE_SSP_MODE:
3594 hci_cc_write_ssp_mode(hdev, skb);
3597 case HCI_OP_WRITE_SC_SUPPORT:
3598 hci_cc_write_sc_support(hdev, skb);
3601 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3602 hci_cc_read_auth_payload_timeout(hdev, skb);
3605 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3606 hci_cc_write_auth_payload_timeout(hdev, skb);
3609 case HCI_OP_READ_LOCAL_VERSION:
3610 hci_cc_read_local_version(hdev, skb);
3613 case HCI_OP_READ_LOCAL_COMMANDS:
3614 hci_cc_read_local_commands(hdev, skb);
3617 case HCI_OP_READ_LOCAL_FEATURES:
3618 hci_cc_read_local_features(hdev, skb);
3621 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3622 hci_cc_read_local_ext_features(hdev, skb);
3625 case HCI_OP_READ_BUFFER_SIZE:
3626 hci_cc_read_buffer_size(hdev, skb);
3629 case HCI_OP_READ_BD_ADDR:
3630 hci_cc_read_bd_addr(hdev, skb);
3633 case HCI_OP_READ_LOCAL_PAIRING_OPTS:
3634 hci_cc_read_local_pairing_opts(hdev, skb);
3637 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3638 hci_cc_read_page_scan_activity(hdev, skb);
3641 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3642 hci_cc_write_page_scan_activity(hdev, skb);
3645 case HCI_OP_READ_PAGE_SCAN_TYPE:
3646 hci_cc_read_page_scan_type(hdev, skb);
3649 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3650 hci_cc_write_page_scan_type(hdev, skb);
3653 case HCI_OP_READ_DATA_BLOCK_SIZE:
3654 hci_cc_read_data_block_size(hdev, skb);
3657 case HCI_OP_READ_FLOW_CONTROL_MODE:
3658 hci_cc_read_flow_control_mode(hdev, skb);
3661 case HCI_OP_READ_LOCAL_AMP_INFO:
3662 hci_cc_read_local_amp_info(hdev, skb);
3665 case HCI_OP_READ_CLOCK:
3666 hci_cc_read_clock(hdev, skb);
3669 case HCI_OP_READ_INQ_RSP_TX_POWER:
3670 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3673 case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
3674 hci_cc_read_def_err_data_reporting(hdev, skb);
3677 case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
3678 hci_cc_write_def_err_data_reporting(hdev, skb);
3681 case HCI_OP_PIN_CODE_REPLY:
3682 hci_cc_pin_code_reply(hdev, skb);
3685 case HCI_OP_PIN_CODE_NEG_REPLY:
3686 hci_cc_pin_code_neg_reply(hdev, skb);
3689 case HCI_OP_READ_LOCAL_OOB_DATA:
3690 hci_cc_read_local_oob_data(hdev, skb);
3693 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3694 hci_cc_read_local_oob_ext_data(hdev, skb);
3697 case HCI_OP_LE_READ_BUFFER_SIZE:
3698 hci_cc_le_read_buffer_size(hdev, skb);
3701 case HCI_OP_LE_READ_LOCAL_FEATURES:
3702 hci_cc_le_read_local_features(hdev, skb);
3705 case HCI_OP_LE_READ_ADV_TX_POWER:
3706 hci_cc_le_read_adv_tx_power(hdev, skb);
3709 case HCI_OP_USER_CONFIRM_REPLY:
3710 hci_cc_user_confirm_reply(hdev, skb);
3713 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3714 hci_cc_user_confirm_neg_reply(hdev, skb);
3717 case HCI_OP_USER_PASSKEY_REPLY:
3718 hci_cc_user_passkey_reply(hdev, skb);
3721 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3722 hci_cc_user_passkey_neg_reply(hdev, skb);
3725 case HCI_OP_LE_SET_RANDOM_ADDR:
3726 hci_cc_le_set_random_addr(hdev, skb);
3729 case HCI_OP_LE_SET_ADV_ENABLE:
3730 hci_cc_le_set_adv_enable(hdev, skb);
3733 case HCI_OP_LE_SET_SCAN_PARAM:
3734 hci_cc_le_set_scan_param(hdev, skb);
3737 case HCI_OP_LE_SET_SCAN_ENABLE:
3738 hci_cc_le_set_scan_enable(hdev, skb);
3741 case HCI_OP_LE_READ_ACCEPT_LIST_SIZE:
3742 hci_cc_le_read_accept_list_size(hdev, skb);
3745 case HCI_OP_LE_CLEAR_ACCEPT_LIST:
3746 hci_cc_le_clear_accept_list(hdev, skb);
3749 case HCI_OP_LE_ADD_TO_ACCEPT_LIST:
3750 hci_cc_le_add_to_accept_list(hdev, skb);
3753 case HCI_OP_LE_DEL_FROM_ACCEPT_LIST:
3754 hci_cc_le_del_from_accept_list(hdev, skb);
3757 case HCI_OP_LE_READ_SUPPORTED_STATES:
3758 hci_cc_le_read_supported_states(hdev, skb);
3761 case HCI_OP_LE_READ_DEF_DATA_LEN:
3762 hci_cc_le_read_def_data_len(hdev, skb);
3765 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3766 hci_cc_le_write_def_data_len(hdev, skb);
3769 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3770 hci_cc_le_add_to_resolv_list(hdev, skb);
3773 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3774 hci_cc_le_del_from_resolv_list(hdev, skb);
3777 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3778 hci_cc_le_clear_resolv_list(hdev, skb);
3781 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3782 hci_cc_le_read_resolv_list_size(hdev, skb);
3785 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3786 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3789 case HCI_OP_LE_READ_MAX_DATA_LEN:
3790 hci_cc_le_read_max_data_len(hdev, skb);
3793 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3794 hci_cc_write_le_host_supported(hdev, skb);
3797 case HCI_OP_LE_SET_ADV_PARAM:
3798 hci_cc_set_adv_param(hdev, skb);
3801 case HCI_OP_READ_RSSI:
3802 hci_cc_read_rssi(hdev, skb);
3805 case HCI_OP_READ_TX_POWER:
3806 hci_cc_read_tx_power(hdev, skb);
3809 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3810 hci_cc_write_ssp_debug_mode(hdev, skb);
3813 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3814 hci_cc_le_set_ext_scan_param(hdev, skb);
3817 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3818 hci_cc_le_set_ext_scan_enable(hdev, skb);
3821 case HCI_OP_LE_SET_DEFAULT_PHY:
3822 hci_cc_le_set_default_phy(hdev, skb);
3825 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3826 hci_cc_le_read_num_adv_sets(hdev, skb);
3829 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3830 hci_cc_set_ext_adv_param(hdev, skb);
3833 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3834 hci_cc_le_set_ext_adv_enable(hdev, skb);
3837 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3838 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3841 case HCI_OP_LE_READ_TRANSMIT_POWER:
3842 hci_cc_le_read_transmit_power(hdev, skb);
3845 case HCI_OP_ENABLE_RSSI:
3846 hci_cc_enable_rssi(hdev, skb);
3849 case HCI_OP_GET_RAW_RSSI:
3850 hci_cc_get_raw_rssi(hdev, skb);
3854 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
	/* Refresh command credits / timers from ev->ncmd */
3858 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3860 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3863 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3865 "unexpected event for opcode 0x%4.4x", *opcode);
	/* Send the next queued command if credits are available */
3869 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3870 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle HCI Command Status: dispatch to the per-opcode hci_cs_*
 * handler, refresh command-credit accounting, flag the pending request
 * complete where appropriate, and kick the command queue.
 */
3873 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3874 u16 *opcode, u8 *status,
3875 hci_req_complete_t *req_complete,
3876 hci_req_complete_skb_t *req_complete_skb)
3878 struct hci_ev_cmd_status *ev = (void *) skb->data;
3880 skb_pull(skb, sizeof(*ev));
3882 *opcode = __le16_to_cpu(ev->opcode);
3883 *status = ev->status;
	/* Per-opcode status handlers */
3886 case HCI_OP_INQUIRY:
3887 hci_cs_inquiry(hdev, ev->status);
3890 case HCI_OP_CREATE_CONN:
3891 hci_cs_create_conn(hdev, ev->status);
3894 case HCI_OP_DISCONNECT:
3895 hci_cs_disconnect(hdev, ev->status);
3898 case HCI_OP_ADD_SCO:
3899 hci_cs_add_sco(hdev, ev->status);
3902 case HCI_OP_AUTH_REQUESTED:
3903 hci_cs_auth_requested(hdev, ev->status);
3906 case HCI_OP_SET_CONN_ENCRYPT:
3907 hci_cs_set_conn_encrypt(hdev, ev->status);
3910 case HCI_OP_REMOTE_NAME_REQ:
3911 hci_cs_remote_name_req(hdev, ev->status);
3914 case HCI_OP_READ_REMOTE_FEATURES:
3915 hci_cs_read_remote_features(hdev, ev->status);
3918 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3919 hci_cs_read_remote_ext_features(hdev, ev->status);
3922 case HCI_OP_SETUP_SYNC_CONN:
3923 hci_cs_setup_sync_conn(hdev, ev->status);
3926 case HCI_OP_SNIFF_MODE:
3927 hci_cs_sniff_mode(hdev, ev->status);
3930 case HCI_OP_EXIT_SNIFF_MODE:
3931 hci_cs_exit_sniff_mode(hdev, ev->status);
3934 case HCI_OP_SWITCH_ROLE:
3935 hci_cs_switch_role(hdev, ev->status);
3938 case HCI_OP_LE_CREATE_CONN:
3939 hci_cs_le_create_conn(hdev, ev->status);
3942 case HCI_OP_LE_READ_REMOTE_FEATURES:
3943 hci_cs_le_read_remote_features(hdev, ev->status);
3946 case HCI_OP_LE_START_ENC:
3947 hci_cs_le_start_enc(hdev, ev->status);
3950 case HCI_OP_LE_EXT_CREATE_CONN:
3951 hci_cs_le_ext_create_conn(hdev, ev->status);
3955 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3959 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
3961 /* Indicate request completion if the command failed. Also, if
3962 * we're not waiting for a special event and we get a success
3963 * command status we should try to flag the request as completed
3964 * (since for this kind of commands there will not be a command
3968 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3969 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3972 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3974 "unexpected event for opcode 0x%4.4x", *opcode);
	/* Send the next queued command if credits are available */
3978 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3979 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle HCI Hardware Error: report the code to MGMT, remember it on
 * the device and schedule the error-reset work to recover.
 */
3982 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3984 struct hci_ev_hardware_error *ev = (void *) skb->data;
3988 mgmt_hardware_error(hdev, ev->code);
3989 hci_dev_unlock(hdev);
3991 hdev->hw_error_code = ev->code;
3993 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle HCI Role Change: record the new role, clear the pending role
 * switch flag, notify upper layers, and (in this tree) bump the link
 * supervision timeout when we have become central.
 */
3996 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3998 struct hci_ev_role_change *ev = (void *) skb->data;
3999 struct hci_conn *conn;
4001 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4005 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4008 conn->role = ev->role;
4010 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4012 hci_role_switch_cfm(conn, ev->status, ev->role);
4014 if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
4015 hci_conn_change_supervision_timeout(conn,
4016 LINK_SUPERVISION_TIMEOUT);
4020 hci_dev_unlock(hdev);
/* Handle Number Of Completed Packets (packet-based flow control):
 * return transmit credits to the per-type counters, clamped to the
 * controller-reported maximums, then kick the TX work.
 */
4023 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
4025 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	/* This event is only valid in packet-based flow-control mode */
4028 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4029 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
	/* Validate that the claimed handle count fits in the skb */
4033 if (skb->len < sizeof(*ev) ||
4034 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4035 BT_DBG("%s bad parameters", hdev->name);
4039 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
4041 for (i = 0; i < ev->num_hndl; i++) {
4042 struct hci_comp_pkts_info *info = &ev->handles[i];
4043 struct hci_conn *conn;
4044 __u16 handle, count;
4046 handle = __le16_to_cpu(info->handle);
4047 count = __le16_to_cpu(info->count);
4049 conn = hci_conn_hash_lookup_handle(hdev, handle);
4053 conn->sent -= count;
4055 switch (conn->type) {
4057 hdev->acl_cnt += count;
4058 if (hdev->acl_cnt > hdev->acl_pkts)
4059 hdev->acl_cnt = hdev->acl_pkts;
	/* LE shares the ACL buffer pool when le_pkts is zero */
4063 if (hdev->le_pkts) {
4064 hdev->le_cnt += count;
4065 if (hdev->le_cnt > hdev->le_pkts)
4066 hdev->le_cnt = hdev->le_pkts;
4068 hdev->acl_cnt += count;
4069 if (hdev->acl_cnt > hdev->acl_pkts)
4070 hdev->acl_cnt = hdev->acl_pkts;
4075 hdev->sco_cnt += count;
4076 if (hdev->sco_cnt > hdev->sco_pkts)
4077 hdev->sco_cnt = hdev->sco_pkts;
4081 bt_dev_err(hdev, "unknown type %d conn %p",
4087 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a connection depending on the device type:
 * regular controllers use the connection hash, AMP controllers go
 * through the logical channel (hci_chan) lookup.
 */
4090 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4093 struct hci_chan *chan;
4095 switch (hdev->dev_type) {
4097 return hci_conn_hash_lookup_handle(hdev, handle);
4099 chan = hci_chan_lookup_handle(hdev, handle);
4104 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle Number Of Completed Data Blocks (block-based flow control):
 * return data-block credits, clamped to num_blocks, then kick TX work.
 */
4111 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
4113 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	/* This event is only valid in block-based flow-control mode */
4116 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4117 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
	/* Validate that the claimed handle count fits in the skb */
4121 if (skb->len < sizeof(*ev) ||
4122 skb->len < struct_size(ev, handles, ev->num_hndl)) {
4123 BT_DBG("%s bad parameters", hdev->name);
4127 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
4130 for (i = 0; i < ev->num_hndl; i++) {
4131 struct hci_comp_blocks_info *info = &ev->handles[i];
4132 struct hci_conn *conn = NULL;
4133 __u16 handle, block_count;
4135 handle = __le16_to_cpu(info->handle);
4136 block_count = __le16_to_cpu(info->blocks);
4138 conn = __hci_conn_lookup_handle(hdev, handle);
4142 conn->sent -= block_count;
4144 switch (conn->type) {
4147 hdev->block_cnt += block_count;
4148 if (hdev->block_cnt > hdev->num_blocks)
4149 hdev->block_cnt = hdev->num_blocks;
4153 bt_dev_err(hdev, "unknown type %d conn %p",
4159 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle HCI Mode Change (active/sniff): track the power-save state
 * and finish a deferred SCO setup if one was waiting on the change.
 */
4162 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4164 struct hci_ev_mode_change *ev = (void *) skb->data;
4165 struct hci_conn *conn;
4167 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4171 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4173 conn->mode = ev->mode;
	/* Only update POWER_SAVE if this change was not initiated by us */
4175 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4177 if (conn->mode == HCI_CM_ACTIVE)
4178 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4180 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4183 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4184 hci_sco_setup(conn, ev->status);
4187 hci_dev_unlock(hdev);
/* Handle HCI PIN Code Request: reject when not bondable and we did not
 * initiate auth, otherwise hand the request up to MGMT (flagging
 * whether a 16-digit "secure" PIN is required).
 */
4190 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4192 struct hci_ev_pin_code_req *ev = (void *) skb->data;
4193 struct hci_conn *conn;
4195 BT_DBG("%s", hdev->name);
4199 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	/* Keep the connection alive for the duration of pairing */
4203 if (conn->state == BT_CONNECTED) {
4204 hci_conn_hold(conn);
4205 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4206 hci_conn_drop(conn);
4209 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4210 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4211 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4212 sizeof(ev->bdaddr), &ev->bdaddr);
4213 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4216 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4221 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4225 hci_dev_unlock(hdev);
/* Store the link key type and PIN length on the connection and derive
 * the pending security level from the key type (unauthenticated keys
 * cap at MEDIUM, authenticated P-192 gives HIGH, P-256 gives FIPS).
 */
4228 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4230 if (key_type == HCI_LK_CHANGED_COMBINATION)
4233 conn->pin_length = pin_len;
4234 conn->key_type = key_type;
4237 case HCI_LK_LOCAL_UNIT:
4238 case HCI_LK_REMOTE_UNIT:
4239 case HCI_LK_DEBUG_COMBINATION:
4241 case HCI_LK_COMBINATION:
	/* Legacy combination key: 16-digit PIN qualifies as HIGH */
4243 conn->pending_sec_level = BT_SECURITY_HIGH;
4245 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4247 case HCI_LK_UNAUTH_COMBINATION_P192:
4248 case HCI_LK_UNAUTH_COMBINATION_P256:
4249 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4251 case HCI_LK_AUTH_COMBINATION_P192:
4252 conn->pending_sec_level = BT_SECURITY_HIGH;
4254 case HCI_LK_AUTH_COMBINATION_P256:
4255 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle HCI Link Key Request: look up a stored key for the peer,
 * refuse keys too weak for the pending security level, and reply with
 * either the key or a negative reply.
 */
4260 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4262 struct hci_ev_link_key_req *ev = (void *) skb->data;
4263 struct hci_cp_link_key_reply cp;
4264 struct hci_conn *conn;
4265 struct link_key *key;
4267 BT_DBG("%s", hdev->name);
4269 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4274 key = hci_find_link_key(hdev, &ev->bdaddr);
4276 BT_DBG("%s link key not found for %pMR", hdev->name,
4281 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
4284 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4286 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	/* Never present an unauthenticated key when MITM protection
	 * was requested (auth_type bit 0).
	 */
4288 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4289 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4290 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4291 BT_DBG("%s ignoring unauthenticated key", hdev->name);
	/* Legacy combination keys from short PINs are too weak for
	 * HIGH/FIPS security levels.
	 */
4295 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4296 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4297 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4298 BT_DBG("%s ignoring key unauthenticated for high security",
4303 conn_set_key(conn, key->type, key->pin_len);
4306 bacpy(&cp.bdaddr, &ev->bdaddr);
4307 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4309 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4311 hci_dev_unlock(hdev);
4316 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4317 hci_dev_unlock(hdev);
/* Handle HCI Link Key Notification: record the new key on the
 * connection, store it via MGMT, discard debug keys unless explicitly
 * kept, and track the flush-key persistence hint.
 */
4320 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4322 struct hci_ev_link_key_notify *ev = (void *) skb->data;
4323 struct hci_conn *conn;
4324 struct link_key *key;
4328 BT_DBG("%s", hdev->name);
4332 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	/* Keep the connection alive while the key is being processed */
4336 hci_conn_hold(conn);
4337 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4338 hci_conn_drop(conn);
4340 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4341 conn_set_key(conn, ev->key_type, conn->pin_length);
4343 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4346 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4347 ev->key_type, pin_len, &persistent);
4351 /* Update connection information since adding the key will have
4352 * fixed up the type in the case of changed combination keys.
4354 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4355 conn_set_key(conn, key->type, key->pin_len);
4357 mgmt_new_link_key(hdev, key, persistent);
4359 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4360 * is set. If it's not set simply remove the key from the kernel
4361 * list (we've still notified user space about it but with
4362 * store_hint being 0).
4364 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4365 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4366 list_del_rcu(&key->list);
4367 kfree_rcu(key, rcu);
	/* Persistent keys survive disconnect; others are flushed */
4372 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4374 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4377 hci_dev_unlock(hdev);
/* Handle HCI Read Clock Offset Complete: cache the reported clock
 * offset in the inquiry cache entry for the peer (speeds up paging).
 */
4380 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4382 struct hci_ev_clock_offset *ev = (void *) skb->data;
4383 struct hci_conn *conn;
4385 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4389 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4390 if (conn && !ev->status) {
4391 struct inquiry_entry *ie;
4393 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4395 ie->data.clock_offset = ev->clock_offset;
4396 ie->timestamp = jiffies;
4400 hci_dev_unlock(hdev);
/* Handle HCI Connection Packet Type Changed: record the negotiated
 * packet types on the connection.
 */
4403 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4405 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4406 struct hci_conn *conn;
4408 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4412 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4413 if (conn && !ev->status)
4414 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4416 hci_dev_unlock(hdev);
/* Handle Page Scan Repetition Mode Change: refresh the cached page
 * scan repetition mode in the inquiry cache entry for the peer.
 */
4419 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4421 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4422 struct inquiry_entry *ie;
4424 BT_DBG("%s", hdev->name);
4428 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4430 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4431 ie->timestamp = jiffies;
4434 hci_dev_unlock(hdev);
/* Inquiry Result with RSSI event: two wire formats exist — with and
 * without a trailing pscan_mode byte per response. The format is
 * inferred from the payload length, each response is validated against
 * skb->len, added to the inquiry cache and reported to mgmt.
 *
 * NOTE(review): this extract appears to have lost structural lines
 * (braces, hci_dev_lock/unlock pairing, goto targets, local decls such
 * as the flags variable) — confirm control flow against the full file.
 */
4437 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4438 struct sk_buff *skb)
4440 struct inquiry_data data;
4441 int num_rsp = *((__u8 *) skb->data);
4443 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Results during periodic inquiry are ignored */
4448 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Length heuristic picks the variant that includes pscan_mode */
4453 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4454 struct inquiry_info_with_rssi_and_pscan_mode *info;
4455 info = (void *) (skb->data + 1);
/* Bounds check: guards against a malformed/short event packet */
4457 if (skb->len < num_rsp * sizeof(*info) + 1)
4460 for (; num_rsp; num_rsp--, info++) {
4463 bacpy(&data.bdaddr, &info->bdaddr);
4464 data.pscan_rep_mode = info->pscan_rep_mode;
4465 data.pscan_period_mode = info->pscan_period_mode;
4466 data.pscan_mode = info->pscan_mode;
4467 memcpy(data.dev_class, info->dev_class, 3);
4468 data.clock_offset = info->clock_offset;
4469 data.rssi = info->rssi;
4470 data.ssp_mode = 0x00;
4472 flags = hci_inquiry_cache_update(hdev, &data, false);
4474 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4475 info->dev_class, info->rssi,
4476 flags, NULL, 0, NULL, 0);
/* Variant without the pscan_mode byte */
4479 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4481 if (skb->len < num_rsp * sizeof(*info) + 1)
4484 for (; num_rsp; num_rsp--, info++) {
4487 bacpy(&data.bdaddr, &info->bdaddr);
4488 data.pscan_rep_mode = info->pscan_rep_mode;
4489 data.pscan_period_mode = info->pscan_period_mode;
4490 data.pscan_mode = 0x00;
4491 memcpy(data.dev_class, info->dev_class, 3);
4492 data.clock_offset = info->clock_offset;
4493 data.rssi = info->rssi;
4494 data.ssp_mode = 0x00;
4496 flags = hci_inquiry_cache_update(hdev, &data, false);
4498 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4499 info->dev_class, info->rssi,
4500 flags, NULL, 0, NULL, 0);
4505 hci_dev_unlock(hdev);
/* Read Remote Extended Features Complete event: store the feature page,
 * derive SSP/SC enablement from host-features page 1, and for a
 * connection still in BT_CONFIG either kick off a remote name request
 * or notify mgmt that the device is connected.
 *
 * NOTE(review): structural lines (lock/unlock pairing, !conn guard,
 * goto targets, closing braces) appear dropped in this extract.
 */
4508 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4509 struct sk_buff *skb)
4511 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4512 struct hci_conn *conn;
4514 BT_DBG("%s", hdev->name);
4518 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Store the page only if it fits the per-connection feature table */
4522 if (ev->page < HCI_MAX_PAGES)
4523 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote host's SSP/SC support bits */
4525 if (!ev->status && ev->page == 0x01) {
4526 struct inquiry_entry *ie;
4528 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4530 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4532 if (ev->features[0] & LMP_HOST_SSP) {
4533 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4535 /* It is mandatory by the Bluetooth specification that
4536 * Extended Inquiry Results are only used when Secure
4537 * Simple Pairing is enabled, but some devices violate
4540 * To make these devices work, the internal SSP
4541 * enabled flag needs to be cleared if the remote host
4542 * features do not indicate SSP support */
4543 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4546 if (ev->features[0] & LMP_HOST_SC)
4547 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4550 if (conn->state != BT_CONFIG)
4553 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4554 struct hci_cp_remote_name_req cp;
4555 memset(&cp, 0, sizeof(cp));
4556 bacpy(&cp.bdaddr, &conn->dst);
/* 0x02 here is a fixed pscan_rep_mode choice — vendor deviation; verify */
4557 cp.pscan_rep_mode = 0x02;
4558 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4559 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4560 mgmt_device_connected(hdev, conn, NULL, 0);
/* No outgoing authentication pending: connection setup is done */
4562 if (!hci_outgoing_auth_needed(hdev, conn)) {
4563 conn->state = BT_CONNECTED;
4564 hci_connect_cfm(conn, ev->status);
4565 hci_conn_drop(conn);
4569 hci_dev_unlock(hdev);
/* Synchronous (SCO/eSCO) Connection Complete event: resolve the conn
 * object (falling back from SCO to a pending eSCO object), register a
 * successful connection, retry with a downgraded packet type on a set
 * of known negotiation failures, and notify the driver of the air mode.
 *
 * NOTE(review): lock/unlock pairing, !conn guards, some case labels and
 * closing braces are missing from this extract — verify control flow.
 */
4572 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4573 struct sk_buff *skb)
4575 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4576 struct hci_conn *conn;
4578 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4582 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4584 if (ev->link_type == ESCO_LINK)
4587 /* When the link type in the event indicates SCO connection
4588 * and lookup of the connection object fails, then check
4589 * if an eSCO connection object exists.
4591 * The core limits the synchronous connections to either
4592 * SCO or eSCO. The eSCO connection is preferred and tried
4593 * to be setup first and until successfully established,
4594 * the link type will be hinted as eSCO.
4596 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4601 switch (ev->status) {
4603 /* The synchronous connection complete event should only be
4604 * sent once per new connection. Receiving a successful
4605 * complete event when the connection status is already
4606 * BT_CONNECTED means that the device is misbehaving and sent
4607 * multiple complete event packets for the same new connection.
4609 * Registering the device more than once can corrupt kernel
4610 * memory, hence upon detecting this invalid event, we report
4611 * an error and ignore the packet.
4613 if (conn->state == BT_CONNECTED) {
4614 bt_dev_err(hdev, "Ignoring connect complete event for existing connection");
4618 conn->handle = __le16_to_cpu(ev->handle);
4619 conn->state = BT_CONNECTED;
4620 conn->type = ev->link_type;
4622 hci_debugfs_create_conn(conn);
4623 hci_conn_add_sysfs(conn);
/* Known eSCO negotiation failures: retry with reduced packet types */
4626 case 0x10: /* Connection Accept Timeout */
4627 case 0x0d: /* Connection Rejected due to Limited Resources */
4628 case 0x11: /* Unsupported Feature or Parameter Value */
4629 case 0x1c: /* SCO interval rejected */
4630 case 0x1a: /* Unsupported Remote Feature */
4631 case 0x1e: /* Invalid LMP Parameters */
4632 case 0x1f: /* Unspecified error */
4633 case 0x20: /* Unsupported LMP Parameter value */
4635 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4636 (hdev->esco_type & EDR_ESCO_MASK);
4637 if (hci_setup_sync(conn, conn->link->handle))
4643 conn->state = BT_CLOSED;
4647 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
/* Tell the driver which codec path to enable for this SCO link */
4649 switch (ev->air_mode) {
4652 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
4656 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
4660 hci_connect_cfm(conn, ev->status);
4665 hci_dev_unlock(hdev);
4668 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4672 while (parsed < eir_len) {
4673 u8 field_len = eir[0];
4678 parsed += field_len + 1;
4679 eir += field_len + 1;
/* Extended Inquiry Result event: validate the response count against
 * skb->len, update the inquiry cache (checking EIR data for a complete
 * name when mgmt is active) and report each device with its EIR data.
 *
 * NOTE(review): lock/unlock lines, local declarations (flags, name_known,
 * eir_len) and closing braces appear dropped from this extract.
 */
4685 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4686 struct sk_buff *skb)
4688 struct inquiry_data data;
4689 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4690 int num_rsp = *((__u8 *) skb->data);
4693 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject empty or truncated event packets */
4695 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
4698 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4703 for (; num_rsp; num_rsp--, info++) {
4707 bacpy(&data.bdaddr, &info->bdaddr);
4708 data.pscan_rep_mode = info->pscan_rep_mode;
4709 data.pscan_period_mode = info->pscan_period_mode;
4710 data.pscan_mode = 0x00;
4711 memcpy(data.dev_class, info->dev_class, 3);
4712 data.clock_offset = info->clock_offset;
4713 data.rssi = info->rssi;
/* EIR is only sent by SSP-capable devices */
4714 data.ssp_mode = 0x01;
4716 if (hci_dev_test_flag(hdev, HCI_MGMT))
4717 name_known = eir_get_data(info->data,
4719 EIR_NAME_COMPLETE, NULL);
4723 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4725 eir_len = eir_get_length(info->data, sizeof(info->data));
4727 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4728 info->dev_class, info->rssi,
4729 flags, info->data, eir_len, NULL, 0);
4732 hci_dev_unlock(hdev);
4735 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4736 struct sk_buff *skb)
4738 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4739 struct hci_conn *conn;
4741 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4742 __le16_to_cpu(ev->handle));
4746 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4750 /* For BR/EDR the necessary steps are taken through the
4751 * auth_complete event.
4753 if (conn->type != LE_LINK)
4757 conn->sec_level = conn->pending_sec_level;
4759 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4761 if (ev->status && conn->state == BT_CONNECTED) {
4762 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4763 hci_conn_drop(conn);
4767 if (conn->state == BT_CONFIG) {
4769 conn->state = BT_CONNECTED;
4771 hci_connect_cfm(conn, ev->status);
4772 hci_conn_drop(conn);
4774 hci_auth_cfm(conn, ev->status);
4776 hci_conn_hold(conn);
4777 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4778 hci_conn_drop(conn);
4782 hci_dev_unlock(hdev);
/* Derive the authentication requirement to send in an IO Capability
 * Reply from the remote's requirement and both sides' IO capabilities.
 * MITM protection (bit 0) is only requested when both sides have IO.
 *
 * NOTE(review): this variant keys off HCI_AT_GENERAL_BONDING_MITM where
 * mainline uses dedicated bonding — vendor deviation; guards and braces
 * appear dropped from this extract.
 */
4785 static u8 hci_get_auth_req(struct hci_conn *conn)
4788 if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
4789 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4790 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4791 return HCI_AT_GENERAL_BONDING_MITM;
4795 /* If remote requests no-bonding follow that lead */
4796 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4797 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
/* Preserve our own MITM bit while following the remote's bonding choice */
4798 return conn->remote_auth | (conn->auth_type & 0x01);
4800 /* If both remote and local have enough IO capabilities, require
4803 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4804 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4805 return conn->remote_auth | 0x01;
4807 /* No MITM protection possible so ignore remote requirement */
4808 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4811 static u8 bredr_oob_data_present(struct hci_conn *conn)
4813 struct hci_dev *hdev = conn->hdev;
4814 struct oob_data *data;
4816 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4820 if (bredr_sc_enabled(hdev)) {
4821 /* When Secure Connections is enabled, then just
4822 * return the present value stored with the OOB
4823 * data. The stored value contains the right present
4824 * information. However it can only be trusted when
4825 * not in Secure Connection Only mode.
4827 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4828 return data->present;
4830 /* When Secure Connections Only mode is enabled, then
4831 * the P-256 values are required. If they are not
4832 * available, then do not declare that OOB data is
4835 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4836 !memcmp(data->hash256, ZERO_KEY, 16))
4842 /* When Secure Connections is not enabled or actually
4843 * not supported by the hardware, then check that if
4844 * P-192 data values are present.
4846 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4847 !memcmp(data->hash192, ZERO_KEY, 16))
/* IO Capability Request event: reply with our IO capability and derived
 * authentication requirement when pairing is allowed, otherwise send a
 * negative reply with "pairing not allowed".
 *
 * NOTE(review): lock/unlock pairing, !conn guard and goto targets are
 * missing from this extract — verify against the full file.
 */
4853 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4855 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4856 struct hci_conn *conn;
4858 BT_DBG("%s", hdev->name);
4862 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive for the duration of the pairing dialog */
4866 hci_conn_hold(conn);
4868 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4871 /* Allow pairing if we're pairable, the initiators of the
4872 * pairing or if the remote is not requesting bonding.
4874 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4875 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4876 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4877 struct hci_cp_io_capability_reply cp;
4879 bacpy(&cp.bdaddr, &ev->bdaddr);
4880 /* Change the IO capability from KeyboardDisplay
4881 * to DisplayYesNo as it is not supported by BT spec. */
4882 cp.capability = (conn->io_capability == 0x04) ?
4883 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4885 /* If we are initiators, there is no remote information yet */
4886 if (conn->remote_auth == 0xff) {
4887 /* Request MITM protection if our IO caps allow it
4888 * except for the no-bonding case.
4890 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4891 conn->auth_type != HCI_AT_NO_BONDING)
4892 conn->auth_type |= 0x01;
4894 conn->auth_type = hci_get_auth_req(conn);
4897 /* If we're not bondable, force one of the non-bondable
4898 * authentication requirement values.
4900 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4901 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4903 cp.authentication = conn->auth_type;
4904 cp.oob_data = bredr_oob_data_present(conn);
4906 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4909 struct hci_cp_io_capability_neg_reply cp;
4911 bacpy(&cp.bdaddr, &ev->bdaddr);
4912 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4914 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4919 hci_dev_unlock(hdev);
4922 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4924 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4925 struct hci_conn *conn;
4927 BT_DBG("%s", hdev->name);
4931 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4935 conn->remote_cap = ev->capability;
4936 conn->remote_auth = ev->authentication;
4939 hci_dev_unlock(hdev);
/* User Confirmation Request event (SSP numeric comparison): reject when
 * we require MITM that the remote cannot provide, auto-accept when no
 * side requires MITM (possibly after an auto-accept delay), and defer
 * to user space via mgmt in every other case.
 *
 * NOTE(review): lock/unlock pairing, !conn guard, goto labels and
 * closing braces are missing from this extract.
 */
4942 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4943 struct sk_buff *skb)
4945 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4946 int loc_mitm, rem_mitm, confirm_hint = 0;
4947 struct hci_conn *conn;
4949 BT_DBG("%s", hdev->name);
4953 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4956 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag */
4960 loc_mitm = (conn->auth_type & 0x01);
4961 rem_mitm = (conn->remote_auth & 0x01);
4963 /* If we require MITM but the remote device can't provide that
4964 * (it has NoInputNoOutput) then reject the confirmation
4965 * request. We check the security level here since it doesn't
4966 * necessarily match conn->auth_type.
4968 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4969 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4970 BT_DBG("Rejecting request: remote device can't provide MITM");
4971 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4972 sizeof(ev->bdaddr), &ev->bdaddr);
4976 /* If no side requires MITM protection; auto-accept */
4977 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4978 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4980 /* If we're not the initiators request authorization to
4981 * proceed from user space (mgmt_user_confirm with
4982 * confirm_hint set to 1). The exception is if neither
4983 * side had MITM or if the local IO capability is
4984 * NoInputNoOutput, in which case we do auto-accept
4986 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4987 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4988 (loc_mitm || rem_mitm)) {
4989 BT_DBG("Confirming auto-accept as acceptor");
4994 /* If there already exists link key in local host, leave the
4995 * decision to user space since the remote device could be
4996 * legitimate or malicious.
4998 if (hci_find_link_key(hdev, &ev->bdaddr)) {
4999 bt_dev_dbg(hdev, "Local host already has link key");
5004 BT_DBG("Auto-accept of user confirmation with %ums delay",
5005 hdev->auto_accept_delay);
5007 if (hdev->auto_accept_delay > 0) {
5008 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5009 queue_delayed_work(conn->hdev->workqueue,
5010 &conn->auto_accept_work, delay);
5014 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5015 sizeof(ev->bdaddr), &ev->bdaddr);
/* Fall through to user space with the computed confirm_hint */
5020 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5021 le32_to_cpu(ev->passkey), confirm_hint);
5024 hci_dev_unlock(hdev);
5027 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
5028 struct sk_buff *skb)
5030 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
5032 BT_DBG("%s", hdev->name);
5034 if (hci_dev_test_flag(hdev, HCI_MGMT))
5035 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5038 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
5039 struct sk_buff *skb)
5041 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
5042 struct hci_conn *conn;
5044 BT_DBG("%s", hdev->name);
5046 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5050 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5051 conn->passkey_entered = 0;
5053 if (hci_dev_test_flag(hdev, HCI_MGMT))
5054 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5055 conn->dst_type, conn->passkey_notify,
5056 conn->passkey_entered);
5059 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
5061 struct hci_ev_keypress_notify *ev = (void *) skb->data;
5062 struct hci_conn *conn;
5064 BT_DBG("%s", hdev->name);
5066 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5071 case HCI_KEYPRESS_STARTED:
5072 conn->passkey_entered = 0;
5075 case HCI_KEYPRESS_ENTERED:
5076 conn->passkey_entered++;
5079 case HCI_KEYPRESS_ERASED:
5080 conn->passkey_entered--;
5083 case HCI_KEYPRESS_CLEARED:
5084 conn->passkey_entered = 0;
5087 case HCI_KEYPRESS_COMPLETED:
5091 if (hci_dev_test_flag(hdev, HCI_MGMT))
5092 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5093 conn->dst_type, conn->passkey_notify,
5094 conn->passkey_entered);
5097 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
5098 struct sk_buff *skb)
5100 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
5101 struct hci_conn *conn;
5103 BT_DBG("%s", hdev->name);
5107 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5111 /* Reset the authentication requirement to unknown */
5112 conn->remote_auth = 0xff;
5114 /* To avoid duplicate auth_failed events to user space we check
5115 * the HCI_CONN_AUTH_PEND flag which will be set if we
5116 * initiated the authentication. A traditional auth_complete
5117 * event gets always produced as initiator and is also mapped to
5118 * the mgmt_auth_failed event */
5119 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5120 mgmt_auth_failed(conn, ev->status);
5122 hci_conn_drop(conn);
5125 hci_dev_unlock(hdev);
5128 static void hci_remote_host_features_evt(struct hci_dev *hdev,
5129 struct sk_buff *skb)
5131 struct hci_ev_remote_host_features *ev = (void *) skb->data;
5132 struct inquiry_entry *ie;
5133 struct hci_conn *conn;
5135 BT_DBG("%s", hdev->name);
5139 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5141 memcpy(conn->features[1], ev->features, 8);
5143 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5145 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5147 hci_dev_unlock(hdev);
/* Remote OOB Data Request event: reply with the locally stored OOB
 * hash/randomizer values for the peer (extended P-192+P-256 form when
 * Secure Connections is enabled), or a negative reply when no OOB data
 * is stored.
 *
 * NOTE(review): lock/unlock pairing, the !data branch condition and
 * goto/closing-brace lines are missing from this extract.
 */
5150 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
5151 struct sk_buff *skb)
5153 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
5154 struct oob_data *data;
5156 BT_DBG("%s", hdev->name);
5160 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5163 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data for this peer: negative reply */
5165 struct hci_cp_remote_oob_data_neg_reply cp;
5167 bacpy(&cp.bdaddr, &ev->bdaddr);
5168 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5173 if (bredr_sc_enabled(hdev)) {
5174 struct hci_cp_remote_oob_ext_data_reply cp;
5176 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the P-192 values must not be exposed */
5177 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5178 memset(cp.hash192, 0, sizeof(cp.hash192));
5179 memset(cp.rand192, 0, sizeof(cp.rand192));
5181 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5182 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5184 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5185 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5187 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5190 struct hci_cp_remote_oob_data_reply cp;
5192 bacpy(&cp.bdaddr, &ev->bdaddr);
5193 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5194 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5196 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5201 hci_dev_unlock(hdev);
5204 #if IS_ENABLED(CONFIG_BT_HS)
5205 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5207 struct hci_ev_channel_selected *ev = (void *)skb->data;
5208 struct hci_conn *hcon;
5210 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5212 skb_pull(skb, sizeof(*ev));
5214 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5218 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: on success, bring the AMP hcon into
 * BT_CONNECTED state, copy the peer address from the controlling BR/EDR
 * connection, register it with debugfs/sysfs and confirm to the AMP
 * manager.
 *
 * NOTE(review): the status/!hcon/!amp_mgr guard lines between the
 * lookup and the bredr_hcon assignment are missing from this extract.
 */
5221 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
5222 struct sk_buff *skb)
5224 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
5225 struct hci_conn *hcon, *bredr_hcon;
5227 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
5232 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* The AMP link rides on top of the BR/EDR connection of its manager */
5244 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5246 hcon->state = BT_CONNECTED;
5247 bacpy(&hcon->dst, &bredr_hcon->dst);
5249 hci_conn_hold(hcon);
5250 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5251 hci_conn_drop(hcon);
5253 hci_debugfs_create_conn(hcon);
5254 hci_conn_add_sysfs(hcon);
5256 amp_physical_cfm(bredr_hcon, hcon);
5259 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create an hci_chan for the new
 * logical link and, when a BR/EDR L2CAP channel is waiting on the AMP
 * manager, confirm the logical link to L2CAP with the block MTU.
 *
 * NOTE(review): guard lines (!hcon/!hchan returns) and possibly an
 * hchan->amp assignment are missing from this extract — the disconnect
 * handler below tests hchan->amp, so confirm it is set here.
 */
5262 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5264 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
5265 struct hci_conn *hcon;
5266 struct hci_chan *hchan;
5267 struct amp_mgr *mgr;
5269 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5270 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
5273 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5277 /* Create AMP hchan */
5278 hchan = hci_chan_create(hcon);
5282 hchan->handle = le16_to_cpu(ev->handle);
5285 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5287 mgr = hcon->amp_mgr;
5288 if (mgr && mgr->bredr_chan) {
5289 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5291 l2cap_chan_lock(bredr_chan);
/* AMP data path uses the controller's block MTU */
5293 bredr_chan->conn->mtu = hdev->block_mtu;
5294 l2cap_logical_cfm(bredr_chan, hchan, 0);
5295 hci_conn_hold(hcon);
5297 l2cap_chan_unlock(bredr_chan);
5301 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
5302 struct sk_buff *skb)
5304 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
5305 struct hci_chan *hchan;
5307 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
5308 le16_to_cpu(ev->handle), ev->status);
5315 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5316 if (!hchan || !hchan->amp)
5319 amp_destroy_logical_link(hchan, ev->reason);
5322 hci_dev_unlock(hdev);
5325 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
5326 struct sk_buff *skb)
5328 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
5329 struct hci_conn *hcon;
5331 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5338 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5340 hcon->state = BT_CLOSED;
5344 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address information on an LE
 * connection, preferring a controller-provided local RPA over
 * hdev->rpa, and for incoming connections seed the default connection
 * interval bounds used for later parameter validation.
 *
 * NOTE(review): the outgoing/incoming branch skeleton (likely an
 * `if (conn->out) { ... } else { ... }` split) is missing from this
 * extract — the two halves below belong to those branches.
 */
5348 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5349 u8 bdaddr_type, bdaddr_t *local_rpa)
/* Outgoing case: the peer is the responder */
5352 conn->dst_type = bdaddr_type;
5353 conn->resp_addr_type = bdaddr_type;
5354 bacpy(&conn->resp_addr, bdaddr);
5356 /* Check if the controller has set a Local RPA then it must be
5357 * used instead or hdev->rpa.
5359 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5360 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5361 bacpy(&conn->init_addr, local_rpa);
5362 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5363 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5364 bacpy(&conn->init_addr, &conn->hdev->rpa);
5366 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5367 &conn->init_addr_type);
/* Incoming case: we are the responder, the peer is the initiator */
5370 conn->resp_addr_type = conn->hdev->adv_addr_type;
5371 /* Check if the controller has set a Local RPA then it must be
5372 * used instead or hdev->rpa.
5374 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5375 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5376 bacpy(&conn->resp_addr, local_rpa);
5377 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5378 /* In case of ext adv, resp_addr will be updated in
5379 * Adv Terminated event.
5381 if (!ext_adv_capable(conn->hdev))
5382 bacpy(&conn->resp_addr,
5383 &conn->hdev->random_addr);
5385 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5388 conn->init_addr_type = bdaddr_type;
5389 bacpy(&conn->init_addr, bdaddr);
5391 /* For incoming connections, set the default minimum
5392 * and maximum connection interval. They will be used
5393 * to check if the parameters are in range and if not
5394 * trigger the connection update procedure.
5396 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5397 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE (Enhanced) Connection Complete: locate or
 * create the hci_conn, resolve addresses (RPA/IRK, LL-privacy types),
 * handle connection failure, enforce the reject list, register the
 * connection, optionally read remote features, and clean up any pending
 * connection parameters.
 *
 * NOTE(review): multiple structural lines are missing from this extract
 * (lock at entry, role checks such as `if (conn->role == HCI_ROLE_MASTER)`,
 * status-handling braces, goto targets) — verify against the full file.
 */
5401 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5402 bdaddr_t *bdaddr, u8 bdaddr_type,
5403 bdaddr_t *local_rpa, u8 role, u16 handle,
5404 u16 interval, u16 latency,
5405 u16 supervision_timeout)
5407 struct hci_conn_params *params;
5408 struct hci_conn *conn;
5409 struct smp_irk *irk;
5414 /* All controllers implicitly stop advertising in the event of a
5415 * connection, so ensure that the state bit is cleared.
5417 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5419 conn = hci_lookup_le_connect(hdev);
/* No pending connect object: allocate one for this new connection */
5421 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5423 bt_dev_err(hdev, "no memory for new connection");
5427 conn->dst_type = bdaddr_type;
5429 /* If we didn't have a hci_conn object previously
5430 * but we're in central role this must be something
5431 * initiated using an accept list. Since accept list based
5432 * connections are not "first class citizens" we don't
5433 * have full tracking of them. Therefore, we go ahead
5434 * with a "best effort" approach of determining the
5435 * initiator address based on the HCI_PRIVACY flag.
5438 conn->resp_addr_type = bdaddr_type;
5439 bacpy(&conn->resp_addr, bdaddr);
5440 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5441 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5442 bacpy(&conn->init_addr, &hdev->rpa);
5444 hci_copy_identity_address(hdev,
5446 &conn->init_addr_type);
5451 /* LE auto connect */
5452 bacpy(&conn->dst, bdaddr);
5454 cancel_delayed_work(&conn->le_conn_timeout);
5457 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5459 /* Lookup the identity address from the stored connection
5460 * address and address type.
5462 * When establishing connections to an identity address, the
5463 * connection procedure will store the resolvable random
5464 * address first. Now if it can be converted back into the
5465 * identity address, start using the identity address from
5468 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5470 bacpy(&conn->dst, &irk->bdaddr);
5471 conn->dst_type = irk->addr_type;
5474 /* When using controller based address resolution, then the new
5475 * address types 0x02 and 0x03 are used. These types need to be
5476 * converted back into either public address or random address type
5478 if (use_ll_privacy(hdev) &&
5479 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5480 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
5481 switch (conn->dst_type) {
5482 case ADDR_LE_DEV_PUBLIC_RESOLVED:
5483 conn->dst_type = ADDR_LE_DEV_PUBLIC;
5485 case ADDR_LE_DEV_RANDOM_RESOLVED:
5486 conn->dst_type = ADDR_LE_DEV_RANDOM;
/* Non-zero status: record the failure and bail out */
5492 hci_le_conn_failed(conn, status);
5496 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5497 addr_type = BDADDR_LE_PUBLIC;
5499 addr_type = BDADDR_LE_RANDOM;
5501 /* Drop the connection if the device is blocked */
5502 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5503 hci_conn_drop(conn);
5507 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5508 mgmt_device_connected(hdev, conn, NULL, 0);
5510 conn->sec_level = BT_SECURITY_LOW;
5511 conn->handle = handle;
5512 conn->state = BT_CONFIG;
5514 /* Store current advertising instance as connection advertising instance
5515 * when sotfware rotation is in use so it can be re-enabled when
5518 if (!ext_adv_capable(hdev))
5519 conn->adv_instance = hdev->cur_adv_instance;
5521 conn->le_conn_interval = interval;
5522 conn->le_conn_latency = latency;
5523 conn->le_supv_timeout = supervision_timeout;
5525 hci_debugfs_create_conn(conn);
5526 hci_conn_add_sysfs(conn);
5528 /* The remote features procedure is defined for central
5529 * role only. So only in case of an initiated connection
5530 * request the remote features.
5532 * If the local controller supports peripheral-initiated features
5533 * exchange, then requesting the remote features in peripheral
5534 * role is possible. Otherwise just transition into the
5535 * connected state without requesting the remote features.
5538 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5539 struct hci_cp_le_read_remote_features cp;
5541 cp.handle = __cpu_to_le16(conn->handle);
5543 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5546 hci_conn_hold(conn);
/* No feature exchange: go straight to connected */
5548 conn->state = BT_CONNECTED;
5549 hci_connect_cfm(conn, status);
/* Connection established: drop any pending auto-connect params */
5552 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5555 list_del_init(&params->action);
5557 hci_conn_drop(params->conn);
5558 hci_conn_put(params->conn);
5559 params->conn = NULL;
5564 hci_update_background_scan(hdev);
5565 hci_dev_unlock(hdev);
5568 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5570 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5572 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5574 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5575 NULL, ev->role, le16_to_cpu(ev->handle),
5576 le16_to_cpu(ev->interval),
5577 le16_to_cpu(ev->latency),
5578 le16_to_cpu(ev->supervision_timeout));
5581 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5582 struct sk_buff *skb)
5584 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5586 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5588 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5589 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5590 le16_to_cpu(ev->interval),
5591 le16_to_cpu(ev->latency),
5592 le16_to_cpu(ev->supervision_timeout));
5594 if (use_ll_privacy(hdev) &&
5595 hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) &&
5596 hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
5597 hci_req_disable_address_resolution(hdev);
/* LE Advertising Set Terminated event: on error, remove the advertising
 * instance; on a connection-caused termination, remember the instance
 * on the connection and fix up the responder address for random-address
 * advertising sets.
 *
 * NOTE(review): status-branch braces, !conn guard and goto targets are
 * missing from this extract — verify control flow.
 */
5600 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5602 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5603 struct hci_conn *conn;
5604 struct adv_info *adv;
5606 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5608 adv = hci_find_adv_instance(hdev, ev->handle);
5614 /* Remove advertising as it has been terminated */
5615 hci_remove_adv_instance(hdev, ev->handle);
5616 mgmt_advertising_removed(NULL, hdev, ev->handle);
5622 adv->enabled = false;
5624 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5626 /* Store handle in the connection so the correct advertising
5627 * instance can be re-enabled when disconnected.
5629 conn->adv_instance = ev->handle;
/* Only fix up the address when advertising used a random address
 * and no responder address has been set yet. */
5631 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
5632 bacmp(&conn->resp_addr, BDADDR_ANY))
5636 bacpy(&conn->resp_addr, &hdev->random_addr);
/* Instance-specific random address takes precedence when present */
5641 bacpy(&conn->resp_addr, &adv->random_addr);
/* LE Connection Update Complete event: store the new connection
 * parameters on success and report the outcome to mgmt.
 *
 * NOTE(review): mgmt_le_conn_update_failed/mgmt_le_conn_updated are not
 * mainline mgmt calls — vendor extension; status/!conn guards and
 * return paths are missing from this extract.
 */
5645 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5646 struct sk_buff *skb)
5648 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5649 struct hci_conn *conn;
5651 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5658 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Failure path: report the failed update to user space */
5662 hci_dev_unlock(hdev);
5663 mgmt_le_conn_update_failed(hdev, &conn->dst,
5664 conn->type, conn->dst_type, ev->status);
/* Success path: record the negotiated parameters */
5668 conn->le_conn_interval = le16_to_cpu(ev->interval);
5669 conn->le_conn_latency = le16_to_cpu(ev->latency);
5670 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5673 hci_dev_unlock(hdev);
5676 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
5677 conn->dst_type, conn->le_conn_interval,
5678 conn->le_conn_latency, conn->le_supv_timeout);
5682 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt: the report must be connectable, the device must
 * not be rejected, peripheral-role limits must allow it, and a matching
 * pend_le_conns entry with a suitable auto_connect policy must exist.
 * Returns the connection object on success, NULL otherwise.
 *
 * NOTE(review): several return/break statements and closing braces are
 * missing from this extract — verify the switch fallthrough structure.
 */
5683 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5685 u8 addr_type, u8 adv_type,
5686 bdaddr_t *direct_rpa)
5688 struct hci_conn *conn;
5689 struct hci_conn_params *params;
5691 /* If the event is not connectable don't proceed further */
5692 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5695 /* Ignore if the device is blocked */
5696 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type))
5699 /* Most controller will fail if we try to create new connections
5700 * while we have an existing one in peripheral role.
5702 if (hdev->conn_hash.le_num_peripheral > 0 &&
5703 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
5704 !(hdev->le_states[3] & 0x10)))
5707 /* If we're not connectable only connect devices that we have in
5708 * our pend_le_conns list.
5710 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5715 if (!params->explicit_connect) {
5716 switch (params->auto_connect) {
5717 case HCI_AUTO_CONN_DIRECT:
5718 /* Only devices advertising with ADV_DIRECT_IND are
5719 * triggering a connection attempt. This is allowing
5720 * incoming connections from peripheral devices.
5722 if (adv_type != LE_ADV_DIRECT_IND)
5725 case HCI_AUTO_CONN_ALWAYS:
5726 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5727 * are triggering a connection attempt. This means
5728 * that incoming connections from peripheral device are
5729 * accepted and also outgoing connections to peripheral
5730 * devices are established when found.
5738 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5739 hdev->def_le_autoconnect_timeout, HCI_ROLE_MASTER,
5741 if (!IS_ERR(conn)) {
5742 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5743 * by higher layer that tried to connect, if no then
5744 * store the pointer since we don't really have any
5745 * other owner of the object besides the params that
5746 * triggered it. This way we can abort the connection if
5747 * the parameters get removed and keep the reference
5748 * count consistent once the connection is established.
5751 if (!params->explicit_connect)
5752 params->conn = hci_conn_get(conn);
5757 switch (PTR_ERR(conn)) {
5759 /* If hci_connect() returns -EBUSY it means there is already
5760 * an LE connection attempt going on. Since controllers don't
5761 * support more than one connection attempt at the time, we
5762 * don't consider this an error case.
5766 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core handler for a single advertising report (legacy, extended or
 * directed).  Validates the report, resolves RPAs via stored IRKs, kicks
 * off pending auto-connections, and forwards device-found events to mgmt,
 * merging ADV_IND/ADV_SCAN_IND reports with their later SCAN_RSP.
 *
 * NOTE(review): heavily elided listing — the leading switch on 'type',
 * several early returns, hci_dev lock/unlock and many closing braces are
 * not visible; comments below describe only what the visible lines show.
 */
5773 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5774 u8 bdaddr_type, bdaddr_t *direct_addr,
5775 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
5779 struct discovery_state *d = &hdev->discovery;
5781 struct smp_irk *irk;
5782 struct hci_conn *conn;
/* Only the advertising PDU types known to us are accepted; anything
 * else is logged (rate-limited, as this is controller-driven) and
 * dropped by the elided default path.
 */
5791 case LE_ADV_DIRECT_IND:
5792 case LE_ADV_SCAN_IND:
5793 case LE_ADV_NONCONN_IND:
5794 case LE_ADV_SCAN_RSP:
5797 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5798 "type: 0x%02x", type);
/* Legacy advertising payload is capped at 31 bytes by the spec. */
5802 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
5803 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes")
5807 /* Find the end of the data in case the report contains padded zero
5808 * bytes at the end causing an invalid length value.
5810 * When data is NULL, len is 0 so there is no need for extra ptr
5811 * check as 'ptr < data + 0' is already false in such case.
5813 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5814 if (ptr + 1 + *ptr > data + len)
5818 /* Adjust for actual length. This handles the case when remote
5819 * device is advertising with incorrect data length.
5823 /* If the direct address is present, then this report is from
5824 * a LE Direct Advertising Report event. In that case it is
5825 * important to see if the address is matching the local
5826 * controller address.
5829 /* Only resolvable random addresses are valid for these
5830 * kind of reports and others can be ignored.
5832 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5835 /* If the controller is not using resolvable random
5836 * addresses, then this report can be ignored.
5838 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5841 /* If the local IRK of the controller does not match
5842 * with the resolvable random address provided, then
5843 * this report can be ignored.
5845 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5849 /* Check if we need to convert to identity address */
5850 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5852 bdaddr = &irk->bdaddr;
5853 bdaddr_type = irk->addr_type;
5856 /* Check if we have been requested to connect to this device.
5858 * direct_addr is set only for directed advertising reports (it is NULL
5859 * for advertising reports) and is already verified to be RPA above.
5861 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5863 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
5864 /* Store report for later inclusion by
5865 * mgmt_device_connected
5867 memcpy(conn->le_adv_data, data, len);
5868 conn->le_adv_data_len = len;
5871 /* Passive scanning shouldn't trigger any device found events,
5872 * except for devices marked as CONN_REPORT for which we do send
5873 * device found events, or advertisement monitoring requested.
5875 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5876 if (type == LE_ADV_DIRECT_IND)
5880 /* Handle all adv packet in platform */
5881 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5882 bdaddr, bdaddr_type) &&
5883 idr_is_empty(&hdev->adv_monitors_idr))
/* Scannable / non-connectable PDUs are reported as not connectable. */
5887 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5888 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5892 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5893 rssi, flags, data, len, NULL, 0, type);
5895 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5896 rssi, flags, data, len, NULL, 0);
5901 /* When receiving non-connectable or scannable undirected
5902 * advertising reports, this means that the remote device is
5903 * not connectable and then clearly indicate this in the
5904 * device found event.
5906 * When receiving a scan response, then there is no way to
5907 * know if the remote device is connectable or not. However
5908 * since scan responses are merged with a previously seen
5909 * advertising report, the flags field from that report
5912 * In the really unlikely case that a controller get confused
5913 * and just sends a scan response event, then it is marked as
5914 * not connectable as well.
5916 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5917 type == LE_ADV_SCAN_RSP)
5918 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5923 /* Disable adv ind and scan rsp merging */
5924 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5925 rssi, flags, data, len, NULL, 0, type);
5927 /* If there's nothing pending either store the data from this
5928 * event or send an immediate device found event if the data
5929 * should not be stored for later.
5931 if (!ext_adv && !has_pending_adv_report(hdev)) {
5932 /* If the report will trigger a SCAN_REQ store it for
5935 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5936 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5937 rssi, flags, data, len);
5941 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5942 rssi, flags, data, len, NULL, 0);
5946 /* Check if the pending report is for the same device as the new one */
5947 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5948 bdaddr_type == d->last_adv_addr_type);
5950 /* If the pending data doesn't match this report or this isn't a
5951 * scan response (e.g. we got a duplicate ADV_IND) then force
5952 * sending of the pending data.
5954 if (type != LE_ADV_SCAN_RSP || !match) {
5955 /* Send out whatever is in the cache, but skip duplicates */
5957 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5958 d->last_adv_addr_type, NULL,
5959 d->last_adv_rssi, d->last_adv_flags,
5961 d->last_adv_data_len, NULL, 0);
5963 /* If the new report will trigger a SCAN_REQ store it for
5966 if (!ext_adv && (type == LE_ADV_IND ||
5967 type == LE_ADV_SCAN_IND)) {
5968 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5969 rssi, flags, data, len);
5973 /* The advertising reports cannot be merged, so clear
5974 * the pending report and send out a device found event.
5976 clear_pending_adv_report(hdev);
5977 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5978 rssi, flags, data, len, NULL, 0);
5982 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5983 * the new event is a SCAN_RSP. We can therefore proceed with
5984 * sending a merged device found event.
5986 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5987 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5988 d->last_adv_data, d->last_adv_data_len, data, len);
5989 clear_pending_adv_report(hdev);
5993 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5995 u8 num_reports = skb->data[0];
5996 void *ptr = &skb->data[1];
6000 while (num_reports--) {
6001 struct hci_ev_le_advertising_info *ev = ptr;
6004 if (ev->length <= HCI_MAX_AD_LENGTH) {
6005 rssi = ev->data[ev->length];
6006 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6007 ev->bdaddr_type, NULL, 0, rssi,
6008 ev->data, ev->length, false);
6010 bt_dev_err(hdev, "Dropping invalid advertising data");
6013 ptr += sizeof(*ev) + ev->length + 1;
6016 hci_dev_unlock(hdev);
/* Map an extended advertising report event type bit-field onto the legacy
 * PDU types understood by process_adv_report().  Returns LE_ADV_INVALID
 * (after a rate-limited log) for combinations we cannot classify.
 *
 * NOTE(review): elided listing — the 'switch (evt_type)' line, the
 * individual 'return LE_ADV_IND;' style statements and closing braces are
 * not visible in this extract.
 */
6019 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
/* Legacy PDUs carried over extended advertising map 1:1. */
6021 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6023 case LE_LEGACY_ADV_IND:
6025 case LE_LEGACY_ADV_DIRECT_IND:
6026 return LE_ADV_DIRECT_IND;
6027 case LE_LEGACY_ADV_SCAN_IND:
6028 return LE_ADV_SCAN_IND;
6029 case LE_LEGACY_NONCONN_IND:
6030 return LE_ADV_NONCONN_IND;
6031 case LE_LEGACY_SCAN_RSP_ADV:
6032 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6033 return LE_ADV_SCAN_RSP;
/* Genuinely extended PDUs: derive the closest legacy equivalent from
 * the individual property bits, most specific first.
 */
6039 if (evt_type & LE_EXT_ADV_CONN_IND) {
6040 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6041 return LE_ADV_DIRECT_IND;
6046 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6047 return LE_ADV_SCAN_RSP;
6049 if (evt_type & LE_EXT_ADV_SCAN_IND)
6050 return LE_ADV_SCAN_IND;
6052 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6053 evt_type & LE_EXT_ADV_DIRECT_IND)
6054 return LE_ADV_NONCONN_IND;
6057 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6060 return LE_ADV_INVALID;
/* Handle the HCI LE Extended Advertising Report meta event: translate each
 * record's extended event type to a legacy PDU type and feed it through the
 * common process_adv_report() path.
 *
 * NOTE(review): no validation of ev->length against skb->len is visible in
 * this extract before the record walk dereferences the payload — confirm
 * whether bounds checks exist in the elided lines, and add them if not
 * (the data originates from the controller and is untrusted).
 */
6063 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
6065 u8 num_reports = skb->data[0];
6066 void *ptr = &skb->data[1];
6070 while (num_reports--) {
6071 struct hci_ev_le_ext_adv_report *ev = ptr;
6075 evt_type = __le16_to_cpu(ev->evt_type);
6076 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6077 if (legacy_evt_type != LE_ADV_INVALID) {
/* The last argument flags a truly extended (non-legacy) PDU so
 * process_adv_report() can skip the 31-byte legacy length cap.
 */
6078 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
6079 ev->bdaddr_type, NULL, 0, ev->rssi,
6080 ev->data, ev->length,
6081 !(evt_type & LE_EXT_ADV_LEGACY_PDU));
6084 ptr += sizeof(*ev) + ev->length;
6087 hci_dev_unlock(hdev);
/* Handle the LE Read Remote Features Complete meta event: cache the remote
 * LE feature bits on the connection and, if the connection was waiting in
 * BT_CONFIG for this exchange, move it to BT_CONNECTED and notify callers.
 */
6090 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
6091 struct sk_buff *skb)
6093 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
6094 struct hci_conn *conn;
6096 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6100 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache the remote LE feature page (8 bytes) on the connection. */
6103 memcpy(conn->features[0], ev->features, 8);
6105 if (conn->state == BT_CONFIG) {
6108 /* If the local controller supports peripheral-initiated
6109 * features exchange, but the remote controller does
6110 * not, then it is possible that the error code 0x1a
6111 * for unsupported remote feature gets returned.
6113 * In this specific case, allow the connection to
6114 * transition into connected state and mark it as
6117 if (!conn->out && ev->status == 0x1a &&
6118 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
/* Otherwise propagate the event status as-is. */
6121 status = ev->status;
6123 conn->state = BT_CONNECTED;
6124 hci_connect_cfm(conn, status);
/* Drop the reference taken when the feature read was issued. */
6125 hci_conn_drop(conn);
6129 hci_dev_unlock(hdev);
/* Handle the LE Long Term Key Request meta event: look up a stored LTK for
 * the peer, verify EDiv/Rand as appropriate for the key type, and answer
 * the controller with either an LTK Reply or a Negative Reply.
 *
 * NOTE(review): the elided lines include the !conn / !ltk guards that jump
 * to the negative-reply path at the bottom.
 */
6132 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
6134 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
6135 struct hci_cp_le_ltk_reply cp;
6136 struct hci_cp_le_ltk_neg_reply neg;
6137 struct hci_conn *conn;
6138 struct smp_ltk *ltk;
6140 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
6144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6148 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6152 if (smp_ltk_is_sc(ltk)) {
6153 /* With SC both EDiv and Rand are set to zero */
6154 if (ev->ediv || ev->rand)
6157 /* For non-SC keys check that EDiv and Rand match */
6158 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Build the reply: copy the key, zero-padding beyond the negotiated
 * encryption key size.
 */
6162 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6163 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6164 cp.handle = cpu_to_le16(conn->handle);
6166 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6168 conn->enc_key_size = ltk->enc_size;
6170 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6172 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6173 * temporary key used to encrypt a connection following
6174 * pairing. It is used during the Encrypted Session Setup to
6175 * distribute the keys. Later, security can be re-established
6176 * using a distributed LTK.
6178 if (ltk->type == SMP_STK) {
6179 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
/* STKs are single-use: drop them once consumed (RCU-safe). */
6180 list_del_rcu(&ltk->list);
6181 kfree_rcu(ltk, rcu);
6183 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6186 hci_dev_unlock(hdev);
/* Negative-reply path: no usable key for this request. */
6191 neg.handle = ev->handle;
6192 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6193 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for the
 * given connection handle.
 *
 * NOTE(review): the elided lines include the 'u8 reason' parameter and the
 * 'cp.reason = reason;' assignment.
 */
6196 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6199 struct hci_cp_le_conn_param_req_neg_reply cp;
6201 cp.handle = cpu_to_le16(handle);
6204 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the LE Remote Connection Parameter Request meta event: validate
 * the peer's proposed parameters, remember them (and tell mgmt) when we are
 * central, then accept with a Reply or refuse with a Negative Reply.
 */
6208 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
6209 struct sk_buff *skb)
6211 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
6212 struct hci_cp_le_conn_param_req_reply cp;
6213 struct hci_conn *hcon;
6214 u16 handle, min, max, latency, timeout;
6216 handle = le16_to_cpu(ev->handle);
6217 min = le16_to_cpu(ev->interval_min);
6218 max = le16_to_cpu(ev->interval_max);
6219 latency = le16_to_cpu(ev->latency);
6220 timeout = le16_to_cpu(ev->timeout);
/* Reject requests for unknown or not-yet-established connections. */
6222 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6223 if (!hcon || hcon->state != BT_CONNECTED)
6224 return send_conn_param_neg_reply(hdev, handle,
6225 HCI_ERROR_UNKNOWN_CONN_ID);
/* Reject parameter sets that violate the spec's allowed ranges. */
6227 if (hci_check_conn_params(min, max, latency, timeout))
6228 return send_conn_param_neg_reply(hdev, handle,
6229 HCI_ERROR_INVALID_LL_PARAMS);
6231 if (hcon->role == HCI_ROLE_MASTER) {
6232 struct hci_conn_params *params;
/* As central, persist the peer's preference so future
 * reconnections use it, and let userspace decide whether to
 * store it permanently (store_hint).
 */
6237 params = hci_conn_params_lookup(hdev, &hcon->dst,
6240 params->conn_min_interval = min;
6241 params->conn_max_interval = max;
6242 params->conn_latency = latency;
6243 params->supervision_timeout = timeout;
6249 hci_dev_unlock(hdev);
6251 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6252 store_hint, min, max, latency, timeout);
/* Accept: echo the requested parameters back to the controller
 * (already little-endian, straight from the event).
 */
6255 cp.handle = ev->handle;
6256 cp.interval_min = ev->interval_min;
6257 cp.interval_max = ev->interval_max;
6258 cp.latency = ev->latency;
6259 cp.timeout = ev->timeout;
6263 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle the LE Direct Advertising Report meta event.  Records here are
 * fixed-size, so a single up-front length check covers the whole walk;
 * each record is forwarded with its direct (initiator) address so
 * process_adv_report() can match it against our local RPA.
 */
6266 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
6267 struct sk_buff *skb)
6269 u8 num_reports = skb->data[0];
6270 struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
/* Validate the claimed report count against the actual skb length
 * before touching any record (untrusted controller data).
 */
6272 if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
6277 for (; num_reports; num_reports--, ev++)
6278 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
6279 ev->bdaddr_type, &ev->direct_addr,
6280 ev->direct_addr_type, ev->rssi, NULL, 0,
6283 hci_dev_unlock(hdev);
/* Handle the LE PHY Update Complete meta event: cache the TX/RX PHYs now in
 * use on the connection (elided guards skip this on error status or when
 * the handle is unknown).
 */
6286 static void hci_le_phy_update_evt(struct hci_dev *hdev, struct sk_buff *skb)
6288 struct hci_ev_le_phy_update_complete *ev = (void *) skb->data;
6289 struct hci_conn *conn;
6291 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6298 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6302 conn->le_tx_phy = ev->tx_phy;
6303 conn->le_rx_phy = ev->rx_phy;
6306 hci_dev_unlock(hdev);
/* Dispatch an HCI LE Meta event: strip the meta header and route the skb to
 * the sub-event specific handler.  Unknown sub-events fall through the
 * (elided) default case and are ignored.
 */
6309 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
6311 struct hci_ev_le_meta *le_ev = (void *) skb->data;
/* Advance past the meta header so handlers see their own payload. */
6313 skb_pull(skb, sizeof(*le_ev));
6315 switch (le_ev->subevent) {
6316 case HCI_EV_LE_CONN_COMPLETE:
6317 hci_le_conn_complete_evt(hdev, skb);
6320 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
6321 hci_le_conn_update_complete_evt(hdev, skb);
6324 case HCI_EV_LE_ADVERTISING_REPORT:
6325 hci_le_adv_report_evt(hdev, skb);
6328 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
6329 hci_le_remote_feat_complete_evt(hdev, skb);
6332 case HCI_EV_LE_LTK_REQ:
6333 hci_le_ltk_request_evt(hdev, skb);
6336 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
6337 hci_le_remote_conn_param_req_evt(hdev, skb);
6340 case HCI_EV_LE_DIRECT_ADV_REPORT:
6341 hci_le_direct_adv_report_evt(hdev, skb);
6344 case HCI_EV_LE_PHY_UPDATE_COMPLETE:
6345 hci_le_phy_update_evt(hdev, skb);
6348 case HCI_EV_LE_EXT_ADV_REPORT:
6349 hci_le_ext_adv_report_evt(hdev, skb);
6352 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
6353 hci_le_enh_conn_complete_evt(hdev, skb);
6356 case HCI_EV_LE_EXT_ADV_SET_TERM:
6357 hci_le_ext_adv_term_evt(hdev, skb);
/* Check whether 'skb' is the Command Complete (or terminating Command
 * Status) event that finishes the request identified by 'opcode'/'event'.
 * On success the skb is left pulled to the command's return parameters so
 * the request's completion callback can parse them.  Returns false when
 * the skb does not terminate this request.
 */
6365 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
6366 u8 event, struct sk_buff *skb)
6368 struct hci_ev_cmd_complete *ev;
6369 struct hci_event_hdr *hdr;
6374 if (skb->len < sizeof(*hdr)) {
6375 bt_dev_err(hdev, "too short HCI event");
6379 hdr = (void *) skb->data;
6380 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* A request waiting on a specific event completes on that event. */
6383 if (hdr->evt != event)
6388 /* Check if request ended in Command Status - no way to retrieve
6389 * any extra parameters in this case.
6391 if (hdr->evt == HCI_EV_CMD_STATUS)
6394 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
6395 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
6400 if (skb->len < sizeof(*ev)) {
6401 bt_dev_err(hdev, "too short cmd_complete event");
6405 ev = (void *) skb->data;
6406 skb_pull(skb, sizeof(*ev));
/* The completing event must carry the opcode we are waiting for. */
6408 if (opcode != __le16_to_cpu(ev->opcode)) {
6409 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
6410 __le16_to_cpu(ev->opcode));
6417 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
6418 struct sk_buff *skb)
6420 struct hci_ev_le_advertising_info *adv;
6421 struct hci_ev_le_direct_adv_info *direct_adv;
6422 struct hci_ev_le_ext_adv_report *ext_adv;
6423 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
6424 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
6428 /* If we are currently suspended and this is the first BT event seen,
6429 * save the wake reason associated with the event.
6431 if (!hdev->suspended || hdev->wake_reason)
6434 /* Default to remote wake. Values for wake_reason are documented in the
6435 * Bluez mgmt api docs.
6437 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
6439 /* Once configured for remote wakeup, we should only wake up for
6440 * reconnections. It's useful to see which device is waking us up so
6441 * keep track of the bdaddr of the connection event that woke us up.
6443 if (event == HCI_EV_CONN_REQUEST) {
6444 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
6445 hdev->wake_addr_type = BDADDR_BREDR;
6446 } else if (event == HCI_EV_CONN_COMPLETE) {
6447 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
6448 hdev->wake_addr_type = BDADDR_BREDR;
6449 } else if (event == HCI_EV_LE_META) {
6450 struct hci_ev_le_meta *le_ev = (void *)skb->data;
6451 u8 subevent = le_ev->subevent;
6452 u8 *ptr = &skb->data[sizeof(*le_ev)];
6453 u8 num_reports = *ptr;
6455 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
6456 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
6457 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
6459 adv = (void *)(ptr + 1);
6460 direct_adv = (void *)(ptr + 1);
6461 ext_adv = (void *)(ptr + 1);
6464 case HCI_EV_LE_ADVERTISING_REPORT:
6465 bacpy(&hdev->wake_addr, &adv->bdaddr);
6466 hdev->wake_addr_type = adv->bdaddr_type;
6468 case HCI_EV_LE_DIRECT_ADV_REPORT:
6469 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
6470 hdev->wake_addr_type = direct_adv->bdaddr_type;
6472 case HCI_EV_LE_EXT_ADV_REPORT:
6473 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
6474 hdev->wake_addr_type = ext_adv->bdaddr_type;
6479 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
6483 hci_dev_unlock(hdev);
6486 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
6488 struct hci_event_hdr *hdr = (void *) skb->data;
6489 hci_req_complete_t req_complete = NULL;
6490 hci_req_complete_skb_t req_complete_skb = NULL;
6491 struct sk_buff *orig_skb = NULL;
6492 u8 status = 0, event = hdr->evt, req_evt = 0;
6493 u16 opcode = HCI_OP_NOP;
6496 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
6500 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
6501 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
6502 opcode = __le16_to_cpu(cmd_hdr->opcode);
6503 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
6508 /* If it looks like we might end up having to call
6509 * req_complete_skb, store a pristine copy of the skb since the
6510 * various handlers may modify the original one through
6511 * skb_pull() calls, etc.
6513 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
6514 event == HCI_EV_CMD_COMPLETE)
6515 orig_skb = skb_clone(skb, GFP_KERNEL);
6517 skb_pull(skb, HCI_EVENT_HDR_SIZE);
6519 /* Store wake reason if we're suspended */
6520 hci_store_wake_reason(hdev, event, skb);
6523 case HCI_EV_INQUIRY_COMPLETE:
6524 hci_inquiry_complete_evt(hdev, skb);
6527 case HCI_EV_INQUIRY_RESULT:
6528 hci_inquiry_result_evt(hdev, skb);
6531 case HCI_EV_CONN_COMPLETE:
6532 hci_conn_complete_evt(hdev, skb);
6535 case HCI_EV_CONN_REQUEST:
6536 hci_conn_request_evt(hdev, skb);
6539 case HCI_EV_DISCONN_COMPLETE:
6540 hci_disconn_complete_evt(hdev, skb);
6543 case HCI_EV_AUTH_COMPLETE:
6544 hci_auth_complete_evt(hdev, skb);
6547 case HCI_EV_REMOTE_NAME:
6548 hci_remote_name_evt(hdev, skb);
6551 case HCI_EV_ENCRYPT_CHANGE:
6552 hci_encrypt_change_evt(hdev, skb);
6555 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
6556 hci_change_link_key_complete_evt(hdev, skb);
6559 case HCI_EV_REMOTE_FEATURES:
6560 hci_remote_features_evt(hdev, skb);
6563 case HCI_EV_CMD_COMPLETE:
6564 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
6565 &req_complete, &req_complete_skb);
6568 case HCI_EV_CMD_STATUS:
6569 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
6573 case HCI_EV_HARDWARE_ERROR:
6574 hci_hardware_error_evt(hdev, skb);
6577 case HCI_EV_ROLE_CHANGE:
6578 hci_role_change_evt(hdev, skb);
6581 case HCI_EV_NUM_COMP_PKTS:
6582 hci_num_comp_pkts_evt(hdev, skb);
6585 case HCI_EV_MODE_CHANGE:
6586 hci_mode_change_evt(hdev, skb);
6589 case HCI_EV_PIN_CODE_REQ:
6590 hci_pin_code_request_evt(hdev, skb);
6593 case HCI_EV_LINK_KEY_REQ:
6594 hci_link_key_request_evt(hdev, skb);
6597 case HCI_EV_LINK_KEY_NOTIFY:
6598 hci_link_key_notify_evt(hdev, skb);
6601 case HCI_EV_CLOCK_OFFSET:
6602 hci_clock_offset_evt(hdev, skb);
6605 case HCI_EV_PKT_TYPE_CHANGE:
6606 hci_pkt_type_change_evt(hdev, skb);
6609 case HCI_EV_PSCAN_REP_MODE:
6610 hci_pscan_rep_mode_evt(hdev, skb);
6613 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
6614 hci_inquiry_result_with_rssi_evt(hdev, skb);
6617 case HCI_EV_REMOTE_EXT_FEATURES:
6618 hci_remote_ext_features_evt(hdev, skb);
6621 case HCI_EV_SYNC_CONN_COMPLETE:
6622 hci_sync_conn_complete_evt(hdev, skb);
6625 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6626 hci_extended_inquiry_result_evt(hdev, skb);
6629 case HCI_EV_KEY_REFRESH_COMPLETE:
6630 hci_key_refresh_complete_evt(hdev, skb);
6633 case HCI_EV_IO_CAPA_REQUEST:
6634 hci_io_capa_request_evt(hdev, skb);
6637 case HCI_EV_IO_CAPA_REPLY:
6638 hci_io_capa_reply_evt(hdev, skb);
6641 case HCI_EV_USER_CONFIRM_REQUEST:
6642 hci_user_confirm_request_evt(hdev, skb);
6645 case HCI_EV_USER_PASSKEY_REQUEST:
6646 hci_user_passkey_request_evt(hdev, skb);
6649 case HCI_EV_USER_PASSKEY_NOTIFY:
6650 hci_user_passkey_notify_evt(hdev, skb);
6653 case HCI_EV_KEYPRESS_NOTIFY:
6654 hci_keypress_notify_evt(hdev, skb);
6657 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6658 hci_simple_pair_complete_evt(hdev, skb);
6661 case HCI_EV_REMOTE_HOST_FEATURES:
6662 hci_remote_host_features_evt(hdev, skb);
6665 case HCI_EV_LE_META:
6666 hci_le_meta_evt(hdev, skb);
6669 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6670 hci_remote_oob_data_request_evt(hdev, skb);
6673 #if IS_ENABLED(CONFIG_BT_HS)
6674 case HCI_EV_CHANNEL_SELECTED:
6675 hci_chan_selected_evt(hdev, skb);
6678 case HCI_EV_PHY_LINK_COMPLETE:
6679 hci_phy_link_complete_evt(hdev, skb);
6682 case HCI_EV_LOGICAL_LINK_COMPLETE:
6683 hci_loglink_complete_evt(hdev, skb);
6686 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6687 hci_disconn_loglink_complete_evt(hdev, skb);
6690 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6691 hci_disconn_phylink_complete_evt(hdev, skb);
6695 case HCI_EV_NUM_COMP_BLOCKS:
6696 hci_num_comp_blocks_evt(hdev, skb);
6700 case HCI_EV_VENDOR_SPECIFIC:
6701 hci_vendor_specific_evt(hdev, skb);
6705 msft_vendor_evt(hdev, skb);
6710 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6715 req_complete(hdev, status, opcode);
6716 } else if (req_complete_skb) {
6717 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6718 kfree_skb(orig_skb);
6721 req_complete_skb(hdev, status, opcode, orig_skb);
6725 kfree_skb(orig_skb);
6727 hdev->stat.evt_rx++;