2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
/* Command Complete handler for HCI_Inquiry_Cancel: clears HCI_INQUIRY,
 * wakes bit-waiters, moves discovery to STOPPED unless an LE active scan
 * is still running, then retries any pending connection attempts.
 * NOTE(review): this extract omits the original brace/status-guard lines.
 */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 /* Set discovery state to stopped if we're not doing LE active
61 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 hdev->le_scan_type != LE_SCAN_ACTIVE)
63 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
66 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Periodic_Inquiry_Mode: marks periodic inquiry
 * as active via the HCI_PERIODIC_INQ device flag. */
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 __u8 status = *((__u8 *) skb->data);
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete for HCI_Exit_Periodic_Inquiry_Mode: clears the
 * HCI_PERIODIC_INQ flag and retries pending connection attempts. */
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
83 __u8 status = *((__u8 *) skb->data);
85 BT_DBG("%s status 0x%2.2x", hdev->name, status);
90 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
92 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Remote_Name_Request_Cancel: debug trace only,
 * no state to update. */
95 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
98 BT_DBG("%s", hdev->name);
/* Command Complete for HCI_Role_Discovery: looks up the connection by
 * handle and records the reported master/slave role on it.
 * NOTE(review): matching hci_dev_lock() line appears omitted from extract. */
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
103 struct hci_rp_role_discovery *rp = (void *) skb->data;
104 struct hci_conn *conn;
106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
115 conn->role = rp->role;
117 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Link_Policy_Settings: stores the reported
 * per-connection link policy on the connection found by handle. */
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
122 struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 struct hci_conn *conn;
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
134 conn->link_policy = __le16_to_cpu(rp->policy);
136 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Link_Policy_Settings: re-reads the policy
 * value from the sent command (offset 2, past the handle) and caches it on
 * the connection identified by the reply's handle. */
139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
141 struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 struct hci_conn *conn;
145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
156 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
158 conn->link_policy = get_unaligned_le16(sent + 2);
160 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Default_Link_Policy_Settings: caches the
 * controller-wide default link policy in hdev->link_policy. */
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
166 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
168 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
173 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for HCI_Write_Default_Link_Policy_Settings: copies the
 * policy from the command we sent into hdev->link_policy. */
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
179 __u8 status = *((__u8 *) skb->data);
182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
191 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: drop HCI_RESET, wipe all volatile device
 * flags and discovery state, and reset cached advertising/scan data, TX
 * power values, scan type, SSP debug mode and the LE white/resolving lists
 * back to their post-reset defaults. */
194 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
196 __u8 status = *((__u8 *) skb->data);
198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
200 clear_bit(HCI_RESET, &hdev->flags);
205 /* Reset all non-persistent flags */
206 hci_dev_clear_volatile_flags(hdev);
208 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
210 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
211 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
213 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
214 hdev->adv_data_len = 0;
216 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
217 hdev->scan_rsp_data_len = 0;
219 hdev->le_scan_type = LE_SCAN_PASSIVE;
221 hdev->ssp_debug_mode = 0;
223 hci_bdaddr_list_clear(&hdev->le_white_list);
224 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for HCI_Read_Stored_Link_Key: when the sent command
 * asked for all keys (read_all == 0x01) and the reply succeeded, cache
 * the controller's max/current stored key counts. */
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
230 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 struct hci_cp_read_stored_link_key *sent;
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
235 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
239 if (!rp->status && sent->read_all == 0x01) {
240 hdev->stored_max_keys = rp->max_keys;
241 hdev->stored_num_keys = rp->num_keys;
/* Command Complete for HCI_Delete_Stored_Link_Key: decrement the cached
 * stored-key count by the number deleted, clamping at zero. */
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
250 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
255 if (rp->num_keys <= hdev->stored_num_keys)
256 hdev->stored_num_keys -= rp->num_keys;
258 hdev->stored_num_keys = 0;
/* Command Complete for HCI_Write_Local_Name: report completion to the
 * management interface (when enabled) and cache the name we sent in
 * hdev->dev_name. */
261 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
263 __u8 status = *((__u8 *) skb->data);
266 BT_DBG("%s status 0x%2.2x", hdev->name, status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
274 if (hci_dev_test_flag(hdev, HCI_MGMT))
275 mgmt_set_local_name_complete(hdev, sent, status);
277 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
279 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Name: cache the controller's name,
 * but only during initial setup/config so a user-set name isn't clobbered. */
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
284 struct hci_rp_read_local_name *rp = (void *) skb->data;
286 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
291 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 hci_dev_test_flag(hdev, HCI_CONFIG))
293 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for HCI_Write_Authentication_Enable: mirror the sent
 * parameter into the HCI_AUTH device flag and notify mgmt if active. */
296 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
298 __u8 status = *((__u8 *) skb->data);
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
310 __u8 param = *((__u8 *) sent);
312 if (param == AUTH_ENABLED)
313 set_bit(HCI_AUTH, &hdev->flags);
315 clear_bit(HCI_AUTH, &hdev->flags);
318 if (hci_dev_test_flag(hdev, HCI_MGMT))
319 mgmt_auth_enable_complete(hdev, status);
321 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Encryption_Mode: mirror the parameter we
 * sent into the HCI_ENCRYPT device flag. */
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
326 __u8 status = *((__u8 *) skb->data);
330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
335 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
339 param = *((__u8 *) sent);
342 set_bit(HCI_ENCRYPT, &hdev->flags);
344 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete for HCI_Write_Scan_Enable: update the HCI_ISCAN /
 * HCI_PSCAN flags from the scan mask we sent; discov_timeout is reset
 * (visible at original line 364 — surrounding condition omitted here). */
347 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
349 __u8 status = *((__u8 *) skb->data);
353 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
359 param = *((__u8 *) sent);
364 hdev->discov_timeout = 0;
368 if (param & SCAN_INQUIRY)
369 set_bit(HCI_ISCAN, &hdev->flags);
371 clear_bit(HCI_ISCAN, &hdev->flags);
373 if (param & SCAN_PAGE)
374 set_bit(HCI_PSCAN, &hdev->flags);
376 clear_bit(HCI_PSCAN, &hdev->flags);
379 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Class_of_Device: cache the 3-byte class
 * of device and log it (stored little-endian, printed MSB first). */
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
384 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
391 memcpy(hdev->dev_class, rp->dev_class, 3);
393 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for HCI_Write_Class_of_Device: cache the class we sent
 * and notify the management interface when it is active. */
397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 __u8 status = *((__u8 *) skb->data);
402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
411 memcpy(hdev->dev_class, sent, 3);
413 if (hci_dev_test_flag(hdev, HCI_MGMT))
414 mgmt_set_class_of_dev_complete(hdev, sent, status);
416 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Voice_Setting: cache the value and notify
 * the driver (hdev->notify) only when it actually changed. */
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
421 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429 setting = __le16_to_cpu(rp->voice_setting);
431 if (hdev->voice_setting == setting)
434 hdev->voice_setting = setting;
436 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
439 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Write_Voice_Setting: mirror the value we sent
 * into hdev->voice_setting and notify the driver if it changed. */
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
445 __u8 status = *((__u8 *) skb->data);
449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
458 setting = get_unaligned_le16(sent);
460 if (hdev->voice_setting == setting)
463 hdev->voice_setting = setting;
465 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
468 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Read_Number_Of_Supported_IAC: cache how many
 * inquiry access codes the controller supports. */
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
474 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
476 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
481 hdev->num_iac = rp->num_iac;
483 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete for HCI_Write_Simple_Pairing_Mode: mirror the sent mode
 * into the LMP_HOST_SSP feature bit and the HCI_SSP_ENABLED flag, and
 * notify mgmt when active. */
486 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
488 __u8 status = *((__u8 *) skb->data);
489 struct hci_cp_write_ssp_mode *sent;
491 BT_DBG("%s status 0x%2.2x", hdev->name, status);
493 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
501 hdev->features[1][0] |= LMP_HOST_SSP;
503 hdev->features[1][0] &= ~LMP_HOST_SSP;
506 if (hci_dev_test_flag(hdev, HCI_MGMT))
507 mgmt_ssp_enable_complete(hdev, sent->mode, status);
510 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
512 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
515 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Secure_Connections_Host_Support: mirror
 * the sent support value into LMP_HOST_SC; HCI_SC_ENABLED is only updated
 * directly here when mgmt is not active (mgmt owns the flag otherwise). */
518 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
520 u8 status = *((u8 *) skb->data);
521 struct hci_cp_write_sc_support *sent;
523 BT_DBG("%s status 0x%2.2x", hdev->name, status);
525 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
533 hdev->features[1][0] |= LMP_HOST_SC;
535 hdev->features[1][0] &= ~LMP_HOST_SC;
538 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
540 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
542 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
545 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Version_Information: cache HCI/LMP
 * versions and manufacturer, but only during setup/config so runtime
 * re-reads can't change them. */
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
550 struct hci_rp_read_local_version *rp = (void *) skb->data;
552 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
557 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 hdev->hci_ver = rp->hci_ver;
560 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 hdev->lmp_ver = rp->lmp_ver;
562 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for HCI_Read_Local_Supported_Commands: cache the
 * supported-command bitmap, only during setup/config. */
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
570 struct hci_rp_read_local_commands *rp = (void *) skb->data;
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
577 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 hci_dev_test_flag(hdev, HCI_CONFIG))
579 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for HCI_Read_Local_Supported_Features: cache the LMP
 * feature page 0 and derive the ACL packet types and (e)SCO link types
 * the controller can use from the individual feature bits. */
582 static void hci_cc_read_local_features(struct hci_dev *hdev,
585 struct hci_rp_read_local_features *rp = (void *) skb->data;
587 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592 memcpy(hdev->features, rp->features, 8);
594 /* Adjust default settings according to features
595 * supported by device. */
597 if (hdev->features[0][0] & LMP_3SLOT)
598 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
600 if (hdev->features[0][0] & LMP_5SLOT)
601 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
603 if (hdev->features[0][1] & LMP_HV2) {
604 hdev->pkt_type |= (HCI_HV2);
605 hdev->esco_type |= (ESCO_HV2);
608 if (hdev->features[0][1] & LMP_HV3) {
609 hdev->pkt_type |= (HCI_HV3);
610 hdev->esco_type |= (ESCO_HV3);
613 if (lmp_esco_capable(hdev))
614 hdev->esco_type |= (ESCO_EV3);
616 if (hdev->features[0][4] & LMP_EV4)
617 hdev->esco_type |= (ESCO_EV4);
619 if (hdev->features[0][4] & LMP_EV5)
620 hdev->esco_type |= (ESCO_EV5);
622 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
623 hdev->esco_type |= (ESCO_2EV3);
625 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
626 hdev->esco_type |= (ESCO_3EV3);
628 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
629 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for HCI_Read_Local_Extended_Features: track the highest
 * feature page reported and cache the returned page (bounds-checked). */
632 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
635 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
637 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
642 if (hdev->max_page < rp->max_page)
643 hdev->max_page = rp->max_page;
645 if (rp->page < HCI_MAX_PAGES)
646 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for HCI_Read_Flow_Control_Mode: cache the controller's
 * flow control mode (packet- vs block-based). */
649 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
652 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
654 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
659 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for HCI_Read_Buffer_Size: cache ACL/SCO MTUs and packet
 * counts and initialise the available-credit counters from them; the
 * HCI_QUIRK_FIXUP_BUFFER_SIZE branch body is not visible in this extract. */
662 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
664 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
666 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
671 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
672 hdev->sco_mtu = rp->sco_mtu;
673 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
674 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
676 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
681 hdev->acl_cnt = hdev->acl_pkts;
682 hdev->sco_cnt = hdev->sco_pkts;
684 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
685 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for HCI_Read_BD_ADDR: record the public address during
 * init, and also keep a setup-time copy in setup_addr during HCI_SETUP. */
688 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
690 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
692 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
697 if (test_bit(HCI_INIT, &hdev->flags))
698 bacpy(&hdev->bdaddr, &rp->bdaddr);
700 if (hci_dev_test_flag(hdev, HCI_SETUP))
701 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for HCI_Read_Page_Scan_Activity: cache interval/window,
 * only while the device is still initialising. */
704 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
707 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
709 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
714 if (test_bit(HCI_INIT, &hdev->flags)) {
715 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
716 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for HCI_Write_Page_Scan_Activity: mirror the values we
 * sent into the cached page scan interval/window. */
720 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
723 u8 status = *((u8 *) skb->data);
724 struct hci_cp_write_page_scan_activity *sent;
726 BT_DBG("%s status 0x%2.2x", hdev->name, status);
731 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
735 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
736 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for HCI_Read_Page_Scan_Type: cache the type, only
 * during device initialisation. */
739 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
742 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
744 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
749 if (test_bit(HCI_INIT, &hdev->flags))
750 hdev->page_scan_type = rp->type;
/* Command Complete for HCI_Write_Page_Scan_Type: mirror the single-byte
 * type parameter we sent into hdev->page_scan_type. */
753 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
756 u8 status = *((u8 *) skb->data);
759 BT_DBG("%s status 0x%2.2x", hdev->name, status);
764 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
766 hdev->page_scan_type = *type;
/* Command Complete for HCI_Read_Data_Block_Size: cache block-based flow
 * control parameters and initialise the block-credit counter. */
769 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
772 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
774 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
779 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
780 hdev->block_len = __le16_to_cpu(rp->block_len);
781 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
783 hdev->block_cnt = hdev->num_blocks;
785 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
786 hdev->block_cnt, hdev->block_len);
/* Command Complete for HCI_Read_Clock: validate the reply length, then
 * store either the local clock (cp->which == 0x00) on hdev or the piconet
 * clock and accuracy on the connection named by the reply's handle. */
789 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
791 struct hci_rp_read_clock *rp = (void *) skb->data;
792 struct hci_cp_read_clock *cp;
793 struct hci_conn *conn;
795 BT_DBG("%s", hdev->name);
797 if (skb->len < sizeof(*rp))
805 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
809 if (cp->which == 0x00) {
810 hdev->clock = le32_to_cpu(rp->clock);
814 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
816 conn->clock = le32_to_cpu(rp->clock);
817 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
821 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_AMP_Info: cache the full AMP
 * controller capability set (status, bandwidths, latency, PDU size,
 * type, PAL capabilities, assoc size and flush timeouts). */
824 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
827 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
829 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834 hdev->amp_status = rp->amp_status;
835 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
836 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
837 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
838 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
839 hdev->amp_type = rp->amp_type;
840 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
841 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
842 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
843 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete for HCI_Read_Inquiry_Response_TX_Power: cache the
 * inquiry response TX power level. */
846 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
849 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
851 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
856 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for HCI_PIN_Code_Request_Reply: notify mgmt of the
 * result and remember the PIN length we sent on the matching ACL
 * connection (used later for link-key type decisions). */
859 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
861 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
862 struct hci_cp_pin_code_reply *cp;
863 struct hci_conn *conn;
865 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
869 if (hci_dev_test_flag(hdev, HCI_MGMT))
870 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
875 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
879 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
881 conn->pin_length = cp->pin_len;
884 hci_dev_unlock(hdev);
/* Command Complete for HCI_PIN_Code_Request_Negative_Reply: forward the
 * result to the management interface when it is active. */
887 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
889 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
891 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
895 if (hci_dev_test_flag(hdev, HCI_MGMT))
896 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
899 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Buffer_Size: cache the LE ACL MTU and
 * packet count and initialise the LE credit counter. */
902 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
905 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
907 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
912 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
913 hdev->le_pkts = rp->le_max_pkt;
915 hdev->le_cnt = hdev->le_pkts;
917 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for HCI_LE_Read_Local_Supported_Features: cache the
 * 8-byte LE feature mask. */
920 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
923 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for HCI_LE_Read_Advertising_Channel_TX_Power: cache the
 * advertising TX power level. */
933 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
936 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
938 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943 hdev->adv_tx_power = rp->tx_power;
/* Command Complete for HCI_User_Confirmation_Request_Reply: forward the
 * result to mgmt when the management interface is active. */
946 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
948 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
950 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
954 if (hci_dev_test_flag(hdev, HCI_MGMT))
955 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
958 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Confirmation_Request_Negative_Reply:
 * forward the result to mgmt when active. */
961 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
964 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
966 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
970 if (hci_dev_test_flag(hdev, HCI_MGMT))
971 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 ACL_LINK, 0, rp->status);
974 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Reply: forward the result
 * to mgmt when active (reply struct is shared with user-confirm). */
977 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
985 if (hci_dev_test_flag(hdev, HCI_MGMT))
986 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
989 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Negative_Reply: forward
 * the result to mgmt when active. */
992 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1001 if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 ACL_LINK, 0, rp->status);
1005 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_OOB_Data: only the debug trace is
 * visible here; any further handling is not part of this extract. */
1008 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1011 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_Read_Local_OOB_Extended_Data: only the debug
 * trace is visible here; further handling is not part of this extract. */
1016 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1017 struct sk_buff *skb)
1019 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1021 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_LE_Set_Random_Address: cache the random address
 * we sent in hdev->random_addr. */
1024 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1026 __u8 status = *((__u8 *) skb->data);
1029 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1034 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1040 bacpy(&hdev->random_addr, sent);
1042 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Default_PHY: mirror the TX/RX PHY
 * preferences we sent into the device defaults. */
1045 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1047 __u8 status = *((__u8 *) skb->data);
1048 struct hci_cp_le_set_default_phy *cp;
1050 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1055 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1061 hdev->le_tx_def_phys = cp->tx_phys;
1062 hdev->le_rx_def_phys = cp->rx_phys;
1064 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertising_Set_Random_Address: store
 * the address on hdev for instance 0, otherwise on the matching
 * advertising instance looked up by cur_adv_instance. */
1067 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1070 __u8 status = *((__u8 *) skb->data);
1071 struct hci_cp_le_set_adv_set_rand_addr *cp;
1072 struct adv_info *adv_instance;
1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1083 if (!hdev->cur_adv_instance) {
1084 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1085 bacpy(&hdev->random_addr, &cp->bdaddr);
1087 adv_instance = hci_find_adv_instance(hdev,
1088 hdev->cur_adv_instance);
1090 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1093 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertising_Enable: set/clear HCI_LE_ADV
 * to match; when enabling while an LE connect is in progress, arm the
 * connection timeout so a failed peripheral-role connect can't hang. */
1096 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1098 __u8 *sent, status = *((__u8 *) skb->data);
1100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1105 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1111 /* If we're doing connection initiation as peripheral. Set a
1112 * timeout in case something goes wrong.
1115 struct hci_conn *conn;
1117 hci_dev_set_flag(hdev, HCI_LE_ADV);
1119 conn = hci_lookup_le_connect(hdev);
1121 queue_delayed_work(hdev->workqueue,
1122 &conn->le_conn_timeout,
1123 conn->conn_timeout);
1125 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1128 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Advertising_Enable: extended
 * variant of hci_cc_le_set_adv_enable — mirrors HCI_LE_ADV and arms the
 * LE connection timeout when enabling during a pending connect. */
1131 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1132 struct sk_buff *skb)
1134 struct hci_cp_le_set_ext_adv_enable *cp;
1135 __u8 status = *((__u8 *) skb->data);
1137 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1142 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1149 struct hci_conn *conn;
1151 hci_dev_set_flag(hdev, HCI_LE_ADV);
1153 conn = hci_lookup_le_connect(hdev);
1155 queue_delayed_work(hdev->workqueue,
1156 &conn->le_conn_timeout,
1157 conn->conn_timeout);
1159 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1162 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Parameters: remember the scan type
 * (active/passive) we configured. */
1165 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1167 struct hci_cp_le_set_scan_param *cp;
1168 __u8 status = *((__u8 *) skb->data);
1170 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1175 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1181 hdev->le_scan_type = cp->type;
1183 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Scan_Parameters: extract the
 * first per-PHY parameter block from the sent command and remember its
 * scan type. */
1186 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1187 struct sk_buff *skb)
1189 struct hci_cp_le_set_ext_scan_params *cp;
1190 __u8 status = *((__u8 *) skb->data);
1191 struct hci_cp_le_scan_phy_params *phy_param;
1193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1198 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1202 phy_param = (void *)cp->data;
1206 hdev->le_scan_type = phy_param->type;
1208 hci_dev_unlock(hdev);
/* Return true when a deferred advertising report is buffered in the
 * discovery state (last_adv_addr differs from BDADDR_ANY). */
1211 static bool has_pending_adv_report(struct hci_dev *hdev)
1213 struct discovery_state *d = &hdev->discovery;
1215 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report: reset the stored address to
 * BDADDR_ANY and the stored data length to zero. */
1218 static void clear_pending_adv_report(struct hci_dev *hdev)
1220 struct discovery_state *d = &hdev->discovery;
1222 bacpy(&d->last_adv_addr, BDADDR_ANY);
1223 d->last_adv_data_len = 0;
/* Buffer an advertising report in the discovery state so it can be merged
 * with a following scan response before being reported to userspace. */
1226 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1227 u8 bdaddr_type, s8 rssi, u32 flags,
1230 struct discovery_state *d = &hdev->discovery;
1232 bacpy(&d->last_adv_addr, bdaddr);
1233 d->last_adv_addr_type = bdaddr_type;
1234 d->last_adv_rssi = rssi;
1235 d->last_adv_flags = flags;
1236 memcpy(d->last_adv_data, data, len);
1237 d->last_adv_data_len = len;
/* Shared completion logic for (extended) LE scan enable/disable.
 * Enable: set HCI_LE_SCAN and drop any stale buffered report for active
 * scans. Disable: flush a buffered report to mgmt, cancel the scan-disable
 * timer, clear HCI_LE_SCAN, and either mark discovery stopped (scan was
 * interrupted by a connect) or re-enable advertising. Any other value is
 * a reserved parameter and is logged as an error. */
1240 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1245 case LE_SCAN_ENABLE:
1246 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1247 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1248 clear_pending_adv_report(hdev);
1251 case LE_SCAN_DISABLE:
1252 /* We do this here instead of when setting DISCOVERY_STOPPED
1253 * since the latter would potentially require waiting for
1254 * inquiry to stop too.
1256 if (has_pending_adv_report(hdev)) {
1257 struct discovery_state *d = &hdev->discovery;
1259 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1260 d->last_adv_addr_type, NULL,
1261 d->last_adv_rssi, d->last_adv_flags,
1263 d->last_adv_data_len, NULL, 0);
1266 /* Cancel this timer so that we don't try to disable scanning
1267 * when it's already disabled.
1269 cancel_delayed_work(&hdev->le_scan_disable);
1271 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1273 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1274 * interrupted scanning due to a connect request. Mark
1275 * therefore discovery as stopped. If this was not
1276 * because of a connect request advertising might have
1277 * been disabled because of active scanning, so
1278 * re-enable it again if necessary.
1280 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1281 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1282 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1283 hdev->discovery.state == DISCOVERY_FINDING)
1284 hci_req_reenable_advertising(hdev);
1289 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1294 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Enable: delegate to the shared
 * scan-enable completion handler with the enable value we sent. */
1297 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1298 struct sk_buff *skb)
1300 struct hci_cp_le_set_scan_enable *cp;
1301 __u8 status = *((__u8 *) skb->data);
1303 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1308 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1312 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for HCI_LE_Set_Extended_Scan_Enable: same delegation
 * as the legacy variant, using the extended command structure. */
1315 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1316 struct sk_buff *skb)
1318 struct hci_cp_le_set_ext_scan_enable *cp;
1319 __u8 status = *((__u8 *) skb->data);
1321 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1326 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1330 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for HCI_LE_Read_Number_of_Supported_Advertising_Sets:
 * cache the number of advertising sets the controller supports. */
1333 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1334 struct sk_buff *skb)
1336 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1338 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1344 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete for HCI_LE_Read_White_List_Size: cache the white list
 * capacity. */
1347 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1348 struct sk_buff *skb)
1350 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1352 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1357 hdev->le_white_list_size = rp->size;
/* Command Complete for HCI_LE_Clear_White_List: drop the host's mirror of
 * the controller's white list. */
1360 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1361 struct sk_buff *skb)
1363 __u8 status = *((__u8 *) skb->data);
1365 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1370 hci_bdaddr_list_clear(&hdev->le_white_list);
/* Command Complete for HCI_LE_Add_Device_To_White_List: mirror the added
 * address into the host's copy of the white list. */
1373 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1374 struct sk_buff *skb)
1376 struct hci_cp_le_add_to_white_list *sent;
1377 __u8 status = *((__u8 *) skb->data);
1379 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1384 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1388 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
/* Command Complete for HCI_LE_Remove_Device_From_White_List: mirror the
 * removal into the host's copy of the white list. */
1392 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1393 struct sk_buff *skb)
1395 struct hci_cp_le_del_from_white_list *sent;
1396 __u8 status = *((__u8 *) skb->data);
1398 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1403 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1407 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
/* Command Complete for HCI_LE_Read_Supported_States: cache the 8-byte LE
 * supported-states bitmap. */
1411 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1412 struct sk_buff *skb)
1414 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1416 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1421 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete for HCI_LE_Read_Suggested_Default_Data_Length: cache
 * the suggested default TX octets and time. */
1424 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1425 struct sk_buff *skb)
1427 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1429 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1434 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1435 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete for HCI_LE_Write_Suggested_Default_Data_Length: mirror
 * the values we sent into the cached defaults. */
1438 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1439 struct sk_buff *skb)
1441 struct hci_cp_le_write_def_data_len *sent;
1442 __u8 status = *((__u8 *) skb->data);
1444 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1449 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1453 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1454 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete for HCI_LE_Clear_Resolving_List: drop the host's mirror
 * of the controller's resolving list. */
1457 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1458 struct sk_buff *skb)
1460 __u8 status = *((__u8 *) skb->data);
1462 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1467 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for HCI_LE_Read_Resolving_List_Size: cache the
 * resolving list capacity. */
1470 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1471 struct sk_buff *skb)
1473 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1475 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1480 hdev->le_resolv_list_size = rp->size;
/* Command Complete for HCI_LE_Set_Address_Resolution_Enable: mirror the
 * sent enable value into the HCI_LL_RPA_RESOLUTION flag. */
1483 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1484 struct sk_buff *skb)
1486 __u8 *sent, status = *((__u8 *) skb->data);
1488 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1493 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1500 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1502 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1504 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Maximum_Data_Length: cache the
 * controller's maximum TX/RX octets and times. */
1507 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1508 struct sk_buff *skb)
1510 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1512 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1517 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1518 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1519 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1520 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete for HCI_Write_LE_Host_Supported: mirror the sent
 * le/simul values into the LMP_HOST_LE / LMP_HOST_LE_BREDR feature bits;
 * disabling LE also clears the LE-enabled and advertising flags. */
1523 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1524 struct sk_buff *skb)
1526 struct hci_cp_write_le_host_supported *sent;
1527 __u8 status = *((__u8 *) skb->data);
1529 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1534 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1541 hdev->features[1][0] |= LMP_HOST_LE;
1542 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1544 hdev->features[1][0] &= ~LMP_HOST_LE;
1545 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1546 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1550 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1552 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1554 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Advertising_Parameters:
 * remember which own-address type we are advertising with.
 */
1557 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1559 struct hci_cp_le_set_adv_param *cp;
1560 u8 status = *((u8 *) skb->data);
1562 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1567 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1572 hdev->adv_addr_type = cp->own_address_type;
1573 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_LE_Set_Extended_Advertising_Parameters:
 * record the advertising own-address type and the TX power the controller
 * selected (reported in the return parameters), then refresh the adv data
 * now that the TX power is known.
 */
1576 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1578 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1579 struct hci_cp_le_set_ext_adv_params *cp;
1580 struct adv_info *adv_instance;
1582 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1587 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1592 hdev->adv_addr_type = cp->own_addr_type;
1593 if (!hdev->cur_adv_instance) {
1594 /* Store in hdev for instance 0 */
1595 hdev->adv_tx_power = rp->tx_power;
/* Otherwise store on the active advertising instance. */
1597 adv_instance = hci_find_adv_instance(hdev,
1598 hdev->cur_adv_instance);
1600 adv_instance->tx_power = rp->tx_power;
1602 /* Update adv data as tx power is known now */
1603 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1604 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_RSSI: store the reported RSSI on
 * the connection identified by the returned handle.
 */
1607 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1609 struct hci_rp_read_rssi *rp = (void *) skb->data;
1610 struct hci_conn *conn;
1612 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1619 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1621 conn->rssi = rp->rssi;
1623 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Read_Transmit_Power_Level: depending
 * on the 'type' we sent (current vs. maximum), store the reported level
 * in conn->tx_power or conn->max_tx_power.
 */
1626 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1628 struct hci_cp_read_tx_power *sent;
1629 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1630 struct hci_conn *conn;
1632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1637 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1643 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* Dispatch on the requested power-level type from the sent command. */
1647 switch (sent->type) {
1649 conn->tx_power = rp->tx_power;
1652 conn->max_tx_power = rp->tx_power;
1657 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_Write_Simple_Pairing_Debug_Mode:
 * cache the debug mode byte we sent to the controller.
 */
1660 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1662 u8 status = *((u8 *) skb->data);
1665 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1670 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1672 hdev->ssp_debug_mode = *mode;
/* Command Status handler for HCI_Inquiry: on failure re-check pending
 * connection attempts; on success mark the device as inquiring.
 */
1675 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1677 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1680 hci_conn_check_pending(hdev);
1684 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for HCI_Create_Connection. On failure, close
 * the matching outgoing ACL connection (or retry via BT_CONNECT2 when
 * status is 0x0c "command disallowed" and attempts remain). On success,
 * make sure a conn object exists for the address.
 */
1687 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1689 struct hci_cp_create_conn *cp;
1690 struct hci_conn *conn;
1692 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1694 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1700 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1702 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
/* Failure path: give up after repeated 0x0c, otherwise requeue. */
1705 if (conn && conn->state == BT_CONNECT) {
1706 if (status != 0x0c || conn->attempt > 2) {
1707 conn->state = BT_CLOSED;
1708 hci_connect_cfm(conn, status);
1711 conn->state = BT_CONNECT2;
/* Success path with no existing conn object: create one now. */
1715 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1718 bt_dev_err(hdev, "no memory for new connection");
1722 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Add_SCO_Connection: on failure, close
 * the SCO link attached to the parent ACL connection and notify the
 * upper layer via hci_connect_cfm().
 * NOTE(review): the acl->link dereference that yields 'sco' is elided
 * in this view — confirm against the full source.
 */
1725 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1727 struct hci_cp_add_sco *cp;
1728 struct hci_conn *acl, *sco;
1731 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1736 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1740 handle = __le16_to_cpu(cp->handle);
1742 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1746 acl = hci_conn_hash_lookup_handle(hdev, handle);
1750 sco->state = BT_CLOSED;
1752 hci_connect_cfm(sco, status);
1757 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Authentication_Requested: on failure,
 * complete the connect attempt with the error and drop the reference
 * taken for a connection still in BT_CONFIG.
 */
1760 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1762 struct hci_cp_auth_requested *cp;
1763 struct hci_conn *conn;
1765 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1770 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1776 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1778 if (conn->state == BT_CONFIG) {
1779 hci_connect_cfm(conn, status);
1780 hci_conn_drop(conn);
1784 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Set_Connection_Encryption: on failure,
 * complete the connect attempt with the error and drop the reference
 * taken for a connection still in BT_CONFIG.
 */
1787 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1789 struct hci_cp_set_conn_encrypt *cp;
1790 struct hci_conn *conn;
1792 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1797 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1803 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1805 if (conn->state == BT_CONFIG) {
1806 hci_connect_cfm(conn, status);
1807 hci_conn_drop(conn);
1811 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still in BT_CONFIG needs an
 * explicit authentication request before it can be reported connected.
 * Returns non-zero when authentication should be requested.
 */
1814 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1815 struct hci_conn *conn)
/* Only outgoing connections in the configuration phase qualify. */
1817 if (conn->state != BT_CONFIG || !conn->out)
/* SDP-only security never requires authentication. */
1820 if (conn->pending_sec_level == BT_SECURITY_SDP)
1823 /* Only request authentication for SSP connections or non-SSP
1824 * devices with sec_level MEDIUM or HIGH or if MITM protection
1827 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1828 conn->pending_sec_level != BT_SECURITY_FIPS &&
1829 conn->pending_sec_level != BT_SECURITY_HIGH &&
1830 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue an HCI_Remote_Name_Request for the given inquiry cache entry,
 * reusing the page-scan parameters learned during inquiry. Returns the
 * result of hci_send_cmd() (0 on success).
 */
1836 static int hci_resolve_name(struct hci_dev *hdev,
1837 struct inquiry_entry *e)
1839 struct hci_cp_remote_name_req cp;
1841 memset(&cp, 0, sizeof(cp));
1843 bacpy(&cp.bdaddr, &e->data.bdaddr);
1844 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1845 cp.pscan_mode = e->data.pscan_mode;
1846 cp.clock_offset = e->data.clock_offset;
1848 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device that still
 * needs its name. Returns true when a request was started (the entry
 * moves to NAME_PENDING), false when nothing is left to resolve.
 */
1851 static bool hci_resolve_next_name(struct hci_dev *hdev)
1853 struct discovery_state *discov = &hdev->discovery;
1854 struct inquiry_entry *e;
1856 if (list_empty(&discov->resolve))
1859 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1863 if (hci_resolve_name(hdev, e) == 0) {
1864 e->name_state = NAME_PENDING;
/* Handle completion of a pending remote-name lookup during discovery:
 * report the device to mgmt if it just became connected, record the
 * resolved name (or mark it unknown), and either continue with the next
 * pending name or finish discovery.
 */
1871 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1872 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1874 struct discovery_state *discov = &hdev->discovery;
1875 struct inquiry_entry *e;
1877 /* Update the mgmt connected state if necessary. Be careful with
1878 * conn objects that exist but are not (yet) connected however.
1879 * Only those in BT_CONFIG or BT_CONNECTED states can be
1880 * considered connected.
1883 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1884 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1885 mgmt_device_connected(hdev, conn, 0, name, name_len);
1887 if (discov->state == DISCOVERY_STOPPED)
1890 if (discov->state == DISCOVERY_STOPPING)
1891 goto discov_complete;
1893 if (discov->state != DISCOVERY_RESOLVING)
1896 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1897 /* If the device was not found in a list of found devices names of which
1898 * are pending. there is no need to continue resolving a next name as it
1899 * will be done upon receiving another Remote Name Request Complete
/* Name received: record it and forward to mgmt with the cached RSSI. */
1906 e->name_state = NAME_KNOWN;
1907 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1908 e->data.rssi, name, name_len);
/* No name received: remember that resolution failed for this entry. */
1910 e->name_state = NAME_NOT_KNOWN;
/* If another request was started, discovery continues; otherwise stop. */
1913 if (hci_resolve_next_name(hdev))
1917 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI_Remote_Name_Request. On failure, let
 * the discovery machinery know the lookup will not complete, and — if
 * the connection needs it — fall back to requesting authentication
 * directly instead of waiting for the name event.
 */
1920 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1922 struct hci_cp_remote_name_req *cp;
1923 struct hci_conn *conn;
1925 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1927 /* If successful wait for the name req complete event before
1928 * checking for the need to do authentication */
1932 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1938 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1940 if (hci_dev_test_flag(hdev, HCI_MGMT))
1941 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1946 if (!hci_outgoing_auth_needed(hdev, conn))
/* Request authentication ourselves if none is already pending. */
1949 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1950 struct hci_cp_auth_requested auth_cp;
1952 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1954 auth_cp.handle = __cpu_to_le16(conn->handle);
1955 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1956 sizeof(auth_cp), &auth_cp);
1960 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Read_Remote_Supported_Features: on
 * failure, complete the connect attempt with the error and drop the
 * reference taken for a connection still in BT_CONFIG.
 */
1963 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1965 struct hci_cp_read_remote_features *cp;
1966 struct hci_conn *conn;
1968 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1973 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1979 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1981 if (conn->state == BT_CONFIG) {
1982 hci_connect_cfm(conn, status);
1983 hci_conn_drop(conn);
1987 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Read_Remote_Extended_Features: on
 * failure, complete the connect attempt with the error and drop the
 * reference taken for a connection still in BT_CONFIG.
 */
1990 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1992 struct hci_cp_read_remote_ext_features *cp;
1993 struct hci_conn *conn;
1995 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2000 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2006 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2008 if (conn->state == BT_CONFIG) {
2009 hci_connect_cfm(conn, status);
2010 hci_conn_drop(conn);
2014 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Setup_Synchronous_Connection: on
 * failure, close the SCO/eSCO link attached to the parent ACL
 * connection and notify the upper layer.
 * NOTE(review): the acl->link dereference that yields 'sco' is elided
 * in this view — confirm against the full source.
 */
2017 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2019 struct hci_cp_setup_sync_conn *cp;
2020 struct hci_conn *acl, *sco;
2023 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2028 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2032 handle = __le16_to_cpu(cp->handle);
2034 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2038 acl = hci_conn_hash_lookup_handle(hdev, handle);
2042 sco->state = BT_CLOSED;
2044 hci_connect_cfm(sco, status);
2049 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Sniff_Mode: on failure, clear the
 * pending mode-change flag and complete any deferred SCO setup.
 */
2052 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2054 struct hci_cp_sniff_mode *cp;
2055 struct hci_conn *conn;
2057 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2062 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2068 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2070 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2072 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2073 hci_sco_setup(conn, status);
2076 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Exit_Sniff_Mode: mirror image of
 * hci_cs_sniff_mode() — clear the pending mode-change flag and complete
 * any deferred SCO setup on failure.
 */
2079 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2081 struct hci_cp_exit_sniff_mode *cp;
2082 struct hci_conn *conn;
2084 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2089 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2095 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2097 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2099 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2100 hci_sco_setup(conn, status);
2103 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Disconnect: on failure, tell mgmt the
 * disconnect attempt for this connection did not go through.
 */
2106 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2108 struct hci_cp_disconnect *cp;
2109 struct hci_conn *conn;
2114 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2120 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2122 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2123 conn->dst_type, status);
2125 hci_dev_unlock(hdev);
/* Common Command Status bookkeeping for LE connection creation (legacy
 * and extended): record the initiator/responder address info needed by
 * SMP, and arm a connection timeout for direct (non-whitelist) attempts
 * since LE has no page-timeout equivalent.
 */
2128 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2129 u8 peer_addr_type, u8 own_address_type,
2132 struct hci_conn *conn;
2134 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2139 /* Store the initiator and responder address information which
2140 * is needed for SMP. These values will not change during the
2141 * lifetime of the connection.
2143 conn->init_addr_type = own_address_type;
2144 if (own_address_type == ADDR_LE_DEV_RANDOM)
2145 bacpy(&conn->init_addr, &hdev->random_addr);
2147 bacpy(&conn->init_addr, &hdev->bdaddr);
2149 conn->resp_addr_type = peer_addr_type;
2150 bacpy(&conn->resp_addr, peer_addr);
2152 /* We don't want the connection attempt to stick around
2153 * indefinitely since LE doesn't have a page timeout concept
2154 * like BR/EDR. Set a timer for any connection that doesn't use
2155 * the white list for connecting.
2157 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2158 queue_delayed_work(conn->hdev->workqueue,
2159 &conn->le_conn_timeout,
2160 conn->conn_timeout);
/* Command Status handler for HCI_LE_Create_Connection: on success,
 * delegate address/timeout bookkeeping to cs_le_create_conn().
 */
2163 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2165 struct hci_cp_le_create_conn *cp;
2167 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2169 /* All connection failure handling is taken care of by the
2170 * hci_le_conn_failed function which is triggered by the HCI
2171 * request completion callbacks used for connecting.
2176 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2182 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2183 cp->own_address_type, cp->filter_policy);
2185 hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Extended_Create_Connection: same
 * flow as hci_cs_le_create_conn(), just with the extended command's
 * parameter layout (own_addr_type field name differs).
 */
2188 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2190 struct hci_cp_le_ext_create_conn *cp;
2192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2194 /* All connection failure handling is taken care of by the
2195 * hci_le_conn_failed function which is triggered by the HCI
2196 * request completion callbacks used for connecting.
2201 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2207 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2208 cp->own_addr_type, cp->filter_policy);
2210 hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Read_Remote_Features: on failure,
 * complete the connect attempt with the error and drop the reference
 * taken for a connection still in BT_CONFIG.
 */
2213 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2215 struct hci_cp_le_read_remote_features *cp;
2216 struct hci_conn *conn;
2218 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2223 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2229 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2231 if (conn->state == BT_CONFIG) {
2232 hci_connect_cfm(conn, status);
2233 hci_conn_drop(conn);
2237 hci_dev_unlock(hdev);
/* Command Status handler for HCI_LE_Start_Encryption: if starting
 * encryption failed while the link is up, tear the connection down with
 * an authentication-failure reason rather than leaving it unencrypted.
 */
2240 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2242 struct hci_cp_le_start_enc *cp;
2243 struct hci_conn *conn;
2245 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2252 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2256 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2260 if (conn->state != BT_CONNECTED)
2263 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2264 hci_conn_drop(conn);
2267 hci_dev_unlock(hdev);
/* Command Status handler for HCI_Switch_Role: on failure, clear the
 * pending role-switch flag so a later attempt is possible.
 */
2270 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2272 struct hci_cp_switch_role *cp;
2273 struct hci_conn *conn;
2275 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2280 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2286 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2288 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2290 hci_dev_unlock(hdev);
/* Inquiry Complete event: clear the HCI_INQUIRY flag (waking any
 * waiters), then — when mgmt-driven discovery is active — either start
 * resolving names of discovered devices or mark discovery as stopped,
 * taking simultaneous LE scanning into account.
 */
2293 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2295 __u8 status = *((__u8 *) skb->data);
2296 struct discovery_state *discov = &hdev->discovery;
2297 struct inquiry_entry *e;
2299 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2301 hci_conn_check_pending(hdev);
2303 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2306 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2307 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2309 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2314 if (discov->state != DISCOVERY_FINDING)
2317 if (list_empty(&discov->resolve)) {
2318 /* When BR/EDR inquiry is active and no LE scanning is in
2319 * progress, then change discovery state to indicate completion.
2321 * When running LE scanning and BR/EDR inquiry simultaneously
2322 * and the LE scan already finished, then change the discovery
2323 * state to indicate completion.
2325 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2326 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2327 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Names still pending: start resolving the first one, otherwise
 * fall through to the same completion logic as above.
 */
2331 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2332 if (e && hci_resolve_name(hdev, e) == 0) {
2333 e->name_state = NAME_PENDING;
2334 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2336 /* When BR/EDR inquiry is active and no LE scanning is in
2337 * progress, then change discovery state to indicate completion.
2339 * When running LE scanning and BR/EDR inquiry simultaneously
2340 * and the LE scan already finished, then change the discovery
2341 * state to indicate completion.
2343 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2344 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2345 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2349 hci_dev_unlock(hdev);
/* Inquiry Result event: for each response, update the inquiry cache
 * (RSSI is not available in this event format, so HCI_RSSI_INVALID is
 * recorded) and report the device to mgmt. Results are ignored while a
 * periodic inquiry is running.
 */
2352 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2354 struct inquiry_data data;
/* First byte is the response count; entries follow it. */
2355 struct inquiry_info *info = (void *) (skb->data + 1);
2356 int num_rsp = *((__u8 *) skb->data);
2358 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2363 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2368 for (; num_rsp; num_rsp--, info++) {
2371 bacpy(&data.bdaddr, &info->bdaddr);
2372 data.pscan_rep_mode = info->pscan_rep_mode;
2373 data.pscan_period_mode = info->pscan_period_mode;
2374 data.pscan_mode = info->pscan_mode;
2375 memcpy(data.dev_class, info->dev_class, 3);
2376 data.clock_offset = info->clock_offset;
2377 data.rssi = HCI_RSSI_INVALID;
2378 data.ssp_mode = 0x00;
2380 flags = hci_inquiry_cache_update(hdev, &data, false);
2382 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2383 info->dev_class, HCI_RSSI_INVALID,
2384 flags, NULL, 0, NULL, 0);
2387 hci_dev_unlock(hdev);
/* Connection Complete event: finalize an ACL/SCO connection attempt.
 * On success, store the handle, move the conn to BT_CONFIG (ACL) or
 * BT_CONNECTED, create sysfs/debugfs entries, mirror the device's
 * auth/encrypt policy onto the conn, kick off remote-feature reading,
 * and fix up the packet type for pre-2.0 incoming links. On failure,
 * close the conn and report the failed connect to mgmt.
 */
2390 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2392 struct hci_ev_conn_complete *ev = (void *) skb->data;
2393 struct hci_conn *conn;
2395 BT_DBG("%s", hdev->name);
2399 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report an eSCO attempt completing as SCO_LINK;
 * retry the lookup as ESCO and coerce the conn type to match.
 */
2401 if (ev->link_type != SCO_LINK)
2404 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2408 conn->type = SCO_LINK;
2412 conn->handle = __le16_to_cpu(ev->handle);
2414 if (conn->type == ACL_LINK) {
2415 conn->state = BT_CONFIG;
2416 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) links without a stored key get the
 * longer pairing timeout.
 */
2418 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2419 !hci_find_link_key(hdev, &ev->bdaddr))
2420 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2422 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2424 conn->state = BT_CONNECTED;
2426 hci_debugfs_create_conn(conn);
2427 hci_conn_add_sysfs(conn);
/* Inherit device-wide auth/encrypt policy onto this link. */
2429 if (test_bit(HCI_AUTH, &hdev->flags))
2430 set_bit(HCI_CONN_AUTH, &conn->flags);
2432 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2433 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2435 /* Get remote features */
2436 if (conn->type == ACL_LINK) {
2437 struct hci_cp_read_remote_features cp;
2438 cp.handle = ev->handle;
2439 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2442 hci_req_update_scan(hdev);
2445 /* Set packet type for incoming connection */
2446 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2447 struct hci_cp_change_conn_ptype cp;
2448 cp.handle = ev->handle;
2449 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2450 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
/* Failure path: close the conn and inform mgmt for ACL links. */
2454 conn->state = BT_CLOSED;
2455 if (conn->type == ACL_LINK)
2456 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2457 conn->dst_type, ev->status);
2460 if (conn->type == ACL_LINK)
2461 hci_sco_setup(conn, ev->status);
2464 hci_connect_cfm(conn, ev->status);
2466 } else if (ev->link_type != ACL_LINK)
2467 hci_connect_cfm(conn, ev->status);
2470 hci_dev_unlock(hdev);
2472 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address with
 * reason "unacceptable BD_ADDR".
 */
2475 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2477 struct hci_cp_reject_conn_req cp;
2479 bacpy(&cp.bdaddr, bdaddr);
2480 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2481 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event: decide whether to accept an incoming
 * ACL/SCO/eSCO connection. Rejects links the protocols won't accept,
 * blacklisted addresses, and (under mgmt) non-connectable devices not
 * on the whitelist. Accepted requests either get an Accept command sent
 * immediately or are deferred to the upper layer (HCI_PROTO_DEFER).
 */
2484 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2486 struct hci_ev_conn_request *ev = (void *) skb->data;
2487 int mask = hdev->link_mode;
2488 struct inquiry_entry *ie;
2489 struct hci_conn *conn;
2492 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
/* Ask registered protocols whether they will accept this link. */
2495 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2498 if (!(mask & HCI_LM_ACCEPT)) {
2499 hci_reject_conn(hdev, &ev->bdaddr);
2503 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2505 hci_reject_conn(hdev, &ev->bdaddr);
2509 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2510 * connection. These features are only touched through mgmt so
2511 * only do the checks if HCI_MGMT is set.
2513 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2514 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2515 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2517 hci_reject_conn(hdev, &ev->bdaddr);
2521 /* Connection accepted */
/* Refresh the cached device class from the request if known. */
2525 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2527 memcpy(ie->data.dev_class, ev->dev_class, 3);
2529 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2532 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2535 bt_dev_err(hdev, "no memory for new connection");
2536 hci_dev_unlock(hdev);
2541 memcpy(conn->dev_class, ev->dev_class, 3);
2543 hci_dev_unlock(hdev);
/* ACL (or SCO on non-eSCO controllers): accept right away. */
2545 if (ev->link_type == ACL_LINK ||
2546 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2547 struct hci_cp_accept_conn_req cp;
2548 conn->state = BT_CONNECT;
2550 bacpy(&cp.bdaddr, &ev->bdaddr);
2552 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2553 cp.role = 0x00; /* Become master */
2555 cp.role = 0x01; /* Remain slave */
2557 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2558 } else if (!(flags & HCI_PROTO_DEFER)) {
/* eSCO accept with default bandwidth/latency parameters. */
2559 struct hci_cp_accept_sync_conn_req cp;
2560 conn->state = BT_CONNECT;
2562 bacpy(&cp.bdaddr, &ev->bdaddr);
2563 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2565 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2566 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2567 cp.max_latency = cpu_to_le16(0xffff);
2568 cp.content_format = cpu_to_le16(hdev->voice_setting);
2569 cp.retrans_effort = 0xff;
2571 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Deferred: let the upper layer decide when to accept. */
2574 conn->state = BT_CONNECT2;
2575 hci_connect_cfm(conn, 0);
/* Map an HCI disconnect error code to the corresponding mgmt
 * disconnect reason; anything unrecognized becomes UNKNOWN.
 */
2579 static u8 hci_to_mgmt_reason(u8 err)
2582 case HCI_ERROR_CONNECTION_TIMEOUT:
2583 return MGMT_DEV_DISCONN_TIMEOUT;
2584 case HCI_ERROR_REMOTE_USER_TERM:
2585 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2586 case HCI_ERROR_REMOTE_POWER_OFF:
2587 return MGMT_DEV_DISCONN_REMOTE;
2588 case HCI_ERROR_LOCAL_HOST_TERM:
2589 return MGMT_DEV_DISCONN_LOCAL_HOST;
2591 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event: close the connection, report the
 * disconnect (with a mapped mgmt reason) to userspace, maintain link
 * keys and auto-connect parameters for ACL/LE links, notify the
 * protocols, and re-enable advertising if an LE link went away.
 */
2595 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2597 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2599 struct hci_conn_params *params;
2600 struct hci_conn *conn;
2601 bool mgmt_connected;
2604 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2608 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* A non-zero status means the disconnect itself failed. */
2613 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2614 conn->dst_type, ev->status);
2618 conn->state = BT_CLOSED;
2620 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
/* Prefer the auth-failure reason recorded earlier over the
 * controller-provided disconnect reason.
 */
2622 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2623 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2625 reason = hci_to_mgmt_reason(ev->reason);
2627 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2628 reason, mgmt_connected);
2630 if (conn->type == ACL_LINK) {
2631 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2632 hci_remove_link_key(hdev, &conn->dst);
2634 hci_req_update_scan(hdev);
/* Re-arm auto-connection for devices configured for it. */
2637 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2639 switch (params->auto_connect) {
2640 case HCI_AUTO_CONN_LINK_LOSS:
2641 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2645 case HCI_AUTO_CONN_DIRECT:
2646 case HCI_AUTO_CONN_ALWAYS:
2647 list_del_init(&params->action);
2648 list_add(&params->action, &hdev->pend_le_conns);
2649 hci_update_background_scan(hdev);
2659 hci_disconn_cfm(conn, ev->reason);
2662 /* Re-enable advertising if necessary, since it might
2663 * have been disabled by the connection. From the
2664 * HCI_LE_Set_Advertise_Enable command description in
2665 * the core specification (v4.0):
2666 * "The Controller shall continue advertising until the Host
2667 * issues an LE_Set_Advertise_Enable command with
2668 * Advertising_Enable set to 0x00 (Advertising is disabled)
2669 * or until a connection is created or until the Advertising
2670 * is timed out due to Directed Advertising."
2672 if (type == LE_LINK)
2673 hci_req_reenable_advertising(hdev)
2676 hci_dev_unlock(hdev);
/* Authentication Complete event: on success, mark the link
 * authenticated and promote the security level; on failure, record the
 * auth failure (including the PIN-or-key-missing case) and notify mgmt.
 * Then either continue the connect sequence (enable encryption for SSP
 * links, or report the connection up) or finish a standalone auth
 * request, possibly chaining into a pending encryption change.
 */
2679 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2681 struct hci_ev_auth_complete *ev = (void *) skb->data;
2682 struct hci_conn *conn;
2684 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2688 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2693 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
/* Legacy devices cannot be re-authenticated on a live link. */
2695 if (!hci_conn_ssp_enabled(conn) &&
2696 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2697 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2699 set_bit(HCI_CONN_AUTH, &conn->flags);
2700 conn->sec_level = conn->pending_sec_level;
/* Failure: remember auth failure (used later at disconnect) and
 * tell mgmt.
 */
2703 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2704 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2706 mgmt_auth_failed(conn, ev->status);
2709 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2710 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2712 if (conn->state == BT_CONFIG) {
/* SSP links in config: proceed to enabling encryption. */
2713 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2714 struct hci_cp_set_conn_encrypt cp;
2715 cp.handle = ev->handle;
2717 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2720 conn->state = BT_CONNECTED;
2721 hci_connect_cfm(conn, ev->status);
2722 hci_conn_drop(conn);
/* Standalone auth request: confirm to waiters and rebalance refs. */
2725 hci_auth_cfm(conn, ev->status);
2727 hci_conn_hold(conn);
2728 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2729 hci_conn_drop(conn);
/* A pending encryption change was waiting on this auth result. */
2732 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2734 struct hci_cp_set_conn_encrypt cp;
2735 cp.handle = ev->handle;
2737 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2740 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2741 hci_encrypt_cfm(conn, ev->status, 0x00);
2746 hci_dev_unlock(hdev);
/* Remote Name Request Complete event: hand the resolved (or failed)
 * name to the discovery machinery, then — if the related connection
 * still needs it — issue an authentication request.
 */
2749 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2751 struct hci_ev_remote_name *ev = (void *) skb->data;
2752 struct hci_conn *conn;
2754 BT_DBG("%s", hdev->name);
2756 hci_conn_check_pending(hdev);
2760 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2762 if (!hci_dev_test_flag(hdev, HCI_MGMT))
/* Pass the name only on success; the name field is not NUL-bounded
 * beyond HCI_MAX_NAME_LENGTH, hence the strnlen.
 */
2765 if (ev->status == 0)
2766 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2767 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2769 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2775 if (!hci_outgoing_auth_needed(hdev, conn))
2778 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2779 struct hci_cp_auth_requested cp;
2781 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2783 cp.handle = __cpu_to_le16(conn->handle);
2784 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2788 hci_dev_unlock(hdev);
/* Completion callback for HCI_Read_Encryption_Key_Size: validate the
 * response, store the key size on the connection (assuming the maximum
 * HCI_LINK_KEY_SIZE when the read failed), then finish the connect
 * sequence or deliver the encrypt-change notification.
 */
2791 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2792 u16 opcode, struct sk_buff *skb)
2794 const struct hci_rp_read_enc_key_size *rp;
2795 struct hci_conn *conn;
2798 BT_DBG("%s status 0x%02x", hdev->name, status);
/* Guard against a missing or truncated response buffer. */
2800 if (!skb || skb->len < sizeof(*rp)) {
2801 bt_dev_err(hdev, "invalid read key size response");
2805 rp = (void *)skb->data;
2806 handle = le16_to_cpu(rp->handle);
2810 conn = hci_conn_hash_lookup_handle(hdev, handle);
2814 /* If we fail to read the encryption key size, assume maximum
2815 * (which is the same we do also when this HCI command isn't
2819 bt_dev_err(hdev, "failed to read key size for handle %u",
2821 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2823 conn->enc_key_size = rp->key_size;
2826 if (conn->state == BT_CONFIG) {
2827 conn->state = BT_CONNECTED;
2828 hci_connect_cfm(conn, 0);
2829 hci_conn_drop(conn);
/* Derive the encrypt value to report from the connection flags. */
2833 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2835 else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2840 hci_encrypt_cfm(conn, 0, encrypt);
2844 hci_dev_unlock(hdev);
/* Encryption Change event: update the connection's auth/encrypt/FIPS/
 * AES-CCM flags to match the new state, rotate the RPA after LE
 * encryption failures, enforce Secure Connections Only policy, read the
 * encryption key size for newly-encrypted ACL links, and finally notify
 * the upper layers.
 */
2847 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2849 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2850 struct hci_conn *conn;
2852 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2856 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2862 /* Encryption implies authentication */
2863 set_bit(HCI_CONN_AUTH, &conn->flags);
2864 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2865 conn->sec_level = conn->pending_sec_level;
2867 /* P-256 authentication key implies FIPS */
2868 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2869 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 on BR/EDR, and any LE encryption, is AES-CCM. */
2871 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2872 conn->type == LE_LINK)
2873 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2875 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2876 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2880 /* We should disregard the current RPA and generate a new one
2881 * whenever the encryption procedure fails.
2883 if (ev->status && conn->type == LE_LINK) {
2884 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2885 hci_adv_instances_set_rpa_expired(hdev, true);
2888 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Encryption failed on a live link: tear it down. */
2890 if (ev->status && conn->state == BT_CONNECTED) {
2891 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2892 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2894 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2895 hci_conn_drop(conn);
2899 /* In Secure Connections Only mode, do not allow any connections
2900 * that are not encrypted with AES-CCM using a P-256 authenticated
2903 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2904 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2905 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2906 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2907 hci_conn_drop(conn);
2911 /* Try reading the encryption key size for encrypted ACL links */
2912 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2913 struct hci_cp_read_enc_key_size cp;
2914 struct hci_request req;
2916 /* Only send HCI_Read_Encryption_Key_Size if the
2917 * controller really supports it. If it doesn't, assume
2918 * the default size (16).
2920 if (!(hdev->commands[20] & 0x10)) {
2921 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2925 hci_req_init(&req, hdev);
2927 cp.handle = cpu_to_le16(conn->handle);
2928 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2930 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2931 bt_dev_err(hdev, "sending read key size failed");
2932 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2940 if (conn->state == BT_CONFIG) {
2942 conn->state = BT_CONNECTED;
2944 hci_connect_cfm(conn, ev->status);
2945 hci_conn_drop(conn);
2947 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2950 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event: mark the link secure on
 * success, clear the pending-auth flag, and notify key-change waiters.
 */
2953 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2954 struct sk_buff *skb)
2956 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2957 struct hci_conn *conn;
2959 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2963 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2966 set_bit(HCI_CONN_SECURE, &conn->flags);
2968 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2970 hci_key_change_cfm(conn, ev->status);
2973 hci_dev_unlock(hdev);
/* Read Remote Supported Features Complete event: cache the remote's
 * feature page 0, then continue the connect sequence — read extended
 * features if both sides support them, otherwise request the remote
 * name (or report the device connected) and finish configuration when
 * no further authentication is needed.
 */
2976 static void hci_remote_features_evt(struct hci_dev *hdev,
2977 struct sk_buff *skb)
2979 struct hci_ev_remote_features *ev = (void *) skb->data;
2980 struct hci_conn *conn;
2982 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2986 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2991 memcpy(conn->features[0], ev->features, 8);
2993 if (conn->state != BT_CONFIG)
/* Both sides support extended features: fetch page 1+. */
2996 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2997 lmp_ext_feat_capable(conn)) {
2998 struct hci_cp_read_remote_ext_features cp;
2999 cp.handle = ev->handle;
3001 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Otherwise resolve the remote name before reporting connected. */
3006 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3007 struct hci_cp_remote_name_req cp;
3008 memset(&cp, 0, sizeof(cp));
3009 bacpy(&cp.bdaddr, &conn->dst);
3010 cp.pscan_rep_mode = 0x02;
3011 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3012 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3013 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3015 if (!hci_outgoing_auth_needed(hdev, conn)) {
3016 conn->state = BT_CONNECTED;
3017 hci_connect_cfm(conn, ev->status);
3018 hci_conn_drop(conn);
3022 hci_dev_unlock(hdev);
3025 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3026 u16 *opcode, u8 *status,
3027 hci_req_complete_t *req_complete,
3028 hci_req_complete_skb_t *req_complete_skb)
3030 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3032 *opcode = __le16_to_cpu(ev->opcode);
3033 *status = skb->data[sizeof(*ev)];
3035 skb_pull(skb, sizeof(*ev));
3038 case HCI_OP_INQUIRY_CANCEL:
3039 hci_cc_inquiry_cancel(hdev, skb);
3042 case HCI_OP_PERIODIC_INQ:
3043 hci_cc_periodic_inq(hdev, skb);
3046 case HCI_OP_EXIT_PERIODIC_INQ:
3047 hci_cc_exit_periodic_inq(hdev, skb);
3050 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3051 hci_cc_remote_name_req_cancel(hdev, skb);
3054 case HCI_OP_ROLE_DISCOVERY:
3055 hci_cc_role_discovery(hdev, skb);
3058 case HCI_OP_READ_LINK_POLICY:
3059 hci_cc_read_link_policy(hdev, skb);
3062 case HCI_OP_WRITE_LINK_POLICY:
3063 hci_cc_write_link_policy(hdev, skb);
3066 case HCI_OP_READ_DEF_LINK_POLICY:
3067 hci_cc_read_def_link_policy(hdev, skb);
3070 case HCI_OP_WRITE_DEF_LINK_POLICY:
3071 hci_cc_write_def_link_policy(hdev, skb);
3075 hci_cc_reset(hdev, skb);
3078 case HCI_OP_READ_STORED_LINK_KEY:
3079 hci_cc_read_stored_link_key(hdev, skb);
3082 case HCI_OP_DELETE_STORED_LINK_KEY:
3083 hci_cc_delete_stored_link_key(hdev, skb);
3086 case HCI_OP_WRITE_LOCAL_NAME:
3087 hci_cc_write_local_name(hdev, skb);
3090 case HCI_OP_READ_LOCAL_NAME:
3091 hci_cc_read_local_name(hdev, skb);
3094 case HCI_OP_WRITE_AUTH_ENABLE:
3095 hci_cc_write_auth_enable(hdev, skb);
3098 case HCI_OP_WRITE_ENCRYPT_MODE:
3099 hci_cc_write_encrypt_mode(hdev, skb);
3102 case HCI_OP_WRITE_SCAN_ENABLE:
3103 hci_cc_write_scan_enable(hdev, skb);
3106 case HCI_OP_READ_CLASS_OF_DEV:
3107 hci_cc_read_class_of_dev(hdev, skb);
3110 case HCI_OP_WRITE_CLASS_OF_DEV:
3111 hci_cc_write_class_of_dev(hdev, skb);
3114 case HCI_OP_READ_VOICE_SETTING:
3115 hci_cc_read_voice_setting(hdev, skb);
3118 case HCI_OP_WRITE_VOICE_SETTING:
3119 hci_cc_write_voice_setting(hdev, skb);
3122 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3123 hci_cc_read_num_supported_iac(hdev, skb);
3126 case HCI_OP_WRITE_SSP_MODE:
3127 hci_cc_write_ssp_mode(hdev, skb);
3130 case HCI_OP_WRITE_SC_SUPPORT:
3131 hci_cc_write_sc_support(hdev, skb);
3134 case HCI_OP_READ_LOCAL_VERSION:
3135 hci_cc_read_local_version(hdev, skb);
3138 case HCI_OP_READ_LOCAL_COMMANDS:
3139 hci_cc_read_local_commands(hdev, skb);
3142 case HCI_OP_READ_LOCAL_FEATURES:
3143 hci_cc_read_local_features(hdev, skb);
3146 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3147 hci_cc_read_local_ext_features(hdev, skb);
3150 case HCI_OP_READ_BUFFER_SIZE:
3151 hci_cc_read_buffer_size(hdev, skb);
3154 case HCI_OP_READ_BD_ADDR:
3155 hci_cc_read_bd_addr(hdev, skb);
3158 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3159 hci_cc_read_page_scan_activity(hdev, skb);
3162 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3163 hci_cc_write_page_scan_activity(hdev, skb);
3166 case HCI_OP_READ_PAGE_SCAN_TYPE:
3167 hci_cc_read_page_scan_type(hdev, skb);
3170 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3171 hci_cc_write_page_scan_type(hdev, skb);
3174 case HCI_OP_READ_DATA_BLOCK_SIZE:
3175 hci_cc_read_data_block_size(hdev, skb);
3178 case HCI_OP_READ_FLOW_CONTROL_MODE:
3179 hci_cc_read_flow_control_mode(hdev, skb);
3182 case HCI_OP_READ_LOCAL_AMP_INFO:
3183 hci_cc_read_local_amp_info(hdev, skb);
3186 case HCI_OP_READ_CLOCK:
3187 hci_cc_read_clock(hdev, skb);
3190 case HCI_OP_READ_INQ_RSP_TX_POWER:
3191 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3194 case HCI_OP_PIN_CODE_REPLY:
3195 hci_cc_pin_code_reply(hdev, skb);
3198 case HCI_OP_PIN_CODE_NEG_REPLY:
3199 hci_cc_pin_code_neg_reply(hdev, skb);
3202 case HCI_OP_READ_LOCAL_OOB_DATA:
3203 hci_cc_read_local_oob_data(hdev, skb);
3206 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3207 hci_cc_read_local_oob_ext_data(hdev, skb);
3210 case HCI_OP_LE_READ_BUFFER_SIZE:
3211 hci_cc_le_read_buffer_size(hdev, skb);
3214 case HCI_OP_LE_READ_LOCAL_FEATURES:
3215 hci_cc_le_read_local_features(hdev, skb);
3218 case HCI_OP_LE_READ_ADV_TX_POWER:
3219 hci_cc_le_read_adv_tx_power(hdev, skb);
3222 case HCI_OP_USER_CONFIRM_REPLY:
3223 hci_cc_user_confirm_reply(hdev, skb);
3226 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3227 hci_cc_user_confirm_neg_reply(hdev, skb);
3230 case HCI_OP_USER_PASSKEY_REPLY:
3231 hci_cc_user_passkey_reply(hdev, skb);
3234 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3235 hci_cc_user_passkey_neg_reply(hdev, skb);
3238 case HCI_OP_LE_SET_RANDOM_ADDR:
3239 hci_cc_le_set_random_addr(hdev, skb);
3242 case HCI_OP_LE_SET_ADV_ENABLE:
3243 hci_cc_le_set_adv_enable(hdev, skb);
3246 case HCI_OP_LE_SET_SCAN_PARAM:
3247 hci_cc_le_set_scan_param(hdev, skb);
3250 case HCI_OP_LE_SET_SCAN_ENABLE:
3251 hci_cc_le_set_scan_enable(hdev, skb);
3254 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3255 hci_cc_le_read_white_list_size(hdev, skb);
3258 case HCI_OP_LE_CLEAR_WHITE_LIST:
3259 hci_cc_le_clear_white_list(hdev, skb);
3262 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3263 hci_cc_le_add_to_white_list(hdev, skb);
3266 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3267 hci_cc_le_del_from_white_list(hdev, skb);
3270 case HCI_OP_LE_READ_SUPPORTED_STATES:
3271 hci_cc_le_read_supported_states(hdev, skb);
3274 case HCI_OP_LE_READ_DEF_DATA_LEN:
3275 hci_cc_le_read_def_data_len(hdev, skb);
3278 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3279 hci_cc_le_write_def_data_len(hdev, skb);
3282 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3283 hci_cc_le_clear_resolv_list(hdev, skb);
3286 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3287 hci_cc_le_read_resolv_list_size(hdev, skb);
3290 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3291 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3294 case HCI_OP_LE_READ_MAX_DATA_LEN:
3295 hci_cc_le_read_max_data_len(hdev, skb);
3298 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3299 hci_cc_write_le_host_supported(hdev, skb);
3302 case HCI_OP_LE_SET_ADV_PARAM:
3303 hci_cc_set_adv_param(hdev, skb);
3306 case HCI_OP_READ_RSSI:
3307 hci_cc_read_rssi(hdev, skb);
3310 case HCI_OP_READ_TX_POWER:
3311 hci_cc_read_tx_power(hdev, skb);
3314 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3315 hci_cc_write_ssp_debug_mode(hdev, skb);
3318 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3319 hci_cc_le_set_ext_scan_param(hdev, skb);
3322 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3323 hci_cc_le_set_ext_scan_enable(hdev, skb);
3326 case HCI_OP_LE_SET_DEFAULT_PHY:
3327 hci_cc_le_set_default_phy(hdev, skb);
3330 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3331 hci_cc_le_read_num_adv_sets(hdev, skb);
3334 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3335 hci_cc_set_ext_adv_param(hdev, skb);
3338 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3339 hci_cc_le_set_ext_adv_enable(hdev, skb);
3342 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3343 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3347 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3351 if (*opcode != HCI_OP_NOP)
3352 cancel_delayed_work(&hdev->cmd_timer);
3354 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3355 atomic_set(&hdev->cmd_cnt, 1);
3357 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3360 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3362 "unexpected event for opcode 0x%4.4x", *opcode);
3366 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3367 queue_work(hdev->workqueue, &hdev->cmd_work);
3370 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3371 u16 *opcode, u8 *status,
3372 hci_req_complete_t *req_complete,
3373 hci_req_complete_skb_t *req_complete_skb)
3375 struct hci_ev_cmd_status *ev = (void *) skb->data;
3377 skb_pull(skb, sizeof(*ev));
3379 *opcode = __le16_to_cpu(ev->opcode);
3380 *status = ev->status;
3383 case HCI_OP_INQUIRY:
3384 hci_cs_inquiry(hdev, ev->status);
3387 case HCI_OP_CREATE_CONN:
3388 hci_cs_create_conn(hdev, ev->status);
3391 case HCI_OP_DISCONNECT:
3392 hci_cs_disconnect(hdev, ev->status);
3395 case HCI_OP_ADD_SCO:
3396 hci_cs_add_sco(hdev, ev->status);
3399 case HCI_OP_AUTH_REQUESTED:
3400 hci_cs_auth_requested(hdev, ev->status);
3403 case HCI_OP_SET_CONN_ENCRYPT:
3404 hci_cs_set_conn_encrypt(hdev, ev->status);
3407 case HCI_OP_REMOTE_NAME_REQ:
3408 hci_cs_remote_name_req(hdev, ev->status);
3411 case HCI_OP_READ_REMOTE_FEATURES:
3412 hci_cs_read_remote_features(hdev, ev->status);
3415 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3416 hci_cs_read_remote_ext_features(hdev, ev->status);
3419 case HCI_OP_SETUP_SYNC_CONN:
3420 hci_cs_setup_sync_conn(hdev, ev->status);
3423 case HCI_OP_SNIFF_MODE:
3424 hci_cs_sniff_mode(hdev, ev->status);
3427 case HCI_OP_EXIT_SNIFF_MODE:
3428 hci_cs_exit_sniff_mode(hdev, ev->status);
3431 case HCI_OP_SWITCH_ROLE:
3432 hci_cs_switch_role(hdev, ev->status);
3435 case HCI_OP_LE_CREATE_CONN:
3436 hci_cs_le_create_conn(hdev, ev->status);
3439 case HCI_OP_LE_READ_REMOTE_FEATURES:
3440 hci_cs_le_read_remote_features(hdev, ev->status);
3443 case HCI_OP_LE_START_ENC:
3444 hci_cs_le_start_enc(hdev, ev->status);
3447 case HCI_OP_LE_EXT_CREATE_CONN:
3448 hci_cs_le_ext_create_conn(hdev, ev->status);
3452 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3456 if (*opcode != HCI_OP_NOP)
3457 cancel_delayed_work(&hdev->cmd_timer);
3459 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3460 atomic_set(&hdev->cmd_cnt, 1);
3462 /* Indicate request completion if the command failed. Also, if
3463 * we're not waiting for a special event and we get a success
3464 * command status we should try to flag the request as completed
3465 * (since for this kind of commands there will not be a command
3469 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3470 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3473 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3475 "unexpected event for opcode 0x%4.4x", *opcode);
3479 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3480 queue_work(hdev->workqueue, &hdev->cmd_work);
3483 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3485 struct hci_ev_hardware_error *ev = (void *) skb->data;
3487 hdev->hw_error_code = ev->code;
3489 queue_work(hdev->req_workqueue, &hdev->error_reset);
3492 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3494 struct hci_ev_role_change *ev = (void *) skb->data;
3495 struct hci_conn *conn;
3497 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3501 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3504 conn->role = ev->role;
3506 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3508 hci_role_switch_cfm(conn, ev->status, ev->role);
3511 hci_dev_unlock(hdev);
3514 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3516 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3519 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3520 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3524 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3525 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3526 BT_DBG("%s bad parameters", hdev->name);
3530 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3532 for (i = 0; i < ev->num_hndl; i++) {
3533 struct hci_comp_pkts_info *info = &ev->handles[i];
3534 struct hci_conn *conn;
3535 __u16 handle, count;
3537 handle = __le16_to_cpu(info->handle);
3538 count = __le16_to_cpu(info->count);
3540 conn = hci_conn_hash_lookup_handle(hdev, handle);
3544 conn->sent -= count;
3546 switch (conn->type) {
3548 hdev->acl_cnt += count;
3549 if (hdev->acl_cnt > hdev->acl_pkts)
3550 hdev->acl_cnt = hdev->acl_pkts;
3554 if (hdev->le_pkts) {
3555 hdev->le_cnt += count;
3556 if (hdev->le_cnt > hdev->le_pkts)
3557 hdev->le_cnt = hdev->le_pkts;
3559 hdev->acl_cnt += count;
3560 if (hdev->acl_cnt > hdev->acl_pkts)
3561 hdev->acl_cnt = hdev->acl_pkts;
3566 hdev->sco_cnt += count;
3567 if (hdev->sco_cnt > hdev->sco_pkts)
3568 hdev->sco_cnt = hdev->sco_pkts;
3572 bt_dev_err(hdev, "unknown type %d conn %p",
3578 queue_work(hdev->workqueue, &hdev->tx_work);
3581 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3584 struct hci_chan *chan;
3586 switch (hdev->dev_type) {
3588 return hci_conn_hash_lookup_handle(hdev, handle);
3590 chan = hci_chan_lookup_handle(hdev, handle);
3595 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3602 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3604 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3607 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3608 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3612 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3613 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3614 BT_DBG("%s bad parameters", hdev->name);
3618 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3621 for (i = 0; i < ev->num_hndl; i++) {
3622 struct hci_comp_blocks_info *info = &ev->handles[i];
3623 struct hci_conn *conn = NULL;
3624 __u16 handle, block_count;
3626 handle = __le16_to_cpu(info->handle);
3627 block_count = __le16_to_cpu(info->blocks);
3629 conn = __hci_conn_lookup_handle(hdev, handle);
3633 conn->sent -= block_count;
3635 switch (conn->type) {
3638 hdev->block_cnt += block_count;
3639 if (hdev->block_cnt > hdev->num_blocks)
3640 hdev->block_cnt = hdev->num_blocks;
3644 bt_dev_err(hdev, "unknown type %d conn %p",
3650 queue_work(hdev->workqueue, &hdev->tx_work);
3653 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3655 struct hci_ev_mode_change *ev = (void *) skb->data;
3656 struct hci_conn *conn;
3658 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3662 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3664 conn->mode = ev->mode;
3666 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3668 if (conn->mode == HCI_CM_ACTIVE)
3669 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3671 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3674 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3675 hci_sco_setup(conn, ev->status);
3678 hci_dev_unlock(hdev);
3681 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3683 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3684 struct hci_conn *conn;
3686 BT_DBG("%s", hdev->name);
3690 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3694 if (conn->state == BT_CONNECTED) {
3695 hci_conn_hold(conn);
3696 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3697 hci_conn_drop(conn);
3700 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3701 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3702 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3703 sizeof(ev->bdaddr), &ev->bdaddr);
3704 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3707 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3712 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3716 hci_dev_unlock(hdev);
3719 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3721 if (key_type == HCI_LK_CHANGED_COMBINATION)
3724 conn->pin_length = pin_len;
3725 conn->key_type = key_type;
3728 case HCI_LK_LOCAL_UNIT:
3729 case HCI_LK_REMOTE_UNIT:
3730 case HCI_LK_DEBUG_COMBINATION:
3732 case HCI_LK_COMBINATION:
3734 conn->pending_sec_level = BT_SECURITY_HIGH;
3736 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3738 case HCI_LK_UNAUTH_COMBINATION_P192:
3739 case HCI_LK_UNAUTH_COMBINATION_P256:
3740 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3742 case HCI_LK_AUTH_COMBINATION_P192:
3743 conn->pending_sec_level = BT_SECURITY_HIGH;
3745 case HCI_LK_AUTH_COMBINATION_P256:
3746 conn->pending_sec_level = BT_SECURITY_FIPS;
3751 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3753 struct hci_ev_link_key_req *ev = (void *) skb->data;
3754 struct hci_cp_link_key_reply cp;
3755 struct hci_conn *conn;
3756 struct link_key *key;
3758 BT_DBG("%s", hdev->name);
3760 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3765 key = hci_find_link_key(hdev, &ev->bdaddr);
3767 BT_DBG("%s link key not found for %pMR", hdev->name,
3772 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3775 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3777 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3779 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3780 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3781 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3782 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3786 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3787 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3788 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3789 BT_DBG("%s ignoring key unauthenticated for high security",
3794 conn_set_key(conn, key->type, key->pin_len);
3797 bacpy(&cp.bdaddr, &ev->bdaddr);
3798 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3800 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3802 hci_dev_unlock(hdev);
3807 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3808 hci_dev_unlock(hdev);
3811 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3813 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3814 struct hci_conn *conn;
3815 struct link_key *key;
3819 BT_DBG("%s", hdev->name);
3823 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3827 hci_conn_hold(conn);
3828 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3829 hci_conn_drop(conn);
3831 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3832 conn_set_key(conn, ev->key_type, conn->pin_length);
3834 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3837 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3838 ev->key_type, pin_len, &persistent);
3842 /* Update connection information since adding the key will have
3843 * fixed up the type in the case of changed combination keys.
3845 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3846 conn_set_key(conn, key->type, key->pin_len);
3848 mgmt_new_link_key(hdev, key, persistent);
3850 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3851 * is set. If it's not set simply remove the key from the kernel
3852 * list (we've still notified user space about it but with
3853 * store_hint being 0).
3855 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3856 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3857 list_del_rcu(&key->list);
3858 kfree_rcu(key, rcu);
3863 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3865 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3868 hci_dev_unlock(hdev);
3871 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3873 struct hci_ev_clock_offset *ev = (void *) skb->data;
3874 struct hci_conn *conn;
3876 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3880 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3881 if (conn && !ev->status) {
3882 struct inquiry_entry *ie;
3884 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3886 ie->data.clock_offset = ev->clock_offset;
3887 ie->timestamp = jiffies;
3891 hci_dev_unlock(hdev);
3894 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3896 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3897 struct hci_conn *conn;
3899 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3903 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3904 if (conn && !ev->status)
3905 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3907 hci_dev_unlock(hdev);
3910 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3912 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3913 struct inquiry_entry *ie;
3915 BT_DBG("%s", hdev->name);
3919 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3921 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3922 ie->timestamp = jiffies;
3925 hci_dev_unlock(hdev);
3928 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3929 struct sk_buff *skb)
3931 struct inquiry_data data;
3932 int num_rsp = *((__u8 *) skb->data);
3934 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3939 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3944 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3945 struct inquiry_info_with_rssi_and_pscan_mode *info;
3946 info = (void *) (skb->data + 1);
3948 for (; num_rsp; num_rsp--, info++) {
3951 bacpy(&data.bdaddr, &info->bdaddr);
3952 data.pscan_rep_mode = info->pscan_rep_mode;
3953 data.pscan_period_mode = info->pscan_period_mode;
3954 data.pscan_mode = info->pscan_mode;
3955 memcpy(data.dev_class, info->dev_class, 3);
3956 data.clock_offset = info->clock_offset;
3957 data.rssi = info->rssi;
3958 data.ssp_mode = 0x00;
3960 flags = hci_inquiry_cache_update(hdev, &data, false);
3962 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3963 info->dev_class, info->rssi,
3964 flags, NULL, 0, NULL, 0);
3967 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3969 for (; num_rsp; num_rsp--, info++) {
3972 bacpy(&data.bdaddr, &info->bdaddr);
3973 data.pscan_rep_mode = info->pscan_rep_mode;
3974 data.pscan_period_mode = info->pscan_period_mode;
3975 data.pscan_mode = 0x00;
3976 memcpy(data.dev_class, info->dev_class, 3);
3977 data.clock_offset = info->clock_offset;
3978 data.rssi = info->rssi;
3979 data.ssp_mode = 0x00;
3981 flags = hci_inquiry_cache_update(hdev, &data, false);
3983 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3984 info->dev_class, info->rssi,
3985 flags, NULL, 0, NULL, 0);
3989 hci_dev_unlock(hdev);
3992 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3993 struct sk_buff *skb)
3995 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3996 struct hci_conn *conn;
3998 BT_DBG("%s", hdev->name);
4002 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4006 if (ev->page < HCI_MAX_PAGES)
4007 memcpy(conn->features[ev->page], ev->features, 8);
4009 if (!ev->status && ev->page == 0x01) {
4010 struct inquiry_entry *ie;
4012 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4014 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4016 if (ev->features[0] & LMP_HOST_SSP) {
4017 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4019 /* It is mandatory by the Bluetooth specification that
4020 * Extended Inquiry Results are only used when Secure
4021 * Simple Pairing is enabled, but some devices violate
4024 * To make these devices work, the internal SSP
4025 * enabled flag needs to be cleared if the remote host
4026 * features do not indicate SSP support */
4027 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4030 if (ev->features[0] & LMP_HOST_SC)
4031 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4034 if (conn->state != BT_CONFIG)
4037 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4038 struct hci_cp_remote_name_req cp;
4039 memset(&cp, 0, sizeof(cp));
4040 bacpy(&cp.bdaddr, &conn->dst);
4041 cp.pscan_rep_mode = 0x02;
4042 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4043 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4044 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4046 if (!hci_outgoing_auth_needed(hdev, conn)) {
4047 conn->state = BT_CONNECTED;
4048 hci_connect_cfm(conn, ev->status);
4049 hci_conn_drop(conn);
4053 hci_dev_unlock(hdev);
4056 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4057 struct sk_buff *skb)
4059 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4060 struct hci_conn *conn;
4062 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4066 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4068 if (ev->link_type == ESCO_LINK)
4071 /* When the link type in the event indicates SCO connection
4072 * and lookup of the connection object fails, then check
4073 * if an eSCO connection object exists.
4075 * The core limits the synchronous connections to either
4076 * SCO or eSCO. The eSCO connection is preferred and tried
4077 * to be setup first and until successfully established,
4078 * the link type will be hinted as eSCO.
4080 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4085 switch (ev->status) {
4087 conn->handle = __le16_to_cpu(ev->handle);
4088 conn->state = BT_CONNECTED;
4089 conn->type = ev->link_type;
4091 hci_debugfs_create_conn(conn);
4092 hci_conn_add_sysfs(conn);
4095 case 0x10: /* Connection Accept Timeout */
4096 case 0x0d: /* Connection Rejected due to Limited Resources */
4097 case 0x11: /* Unsupported Feature or Parameter Value */
4098 case 0x1c: /* SCO interval rejected */
4099 case 0x1a: /* Unsupported Remote Feature */
4100 case 0x1f: /* Unspecified error */
4101 case 0x20: /* Unsupported LMP Parameter value */
4103 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4104 (hdev->esco_type & EDR_ESCO_MASK);
4105 if (hci_setup_sync(conn, conn->link->handle))
4111 conn->state = BT_CLOSED;
4115 hci_connect_cfm(conn, ev->status);
4120 hci_dev_unlock(hdev);
4123 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4127 while (parsed < eir_len) {
4128 u8 field_len = eir[0];
4133 parsed += field_len + 1;
4134 eir += field_len + 1;
4140 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4141 struct sk_buff *skb)
4143 struct inquiry_data data;
4144 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4145 int num_rsp = *((__u8 *) skb->data);
4148 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4153 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4158 for (; num_rsp; num_rsp--, info++) {
4162 bacpy(&data.bdaddr, &info->bdaddr);
4163 data.pscan_rep_mode = info->pscan_rep_mode;
4164 data.pscan_period_mode = info->pscan_period_mode;
4165 data.pscan_mode = 0x00;
4166 memcpy(data.dev_class, info->dev_class, 3);
4167 data.clock_offset = info->clock_offset;
4168 data.rssi = info->rssi;
4169 data.ssp_mode = 0x01;
4171 if (hci_dev_test_flag(hdev, HCI_MGMT))
4172 name_known = eir_get_data(info->data,
4174 EIR_NAME_COMPLETE, NULL);
4178 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4180 eir_len = eir_get_length(info->data, sizeof(info->data));
4182 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4183 info->dev_class, info->rssi,
4184 flags, info->data, eir_len, NULL, 0);
4187 hci_dev_unlock(hdev);
4190 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4191 struct sk_buff *skb)
4193 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4194 struct hci_conn *conn;
4196 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4197 __le16_to_cpu(ev->handle));
4201 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4205 /* For BR/EDR the necessary steps are taken through the
4206 * auth_complete event.
4208 if (conn->type != LE_LINK)
4212 conn->sec_level = conn->pending_sec_level;
4214 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4216 if (ev->status && conn->state == BT_CONNECTED) {
4217 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4218 hci_conn_drop(conn);
4222 if (conn->state == BT_CONFIG) {
4224 conn->state = BT_CONNECTED;
4226 hci_connect_cfm(conn, ev->status);
4227 hci_conn_drop(conn);
4229 hci_auth_cfm(conn, ev->status);
4231 hci_conn_hold(conn);
4232 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4233 hci_conn_drop(conn);
4237 hci_dev_unlock(hdev);
4240 static u8 hci_get_auth_req(struct hci_conn *conn)
4242 /* If remote requests no-bonding follow that lead */
4243 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4244 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4245 return conn->remote_auth | (conn->auth_type & 0x01);
4247 /* If both remote and local have enough IO capabilities, require
4250 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4251 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4252 return conn->remote_auth | 0x01;
4254 /* No MITM protection possible so ignore remote requirement */
4255 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4258 static u8 bredr_oob_data_present(struct hci_conn *conn)
4260 struct hci_dev *hdev = conn->hdev;
4261 struct oob_data *data;
4263 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4267 if (bredr_sc_enabled(hdev)) {
4268 /* When Secure Connections is enabled, then just
4269 * return the present value stored with the OOB
4270 * data. The stored value contains the right present
4271 * information. However it can only be trusted when
4272 * not in Secure Connection Only mode.
4274 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4275 return data->present;
4277 /* When Secure Connections Only mode is enabled, then
4278 * the P-256 values are required. If they are not
4279 * available, then do not declare that OOB data is
4282 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4283 !memcmp(data->hash256, ZERO_KEY, 16))
4289 /* When Secure Connections is not enabled or actually
4290 * not supported by the hardware, then check that if
4291 * P-192 data values are present.
4293 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4294 !memcmp(data->hash192, ZERO_KEY, 16))
4300 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4302 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4303 struct hci_conn *conn;
4305 BT_DBG("%s", hdev->name);
4309 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4313 hci_conn_hold(conn);
4315 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4318 /* Allow pairing if we're pairable, the initiators of the
4319 * pairing or if the remote is not requesting bonding.
4321 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4322 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4323 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4324 struct hci_cp_io_capability_reply cp;
4326 bacpy(&cp.bdaddr, &ev->bdaddr);
4327 /* Change the IO capability from KeyboardDisplay
4328 * to DisplayYesNo as it is not supported by BT spec. */
4329 cp.capability = (conn->io_capability == 0x04) ?
4330 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4332 /* If we are initiators, there is no remote information yet */
4333 if (conn->remote_auth == 0xff) {
4334 /* Request MITM protection if our IO caps allow it
4335 * except for the no-bonding case.
4337 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4338 conn->auth_type != HCI_AT_NO_BONDING)
4339 conn->auth_type |= 0x01;
4341 conn->auth_type = hci_get_auth_req(conn);
4344 /* If we're not bondable, force one of the non-bondable
4345 * authentication requirement values.
4347 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4348 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4350 cp.authentication = conn->auth_type;
4351 cp.oob_data = bredr_oob_data_present(conn);
4353 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4356 struct hci_cp_io_capability_neg_reply cp;
4358 bacpy(&cp.bdaddr, &ev->bdaddr);
4359 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4361 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4366 hci_dev_unlock(hdev);
4369 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4371 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4372 struct hci_conn *conn;
4374 BT_DBG("%s", hdev->name);
4378 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4382 conn->remote_cap = ev->capability;
4383 conn->remote_auth = ev->authentication;
4386 hci_dev_unlock(hdev);
/* Handle the HCI User Confirmation Request event (SSP numeric
 * comparison). Rejects the request when MITM is required but the remote
 * cannot provide it, auto-accepts (optionally delayed via
 * hdev->auto_accept_delay) when neither side needs MITM, and otherwise
 * forwards the request to user space through mgmt_user_confirm_request().
 */
4389 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4390 struct sk_buff *skb)
4392 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4393 int loc_mitm, rem_mitm, confirm_hint = 0;
4394 struct hci_conn *conn;
4396 BT_DBG("%s", hdev->name);
4400 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4403 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the authentication requirement encodes MITM protection */
4407 loc_mitm = (conn->auth_type & 0x01);
4408 rem_mitm = (conn->remote_auth & 0x01);
4410 /* If we require MITM but the remote device can't provide that
4411 * (it has NoInputNoOutput) then reject the confirmation
4412 * request. We check the security level here since it doesn't
4413 * necessarily match conn->auth_type.
4415 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4416 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4417 BT_DBG("Rejecting request: remote device can't provide MITM");
4418 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4419 sizeof(ev->bdaddr), &ev->bdaddr);
4423 /* If no side requires MITM protection; auto-accept */
4424 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4425 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4427 /* If we're not the initiators request authorization to
4428 * proceed from user space (mgmt_user_confirm with
4429 * confirm_hint set to 1). The exception is if neither
4430 * side had MITM or if the local IO capability is
4431 * NoInputNoOutput, in which case we do auto-accept
4433 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4434 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4435 (loc_mitm || rem_mitm)) {
4436 BT_DBG("Confirming auto-accept as acceptor")
4441 BT_DBG("Auto-accept of user confirmation with %ums delay",
4442 hdev->auto_accept_delay);
/* Optionally delay the reply so the remote's keypress events can arrive */
4444 if (hdev->auto_accept_delay > 0) {
4445 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4446 queue_delayed_work(conn->hdev->workqueue,
4447 &conn->auto_accept_work, delay);
4451 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4452 sizeof(ev->bdaddr), &ev->bdaddr);
/* Fall through to user space for an explicit confirmation decision */
4457 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4458 le32_to_cpu(ev->passkey), confirm_hint);
4461 hci_dev_unlock(hdev);
/* Handle the HCI User Passkey Request event by forwarding it to user
 * space (mgmt) when the management interface is active; the kernel does
 * not answer passkey requests on its own.
 */
4464 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4465 struct sk_buff *skb)
4467 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4469 BT_DBG("%s", hdev->name);
4471 if (hci_dev_test_flag(hdev, HCI_MGMT))
4472 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the HCI User Passkey Notification event: store the passkey to
 * display on the connection, reset the entered-digit counter, and notify
 * user space via mgmt so it can show the passkey to the user.
 */
4475 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4476 struct sk_buff *skb)
4478 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4479 struct hci_conn *conn;
4481 BT_DBG("%s", hdev->name);
4483 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4487 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4488 conn->passkey_entered = 0;
4490 if (hci_dev_test_flag(hdev, HCI_MGMT))
4491 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4492 conn->dst_type, conn->passkey_notify,
4493 conn->passkey_entered);
/* Handle the HCI Keypress Notification event: track how many passkey
 * digits the remote device has entered/erased and relay the updated
 * count to user space so the UI can reflect typing progress.
 * NOTE(review): the enclosing switch statement header is elided in this
 * sampled listing; the cases below dispatch on the event's key action.
 */
4496 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4498 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4499 struct hci_conn *conn;
4501 BT_DBG("%s", hdev->name);
4503 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4508 case HCI_KEYPRESS_STARTED:
4509 conn->passkey_entered = 0;
4512 case HCI_KEYPRESS_ENTERED:
4513 conn->passkey_entered++;
4516 case HCI_KEYPRESS_ERASED:
4517 conn->passkey_entered--;
4520 case HCI_KEYPRESS_CLEARED:
4521 conn->passkey_entered = 0;
4524 case HCI_KEYPRESS_COMPLETED:
4528 if (hci_dev_test_flag(hdev, HCI_MGMT))
4529 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4530 conn->dst_type, conn->passkey_notify,
4531 conn->passkey_entered);
/* Handle the HCI Simple Pairing Complete event: reset the cached remote
 * authentication requirement and report a pairing failure to user space,
 * taking care not to duplicate the auth_failed event when we initiated
 * authentication ourselves (that path reports via auth_complete).
 */
4534 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4535 struct sk_buff *skb)
4537 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4538 struct hci_conn *conn;
4540 BT_DBG("%s", hdev->name);
4544 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4548 /* Reset the authentication requirement to unknown */
4549 conn->remote_auth = 0xff;
4551 /* To avoid duplicate auth_failed events to user space we check
4552 * the HCI_CONN_AUTH_PEND flag which will be set if we
4553 * initiated the authentication. A traditional auth_complete
4554 * event gets always produced as initiator and is also mapped to
4555 * the mgmt_auth_failed event */
4556 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4557 mgmt_auth_failed(conn, ev->status);
4559 hci_conn_drop(conn);
4562 hci_dev_unlock(hdev);
/* Handle the HCI Remote Host Supported Features Notification event:
 * store the remote host feature page on the connection and update the
 * cached inquiry entry's SSP mode bit for this address.
 */
4565 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4566 struct sk_buff *skb)
4568 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4569 struct inquiry_entry *ie;
4570 struct hci_conn *conn;
4572 BT_DBG("%s", hdev->name);
4576 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Page 1 of the features table holds host-supported features */
4578 memcpy(conn->features[1], ev->features, 8);
4580 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4582 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4584 hci_dev_unlock(hdev);
/* Handle the HCI Remote OOB Data Request event: look up previously
 * stored out-of-band pairing data for the peer and reply with it. When
 * BR/EDR Secure Connections is enabled the extended reply (P-192 and
 * P-256 values) is used, with the P-192 fields zeroed in SC-only mode;
 * without stored data a negative reply is sent instead.
 */
4587 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4588 struct sk_buff *skb)
4590 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4591 struct oob_data *data;
4593 BT_DBG("%s", hdev->name);
4597 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4600 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data for this peer: reject the request */
4602 struct hci_cp_remote_oob_data_neg_reply cp;
4604 bacpy(&cp.bdaddr, &ev->bdaddr);
4605 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4610 if (bredr_sc_enabled(hdev)) {
4611 struct hci_cp_remote_oob_ext_data_reply cp;
4613 bacpy(&cp.bdaddr, &ev->bdaddr);
4614 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
/* SC-only mode: never expose the legacy P-192 values */
4615 memset(cp.hash192, 0, sizeof(cp.hash192));
4616 memset(cp.rand192, 0, sizeof(cp.rand192));
4618 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4619 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4621 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4622 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4624 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4627 struct hci_cp_remote_oob_data_reply cp;
4629 bacpy(&cp.bdaddr, &ev->bdaddr);
4630 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4631 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4633 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4638 hci_dev_unlock(hdev);
/* AMP (High Speed) event handlers below are only built when
 * CONFIG_BT_HS is enabled.
 */
4641 #if IS_ENABLED(CONFIG_BT_HS)
/* Handle the AMP Channel Selected event: look up the physical-link
 * connection by handle and kick off reading the final local AMP assoc
 * data for it.
 */
4642 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4644 struct hci_ev_channel_selected *ev = (void *)skb->data;
4645 struct hci_conn *hcon;
4647 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4649 skb_pull(skb, sizeof(*ev));
4651 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4655 amp_read_loc_assoc_final_data(hdev, hcon);
/* Handle the AMP Physical Link Complete event: mark the AMP connection
 * as connected, copy the peer address from the associated BR/EDR link,
 * register debugfs/sysfs entries and confirm the physical link to the
 * AMP manager. Failure paths (visible here only as unlocks) are elided
 * in this sampled listing.
 */
4658 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4659 struct sk_buff *skb)
4661 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4662 struct hci_conn *hcon, *bredr_hcon;
4664 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4669 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4671 hci_dev_unlock(hdev);
4677 hci_dev_unlock(hdev);
/* The AMP link shares its peer with the controlling BR/EDR connection */
4681 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4683 hcon->state = BT_CONNECTED;
4684 bacpy(&hcon->dst, &bredr_hcon->dst);
4686 hci_conn_hold(hcon);
4687 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4688 hci_conn_drop(hcon);
4690 hci_debugfs_create_conn(hcon);
4691 hci_conn_add_sysfs(hcon);
4693 amp_physical_cfm(bredr_hcon, hcon);
4695 hci_dev_unlock(hdev);
/* Handle the AMP Logical Link Complete event: create an hci_chan for
 * the new logical link on the physical connection and, when an AMP
 * manager with a pending BR/EDR L2CAP channel exists, confirm the
 * logical link to L2CAP under the channel lock.
 */
4698 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4700 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4701 struct hci_conn *hcon;
4702 struct hci_chan *hchan;
4703 struct amp_mgr *mgr;
4705 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4706 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4709 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4713 /* Create AMP hchan */
4714 hchan = hci_chan_create(hcon);
4718 hchan->handle = le16_to_cpu(ev->handle);
4720 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4722 mgr = hcon->amp_mgr;
4723 if (mgr && mgr->bredr_chan) {
4724 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4726 l2cap_chan_lock(bredr_chan);
/* AMP links use the controller's block MTU for the L2CAP connection */
4728 bredr_chan->conn->mtu = hdev->block_mtu;
4729 l2cap_logical_cfm(bredr_chan, hchan, 0);
4730 hci_conn_hold(hcon);
4732 l2cap_chan_unlock(bredr_chan);
/* Handle the AMP Disconnection Logical Link Complete event: find the
 * hci_chan by its logical-link handle and tear it down via the AMP
 * layer, passing along the disconnect reason.
 */
4736 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4737 struct sk_buff *skb)
4739 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4740 struct hci_chan *hchan;
4742 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4743 le16_to_cpu(ev->handle), ev->status);
4750 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4754 amp_destroy_logical_link(hchan, ev->reason);
4757 hci_dev_unlock(hdev);
/* Handle the AMP Disconnection Physical Link Complete event: locate the
 * physical-link connection by handle and mark it closed.
 */
4760 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4761 struct sk_buff *skb)
4763 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4764 struct hci_conn *hcon;
4766 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4773 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4775 hcon->state = BT_CLOSED;
4779 hci_dev_unlock(hdev);
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events. Clears the implicit-advertising-stopped state,
 * resolves or creates the hci_conn, fixes up initiator/responder
 * address bookkeeping for both master and slave roles, resolves RPAs to
 * identity addresses via the IRK store, rejects blacklisted peers,
 * transitions the connection to BT_CONFIG/BT_CONNECTED (optionally
 * after a remote-features exchange), and releases any pending
 * connection parameters that triggered this connection.
 * NOTE(review): several guard conditions (role checks, error gotos) are
 * elided in this sampled listing.
 */
4783 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
4784 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
4785 u16 interval, u16 latency, u16 supervision_timeout)
4787 struct hci_conn_params *params;
4788 struct hci_conn *conn;
4789 struct smp_irk *irk;
4794 /* All controllers implicitly stop advertising in the event of a
4795 * connection, so ensure that the state bit is cleared.
4797 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4799 conn = hci_lookup_le_connect(hdev);
4801 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
4803 bt_dev_err(hdev, "no memory for new connection");
4807 conn->dst_type = bdaddr_type;
4809 /* If we didn't have a hci_conn object previously
4810 * but we're in master role this must be something
4811 * initiated using a white list. Since white list based
4812 * connections are not "first class citizens" we don't
4813 * have full tracking of them. Therefore, we go ahead
4814 * with a "best effort" approach of determining the
4815 * initiator address based on the HCI_PRIVACY flag.
4818 conn->resp_addr_type = bdaddr_type;
4819 bacpy(&conn->resp_addr, bdaddr);
4820 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4821 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4822 bacpy(&conn->init_addr, &hdev->rpa);
4824 hci_copy_identity_address(hdev,
4826 &conn->init_addr_type);
/* An outstanding connect attempt no longer needs its timeout */
4830 cancel_delayed_work(&conn->le_conn_timeout);
4834 /* Set the responder (our side) address type based on
4835 * the advertising address type.
4837 conn->resp_addr_type = hdev->adv_addr_type;
4838 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
4839 /* In case of ext adv, resp_addr will be updated in
4840 * Adv Terminated event.
4842 if (!ext_adv_capable(hdev))
4843 bacpy(&conn->resp_addr, &hdev->random_addr);
4845 bacpy(&conn->resp_addr, &hdev->bdaddr);
4848 conn->init_addr_type = bdaddr_type;
4849 bacpy(&conn->init_addr, bdaddr);
4851 /* For incoming connections, set the default minimum
4852 * and maximum connection interval. They will be used
4853 * to check if the parameters are in range and if not
4854 * trigger the connection update procedure.
4856 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4857 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4860 /* Lookup the identity address from the stored connection
4861 * address and address type.
4863 * When establishing connections to an identity address, the
4864 * connection procedure will store the resolvable random
4865 * address first. Now if it can be converted back into the
4866 * identity address, start using the identity address from
4869 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4871 bacpy(&conn->dst, &irk->bdaddr);
4872 conn->dst_type = irk->addr_type;
4876 hci_le_conn_failed(conn, status);
4880 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4881 addr_type = BDADDR_LE_PUBLIC;
4883 addr_type = BDADDR_LE_RANDOM;
4885 /* Drop the connection if the device is blocked */
4886 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4887 hci_conn_drop(conn);
4891 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4892 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4894 conn->sec_level = BT_SECURITY_LOW;
4895 conn->handle = handle;
4896 conn->state = BT_CONFIG;
/* Record the negotiated connection parameters from the event */
4898 conn->le_conn_interval = interval;
4899 conn->le_conn_latency = latency;
4900 conn->le_supv_timeout = supervision_timeout;
4902 hci_debugfs_create_conn(conn);
4903 hci_conn_add_sysfs(conn);
4906 /* The remote features procedure is defined for master
4907 * role only. So only in case of an initiated connection
4908 * request the remote features.
4910 * If the local controller supports slave-initiated features
4911 * exchange, then requesting the remote features in slave
4912 * role is possible. Otherwise just transition into the
4913 * connected state without requesting the remote features.
4916 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4917 struct hci_cp_le_read_remote_features cp;
4919 cp.handle = __cpu_to_le16(conn->handle);
4921 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4924 hci_conn_hold(conn);
4926 conn->state = BT_CONNECTED;
4927 hci_connect_cfm(conn, status);
4930 hci_connect_cfm(conn, status);
/* Release the pending params entry that triggered this connection */
4933 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4936 list_del_init(&params->action);
4938 hci_conn_drop(params->conn);
4939 hci_conn_put(params->conn);
4940 params->conn = NULL;
4945 hci_update_background_scan(hdev);
4946 hci_dev_unlock(hdev);
/* Handle the LE Connection Complete event by unpacking the event fields
 * and delegating to the common le_conn_complete_evt() helper.
 */
4949 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4951 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4953 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4955 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
4956 ev->role, le16_to_cpu(ev->handle),
4957 le16_to_cpu(ev->interval),
4958 le16_to_cpu(ev->latency),
4959 le16_to_cpu(ev->supervision_timeout));
/* Handle the LE Enhanced Connection Complete event; it carries the same
 * core fields as the legacy event, so it reuses le_conn_complete_evt().
 */
4962 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
4963 struct sk_buff *skb)
4965 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
4967 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4969 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
4970 ev->role, le16_to_cpu(ev->handle),
4971 le16_to_cpu(ev->interval),
4972 le16_to_cpu(ev->latency),
4973 le16_to_cpu(ev->supervision_timeout));
/* Handle the LE Advertising Set Terminated event: when the set ended
 * because a connection was created, fix up that connection's responder
 * address from the advertising set's random address (either the global
 * random address or the per-instance one).
 */
4976 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
4978 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
4979 struct hci_conn *conn;
4981 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4986 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
4988 struct adv_info *adv_instance;
/* Only a random advertising address needs the fix-up below */
4990 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
4993 if (!hdev->cur_adv_instance) {
4994 bacpy(&conn->resp_addr, &hdev->random_addr);
4998 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5000 bacpy(&conn->resp_addr, &adv_instance->random_addr);
/* Handle the LE Connection Update Complete event: refresh the cached
 * connection interval, latency and supervision timeout on the hci_conn.
 */
5004 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5005 struct sk_buff *skb)
5007 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5008 struct hci_conn *conn;
5010 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5017 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5019 conn->le_conn_interval = le16_to_cpu(ev->interval);
5020 conn->le_conn_latency = le16_to_cpu(ev->latency);
5021 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5024 hci_dev_unlock(hdev);
5027 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt. Filters out non-connectable advertising types,
 * blacklisted devices, and the case where an existing slave-role link
 * would make a new connection fail; then consults pend_le_conns for a
 * matching auto-connect policy before calling hci_connect_le().
 * Returns the resulting hci_conn (owned as described below) or NULL.
 */
5028 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5030 u8 addr_type, u8 adv_type,
5031 bdaddr_t *direct_rpa)
5033 struct hci_conn *conn;
5034 struct hci_conn_params *params;
5036 /* If the event is not connectable don't proceed further */
5037 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5040 /* Ignore if the device is blocked */
5041 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5044 /* Most controller will fail if we try to create new connections
5045 * while we have an existing one in slave role.
5047 if (hdev->conn_hash.le_num_slave > 0)
5050 /* If we're not connectable only connect devices that we have in
5051 * our pend_le_conns list.
5053 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5058 if (!params->explicit_connect) {
5059 switch (params->auto_connect) {
5060 case HCI_AUTO_CONN_DIRECT:
5061 /* Only devices advertising with ADV_DIRECT_IND are
5062 * triggering a connection attempt. This is allowing
5063 * incoming connections from slave devices.
5065 if (adv_type != LE_ADV_DIRECT_IND)
5068 case HCI_AUTO_CONN_ALWAYS:
5069 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5070 * are triggering a connection attempt. This means
5071 * that incoming connectioms from slave device are
5072 * accepted and also outgoing connections to slave
5073 * devices are established when found.
5081 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5082 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
5084 if (!IS_ERR(conn)) {
5085 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5086 * by higher layer that tried to connect, if no then
5087 * store the pointer since we don't really have any
5088 * other owner of the object besides the params that
5089 * triggered it. This way we can abort the connection if
5090 * the parameters get removed and keep the reference
5091 * count consistent once the connection is established.
5094 if (!params->explicit_connect)
5095 params->conn = hci_conn_get(conn);
5100 switch (PTR_ERR(conn)) {
5102 /* If hci_connect() returns -EBUSY it means there is already
5103 * an LE connection attempt going on. Since controllers don't
5104 * support more than one connection attempt at the time, we
5105 * don't consider this an error case.
5109 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core processing for one LE advertising (or directed advertising)
 * report: validates the advertising type, trims padded/corrupt length,
 * filters directed reports against our local RPA/IRK, resolves the
 * advertiser address to an identity address, possibly triggers a
 * pending connection, and finally routes the report to mgmt as device
 * found events — merging ADV + SCAN_RSP pairs during active scanning.
 *
 * @direct_addr: non-NULL only for LE Direct Advertising Reports; must
 *               be our resolvable random address for the report to be
 *               accepted.
 */
5116 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5117 u8 bdaddr_type, bdaddr_t *direct_addr,
5118 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
5120 struct discovery_state *d = &hdev->discovery;
5121 struct smp_irk *irk;
5122 struct hci_conn *conn;
/* Only the advertising PDU types defined by the spec are accepted */
5129 case LE_ADV_DIRECT_IND:
5130 case LE_ADV_SCAN_IND:
5131 case LE_ADV_NONCONN_IND:
5132 case LE_ADV_SCAN_RSP:
5135 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5136 "type: 0x%02x", type);
5140 /* Find the end of the data in case the report contains padded zero
5141 * bytes at the end causing an invalid length value.
5143 * When data is NULL, len is 0 so there is no need for extra ptr
5144 * check as 'ptr < data + 0' is already false in such case.
5146 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5147 if (ptr + 1 + *ptr > data + len)
5151 real_len = ptr - data;
5153 /* Adjust for actual length */
5154 if (len != real_len) {
5155 bt_dev_err_ratelimited(hdev, "advertising data len corrected");
5159 /* If the direct address is present, then this report is from
5160 * a LE Direct Advertising Report event. In that case it is
5161 * important to see if the address is matching the local
5162 * controller address.
5165 /* Only resolvable random addresses are valid for these
5166 * kind of reports and others can be ignored.
5168 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5171 /* If the controller is not using resolvable random
5172 * addresses, then this report can be ignored.
5174 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5177 /* If the local IRK of the controller does not match
5178 * with the resolvable random address provided, then
5179 * this report can be ignored.
5181 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5185 /* Check if we need to convert to identity address */
5186 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5188 bdaddr = &irk->bdaddr;
5189 bdaddr_type = irk->addr_type;
5192 /* Check if we have been requested to connect to this device.
5194 * direct_addr is set only for directed advertising reports (it is NULL
5195 * for advertising reports) and is already verified to be RPA above.
5197 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5199 if (conn && type == LE_ADV_IND) {
5200 /* Store report for later inclusion by
5201 * mgmt_device_connected
5203 memcpy(conn->le_adv_data, data, len);
5204 conn->le_adv_data_len = len;
5207 /* Passive scanning shouldn't trigger any device found events,
5208 * except for devices marked as CONN_REPORT for which we do send
5209 * device found events.
5211 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5212 if (type == LE_ADV_DIRECT_IND)
5215 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5216 bdaddr, bdaddr_type))
5219 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5220 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5223 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5224 rssi, flags, data, len, NULL, 0);
5228 /* When receiving non-connectable or scannable undirected
5229 * advertising reports, this means that the remote device is
5230 * not connectable and then clearly indicate this in the
5231 * device found event.
5233 * When receiving a scan response, then there is no way to
5234 * know if the remote device is connectable or not. However
5235 * since scan responses are merged with a previously seen
5236 * advertising report, the flags field from that report
5239 * In the really unlikely case that a controller get confused
5240 * and just sends a scan response event, then it is marked as
5241 * not connectable as well.
5243 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5244 type == LE_ADV_SCAN_RSP)
5245 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5249 /* If there's nothing pending either store the data from this
5250 * event or send an immediate device found event if the data
5251 * should not be stored for later.
5253 if (!has_pending_adv_report(hdev)) {
5254 /* If the report will trigger a SCAN_REQ store it for
5257 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5258 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5259 rssi, flags, data, len);
5263 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5264 rssi, flags, data, len, NULL, 0);
5268 /* Check if the pending report is for the same device as the new one */
5269 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5270 bdaddr_type == d->last_adv_addr_type);
5272 /* If the pending data doesn't match this report or this isn't a
5273 * scan response (e.g. we got a duplicate ADV_IND) then force
5274 * sending of the pending data.
5276 if (type != LE_ADV_SCAN_RSP || !match) {
5277 /* Send out whatever is in the cache, but skip duplicates */
5279 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5280 d->last_adv_addr_type, NULL,
5281 d->last_adv_rssi, d->last_adv_flags,
5283 d->last_adv_data_len, NULL, 0);
5285 /* If the new report will trigger a SCAN_REQ store it for
5288 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5289 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5290 rssi, flags, data, len);
5294 /* The advertising reports cannot be merged, so clear
5295 * the pending report and send out a device found event.
5297 clear_pending_adv_report(hdev);
5298 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5299 rssi, flags, data, len, NULL, 0);
5303 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5304 * the new event is a SCAN_RSP. We can therefore proceed with
5305 * sending a merged device found event.
5307 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5308 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5309 d->last_adv_data, d->last_adv_data_len, data, len);
5310 clear_pending_adv_report(hdev);
/* Handle the LE Advertising Report event: iterate over the packed
 * reports in the event payload, validate each report's AD length, pull
 * the RSSI byte that trails the data, and hand each report to
 * process_adv_report().
 */
5313 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5315 u8 num_reports = skb->data[0];
5316 void *ptr = &skb->data[1];
5320 while (num_reports--) {
5321 struct hci_ev_le_advertising_info *ev = ptr;
5324 if (ev->length <= HCI_MAX_AD_LENGTH) {
/* The RSSI byte immediately follows the advertising data */
5325 rssi = ev->data[ev->length];
5326 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5327 ev->bdaddr_type, NULL, 0, rssi,
5328 ev->data, ev->length);
5330 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past header, data and the trailing RSSI byte */
5333 ptr += sizeof(*ev) + ev->length + 1;
5336 hci_dev_unlock(hdev);
/* Map an extended advertising event type (bitfield) to the legacy
 * advertising PDU type used by process_adv_report(). Legacy PDUs are
 * mapped via their dedicated values; non-legacy types are derived from
 * the connectable/scannable/directed bits. Returns LE_ADV_INVALID for
 * combinations that cannot be expressed as a legacy type.
 */
5339 static u8 ext_evt_type_to_legacy(u16 evt_type)
5341 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5343 case LE_LEGACY_ADV_IND:
5345 case LE_LEGACY_ADV_DIRECT_IND:
5346 return LE_ADV_DIRECT_IND;
5347 case LE_LEGACY_ADV_SCAN_IND:
5348 return LE_ADV_SCAN_IND;
5349 case LE_LEGACY_NONCONN_IND:
5350 return LE_ADV_NONCONN_IND;
5351 case LE_LEGACY_SCAN_RSP_ADV:
5352 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5353 return LE_ADV_SCAN_RSP;
5356 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5359 return LE_ADV_INVALID;
/* Non-legacy PDU: derive the closest legacy type from the flag bits */
5362 if (evt_type & LE_EXT_ADV_CONN_IND) {
5363 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5364 return LE_ADV_DIRECT_IND;
5369 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5370 return LE_ADV_SCAN_RSP;
5372 if (evt_type & LE_EXT_ADV_SCAN_IND)
5373 return LE_ADV_SCAN_IND;
5375 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5376 evt_type & LE_EXT_ADV_DIRECT_IND)
5377 return LE_ADV_NONCONN_IND;
5379 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5382 return LE_ADV_INVALID;
/* Handle the LE Extended Advertising Report event: translate each
 * report's extended event type to a legacy type and reuse the common
 * process_adv_report() path; reports that have no legacy equivalent
 * are skipped.
 */
5385 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5387 u8 num_reports = skb->data[0];
5388 void *ptr = &skb->data[1];
5392 while (num_reports--) {
5393 struct hci_ev_le_ext_adv_report *ev = ptr;
5397 evt_type = __le16_to_cpu(ev->evt_type);
5398 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5399 if (legacy_evt_type != LE_ADV_INVALID) {
5400 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5401 ev->bdaddr_type, NULL, 0, ev->rssi,
5402 ev->data, ev->length);
5405 ptr += sizeof(*ev) + ev->length + 1;
5408 hci_dev_unlock(hdev);
/* Handle the LE Read Remote Features Complete event: store the remote
 * LE feature page and, for connections still in BT_CONFIG, transition
 * to BT_CONNECTED. A 0x1a (unsupported remote feature) status from a
 * remote that lacks slave-initiated feature exchange is tolerated and
 * treated as success.
 */
5411 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5412 struct sk_buff *skb)
5414 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5415 struct hci_conn *conn;
5417 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5421 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5424 memcpy(conn->features[0], ev->features, 8);
5426 if (conn->state == BT_CONFIG) {
5429 /* If the local controller supports slave-initiated
5430 * features exchange, but the remote controller does
5431 * not, then it is possible that the error code 0x1a
5432 * for unsupported remote feature gets returned.
5434 * In this specific case, allow the connection to
5435 * transition into connected state and mark it as
5438 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5439 !conn->out && ev->status == 0x1a)
5442 status = ev->status;
5444 conn->state = BT_CONNECTED;
5445 hci_connect_cfm(conn, status);
5446 hci_conn_drop(conn);
5450 hci_dev_unlock(hdev);
/* Handle the LE Long Term Key Request event: look up the LTK for the
 * connection, verify EDiv/Rand (must both be zero for SC keys, must
 * match the stored key otherwise), reply with the key padded to 16
 * bytes, and update the connection's security bookkeeping. STKs are
 * single-use and removed from the key store once handed out. When no
 * valid key exists a negative reply is sent instead.
 */
5453 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5455 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5456 struct hci_cp_le_ltk_reply cp;
5457 struct hci_cp_le_ltk_neg_reply neg;
5458 struct hci_conn *conn;
5459 struct smp_ltk *ltk;
5461 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5465 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5469 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5473 if (smp_ltk_is_sc(ltk)) {
5474 /* With SC both EDiv and Rand are set to zero */
5475 if (ev->ediv || ev->rand)
5478 /* For non-SC keys check that EDiv and Rand match */
5479 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key and zero-pad it up to the full 16-byte LTK size */
5483 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5484 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5485 cp.handle = cpu_to_le16(conn->handle);
5487 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5489 conn->enc_key_size = ltk->enc_size;
5491 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5493 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5494 * temporary key used to encrypt a connection following
5495 * pairing. It is used during the Encrypted Session Setup to
5496 * distribute the keys. Later, security can be re-established
5497 * using a distributed LTK.
5499 if (ltk->type == SMP_STK) {
5500 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5501 list_del_rcu(&ltk->list);
5502 kfree_rcu(ltk, rcu);
5504 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5507 hci_dev_unlock(hdev);
/* Negative-reply path: no usable key for this request */
5512 neg.handle = ev->handle;
5513 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5514 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for the
 * given connection handle with the given rejection reason.
 */
5517 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5520 struct hci_cp_le_conn_param_req_neg_reply cp;
5522 cp.handle = cpu_to_le16(handle);
5525 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the LE Remote Connection Parameter Request event: reject the
 * request for unknown/not-connected handles, for intervals outside the
 * connection's allowed range, or for parameters that fail the generic
 * validity check. Otherwise, when we are master, record the new
 * parameters (and inform mgmt), then accept the request by echoing the
 * parameters back in the positive reply.
 */
5529 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5530 struct sk_buff *skb)
5532 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5533 struct hci_cp_le_conn_param_req_reply cp;
5534 struct hci_conn *hcon;
5535 u16 handle, min, max, latency, timeout;
5537 handle = le16_to_cpu(ev->handle);
5538 min = le16_to_cpu(ev->interval_min);
5539 max = le16_to_cpu(ev->interval_max);
5540 latency = le16_to_cpu(ev->latency);
5541 timeout = le16_to_cpu(ev->timeout);
5543 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5544 if (!hcon || hcon->state != BT_CONNECTED)
5545 return send_conn_param_neg_reply(hdev, handle,
5546 HCI_ERROR_UNKNOWN_CONN_ID);
5548 if (min < hcon->le_conn_min_interval ||
5549 max > hcon->le_conn_max_interval)
5550 return send_conn_param_neg_reply(hdev, handle,
5551 HCI_ERROR_INVALID_LL_PARAMS);
5553 if (hci_check_conn_params(min, max, latency, timeout))
5554 return send_conn_param_neg_reply(hdev, handle,
5555 HCI_ERROR_INVALID_LL_PARAMS);
5557 if (hcon->role == HCI_ROLE_MASTER) {
5558 struct hci_conn_params *params;
5563 params = hci_conn_params_lookup(hdev, &hcon->dst,
/* Persist the accepted parameters on the stored params entry */
5566 params->conn_min_interval = min;
5567 params->conn_max_interval = max;
5568 params->conn_latency = latency;
5569 params->supervision_timeout = timeout;
5575 hci_dev_unlock(hdev);
5577 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5578 store_hint, min, max, latency, timeout);
/* Accept by echoing the requested parameters back unchanged */
5581 cp.handle = ev->handle;
5582 cp.interval_min = ev->interval_min;
5583 cp.interval_max = ev->interval_max;
5584 cp.latency = ev->latency;
5585 cp.timeout = ev->timeout;
5589 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle the LE Direct Advertising Report event: feed each report to
 * process_adv_report() with its direct (initiator) address so the
 * common path can verify it resolves to our local RPA. Directed
 * reports carry no advertising data, hence the NULL/0 data arguments.
 */
5592 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5593 struct sk_buff *skb)
5595 u8 num_reports = skb->data[0];
5596 void *ptr = &skb->data[1];
5600 while (num_reports--) {
5601 struct hci_ev_le_direct_adv_info *ev = ptr;
5603 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5604 ev->bdaddr_type, &ev->direct_addr,
5605 ev->direct_addr_type, ev->rssi, NULL, 0);
5610 hci_dev_unlock(hdev);
/* Dispatch an LE Meta event to the subevent-specific handler. The meta
 * header is pulled off the skb first so each handler sees its own
 * subevent payload at skb->data.
 */
5613 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5615 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5617 skb_pull(skb, sizeof(*le_ev));
5619 switch (le_ev->subevent) {
5620 case HCI_EV_LE_CONN_COMPLETE:
5621 hci_le_conn_complete_evt(hdev, skb);
5624 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5625 hci_le_conn_update_complete_evt(hdev, skb);
5628 case HCI_EV_LE_ADVERTISING_REPORT:
5629 hci_le_adv_report_evt(hdev, skb);
5632 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5633 hci_le_remote_feat_complete_evt(hdev, skb);
5636 case HCI_EV_LE_LTK_REQ:
5637 hci_le_ltk_request_evt(hdev, skb);
5640 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5641 hci_le_remote_conn_param_req_evt(hdev, skb);
5644 case HCI_EV_LE_DIRECT_ADV_REPORT:
5645 hci_le_direct_adv_report_evt(hdev, skb);
5648 case HCI_EV_LE_EXT_ADV_REPORT:
5649 hci_le_ext_adv_report_evt(hdev, skb);
5652 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5653 hci_le_enh_conn_complete_evt(hdev, skb);
5656 case HCI_EV_LE_EXT_ADV_SET_TERM:
5657 hci_le_ext_adv_term_evt(hdev, skb);
/* Check whether @skb holds the Command Complete event (or Command
 * Status, which carries no extra parameters) for @opcode, optionally
 * matching a request-specific @event first. On a match the headers are
 * pulled so the caller sees the return parameters. Returns true on
 * match, false otherwise (malformed event, wrong event code, or
 * mismatched opcode).
 */
5665 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5666 u8 event, struct sk_buff *skb)
5668 struct hci_ev_cmd_complete *ev;
5669 struct hci_event_hdr *hdr;
5674 if (skb->len < sizeof(*hdr)) {
5675 bt_dev_err(hdev, "too short HCI event");
5679 hdr = (void *) skb->data;
5680 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5683 if (hdr->evt != event)
5688 /* Check if request ended in Command Status - no way to retreive
5689 * any extra parameters in this case.
5691 if (hdr->evt == HCI_EV_CMD_STATUS)
5694 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5695 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5700 if (skb->len < sizeof(*ev)) {
5701 bt_dev_err(hdev, "too short cmd_complete event");
5705 ev = (void *) skb->data;
5706 skb_pull(skb, sizeof(*ev));
5708 if (opcode != __le16_to_cpu(ev->opcode)) {
5709 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5710 __le16_to_cpu(ev->opcode));
5717 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5719 struct hci_event_hdr *hdr = (void *) skb->data;
5720 hci_req_complete_t req_complete = NULL;
5721 hci_req_complete_skb_t req_complete_skb = NULL;
5722 struct sk_buff *orig_skb = NULL;
5723 u8 status = 0, event = hdr->evt, req_evt = 0;
5724 u16 opcode = HCI_OP_NOP;
5726 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5727 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5728 opcode = __le16_to_cpu(cmd_hdr->opcode);
5729 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5734 /* If it looks like we might end up having to call
5735 * req_complete_skb, store a pristine copy of the skb since the
5736 * various handlers may modify the original one through
5737 * skb_pull() calls, etc.
5739 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5740 event == HCI_EV_CMD_COMPLETE)
5741 orig_skb = skb_clone(skb, GFP_KERNEL);
5743 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5746 case HCI_EV_INQUIRY_COMPLETE:
5747 hci_inquiry_complete_evt(hdev, skb);
5750 case HCI_EV_INQUIRY_RESULT:
5751 hci_inquiry_result_evt(hdev, skb);
5754 case HCI_EV_CONN_COMPLETE:
5755 hci_conn_complete_evt(hdev, skb);
5758 case HCI_EV_CONN_REQUEST:
5759 hci_conn_request_evt(hdev, skb);
5762 case HCI_EV_DISCONN_COMPLETE:
5763 hci_disconn_complete_evt(hdev, skb);
5766 case HCI_EV_AUTH_COMPLETE:
5767 hci_auth_complete_evt(hdev, skb);
5770 case HCI_EV_REMOTE_NAME:
5771 hci_remote_name_evt(hdev, skb);
5774 case HCI_EV_ENCRYPT_CHANGE:
5775 hci_encrypt_change_evt(hdev, skb);
5778 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5779 hci_change_link_key_complete_evt(hdev, skb);
5782 case HCI_EV_REMOTE_FEATURES:
5783 hci_remote_features_evt(hdev, skb);
5786 case HCI_EV_CMD_COMPLETE:
5787 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5788 &req_complete, &req_complete_skb);
5791 case HCI_EV_CMD_STATUS:
5792 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5796 case HCI_EV_HARDWARE_ERROR:
5797 hci_hardware_error_evt(hdev, skb);
5800 case HCI_EV_ROLE_CHANGE:
5801 hci_role_change_evt(hdev, skb);
5804 case HCI_EV_NUM_COMP_PKTS:
5805 hci_num_comp_pkts_evt(hdev, skb);
5808 case HCI_EV_MODE_CHANGE:
5809 hci_mode_change_evt(hdev, skb);
5812 case HCI_EV_PIN_CODE_REQ:
5813 hci_pin_code_request_evt(hdev, skb);
5816 case HCI_EV_LINK_KEY_REQ:
5817 hci_link_key_request_evt(hdev, skb);
5820 case HCI_EV_LINK_KEY_NOTIFY:
5821 hci_link_key_notify_evt(hdev, skb);
5824 case HCI_EV_CLOCK_OFFSET:
5825 hci_clock_offset_evt(hdev, skb);
5828 case HCI_EV_PKT_TYPE_CHANGE:
5829 hci_pkt_type_change_evt(hdev, skb);
5832 case HCI_EV_PSCAN_REP_MODE:
5833 hci_pscan_rep_mode_evt(hdev, skb);
5836 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5837 hci_inquiry_result_with_rssi_evt(hdev, skb);
5840 case HCI_EV_REMOTE_EXT_FEATURES:
5841 hci_remote_ext_features_evt(hdev, skb);
5844 case HCI_EV_SYNC_CONN_COMPLETE:
5845 hci_sync_conn_complete_evt(hdev, skb);
5848 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5849 hci_extended_inquiry_result_evt(hdev, skb);
5852 case HCI_EV_KEY_REFRESH_COMPLETE:
5853 hci_key_refresh_complete_evt(hdev, skb);
5856 case HCI_EV_IO_CAPA_REQUEST:
5857 hci_io_capa_request_evt(hdev, skb);
5860 case HCI_EV_IO_CAPA_REPLY:
5861 hci_io_capa_reply_evt(hdev, skb);
5864 case HCI_EV_USER_CONFIRM_REQUEST:
5865 hci_user_confirm_request_evt(hdev, skb);
5868 case HCI_EV_USER_PASSKEY_REQUEST:
5869 hci_user_passkey_request_evt(hdev, skb);
5872 case HCI_EV_USER_PASSKEY_NOTIFY:
5873 hci_user_passkey_notify_evt(hdev, skb);
5876 case HCI_EV_KEYPRESS_NOTIFY:
5877 hci_keypress_notify_evt(hdev, skb);
5880 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5881 hci_simple_pair_complete_evt(hdev, skb);
5884 case HCI_EV_REMOTE_HOST_FEATURES:
5885 hci_remote_host_features_evt(hdev, skb);
5888 case HCI_EV_LE_META:
5889 hci_le_meta_evt(hdev, skb);
5892 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5893 hci_remote_oob_data_request_evt(hdev, skb);
5896 #if IS_ENABLED(CONFIG_BT_HS)
5897 case HCI_EV_CHANNEL_SELECTED:
5898 hci_chan_selected_evt(hdev, skb);
5901 case HCI_EV_PHY_LINK_COMPLETE:
5902 hci_phy_link_complete_evt(hdev, skb);
5905 case HCI_EV_LOGICAL_LINK_COMPLETE:
5906 hci_loglink_complete_evt(hdev, skb);
5909 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5910 hci_disconn_loglink_complete_evt(hdev, skb);
5913 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5914 hci_disconn_phylink_complete_evt(hdev, skb);
5918 case HCI_EV_NUM_COMP_BLOCKS:
5919 hci_num_comp_blocks_evt(hdev, skb);
5923 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5928 req_complete(hdev, status, opcode);
5929 } else if (req_complete_skb) {
5930 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5931 kfree_skb(orig_skb);
5934 req_complete_skb(hdev, status, opcode, orig_skb);
5937 kfree_skb(orig_skb);
5939 hdev->stat.evt_rx++;