2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
/* Command Complete handler for HCI_Inquiry_Cancel: clear the in-progress
 * inquiry flag (with the barrier wake_up_bit documents), mark discovery
 * stopped unless an LE active scan is still running, then retry any
 * pending connection attempts.
 * NOTE(review): braces and early status-check lines are elided in this
 * extract.
 */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 /* Set discovery state to stopped if we're not doing LE active
61 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 hdev->le_scan_type != LE_SCAN_ACTIVE)
63 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
66 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Periodic_Inquiry_Mode: record that periodic
 * inquiry is now active by setting HCI_PERIODIC_INQ.
 */
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 __u8 status = *((__u8 *) skb->data);
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete for HCI_Exit_Periodic_Inquiry_Mode: clear the
 * HCI_PERIODIC_INQ flag and retry any connection attempts that were
 * deferred while inquiry was running.
 */
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
83 __u8 status = *((__u8 *) skb->data);
85 BT_DBG("%s status 0x%2.2x", hdev->name, status);
90 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
92 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Remote_Name_Request_Cancel: only a debug
 * trace is visible in this extract; any further handling is elided.
 */
95 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
98 BT_DBG("%s", hdev->name);
/* Command Complete for HCI_Role_Discovery: look up the connection by
 * handle and cache the reported role (master/slave) on it, under the
 * device lock (lock acquisition elided in this extract).
 */
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
103 struct hci_rp_role_discovery *rp = (void *) skb->data;
104 struct hci_conn *conn;
106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
115 conn->role = rp->role;
117 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Link_Policy_Settings: cache the policy
 * bits on the connection identified by the returned handle.
 */
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
122 struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 struct hci_conn *conn;
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
134 conn->link_policy = __le16_to_cpu(rp->policy);
136 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Link_Policy_Settings: the response only
 * carries the handle, so the new policy value is recovered from the
 * parameters of the command we sent (at offset 2, past the handle).
 */
139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
141 struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 struct hci_conn *conn;
145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
156 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
158 conn->link_policy = get_unaligned_le16(sent + 2);
160 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Default_Link_Policy_Settings: store the
 * controller-wide default policy on hdev.
 */
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
166 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
168 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
173 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for HCI_Write_Default_Link_Policy_Settings: the value
 * written is taken from the sent command parameters, not the response.
 */
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
179 __u8 status = *((__u8 *) skb->data);
182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
191 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: bring the host-side state machine back
 * to its post-reset defaults — clear HCI_RESET and all volatile dev
 * flags, stop discovery, invalidate cached TX power values, wipe cached
 * advertising/scan-response data, fall back to passive LE scanning and
 * empty the LE white/resolving lists.
 */
194 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
196 __u8 status = *((__u8 *) skb->data);
198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
200 clear_bit(HCI_RESET, &hdev->flags);
205 /* Reset all non-persistent flags */
206 hci_dev_clear_volatile_flags(hdev);
208 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
210 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
211 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
213 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
214 hdev->adv_data_len = 0;
216 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
217 hdev->scan_rsp_data_len = 0;
219 hdev->le_scan_type = LE_SCAN_PASSIVE;
221 hdev->ssp_debug_mode = 0;
223 hci_bdaddr_list_clear(&hdev->le_white_list);
224 hci_bdaddr_list_clear(&hdev->le_resolv_list);
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
230 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 struct hci_cp_read_stored_link_key *sent;
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
235 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
239 if (!rp->status && sent->read_all == 0x01) {
240 hdev->stored_max_keys = rp->max_keys;
241 hdev->stored_num_keys = rp->num_keys;
/* Command Complete for HCI_Delete_Stored_Link_Key: decrement the cached
 * stored-key count, clamping at zero so it can never underflow.
 */
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
250 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
255 if (rp->num_keys <= hdev->stored_num_keys)
256 hdev->stored_num_keys -= rp->num_keys;
258 hdev->stored_num_keys = 0;
/* Command Complete for HCI_Write_Local_Name: notify mgmt of the result
 * when the management interface is active, and cache the name that was
 * sent (the response carries no name) in hdev->dev_name.
 */
261 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
263 __u8 status = *((__u8 *) skb->data);
266 BT_DBG("%s status 0x%2.2x", hdev->name, status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
274 if (hci_dev_test_flag(hdev, HCI_MGMT))
275 mgmt_set_local_name_complete(hdev, sent, status);
277 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
279 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Name: only adopt the controller's
 * name during initial setup/config, so a user-assigned name is not
 * overwritten later.
 */
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
284 struct hci_rp_read_local_name *rp = (void *) skb->data;
286 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
291 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 hci_dev_test_flag(hdev, HCI_CONFIG))
293 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for HCI_Write_Authentication_Enable: mirror the value
 * we sent into the HCI_AUTH flag and report completion to mgmt when the
 * management interface is in use.
 */
296 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
298 __u8 status = *((__u8 *) skb->data);
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
310 __u8 param = *((__u8 *) sent);
312 if (param == AUTH_ENABLED)
313 set_bit(HCI_AUTH, &hdev->flags);
315 clear_bit(HCI_AUTH, &hdev->flags);
318 if (hci_dev_test_flag(hdev, HCI_MGMT))
319 mgmt_auth_enable_complete(hdev, status);
321 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Encryption_Mode: mirror the sent mode
 * into the HCI_ENCRYPT flag.
 */
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
326 __u8 status = *((__u8 *) skb->data);
330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
335 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
339 param = *((__u8 *) sent);
342 set_bit(HCI_ENCRYPT, &hdev->flags);
344 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete for HCI_Write_Scan_Enable: translate the sent scan
 * bitmask into the HCI_ISCAN (inquiry scan / discoverable) and HCI_PSCAN
 * (page scan / connectable) flags; the discoverable timeout reset path
 * is partially elided in this extract.
 */
347 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
349 __u8 status = *((__u8 *) skb->data);
353 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
359 param = *((__u8 *) sent);
364 hdev->discov_timeout = 0;
368 if (param & SCAN_INQUIRY)
369 set_bit(HCI_ISCAN, &hdev->flags);
371 clear_bit(HCI_ISCAN, &hdev->flags);
373 if (param & SCAN_PAGE)
374 set_bit(HCI_PSCAN, &hdev->flags);
376 clear_bit(HCI_PSCAN, &hdev->flags);
379 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Class_of_Device: cache the 3-byte class
 * (stored little-endian, hence the reversed byte order in the trace).
 */
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
384 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
391 memcpy(hdev->dev_class, rp->dev_class, 3);
393 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for HCI_Write_Class_of_Device: cache the class we
 * sent and notify mgmt of the outcome when it is active.
 */
397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 __u8 status = *((__u8 *) skb->data);
402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
411 memcpy(hdev->dev_class, sent, 3);
413 if (hci_dev_test_flag(hdev, HCI_MGMT))
414 mgmt_set_class_of_dev_complete(hdev, sent, status);
416 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Voice_Setting: update the cached voice
 * setting only when it changed, and notify the driver via hdev->notify
 * (guard for a NULL notify callback is elided in this extract).
 */
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
421 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429 setting = __le16_to_cpu(rp->voice_setting);
431 if (hdev->voice_setting == setting)
434 hdev->voice_setting = setting;
436 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
439 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Write_Voice_Setting: same bookkeeping as the
 * read path, except the new value comes from the sent command data.
 */
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
445 __u8 status = *((__u8 *) skb->data);
449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
458 setting = get_unaligned_le16(sent);
460 if (hdev->voice_setting == setting)
463 hdev->voice_setting = setting;
465 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
468 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Read_Number_Of_Supported_IAC: cache how many
 * inquiry access codes the controller supports.
 */
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
474 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
476 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
481 hdev->num_iac = rp->num_iac;
483 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* Command Complete for HCI_Write_Simple_Pairing_Mode: keep the host SSP
 * feature bit and the HCI_SSP_ENABLED flag in sync with the mode we
 * sent, and tell mgmt about the result when active.
 */
486 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
488 __u8 status = *((__u8 *) skb->data);
489 struct hci_cp_write_ssp_mode *sent;
491 BT_DBG("%s status 0x%2.2x", hdev->name, status);
493 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
501 hdev->features[1][0] |= LMP_HOST_SSP;
503 hdev->features[1][0] &= ~LMP_HOST_SSP;
506 if (hci_dev_test_flag(hdev, HCI_MGMT))
507 mgmt_ssp_enable_complete(hdev, sent->mode, status);
510 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
512 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
515 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Secure_Connections_Host_Support: mirror
 * the host SC feature bit, and — only when mgmt is NOT managing the
 * device — update the HCI_SC_ENABLED flag directly on success.
 */
518 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
520 u8 status = *((u8 *) skb->data);
521 struct hci_cp_write_sc_support *sent;
523 BT_DBG("%s status 0x%2.2x", hdev->name, status);
525 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
533 hdev->features[1][0] |= LMP_HOST_SC;
535 hdev->features[1][0] &= ~LMP_HOST_SC;
538 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
540 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
542 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
545 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Version_Information: record the
 * HCI/LMP versions and manufacturer, but only during setup/config so a
 * running stack's identity cannot change under it.
 */
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
550 struct hci_rp_read_local_version *rp = (void *) skb->data;
552 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
557 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 hdev->hci_ver = rp->hci_ver;
560 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 hdev->lmp_ver = rp->lmp_ver;
562 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for HCI_Read_Local_Supported_Commands: cache the
 * supported-commands bitmap, again only during setup/config.
 */
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
570 struct hci_rp_read_local_commands *rp = (void *) skb->data;
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
577 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 hci_dev_test_flag(hdev, HCI_CONFIG))
579 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for HCI_Read_Local_Supported_Features: cache page 0
 * of the LMP feature mask, then derive the default ACL packet types and
 * the usable (e)SCO packet types from the individual feature bits.
 */
582 static void hci_cc_read_local_features(struct hci_dev *hdev,
585 struct hci_rp_read_local_features *rp = (void *) skb->data;
587 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592 memcpy(hdev->features, rp->features, 8);
594 /* Adjust default settings according to features
595 * supported by device. */
597 if (hdev->features[0][0] & LMP_3SLOT)
598 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
600 if (hdev->features[0][0] & LMP_5SLOT)
601 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
603 if (hdev->features[0][1] & LMP_HV2) {
604 hdev->pkt_type |= (HCI_HV2);
605 hdev->esco_type |= (ESCO_HV2);
608 if (hdev->features[0][1] & LMP_HV3) {
609 hdev->pkt_type |= (HCI_HV3);
610 hdev->esco_type |= (ESCO_HV3);
613 if (lmp_esco_capable(hdev))
614 hdev->esco_type |= (ESCO_EV3);
616 if (hdev->features[0][4] & LMP_EV4)
617 hdev->esco_type |= (ESCO_EV4);
619 if (hdev->features[0][4] & LMP_EV5)
620 hdev->esco_type |= (ESCO_EV5);
622 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
623 hdev->esco_type |= (ESCO_2EV3);
625 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
626 hdev->esco_type |= (ESCO_3EV3);
628 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
629 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for HCI_Read_Local_Extended_Features: track the
 * highest feature page the controller reports and cache the returned
 * page, bounds-checked against HCI_MAX_PAGES.
 */
632 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
635 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
637 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
642 if (hdev->max_page < rp->max_page)
643 hdev->max_page = rp->max_page;
645 if (rp->page < HCI_MAX_PAGES)
646 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for HCI_Read_Flow_Control_Mode: cache whether the
 * controller uses packet-based or data-block-based flow control.
 */
649 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
652 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
654 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
659 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for HCI_Read_Buffer_Size: record the controller's
 * ACL/SCO MTUs and packet counts and initialise the free-packet
 * counters; the HCI_QUIRK_FIXUP_BUFFER_SIZE branch body is elided in
 * this extract.
 */
662 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
664 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
666 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
671 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
672 hdev->sco_mtu = rp->sco_mtu;
673 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
674 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
676 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
681 hdev->acl_cnt = hdev->acl_pkts;
682 hdev->sco_cnt = hdev->sco_pkts;
684 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
685 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for HCI_Read_BD_ADDR: adopt the controller address
 * during init, and additionally remember it as the setup-time address
 * while in the SETUP phase.
 */
688 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
690 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
692 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
697 if (test_bit(HCI_INIT, &hdev->flags))
698 bacpy(&hdev->bdaddr, &rp->bdaddr);
700 if (hci_dev_test_flag(hdev, HCI_SETUP))
701 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for HCI_Read_Page_Scan_Activity: cache the interval
 * and window, but only while the device is initialising.
 */
704 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
707 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
709 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
714 if (test_bit(HCI_INIT, &hdev->flags)) {
715 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
716 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for HCI_Write_Page_Scan_Activity: cache the interval
 * and window values we sent with the command.
 */
720 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
723 u8 status = *((u8 *) skb->data);
724 struct hci_cp_write_page_scan_activity *sent;
726 BT_DBG("%s status 0x%2.2x", hdev->name, status);
731 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
735 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
736 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for HCI_Read_Page_Scan_Type: cache the type only
 * during init.
 */
739 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
742 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
744 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
749 if (test_bit(HCI_INIT, &hdev->flags))
750 hdev->page_scan_type = rp->type;
/* Command Complete for HCI_Write_Page_Scan_Type: cache the single-byte
 * type taken from the sent command parameters.
 */
753 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
756 u8 status = *((u8 *) skb->data);
759 BT_DBG("%s status 0x%2.2x", hdev->name, status);
764 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
766 hdev->page_scan_type = *type;
/* Command Complete for HCI_Read_Data_Block_Size (block-based flow
 * control): cache block MTU, block length and total block count, and
 * start the free-block counter at the full count.
 */
769 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
772 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
774 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
779 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
780 hdev->block_len = __le16_to_cpu(rp->block_len);
781 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
783 hdev->block_cnt = hdev->num_blocks;
785 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
786 hdev->block_cnt, hdev->block_len);
/* Command Complete for HCI_Read_Clock: validate the response length,
 * then store either the local clock (which == 0x00) on hdev or the
 * piconet clock plus accuracy on the matching connection.
 */
789 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
791 struct hci_rp_read_clock *rp = (void *) skb->data;
792 struct hci_cp_read_clock *cp;
793 struct hci_conn *conn;
795 BT_DBG("%s", hdev->name);
797 if (skb->len < sizeof(*rp))
805 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
809 if (cp->which == 0x00) {
810 hdev->clock = le32_to_cpu(rp->clock);
814 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
816 conn->clock = le32_to_cpu(rp->clock);
817 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
821 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_AMP_Info: cache the full set of
 * AMP controller capabilities (bandwidth, latency, PDU/assoc sizes,
 * flush timeouts) on hdev.
 */
824 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
827 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
829 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834 hdev->amp_status = rp->amp_status;
835 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
836 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
837 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
838 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
839 hdev->amp_type = rp->amp_type;
840 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
841 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
842 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
843 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete for HCI_Read_Inquiry_Response_TX_Power_Level: cache
 * the TX power used in inquiry responses.
 */
846 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
849 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
851 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
856 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for HCI_PIN_Code_Request_Reply: forward the result to
 * mgmt when active, and remember the PIN length on the ACL connection so
 * the resulting link key can be classified later.
 */
859 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
861 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
862 struct hci_cp_pin_code_reply *cp;
863 struct hci_conn *conn;
865 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
869 if (hci_dev_test_flag(hdev, HCI_MGMT))
870 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
875 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
879 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
881 conn->pin_length = cp->pin_len;
884 hci_dev_unlock(hdev);
/* Command Complete for HCI_PIN_Code_Request_Negative_Reply: report the
 * rejection result to mgmt when active.
 */
887 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
889 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
891 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
895 if (hci_dev_test_flag(hdev, HCI_MGMT))
896 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
899 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Buffer_Size: cache the LE ACL data
 * MTU and packet count and initialise the LE free-packet counter.
 */
902 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
905 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
907 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
912 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
913 hdev->le_pkts = rp->le_max_pkt;
915 hdev->le_cnt = hdev->le_pkts;
917 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for HCI_LE_Read_Local_Supported_Features: cache the
 * 8-byte LE feature mask.
 */
920 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
923 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for HCI_LE_Read_Advertising_Channel_TX_Power: cache
 * the advertising TX power level.
 */
933 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
936 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
938 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943 hdev->adv_tx_power = rp->tx_power;
/* Command Complete for HCI_User_Confirmation_Request_Reply: forward the
 * result to mgmt when active.
 */
946 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
948 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
950 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
954 if (hci_dev_test_flag(hdev, HCI_MGMT))
955 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
958 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Confirmation_Request_Negative_Reply:
 * forward the rejection result to mgmt when active.
 */
961 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
964 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
966 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
970 if (hci_dev_test_flag(hdev, HCI_MGMT))
971 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 ACL_LINK, 0, rp->status);
974 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Reply: forward the
 * result to mgmt when active (response struct is shared with the
 * user-confirm reply).
 */
977 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
985 if (hci_dev_test_flag(hdev, HCI_MGMT))
986 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
989 hci_dev_unlock(hdev);
/* Command Complete for HCI_User_Passkey_Request_Negative_Reply: forward
 * the rejection result to mgmt when active.
 */
992 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1001 if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 ACL_LINK, 0, rp->status);
1005 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_OOB_Data: only the debug trace is
 * visible here; any use of rp beyond logging is elided in this extract.
 */
1008 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1011 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_Read_Local_OOB_Extended_Data: only the debug
 * trace is visible here; further handling is elided in this extract.
 */
1016 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1017 struct sk_buff *skb)
1019 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1021 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
/* Command Complete for HCI_LE_Set_Random_Address: cache the random
 * address we programmed into the controller.
 */
1024 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1026 __u8 status = *((__u8 *) skb->data);
1029 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1034 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1040 bacpy(&hdev->random_addr, sent);
1042 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Default_PHY: cache the default TX/RX
 * PHY preferences we sent.
 */
1045 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1047 __u8 status = *((__u8 *) skb->data);
1048 struct hci_cp_le_set_default_phy *cp;
1050 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1055 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1061 hdev->le_tx_def_phys = cp->tx_phys;
1062 hdev->le_rx_def_phys = cp->rx_phys;
1064 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertising_Set_Random_Address: for
 * advertising instance 0 the address is stored on hdev (used by the Set
 * adv and directed advertising paths); for any other instance it is
 * stored on that instance's adv_info.
 */
1067 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1070 __u8 status = *((__u8 *) skb->data);
1071 struct hci_cp_le_set_adv_set_rand_addr *cp;
1072 struct adv_info *adv_instance;
1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1083 if (!hdev->cur_adv_instance) {
1084 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1085 bacpy(&hdev->random_addr, &cp->bdaddr);
1087 adv_instance = hci_find_adv_instance(hdev,
1088 hdev->cur_adv_instance);
1090 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1093 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertising_Enable: track the
 * HCI_LE_ADV flag, and when advertising was enabled for a peripheral
 * connection attempt, arm the LE connection timeout so a peer that never
 * connects does not leave us stuck.
 */
1096 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1098 __u8 *sent, status = *((__u8 *) skb->data);
1100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1105 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1111 /* If we're doing connection initiation as peripheral. Set a
1112 * timeout in case something goes wrong.
1115 struct hci_conn *conn;
1117 hci_dev_set_flag(hdev, HCI_LE_ADV);
1119 conn = hci_lookup_le_connect(hdev);
1121 queue_delayed_work(hdev->workqueue,
1122 &conn->le_conn_timeout,
1123 conn->conn_timeout);
1125 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1128 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Advertising_Enable: same
 * bookkeeping as the legacy adv-enable handler — maintain HCI_LE_ADV and
 * arm the LE connection timeout when advertising as peripheral for a
 * pending connection.
 */
1131 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1132 struct sk_buff *skb)
1134 struct hci_cp_le_set_ext_adv_enable *cp;
1135 __u8 status = *((__u8 *) skb->data);
1137 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1142 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1149 struct hci_conn *conn;
1151 hci_dev_set_flag(hdev, HCI_LE_ADV);
1153 conn = hci_lookup_le_connect(hdev);
1155 queue_delayed_work(hdev->workqueue,
1156 &conn->le_conn_timeout,
1157 conn->conn_timeout);
1159 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1162 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Parameters: remember which scan
 * type (active/passive) we configured.
 */
1165 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1167 struct hci_cp_le_set_scan_param *cp;
1168 __u8 status = *((__u8 *) skb->data);
1170 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1175 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1181 hdev->le_scan_type = cp->type;
1183 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Scan_Parameters: the scan
 * type is read from the first per-PHY parameter block trailing the
 * fixed command header.
 */
1186 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1187 struct sk_buff *skb)
1189 struct hci_cp_le_set_ext_scan_params *cp;
1190 __u8 status = *((__u8 *) skb->data);
1191 struct hci_cp_le_scan_phy_params *phy_param;
1193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1198 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1202 phy_param = (void *)cp->data;
1206 hdev->le_scan_type = phy_param->type;
1208 hci_dev_unlock(hdev);
/* True when a deferred advertising report is buffered in the discovery
 * state (last_adv_addr differs from BDADDR_ANY).
 */
1211 static bool has_pending_adv_report(struct hci_dev *hdev)
1213 struct discovery_state *d = &hdev->discovery;
1215 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report by resetting the stored address
 * to BDADDR_ANY and zeroing the cached data length.
 */
1218 static void clear_pending_adv_report(struct hci_dev *hdev)
1220 struct discovery_state *d = &hdev->discovery;
1222 bacpy(&d->last_adv_addr, BDADDR_ANY);
1223 d->last_adv_data_len = 0;
/* Buffer one advertising report (address, type, RSSI, flags, AD data)
 * in the discovery state so it can be merged with a following scan
 * response before being reported to userspace.
 */
1226 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1227 u8 bdaddr_type, s8 rssi, u32 flags,
1230 struct discovery_state *d = &hdev->discovery;
1232 bacpy(&d->last_adv_addr, bdaddr);
1233 d->last_adv_addr_type = bdaddr_type;
1234 d->last_adv_rssi = rssi;
1235 d->last_adv_flags = flags;
1236 memcpy(d->last_adv_data, data, len);
1237 d->last_adv_data_len = len;
/* Common completion logic for (extended) LE scan enable/disable.
 *
 * Enable: set HCI_LE_SCAN and, for active scanning, clear any stale
 * buffered advertising report.
 * Disable: flush a pending buffered report to mgmt, cancel the
 * scan-disable timer, clear HCI_LE_SCAN, and either mark discovery
 * stopped (scan was interrupted by a connect request) or re-enable
 * advertising that active scanning had suppressed.  The TIZEN_BT ifdef
 * selects a Tizen-specific discovery-state helper; the comment in the
 * source flags the upstream line as buggy on that platform.
 * Any other enable value is reported as use of a reserved parameter.
 */
1240 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1245 case LE_SCAN_ENABLE:
1246 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1247 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1248 clear_pending_adv_report(hdev);
1251 case LE_SCAN_DISABLE:
1252 /* We do this here instead of when setting DISCOVERY_STOPPED
1253 * since the latter would potentially require waiting for
1254 * inquiry to stop too.
1256 if (has_pending_adv_report(hdev)) {
1257 struct discovery_state *d = &hdev->discovery;
1259 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1260 d->last_adv_addr_type, NULL,
1261 d->last_adv_rssi, d->last_adv_flags,
1263 d->last_adv_data_len, NULL, 0);
1266 /* Cancel this timer so that we don't try to disable scanning
1267 * when it's already disabled.
1269 cancel_delayed_work(&hdev->le_scan_disable);
1271 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1273 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1274 * interrupted scanning due to a connect request. Mark
1275 * therefore discovery as stopped. If this was not
1276 * because of a connect request advertising might have
1277 * been disabled because of active scanning, so
1278 * re-enable it again if necessary.
1280 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1281 #ifndef TIZEN_BT /* The below line is kernel bug. */
1282 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1284 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1286 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1287 hdev->discovery.state == DISCOVERY_FINDING)
1288 hci_req_reenable_advertising(hdev);
1293 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1298 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Enable: delegate to the shared
 * scan enable/disable completion logic with the value we sent.
 */
1301 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1302 struct sk_buff *skb)
1304 struct hci_cp_le_set_scan_enable *cp;
1305 __u8 status = *((__u8 *) skb->data);
1307 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1312 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1316 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for HCI_LE_Set_Extended_Scan_Enable: same delegation
 * as the legacy handler, shared completion logic.
 */
1319 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1320 struct sk_buff *skb)
1322 struct hci_cp_le_set_ext_scan_enable *cp;
1323 __u8 status = *((__u8 *) skb->data);
1325 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1330 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1334 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete for HCI_LE_Read_Number_of_Supported_Advertising_Sets:
 * cache how many advertising sets the controller supports.
 */
1337 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1338 struct sk_buff *skb)
1340 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1342 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1348 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete for HCI_LE_Read_White_List_Size: cache the
 * controller's white-list capacity.
 */
1351 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1352 struct sk_buff *skb)
1354 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1356 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1361 hdev->le_white_list_size = rp->size;
/* Command Complete for HCI_LE_Clear_White_List: drop the host's shadow
 * copy of the white list to match the controller.
 */
1364 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1365 struct sk_buff *skb)
1367 __u8 status = *((__u8 *) skb->data);
1369 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1374 hci_bdaddr_list_clear(&hdev->le_white_list);
/* Command Complete for HCI_LE_Add_Device_To_White_List: mirror the
 * addition into the host's shadow white list using the address from the
 * sent command.
 */
1377 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1378 struct sk_buff *skb)
1380 struct hci_cp_le_add_to_white_list *sent;
1381 __u8 status = *((__u8 *) skb->data);
1383 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1388 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1392 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
/* Command Complete for HCI_LE_Remove_Device_From_White_List: mirror the
 * removal into the host's shadow white list.
 */
1396 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1397 struct sk_buff *skb)
1399 struct hci_cp_le_del_from_white_list *sent;
1400 __u8 status = *((__u8 *) skb->data);
1402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1407 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1411 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
/* Command Complete for HCI_LE_Read_Supported_States: cache the 8-byte
 * LE state-combination bitmap.
 */
1415 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1416 struct sk_buff *skb)
1418 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1420 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1425 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete for HCI_LE_Read_Suggested_Default_Data_Length: cache
 * the default TX octet count and time.
 */
1428 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1429 struct sk_buff *skb)
1431 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1433 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1438 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1439 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete for HCI_LE_Write_Suggested_Default_Data_Length:
 * cache the values we sent with the command.
 */
1442 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1443 struct sk_buff *skb)
1445 struct hci_cp_le_write_def_data_len *sent;
1446 __u8 status = *((__u8 *) skb->data);
1448 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1453 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1457 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1458 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete for HCI_LE_Clear_Resolving_List: drop the host's
 * shadow copy of the resolving list.
 */
1461 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1462 struct sk_buff *skb)
1464 __u8 status = *((__u8 *) skb->data);
1466 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1471 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for HCI_LE_Read_Resolving_List_Size: cache the
 * controller's resolving-list capacity.
 */
1474 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1475 struct sk_buff *skb)
1477 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1479 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1484 hdev->le_resolv_list_size = rp->size;
/* Command Complete for HCI_LE_Set_Address_Resolution_Enable: track
 * whether controller-side (link-layer) RPA resolution is active via the
 * HCI_LL_RPA_RESOLUTION flag.
 */
1487 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1488 struct sk_buff *skb)
1490 __u8 *sent, status = *((__u8 *) skb->data);
1492 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1497 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1504 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1506 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1508 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Maximum_Data_Length: cache the
 * controller's maximum TX/RX octet counts and times.
 */
1511 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1512 struct sk_buff *skb)
1514 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1516 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1521 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1522 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1523 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1524 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete for HCI_Write_LE_Host_Supported: keep the host LE and
 * simultaneous LE/BR-EDR feature bits in sync with what was sent, and
 * maintain HCI_LE_ENABLED (dropping HCI_ADVERTISING when LE support is
 * turned off).
 */
1527 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1528 struct sk_buff *skb)
1530 struct hci_cp_write_le_host_supported *sent;
1531 __u8 status = *((__u8 *) skb->data);
1533 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1538 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1545 hdev->features[1][0] |= LMP_HOST_LE;
1546 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1548 hdev->features[1][0] &= ~LMP_HOST_LE;
1549 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1550 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1554 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1556 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1558 hci_dev_unlock(hdev);
1561 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1563 struct hci_cp_le_set_adv_param *cp;
1564 u8 status = *((u8 *) skb->data);
1566 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1571 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1576 hdev->adv_addr_type = cp->own_address_type;
1577 hci_dev_unlock(hdev);
1580 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1582 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1583 struct hci_cp_le_set_ext_adv_params *cp;
1584 struct adv_info *adv_instance;
1586 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1591 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1596 hdev->adv_addr_type = cp->own_addr_type;
1597 if (!hdev->cur_adv_instance) {
1598 /* Store in hdev for instance 0 */
1599 hdev->adv_tx_power = rp->tx_power;
1601 adv_instance = hci_find_adv_instance(hdev,
1602 hdev->cur_adv_instance);
1604 adv_instance->tx_power = rp->tx_power;
1606 /* Update adv data as tx power is known now */
1607 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1608 hci_dev_unlock(hdev);
1612 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1613 struct sk_buff *skb)
1615 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1617 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1618 hdev->name, rp->status, rp->le_ext_opcode);
1620 mgmt_enable_rssi_cc(hdev, rp, rp->status);
1623 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1624 struct sk_buff *skb)
1626 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1628 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1629 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1631 mgmt_raw_rssi_response(hdev, rp, rp->status);
1635 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1637 struct hci_rp_read_rssi *rp = (void *) skb->data;
1638 struct hci_conn *conn;
1640 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1647 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1649 conn->rssi = rp->rssi;
1651 hci_dev_unlock(hdev);
1654 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1656 struct hci_cp_read_tx_power *sent;
1657 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1658 struct hci_conn *conn;
1660 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1665 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1671 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1675 switch (sent->type) {
1677 conn->tx_power = rp->tx_power;
1680 conn->max_tx_power = rp->tx_power;
1685 hci_dev_unlock(hdev);
1688 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1690 u8 status = *((u8 *) skb->data);
1693 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1698 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1700 hdev->ssp_debug_mode = *mode;
1703 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1705 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1708 hci_conn_check_pending(hdev);
1712 set_bit(HCI_INQUIRY, &hdev->flags);
1715 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1717 struct hci_cp_create_conn *cp;
1718 struct hci_conn *conn;
1720 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1722 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1728 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1730 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1733 if (conn && conn->state == BT_CONNECT) {
1734 if (status != 0x0c || conn->attempt > 2) {
1735 conn->state = BT_CLOSED;
1736 hci_connect_cfm(conn, status);
1739 conn->state = BT_CONNECT2;
1743 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1746 bt_dev_err(hdev, "no memory for new connection");
1750 hci_dev_unlock(hdev);
1753 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1755 struct hci_cp_add_sco *cp;
1756 struct hci_conn *acl, *sco;
1759 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1764 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1768 handle = __le16_to_cpu(cp->handle);
1770 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1774 acl = hci_conn_hash_lookup_handle(hdev, handle);
1778 sco->state = BT_CLOSED;
1780 hci_connect_cfm(sco, status);
1785 hci_dev_unlock(hdev);
1788 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1790 struct hci_cp_auth_requested *cp;
1791 struct hci_conn *conn;
1793 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1798 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1804 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1806 if (conn->state == BT_CONFIG) {
1807 hci_connect_cfm(conn, status);
1808 hci_conn_drop(conn);
1812 hci_dev_unlock(hdev);
1815 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1817 struct hci_cp_set_conn_encrypt *cp;
1818 struct hci_conn *conn;
1820 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1825 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1831 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1833 if (conn->state == BT_CONFIG) {
1834 hci_connect_cfm(conn, status);
1835 hci_conn_drop(conn);
1839 hci_dev_unlock(hdev);
1842 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1843 struct hci_conn *conn)
1845 if (conn->state != BT_CONFIG || !conn->out)
1848 if (conn->pending_sec_level == BT_SECURITY_SDP)
1851 /* Only request authentication for SSP connections or non-SSP
1852 * devices with sec_level MEDIUM or HIGH or if MITM protection
1855 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1856 conn->pending_sec_level != BT_SECURITY_FIPS &&
1857 conn->pending_sec_level != BT_SECURITY_HIGH &&
1858 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1864 static int hci_resolve_name(struct hci_dev *hdev,
1865 struct inquiry_entry *e)
1867 struct hci_cp_remote_name_req cp;
1869 memset(&cp, 0, sizeof(cp));
1871 bacpy(&cp.bdaddr, &e->data.bdaddr);
1872 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1873 cp.pscan_mode = e->data.pscan_mode;
1874 cp.clock_offset = e->data.clock_offset;
1876 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1879 static bool hci_resolve_next_name(struct hci_dev *hdev)
1881 struct discovery_state *discov = &hdev->discovery;
1882 struct inquiry_entry *e;
1884 if (list_empty(&discov->resolve))
1887 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1891 if (hci_resolve_name(hdev, e) == 0) {
1892 e->name_state = NAME_PENDING;
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
	/* Post-process a remote-name result: notify mgmt, update the
	 * inquiry cache entry and continue/finish name resolution.
	 * NOTE(review): several structural lines (braces, guard
	 * conditions and what appears to be an #ifdef around the
	 * "connected" notification below) are missing from this view —
	 * confirm against the full source before modifying.
	 */
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	/* Variant A (presumably build-conditional): also push an
	 * explicit device-name update to mgmt.
	 */
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
		if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, conn, 0, name, name_len);
		mgmt_device_name_update(hdev, bdaddr, name, name_len);
	/* Variant B: only the one-shot "connected" notification. */
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	/* Nothing further to do once discovery is fully stopped. */
	if (discov->state == DISCOVERY_STOPPED)

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * event.
	 */
		/* Name resolved: record it and report it to userspace. */
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
		/* Request failed: remember the name could not be read. */
		e->name_state = NAME_NOT_KNOWN;

	/* More names pending — stay in DISCOVERY_RESOLVING. */
	if (hci_resolve_next_name(hdev))

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1958 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1960 struct hci_cp_remote_name_req *cp;
1961 struct hci_conn *conn;
1963 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1965 /* If successful wait for the name req complete event before
1966 * checking for the need to do authentication */
1970 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1976 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1978 if (hci_dev_test_flag(hdev, HCI_MGMT))
1979 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1984 if (!hci_outgoing_auth_needed(hdev, conn))
1987 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1988 struct hci_cp_auth_requested auth_cp;
1990 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1992 auth_cp.handle = __cpu_to_le16(conn->handle);
1993 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1994 sizeof(auth_cp), &auth_cp);
1998 hci_dev_unlock(hdev);
2001 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2003 struct hci_cp_read_remote_features *cp;
2004 struct hci_conn *conn;
2006 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2011 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2017 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2019 if (conn->state == BT_CONFIG) {
2020 hci_connect_cfm(conn, status);
2021 hci_conn_drop(conn);
2025 hci_dev_unlock(hdev);
2028 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2030 struct hci_cp_read_remote_ext_features *cp;
2031 struct hci_conn *conn;
2033 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2038 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2044 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2046 if (conn->state == BT_CONFIG) {
2047 hci_connect_cfm(conn, status);
2048 hci_conn_drop(conn);
2052 hci_dev_unlock(hdev);
2055 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2057 struct hci_cp_setup_sync_conn *cp;
2058 struct hci_conn *acl, *sco;
2061 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2066 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2070 handle = __le16_to_cpu(cp->handle);
2072 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2076 acl = hci_conn_hash_lookup_handle(hdev, handle);
2080 sco->state = BT_CLOSED;
2082 hci_connect_cfm(sco, status);
2087 hci_dev_unlock(hdev);
2090 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2092 struct hci_cp_sniff_mode *cp;
2093 struct hci_conn *conn;
2095 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2100 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2106 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2108 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2110 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2111 hci_sco_setup(conn, status);
2114 hci_dev_unlock(hdev);
2117 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2119 struct hci_cp_exit_sniff_mode *cp;
2120 struct hci_conn *conn;
2122 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2127 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2133 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2135 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2137 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2138 hci_sco_setup(conn, status);
2141 hci_dev_unlock(hdev);
2144 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2146 struct hci_cp_disconnect *cp;
2147 struct hci_conn *conn;
2152 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2158 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2160 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2161 conn->dst_type, status);
2163 hci_dev_unlock(hdev);
2166 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2167 u8 peer_addr_type, u8 own_address_type,
2170 struct hci_conn *conn;
2172 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2177 /* Store the initiator and responder address information which
2178 * is needed for SMP. These values will not change during the
2179 * lifetime of the connection.
2181 conn->init_addr_type = own_address_type;
2182 if (own_address_type == ADDR_LE_DEV_RANDOM)
2183 bacpy(&conn->init_addr, &hdev->random_addr);
2185 bacpy(&conn->init_addr, &hdev->bdaddr);
2187 conn->resp_addr_type = peer_addr_type;
2188 bacpy(&conn->resp_addr, peer_addr);
2190 /* We don't want the connection attempt to stick around
2191 * indefinitely since LE doesn't have a page timeout concept
2192 * like BR/EDR. Set a timer for any connection that doesn't use
2193 * the white list for connecting.
2195 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2196 queue_delayed_work(conn->hdev->workqueue,
2197 &conn->le_conn_timeout,
2198 conn->conn_timeout);
2201 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2203 struct hci_cp_le_create_conn *cp;
2205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2207 /* All connection failure handling is taken care of by the
2208 * hci_le_conn_failed function which is triggered by the HCI
2209 * request completion callbacks used for connecting.
2214 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2220 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2221 cp->own_address_type, cp->filter_policy);
2223 hci_dev_unlock(hdev);
2226 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2228 struct hci_cp_le_ext_create_conn *cp;
2230 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2232 /* All connection failure handling is taken care of by the
2233 * hci_le_conn_failed function which is triggered by the HCI
2234 * request completion callbacks used for connecting.
2239 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2245 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2246 cp->own_addr_type, cp->filter_policy);
2248 hci_dev_unlock(hdev);
2251 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2253 struct hci_cp_le_read_remote_features *cp;
2254 struct hci_conn *conn;
2256 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2261 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2267 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2269 if (conn->state == BT_CONFIG) {
2270 hci_connect_cfm(conn, status);
2271 hci_conn_drop(conn);
2275 hci_dev_unlock(hdev);
2278 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2280 struct hci_cp_le_start_enc *cp;
2281 struct hci_conn *conn;
2283 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2290 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2294 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2298 if (conn->state != BT_CONNECTED)
2301 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2302 hci_conn_drop(conn);
2305 hci_dev_unlock(hdev);
2308 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2310 struct hci_cp_switch_role *cp;
2311 struct hci_conn *conn;
2313 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2318 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2324 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2326 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2328 hci_dev_unlock(hdev);
2331 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2333 __u8 status = *((__u8 *) skb->data);
2334 struct discovery_state *discov = &hdev->discovery;
2335 struct inquiry_entry *e;
2337 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2339 hci_conn_check_pending(hdev);
2341 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2344 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2345 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2347 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2352 if (discov->state != DISCOVERY_FINDING)
2355 if (list_empty(&discov->resolve)) {
2356 /* When BR/EDR inquiry is active and no LE scanning is in
2357 * progress, then change discovery state to indicate completion.
2359 * When running LE scanning and BR/EDR inquiry simultaneously
2360 * and the LE scan already finished, then change the discovery
2361 * state to indicate completion.
2363 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2364 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2365 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2369 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2370 if (e && hci_resolve_name(hdev, e) == 0) {
2371 e->name_state = NAME_PENDING;
2372 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2374 /* When BR/EDR inquiry is active and no LE scanning is in
2375 * progress, then change discovery state to indicate completion.
2377 * When running LE scanning and BR/EDR inquiry simultaneously
2378 * and the LE scan already finished, then change the discovery
2379 * state to indicate completion.
2381 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2382 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2383 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2387 hci_dev_unlock(hdev);
2390 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2392 struct inquiry_data data;
2393 struct inquiry_info *info = (void *) (skb->data + 1);
2394 int num_rsp = *((__u8 *) skb->data);
2396 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2401 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2406 for (; num_rsp; num_rsp--, info++) {
2409 bacpy(&data.bdaddr, &info->bdaddr);
2410 data.pscan_rep_mode = info->pscan_rep_mode;
2411 data.pscan_period_mode = info->pscan_period_mode;
2412 data.pscan_mode = info->pscan_mode;
2413 memcpy(data.dev_class, info->dev_class, 3);
2414 data.clock_offset = info->clock_offset;
2415 data.rssi = HCI_RSSI_INVALID;
2416 data.ssp_mode = 0x00;
2418 flags = hci_inquiry_cache_update(hdev, &data, false);
2420 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2421 info->dev_class, HCI_RSSI_INVALID,
2422 flags, NULL, 0, NULL, 0);
2425 hci_dev_unlock(hdev);
2428 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2430 struct hci_ev_conn_complete *ev = (void *) skb->data;
2431 struct hci_conn *conn;
2433 BT_DBG("%s", hdev->name);
2437 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2439 if (ev->link_type != SCO_LINK)
2442 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2446 conn->type = SCO_LINK;
2450 conn->handle = __le16_to_cpu(ev->handle);
2452 if (conn->type == ACL_LINK) {
2453 conn->state = BT_CONFIG;
2454 hci_conn_hold(conn);
2456 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2457 !hci_find_link_key(hdev, &ev->bdaddr))
2458 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2460 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2462 conn->state = BT_CONNECTED;
2464 hci_debugfs_create_conn(conn);
2465 hci_conn_add_sysfs(conn);
2467 if (test_bit(HCI_AUTH, &hdev->flags))
2468 set_bit(HCI_CONN_AUTH, &conn->flags);
2470 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2471 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2473 /* Get remote features */
2474 if (conn->type == ACL_LINK) {
2475 struct hci_cp_read_remote_features cp;
2476 cp.handle = ev->handle;
2477 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2480 hci_req_update_scan(hdev);
2483 /* Set packet type for incoming connection */
2484 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2485 struct hci_cp_change_conn_ptype cp;
2486 cp.handle = ev->handle;
2487 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2488 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2492 conn->state = BT_CLOSED;
2493 if (conn->type == ACL_LINK)
2494 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2495 conn->dst_type, ev->status);
2498 if (conn->type == ACL_LINK)
2499 hci_sco_setup(conn, ev->status);
2502 hci_connect_cfm(conn, ev->status);
2504 } else if (ev->link_type != ACL_LINK)
2505 hci_connect_cfm(conn, ev->status);
2508 hci_dev_unlock(hdev);
2510 hci_conn_check_pending(hdev);
2513 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2515 struct hci_cp_reject_conn_req cp;
2517 bacpy(&cp.bdaddr, bdaddr);
2518 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2519 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2522 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2524 struct hci_ev_conn_request *ev = (void *) skb->data;
2525 int mask = hdev->link_mode;
2526 struct inquiry_entry *ie;
2527 struct hci_conn *conn;
2530 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2533 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2536 if (!(mask & HCI_LM_ACCEPT)) {
2537 hci_reject_conn(hdev, &ev->bdaddr);
2541 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2543 hci_reject_conn(hdev, &ev->bdaddr);
2547 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2548 * connection. These features are only touched through mgmt so
2549 * only do the checks if HCI_MGMT is set.
2551 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2552 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2553 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2555 hci_reject_conn(hdev, &ev->bdaddr);
2559 /* Connection accepted */
2563 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2565 memcpy(ie->data.dev_class, ev->dev_class, 3);
2567 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2570 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2573 bt_dev_err(hdev, "no memory for new connection");
2574 hci_dev_unlock(hdev);
2579 memcpy(conn->dev_class, ev->dev_class, 3);
2581 hci_dev_unlock(hdev);
2583 if (ev->link_type == ACL_LINK ||
2584 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2585 struct hci_cp_accept_conn_req cp;
2586 conn->state = BT_CONNECT;
2588 bacpy(&cp.bdaddr, &ev->bdaddr);
2590 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2591 cp.role = 0x00; /* Become master */
2593 cp.role = 0x01; /* Remain slave */
2595 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2596 } else if (!(flags & HCI_PROTO_DEFER)) {
2597 struct hci_cp_accept_sync_conn_req cp;
2598 conn->state = BT_CONNECT;
2600 bacpy(&cp.bdaddr, &ev->bdaddr);
2601 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2603 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2604 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2605 cp.max_latency = cpu_to_le16(0xffff);
2606 cp.content_format = cpu_to_le16(hdev->voice_setting);
2607 cp.retrans_effort = 0xff;
2609 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2612 conn->state = BT_CONNECT2;
2613 hci_connect_cfm(conn, 0);
2617 static u8 hci_to_mgmt_reason(u8 err)
2620 case HCI_ERROR_CONNECTION_TIMEOUT:
2621 return MGMT_DEV_DISCONN_TIMEOUT;
2622 case HCI_ERROR_REMOTE_USER_TERM:
2623 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2624 case HCI_ERROR_REMOTE_POWER_OFF:
2625 return MGMT_DEV_DISCONN_REMOTE;
2626 case HCI_ERROR_LOCAL_HOST_TERM:
2627 return MGMT_DEV_DISCONN_LOCAL_HOST;
2629 return MGMT_DEV_DISCONN_UNKNOWN;
2633 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2635 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2637 struct hci_conn_params *params;
2638 struct hci_conn *conn;
2639 bool mgmt_connected;
2642 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2646 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2651 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2652 conn->dst_type, ev->status);
2656 conn->state = BT_CLOSED;
2658 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2660 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2661 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2663 reason = hci_to_mgmt_reason(ev->reason);
2665 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2666 reason, mgmt_connected);
2668 if (conn->type == ACL_LINK) {
2669 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2670 hci_remove_link_key(hdev, &conn->dst);
2672 hci_req_update_scan(hdev);
2675 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2677 switch (params->auto_connect) {
2678 case HCI_AUTO_CONN_LINK_LOSS:
2679 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2683 case HCI_AUTO_CONN_DIRECT:
2684 case HCI_AUTO_CONN_ALWAYS:
2685 list_del_init(¶ms->action);
2686 list_add(¶ms->action, &hdev->pend_le_conns);
2687 hci_update_background_scan(hdev);
2697 hci_disconn_cfm(conn, ev->reason);
2700 /* Re-enable advertising if necessary, since it might
2701 * have been disabled by the connection. From the
2702 * HCI_LE_Set_Advertise_Enable command description in
2703 * the core specification (v4.0):
2704 * "The Controller shall continue advertising until the Host
2705 * issues an LE_Set_Advertise_Enable command with
2706 * Advertising_Enable set to 0x00 (Advertising is disabled)
2707 * or until a connection is created or until the Advertising
2708 * is timed out due to Directed Advertising."
2710 if (type == LE_LINK)
2711 hci_req_reenable_advertising(hdev);
2714 hci_dev_unlock(hdev);
2717 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2719 struct hci_ev_auth_complete *ev = (void *) skb->data;
2720 struct hci_conn *conn;
2722 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2726 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2731 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2733 if (!hci_conn_ssp_enabled(conn) &&
2734 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2735 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2737 set_bit(HCI_CONN_AUTH, &conn->flags);
2738 conn->sec_level = conn->pending_sec_level;
2741 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2742 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2744 mgmt_auth_failed(conn, ev->status);
2747 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2748 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2750 if (conn->state == BT_CONFIG) {
2751 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2752 struct hci_cp_set_conn_encrypt cp;
2753 cp.handle = ev->handle;
2755 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2758 conn->state = BT_CONNECTED;
2759 hci_connect_cfm(conn, ev->status);
2760 hci_conn_drop(conn);
2763 hci_auth_cfm(conn, ev->status);
2765 hci_conn_hold(conn);
2766 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2767 hci_conn_drop(conn);
2770 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2772 struct hci_cp_set_conn_encrypt cp;
2773 cp.handle = ev->handle;
2775 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2778 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2779 hci_encrypt_cfm(conn, ev->status, 0x00);
2784 hci_dev_unlock(hdev);
2787 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2789 struct hci_ev_remote_name *ev = (void *) skb->data;
2790 struct hci_conn *conn;
2792 BT_DBG("%s", hdev->name);
2794 hci_conn_check_pending(hdev);
2798 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2800 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2803 if (ev->status == 0)
2804 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2805 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2807 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2813 if (!hci_outgoing_auth_needed(hdev, conn))
2816 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2817 struct hci_cp_auth_requested cp;
2819 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2821 cp.handle = __cpu_to_le16(conn->handle);
2822 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2826 hci_dev_unlock(hdev);
2829 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2830 u16 opcode, struct sk_buff *skb)
2832 const struct hci_rp_read_enc_key_size *rp;
2833 struct hci_conn *conn;
2836 BT_DBG("%s status 0x%02x", hdev->name, status);
2838 if (!skb || skb->len < sizeof(*rp)) {
2839 bt_dev_err(hdev, "invalid read key size response");
2843 rp = (void *)skb->data;
2844 handle = le16_to_cpu(rp->handle);
2848 conn = hci_conn_hash_lookup_handle(hdev, handle);
2852 /* If we fail to read the encryption key size, assume maximum
2853 * (which is the same we do also when this HCI command isn't
2857 bt_dev_err(hdev, "failed to read key size for handle %u",
2859 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2861 conn->enc_key_size = rp->key_size;
2864 if (conn->state == BT_CONFIG) {
2865 conn->state = BT_CONNECTED;
2866 hci_connect_cfm(conn, 0);
2867 hci_conn_drop(conn);
2871 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2873 else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2878 hci_encrypt_cfm(conn, 0, encrypt);
2882 hci_dev_unlock(hdev);
2885 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2887 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2888 struct hci_conn *conn;
2890 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2894 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2900 /* Encryption implies authentication */
2901 set_bit(HCI_CONN_AUTH, &conn->flags);
2902 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2903 conn->sec_level = conn->pending_sec_level;
2905 /* P-256 authentication key implies FIPS */
2906 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2907 set_bit(HCI_CONN_FIPS, &conn->flags);
2909 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2910 conn->type == LE_LINK)
2911 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2913 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2914 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2918 /* We should disregard the current RPA and generate a new one
2919 * whenever the encryption procedure fails.
2921 if (ev->status && conn->type == LE_LINK) {
2922 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2923 hci_adv_instances_set_rpa_expired(hdev, true);
2926 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2928 if (ev->status && conn->state == BT_CONNECTED) {
2929 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2930 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2932 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2933 hci_conn_drop(conn);
2937 /* In Secure Connections Only mode, do not allow any connections
2938 * that are not encrypted with AES-CCM using a P-256 authenticated
2941 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2942 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2943 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2944 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2945 hci_conn_drop(conn);
2949 /* Try reading the encryption key size for encrypted ACL links */
2950 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2951 struct hci_cp_read_enc_key_size cp;
2952 struct hci_request req;
2954 /* Only send HCI_Read_Encryption_Key_Size if the
2955 * controller really supports it. If it doesn't, assume
2956 * the default size (16).
2958 if (!(hdev->commands[20] & 0x10)) {
2959 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2963 hci_req_init(&req, hdev);
2965 cp.handle = cpu_to_le16(conn->handle);
2966 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2968 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2969 bt_dev_err(hdev, "sending read key size failed");
2970 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2978 if (conn->state == BT_CONFIG) {
2980 conn->state = BT_CONNECTED;
2982 hci_connect_cfm(conn, ev->status);
2983 hci_conn_drop(conn);
2985 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2988 hci_dev_unlock(hdev);
2991 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2992 struct sk_buff *skb)
2994 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2995 struct hci_conn *conn;
2997 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3001 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3004 set_bit(HCI_CONN_SECURE, &conn->flags);
3006 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3008 hci_key_change_cfm(conn, ev->status);
3011 hci_dev_unlock(hdev);
3014 static void hci_remote_features_evt(struct hci_dev *hdev,
3015 struct sk_buff *skb)
3017 struct hci_ev_remote_features *ev = (void *) skb->data;
3018 struct hci_conn *conn;
3020 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3024 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3029 memcpy(conn->features[0], ev->features, 8);
3031 if (conn->state != BT_CONFIG)
3034 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3035 lmp_ext_feat_capable(conn)) {
3036 struct hci_cp_read_remote_ext_features cp;
3037 cp.handle = ev->handle;
3039 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3044 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3045 struct hci_cp_remote_name_req cp;
3046 memset(&cp, 0, sizeof(cp));
3047 bacpy(&cp.bdaddr, &conn->dst);
3048 cp.pscan_rep_mode = 0x02;
3049 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3050 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3051 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3053 if (!hci_outgoing_auth_needed(hdev, conn)) {
3054 conn->state = BT_CONNECTED;
3055 hci_connect_cfm(conn, ev->status);
3056 hci_conn_drop(conn);
3060 hci_dev_unlock(hdev);
3063 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3064 u16 *opcode, u8 *status,
3065 hci_req_complete_t *req_complete,
3066 hci_req_complete_skb_t *req_complete_skb)
3068 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3070 *opcode = __le16_to_cpu(ev->opcode);
3071 *status = skb->data[sizeof(*ev)];
3073 skb_pull(skb, sizeof(*ev));
3076 case HCI_OP_INQUIRY_CANCEL:
3077 hci_cc_inquiry_cancel(hdev, skb);
3080 case HCI_OP_PERIODIC_INQ:
3081 hci_cc_periodic_inq(hdev, skb);
3084 case HCI_OP_EXIT_PERIODIC_INQ:
3085 hci_cc_exit_periodic_inq(hdev, skb);
3088 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3089 hci_cc_remote_name_req_cancel(hdev, skb);
3092 case HCI_OP_ROLE_DISCOVERY:
3093 hci_cc_role_discovery(hdev, skb);
3096 case HCI_OP_READ_LINK_POLICY:
3097 hci_cc_read_link_policy(hdev, skb);
3100 case HCI_OP_WRITE_LINK_POLICY:
3101 hci_cc_write_link_policy(hdev, skb);
3104 case HCI_OP_READ_DEF_LINK_POLICY:
3105 hci_cc_read_def_link_policy(hdev, skb);
3108 case HCI_OP_WRITE_DEF_LINK_POLICY:
3109 hci_cc_write_def_link_policy(hdev, skb);
3113 hci_cc_reset(hdev, skb);
3116 case HCI_OP_READ_STORED_LINK_KEY:
3117 hci_cc_read_stored_link_key(hdev, skb);
3120 case HCI_OP_DELETE_STORED_LINK_KEY:
3121 hci_cc_delete_stored_link_key(hdev, skb);
3124 case HCI_OP_WRITE_LOCAL_NAME:
3125 hci_cc_write_local_name(hdev, skb);
3128 case HCI_OP_READ_LOCAL_NAME:
3129 hci_cc_read_local_name(hdev, skb);
3132 case HCI_OP_WRITE_AUTH_ENABLE:
3133 hci_cc_write_auth_enable(hdev, skb);
3136 case HCI_OP_WRITE_ENCRYPT_MODE:
3137 hci_cc_write_encrypt_mode(hdev, skb);
3140 case HCI_OP_WRITE_SCAN_ENABLE:
3141 hci_cc_write_scan_enable(hdev, skb);
3144 case HCI_OP_READ_CLASS_OF_DEV:
3145 hci_cc_read_class_of_dev(hdev, skb);
3148 case HCI_OP_WRITE_CLASS_OF_DEV:
3149 hci_cc_write_class_of_dev(hdev, skb);
3152 case HCI_OP_READ_VOICE_SETTING:
3153 hci_cc_read_voice_setting(hdev, skb);
3156 case HCI_OP_WRITE_VOICE_SETTING:
3157 hci_cc_write_voice_setting(hdev, skb);
3160 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3161 hci_cc_read_num_supported_iac(hdev, skb);
3164 case HCI_OP_WRITE_SSP_MODE:
3165 hci_cc_write_ssp_mode(hdev, skb);
3168 case HCI_OP_WRITE_SC_SUPPORT:
3169 hci_cc_write_sc_support(hdev, skb);
3172 case HCI_OP_READ_LOCAL_VERSION:
3173 hci_cc_read_local_version(hdev, skb);
3176 case HCI_OP_READ_LOCAL_COMMANDS:
3177 hci_cc_read_local_commands(hdev, skb);
3180 case HCI_OP_READ_LOCAL_FEATURES:
3181 hci_cc_read_local_features(hdev, skb);
3184 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3185 hci_cc_read_local_ext_features(hdev, skb);
3188 case HCI_OP_READ_BUFFER_SIZE:
3189 hci_cc_read_buffer_size(hdev, skb);
3192 case HCI_OP_READ_BD_ADDR:
3193 hci_cc_read_bd_addr(hdev, skb);
3196 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3197 hci_cc_read_page_scan_activity(hdev, skb);
3200 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3201 hci_cc_write_page_scan_activity(hdev, skb);
3204 case HCI_OP_READ_PAGE_SCAN_TYPE:
3205 hci_cc_read_page_scan_type(hdev, skb);
3208 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3209 hci_cc_write_page_scan_type(hdev, skb);
3212 case HCI_OP_READ_DATA_BLOCK_SIZE:
3213 hci_cc_read_data_block_size(hdev, skb);
3216 case HCI_OP_READ_FLOW_CONTROL_MODE:
3217 hci_cc_read_flow_control_mode(hdev, skb);
3220 case HCI_OP_READ_LOCAL_AMP_INFO:
3221 hci_cc_read_local_amp_info(hdev, skb);
3224 case HCI_OP_READ_CLOCK:
3225 hci_cc_read_clock(hdev, skb);
3228 case HCI_OP_READ_INQ_RSP_TX_POWER:
3229 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3232 case HCI_OP_PIN_CODE_REPLY:
3233 hci_cc_pin_code_reply(hdev, skb);
3236 case HCI_OP_PIN_CODE_NEG_REPLY:
3237 hci_cc_pin_code_neg_reply(hdev, skb);
3240 case HCI_OP_READ_LOCAL_OOB_DATA:
3241 hci_cc_read_local_oob_data(hdev, skb);
3244 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3245 hci_cc_read_local_oob_ext_data(hdev, skb);
3248 case HCI_OP_LE_READ_BUFFER_SIZE:
3249 hci_cc_le_read_buffer_size(hdev, skb);
3252 case HCI_OP_LE_READ_LOCAL_FEATURES:
3253 hci_cc_le_read_local_features(hdev, skb);
3256 case HCI_OP_LE_READ_ADV_TX_POWER:
3257 hci_cc_le_read_adv_tx_power(hdev, skb);
3260 case HCI_OP_USER_CONFIRM_REPLY:
3261 hci_cc_user_confirm_reply(hdev, skb);
3264 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3265 hci_cc_user_confirm_neg_reply(hdev, skb);
3268 case HCI_OP_USER_PASSKEY_REPLY:
3269 hci_cc_user_passkey_reply(hdev, skb);
3272 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3273 hci_cc_user_passkey_neg_reply(hdev, skb);
3276 case HCI_OP_LE_SET_RANDOM_ADDR:
3277 hci_cc_le_set_random_addr(hdev, skb);
3280 case HCI_OP_LE_SET_ADV_ENABLE:
3281 hci_cc_le_set_adv_enable(hdev, skb);
3284 case HCI_OP_LE_SET_SCAN_PARAM:
3285 hci_cc_le_set_scan_param(hdev, skb);
3288 case HCI_OP_LE_SET_SCAN_ENABLE:
3289 hci_cc_le_set_scan_enable(hdev, skb);
3292 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3293 hci_cc_le_read_white_list_size(hdev, skb);
3296 case HCI_OP_LE_CLEAR_WHITE_LIST:
3297 hci_cc_le_clear_white_list(hdev, skb);
3300 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3301 hci_cc_le_add_to_white_list(hdev, skb);
3304 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3305 hci_cc_le_del_from_white_list(hdev, skb);
3308 case HCI_OP_LE_READ_SUPPORTED_STATES:
3309 hci_cc_le_read_supported_states(hdev, skb);
3312 case HCI_OP_LE_READ_DEF_DATA_LEN:
3313 hci_cc_le_read_def_data_len(hdev, skb);
3316 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3317 hci_cc_le_write_def_data_len(hdev, skb);
3320 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3321 hci_cc_le_clear_resolv_list(hdev, skb);
3324 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3325 hci_cc_le_read_resolv_list_size(hdev, skb);
3328 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3329 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3332 case HCI_OP_LE_READ_MAX_DATA_LEN:
3333 hci_cc_le_read_max_data_len(hdev, skb);
3336 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3337 hci_cc_write_le_host_supported(hdev, skb);
3340 case HCI_OP_LE_SET_ADV_PARAM:
3341 hci_cc_set_adv_param(hdev, skb);
3344 case HCI_OP_READ_RSSI:
3345 hci_cc_read_rssi(hdev, skb);
3348 case HCI_OP_READ_TX_POWER:
3349 hci_cc_read_tx_power(hdev, skb);
3352 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3353 hci_cc_write_ssp_debug_mode(hdev, skb);
3356 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3357 hci_cc_le_set_ext_scan_param(hdev, skb);
3360 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3361 hci_cc_le_set_ext_scan_enable(hdev, skb);
3364 case HCI_OP_LE_SET_DEFAULT_PHY:
3365 hci_cc_le_set_default_phy(hdev, skb);
3368 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3369 hci_cc_le_read_num_adv_sets(hdev, skb);
3372 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3373 hci_cc_set_ext_adv_param(hdev, skb);
3376 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3377 hci_cc_le_set_ext_adv_enable(hdev, skb);
3380 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3381 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3384 case HCI_OP_ENABLE_RSSI:
3385 hci_cc_enable_rssi(hdev, skb);
3388 case HCI_OP_GET_RAW_RSSI:
3389 hci_cc_get_raw_rssi(hdev, skb);
3393 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3397 if (*opcode != HCI_OP_NOP)
3398 cancel_delayed_work(&hdev->cmd_timer);
3400 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3401 atomic_set(&hdev->cmd_cnt, 1);
3403 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3406 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3408 "unexpected event for opcode 0x%4.4x", *opcode);
3412 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3413 queue_work(hdev->workqueue, &hdev->cmd_work);
3416 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3417 u16 *opcode, u8 *status,
3418 hci_req_complete_t *req_complete,
3419 hci_req_complete_skb_t *req_complete_skb)
3421 struct hci_ev_cmd_status *ev = (void *) skb->data;
3423 skb_pull(skb, sizeof(*ev));
3425 *opcode = __le16_to_cpu(ev->opcode);
3426 *status = ev->status;
3429 case HCI_OP_INQUIRY:
3430 hci_cs_inquiry(hdev, ev->status);
3433 case HCI_OP_CREATE_CONN:
3434 hci_cs_create_conn(hdev, ev->status);
3437 case HCI_OP_DISCONNECT:
3438 hci_cs_disconnect(hdev, ev->status);
3441 case HCI_OP_ADD_SCO:
3442 hci_cs_add_sco(hdev, ev->status);
3445 case HCI_OP_AUTH_REQUESTED:
3446 hci_cs_auth_requested(hdev, ev->status);
3449 case HCI_OP_SET_CONN_ENCRYPT:
3450 hci_cs_set_conn_encrypt(hdev, ev->status);
3453 case HCI_OP_REMOTE_NAME_REQ:
3454 hci_cs_remote_name_req(hdev, ev->status);
3457 case HCI_OP_READ_REMOTE_FEATURES:
3458 hci_cs_read_remote_features(hdev, ev->status);
3461 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3462 hci_cs_read_remote_ext_features(hdev, ev->status);
3465 case HCI_OP_SETUP_SYNC_CONN:
3466 hci_cs_setup_sync_conn(hdev, ev->status);
3469 case HCI_OP_SNIFF_MODE:
3470 hci_cs_sniff_mode(hdev, ev->status);
3473 case HCI_OP_EXIT_SNIFF_MODE:
3474 hci_cs_exit_sniff_mode(hdev, ev->status);
3477 case HCI_OP_SWITCH_ROLE:
3478 hci_cs_switch_role(hdev, ev->status);
3481 case HCI_OP_LE_CREATE_CONN:
3482 hci_cs_le_create_conn(hdev, ev->status);
3485 case HCI_OP_LE_READ_REMOTE_FEATURES:
3486 hci_cs_le_read_remote_features(hdev, ev->status);
3489 case HCI_OP_LE_START_ENC:
3490 hci_cs_le_start_enc(hdev, ev->status);
3493 case HCI_OP_LE_EXT_CREATE_CONN:
3494 hci_cs_le_ext_create_conn(hdev, ev->status);
3498 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3502 if (*opcode != HCI_OP_NOP)
3503 cancel_delayed_work(&hdev->cmd_timer);
3505 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3506 atomic_set(&hdev->cmd_cnt, 1);
3508 /* Indicate request completion if the command failed. Also, if
3509 * we're not waiting for a special event and we get a success
3510 * command status we should try to flag the request as completed
3511 * (since for this kind of commands there will not be a command
3515 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3516 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3519 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3521 "unexpected event for opcode 0x%4.4x", *opcode);
3525 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3526 queue_work(hdev->workqueue, &hdev->cmd_work);
3529 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3531 struct hci_ev_hardware_error *ev = (void *) skb->data;
3533 hdev->hw_error_code = ev->code;
3535 queue_work(hdev->req_workqueue, &hdev->error_reset);
3538 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3540 struct hci_ev_role_change *ev = (void *) skb->data;
3541 struct hci_conn *conn;
3543 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3547 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3550 conn->role = ev->role;
3552 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3554 hci_role_switch_cfm(conn, ev->status, ev->role);
3557 hci_dev_unlock(hdev);
3560 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3562 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3565 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3566 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3570 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3571 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3572 BT_DBG("%s bad parameters", hdev->name);
3576 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3578 for (i = 0; i < ev->num_hndl; i++) {
3579 struct hci_comp_pkts_info *info = &ev->handles[i];
3580 struct hci_conn *conn;
3581 __u16 handle, count;
3583 handle = __le16_to_cpu(info->handle);
3584 count = __le16_to_cpu(info->count);
3586 conn = hci_conn_hash_lookup_handle(hdev, handle);
3590 conn->sent -= count;
3592 switch (conn->type) {
3594 hdev->acl_cnt += count;
3595 if (hdev->acl_cnt > hdev->acl_pkts)
3596 hdev->acl_cnt = hdev->acl_pkts;
3600 if (hdev->le_pkts) {
3601 hdev->le_cnt += count;
3602 if (hdev->le_cnt > hdev->le_pkts)
3603 hdev->le_cnt = hdev->le_pkts;
3605 hdev->acl_cnt += count;
3606 if (hdev->acl_cnt > hdev->acl_pkts)
3607 hdev->acl_cnt = hdev->acl_pkts;
3612 hdev->sco_cnt += count;
3613 if (hdev->sco_cnt > hdev->sco_pkts)
3614 hdev->sco_cnt = hdev->sco_pkts;
3618 bt_dev_err(hdev, "unknown type %d conn %p",
3624 queue_work(hdev->workqueue, &hdev->tx_work);
3627 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3630 struct hci_chan *chan;
3632 switch (hdev->dev_type) {
3634 return hci_conn_hash_lookup_handle(hdev, handle);
3636 chan = hci_chan_lookup_handle(hdev, handle);
3641 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3648 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3650 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3653 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3654 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
3658 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3659 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3660 BT_DBG("%s bad parameters", hdev->name);
3664 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3667 for (i = 0; i < ev->num_hndl; i++) {
3668 struct hci_comp_blocks_info *info = &ev->handles[i];
3669 struct hci_conn *conn = NULL;
3670 __u16 handle, block_count;
3672 handle = __le16_to_cpu(info->handle);
3673 block_count = __le16_to_cpu(info->blocks);
3675 conn = __hci_conn_lookup_handle(hdev, handle);
3679 conn->sent -= block_count;
3681 switch (conn->type) {
3684 hdev->block_cnt += block_count;
3685 if (hdev->block_cnt > hdev->num_blocks)
3686 hdev->block_cnt = hdev->num_blocks;
3690 bt_dev_err(hdev, "unknown type %d conn %p",
3696 queue_work(hdev->workqueue, &hdev->tx_work);
3699 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3701 struct hci_ev_mode_change *ev = (void *) skb->data;
3702 struct hci_conn *conn;
3704 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3708 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3710 conn->mode = ev->mode;
3712 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3714 if (conn->mode == HCI_CM_ACTIVE)
3715 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3717 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3720 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3721 hci_sco_setup(conn, ev->status);
3724 hci_dev_unlock(hdev);
3727 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3729 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3730 struct hci_conn *conn;
3732 BT_DBG("%s", hdev->name);
3736 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3740 if (conn->state == BT_CONNECTED) {
3741 hci_conn_hold(conn);
3742 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3743 hci_conn_drop(conn);
3746 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3747 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3748 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3749 sizeof(ev->bdaddr), &ev->bdaddr);
3750 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
3753 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3758 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3762 hci_dev_unlock(hdev);
3765 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3767 if (key_type == HCI_LK_CHANGED_COMBINATION)
3770 conn->pin_length = pin_len;
3771 conn->key_type = key_type;
3774 case HCI_LK_LOCAL_UNIT:
3775 case HCI_LK_REMOTE_UNIT:
3776 case HCI_LK_DEBUG_COMBINATION:
3778 case HCI_LK_COMBINATION:
3780 conn->pending_sec_level = BT_SECURITY_HIGH;
3782 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3784 case HCI_LK_UNAUTH_COMBINATION_P192:
3785 case HCI_LK_UNAUTH_COMBINATION_P256:
3786 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3788 case HCI_LK_AUTH_COMBINATION_P192:
3789 conn->pending_sec_level = BT_SECURITY_HIGH;
3791 case HCI_LK_AUTH_COMBINATION_P256:
3792 conn->pending_sec_level = BT_SECURITY_FIPS;
3797 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3799 struct hci_ev_link_key_req *ev = (void *) skb->data;
3800 struct hci_cp_link_key_reply cp;
3801 struct hci_conn *conn;
3802 struct link_key *key;
3804 BT_DBG("%s", hdev->name);
3806 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3811 key = hci_find_link_key(hdev, &ev->bdaddr);
3813 BT_DBG("%s link key not found for %pMR", hdev->name,
3818 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3821 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3823 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3825 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3826 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3827 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3828 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3832 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3833 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3834 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3835 BT_DBG("%s ignoring key unauthenticated for high security",
3840 conn_set_key(conn, key->type, key->pin_len);
3843 bacpy(&cp.bdaddr, &ev->bdaddr);
3844 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3846 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3848 hci_dev_unlock(hdev);
3853 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3854 hci_dev_unlock(hdev);
3857 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3859 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3860 struct hci_conn *conn;
3861 struct link_key *key;
3865 BT_DBG("%s", hdev->name);
3869 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3873 hci_conn_hold(conn);
3874 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3875 hci_conn_drop(conn);
3877 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3878 conn_set_key(conn, ev->key_type, conn->pin_length);
3880 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3883 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3884 ev->key_type, pin_len, &persistent);
3888 /* Update connection information since adding the key will have
3889 * fixed up the type in the case of changed combination keys.
3891 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3892 conn_set_key(conn, key->type, key->pin_len);
3894 mgmt_new_link_key(hdev, key, persistent);
3896 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3897 * is set. If it's not set simply remove the key from the kernel
3898 * list (we've still notified user space about it but with
3899 * store_hint being 0).
3901 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3902 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3903 list_del_rcu(&key->list);
3904 kfree_rcu(key, rcu);
3909 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3911 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3914 hci_dev_unlock(hdev);
3917 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3919 struct hci_ev_clock_offset *ev = (void *) skb->data;
3920 struct hci_conn *conn;
3922 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3926 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3927 if (conn && !ev->status) {
3928 struct inquiry_entry *ie;
3930 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3932 ie->data.clock_offset = ev->clock_offset;
3933 ie->timestamp = jiffies;
3937 hci_dev_unlock(hdev);
3940 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3942 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3943 struct hci_conn *conn;
3945 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3949 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3950 if (conn && !ev->status)
3951 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3953 hci_dev_unlock(hdev);
3956 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3958 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3959 struct inquiry_entry *ie;
3961 BT_DBG("%s", hdev->name);
3965 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3967 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3968 ie->timestamp = jiffies;
3971 hci_dev_unlock(hdev);
3974 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3975 struct sk_buff *skb)
3977 struct inquiry_data data;
3978 int num_rsp = *((__u8 *) skb->data);
3980 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3985 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3990 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3991 struct inquiry_info_with_rssi_and_pscan_mode *info;
3992 info = (void *) (skb->data + 1);
3994 for (; num_rsp; num_rsp--, info++) {
3997 bacpy(&data.bdaddr, &info->bdaddr);
3998 data.pscan_rep_mode = info->pscan_rep_mode;
3999 data.pscan_period_mode = info->pscan_period_mode;
4000 data.pscan_mode = info->pscan_mode;
4001 memcpy(data.dev_class, info->dev_class, 3);
4002 data.clock_offset = info->clock_offset;
4003 data.rssi = info->rssi;
4004 data.ssp_mode = 0x00;
4006 flags = hci_inquiry_cache_update(hdev, &data, false);
4008 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4009 info->dev_class, info->rssi,
4010 flags, NULL, 0, NULL, 0);
4013 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4015 for (; num_rsp; num_rsp--, info++) {
4018 bacpy(&data.bdaddr, &info->bdaddr);
4019 data.pscan_rep_mode = info->pscan_rep_mode;
4020 data.pscan_period_mode = info->pscan_period_mode;
4021 data.pscan_mode = 0x00;
4022 memcpy(data.dev_class, info->dev_class, 3);
4023 data.clock_offset = info->clock_offset;
4024 data.rssi = info->rssi;
4025 data.ssp_mode = 0x00;
4027 flags = hci_inquiry_cache_update(hdev, &data, false);
4029 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4030 info->dev_class, info->rssi,
4031 flags, NULL, 0, NULL, 0);
4035 hci_dev_unlock(hdev);
4038 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4039 struct sk_buff *skb)
4041 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4042 struct hci_conn *conn;
4044 BT_DBG("%s", hdev->name);
4048 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4052 if (ev->page < HCI_MAX_PAGES)
4053 memcpy(conn->features[ev->page], ev->features, 8);
4055 if (!ev->status && ev->page == 0x01) {
4056 struct inquiry_entry *ie;
4058 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4060 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4062 if (ev->features[0] & LMP_HOST_SSP) {
4063 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4065 /* It is mandatory by the Bluetooth specification that
4066 * Extended Inquiry Results are only used when Secure
4067 * Simple Pairing is enabled, but some devices violate
4070 * To make these devices work, the internal SSP
4071 * enabled flag needs to be cleared if the remote host
4072 * features do not indicate SSP support */
4073 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4076 if (ev->features[0] & LMP_HOST_SC)
4077 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4080 if (conn->state != BT_CONFIG)
4083 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4084 struct hci_cp_remote_name_req cp;
4085 memset(&cp, 0, sizeof(cp));
4086 bacpy(&cp.bdaddr, &conn->dst);
4087 cp.pscan_rep_mode = 0x02;
4088 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4089 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4090 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4092 if (!hci_outgoing_auth_needed(hdev, conn)) {
4093 conn->state = BT_CONNECTED;
4094 hci_connect_cfm(conn, ev->status);
4095 hci_conn_drop(conn);
4099 hci_dev_unlock(hdev);
4102 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4103 struct sk_buff *skb)
4105 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4106 struct hci_conn *conn;
4108 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4112 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4114 if (ev->link_type == ESCO_LINK)
4117 /* When the link type in the event indicates SCO connection
4118 * and lookup of the connection object fails, then check
4119 * if an eSCO connection object exists.
4121 * The core limits the synchronous connections to either
4122 * SCO or eSCO. The eSCO connection is preferred and tried
4123 * to be setup first and until successfully established,
4124 * the link type will be hinted as eSCO.
4126 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4131 switch (ev->status) {
4133 conn->handle = __le16_to_cpu(ev->handle);
4134 conn->state = BT_CONNECTED;
4135 conn->type = ev->link_type;
4137 hci_debugfs_create_conn(conn);
4138 hci_conn_add_sysfs(conn);
4141 case 0x10: /* Connection Accept Timeout */
4142 case 0x0d: /* Connection Rejected due to Limited Resources */
4143 case 0x11: /* Unsupported Feature or Parameter Value */
4144 case 0x1c: /* SCO interval rejected */
4145 case 0x1a: /* Unsupported Remote Feature */
4146 case 0x1f: /* Unspecified error */
4147 case 0x20: /* Unsupported LMP Parameter value */
4149 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4150 (hdev->esco_type & EDR_ESCO_MASK);
4151 if (hci_setup_sync(conn, conn->link->handle))
4157 conn->state = BT_CLOSED;
4161 hci_connect_cfm(conn, ev->status);
4166 hci_dev_unlock(hdev);
4169 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4173 while (parsed < eir_len) {
4174 u8 field_len = eir[0];
4179 parsed += field_len + 1;
4180 eir += field_len + 1;
4186 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4187 struct sk_buff *skb)
4189 struct inquiry_data data;
4190 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4191 int num_rsp = *((__u8 *) skb->data);
4194 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4199 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4204 for (; num_rsp; num_rsp--, info++) {
4208 bacpy(&data.bdaddr, &info->bdaddr);
4209 data.pscan_rep_mode = info->pscan_rep_mode;
4210 data.pscan_period_mode = info->pscan_period_mode;
4211 data.pscan_mode = 0x00;
4212 memcpy(data.dev_class, info->dev_class, 3);
4213 data.clock_offset = info->clock_offset;
4214 data.rssi = info->rssi;
4215 data.ssp_mode = 0x01;
4217 if (hci_dev_test_flag(hdev, HCI_MGMT))
4218 name_known = eir_get_data(info->data,
4220 EIR_NAME_COMPLETE, NULL);
4224 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4226 eir_len = eir_get_length(info->data, sizeof(info->data));
4228 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4229 info->dev_class, info->rssi,
4230 flags, info->data, eir_len, NULL, 0);
4233 hci_dev_unlock(hdev);
4236 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4237 struct sk_buff *skb)
4239 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4240 struct hci_conn *conn;
4242 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4243 __le16_to_cpu(ev->handle));
4247 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4251 /* For BR/EDR the necessary steps are taken through the
4252 * auth_complete event.
4254 if (conn->type != LE_LINK)
4258 conn->sec_level = conn->pending_sec_level;
4260 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4262 if (ev->status && conn->state == BT_CONNECTED) {
4263 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4264 hci_conn_drop(conn);
4268 if (conn->state == BT_CONFIG) {
4270 conn->state = BT_CONNECTED;
4272 hci_connect_cfm(conn, ev->status);
4273 hci_conn_drop(conn);
4275 hci_auth_cfm(conn, ev->status);
4277 hci_conn_hold(conn);
4278 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4279 hci_conn_drop(conn);
4283 hci_dev_unlock(hdev);
4286 static u8 hci_get_auth_req(struct hci_conn *conn)
4288 /* If remote requests no-bonding follow that lead */
4289 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4290 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4291 return conn->remote_auth | (conn->auth_type & 0x01);
4293 /* If both remote and local have enough IO capabilities, require
4296 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4297 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4298 return conn->remote_auth | 0x01;
4300 /* No MITM protection possible so ignore remote requirement */
4301 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
4304 static u8 bredr_oob_data_present(struct hci_conn *conn)
4306 struct hci_dev *hdev = conn->hdev;
4307 struct oob_data *data;
4309 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4313 if (bredr_sc_enabled(hdev)) {
4314 /* When Secure Connections is enabled, then just
4315 * return the present value stored with the OOB
4316 * data. The stored value contains the right present
4317 * information. However it can only be trusted when
4318 * not in Secure Connection Only mode.
4320 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4321 return data->present;
4323 /* When Secure Connections Only mode is enabled, then
4324 * the P-256 values are required. If they are not
4325 * available, then do not declare that OOB data is
4328 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4329 !memcmp(data->hash256, ZERO_KEY, 16))
4335 /* When Secure Connections is not enabled or actually
4336 * not supported by the hardware, then check that if
4337 * P-192 data values are present.
4339 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4340 !memcmp(data->hash192, ZERO_KEY, 16))
/* HCI IO Capability Request event (Secure Simple Pairing): the
 * controller asks the host for its IO capabilities. If pairing is
 * permitted (we are bondable, we initiated the pairing, or the remote
 * is not requesting bonding) an IO Capability Reply is sent; otherwise
 * a negative reply with HCI_ERROR_PAIRING_NOT_ALLOWED.
 *
 * NOTE(review): this listing appears to have lines elided (braces and
 * some statements are missing); only comments were added here.
 */
4346 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4348 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4349 struct hci_conn *conn;
4351 BT_DBG("%s", hdev->name);
4355 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4359 hci_conn_hold(conn);
/* Without the management interface userspace cannot take part in
 * the pairing decisions, so bail out to the negative-reply path.
 */
4361 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4364 /* Allow pairing if we're pairable, the initiators of the
4365 * pairing or if the remote is not requesting bonding.
4367 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4368 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4369 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4370 struct hci_cp_io_capability_reply cp;
4372 bacpy(&cp.bdaddr, &ev->bdaddr);
4373 /* Change the IO capability from KeyboardDisplay
4374 * to DisplayYesNo as it is not supported by BT spec. */
4375 cp.capability = (conn->io_capability == 0x04) ?
4376 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4378 /* If we are initiators, there is no remote information yet */
4379 if (conn->remote_auth == 0xff) {
4380 /* Request MITM protection if our IO caps allow it
4381 * except for the no-bonding case.
/* Bit 0 of the authentication requirement is the MITM flag */
4383 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4384 conn->auth_type != HCI_AT_NO_BONDING)
4385 conn->auth_type |= 0x01;
4387 conn->auth_type = hci_get_auth_req(conn);
4390 /* If we're not bondable, force one of the non-bondable
4391 * authentication requirement values.
4393 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4394 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4396 cp.authentication = conn->auth_type;
4397 cp.oob_data = bredr_oob_data_present(conn);
4399 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4402 struct hci_cp_io_capability_neg_reply cp;
4404 bacpy(&cp.bdaddr, &ev->bdaddr);
4405 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4407 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4412 hci_dev_unlock(hdev);
/* HCI IO Capability Response event: record the remote device's IO
 * capability and authentication requirements on the matching ACL
 * connection for use in the subsequent pairing steps.
 */
4415 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4417 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4418 struct hci_conn *conn;
4420 BT_DBG("%s", hdev->name);
4424 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4428 conn->remote_cap = ev->capability;
4429 conn->remote_auth = ev->authentication;
4432 hci_dev_unlock(hdev);
/* HCI User Confirmation Request event (numeric comparison pairing).
 * Rejects the request when we need MITM protection but the remote
 * cannot provide it, auto-accepts (optionally after a configurable
 * delay) when neither side requires MITM, and otherwise forwards the
 * request to userspace via mgmt_user_confirm_request().
 */
4435 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4436 struct sk_buff *skb)
4438 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4439 int loc_mitm, rem_mitm, confirm_hint = 0;
4440 struct hci_conn *conn;
4442 BT_DBG("%s", hdev->name);
4446 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4449 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the authentication requirement encodes the MITM flag */
4453 loc_mitm = (conn->auth_type & 0x01);
4454 rem_mitm = (conn->remote_auth & 0x01);
4456 /* If we require MITM but the remote device can't provide that
4457 * (it has NoInputNoOutput) then reject the confirmation
4458 * request. We check the security level here since it doesn't
4459 * necessarily match conn->auth_type.
4461 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4462 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4463 BT_DBG("Rejecting request: remote device can't provide MITM");
4464 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4465 sizeof(ev->bdaddr), &ev->bdaddr);
4469 /* If no side requires MITM protection; auto-accept */
4470 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4471 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4473 /* If we're not the initiators request authorization to
4474 * proceed from user space (mgmt_user_confirm with
4475 * confirm_hint set to 1). The exception is if neither
4476 * side had MITM or if the local IO capability is
4477 * NoInputNoOutput, in which case we do auto-accept
4479 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4480 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4481 (loc_mitm || rem_mitm)) {
4482 BT_DBG("Confirming auto-accept as acceptor");
4487 BT_DBG("Auto-accept of user confirmation with %ums delay",
4488 hdev->auto_accept_delay);
/* With a non-zero delay the accept is deferred to a delayed work
 * item so a passkey-entry screen has time to appear on the remote.
 */
4490 if (hdev->auto_accept_delay > 0) {
4491 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4492 queue_delayed_work(conn->hdev->workqueue,
4493 &conn->auto_accept_work, delay);
4497 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4498 sizeof(ev->bdaddr), &ev->bdaddr);
4503 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4504 le32_to_cpu(ev->passkey), confirm_hint);
4507 hci_dev_unlock(hdev);
/* HCI User Passkey Request event: forward the passkey entry request to
 * userspace via the management interface (when HCI_MGMT is enabled).
 */
4510 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4511 struct sk_buff *skb)
4513 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4515 BT_DBG("%s", hdev->name);
4517 if (hci_dev_test_flag(hdev, HCI_MGMT))
4518 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* HCI User Passkey Notification event: cache the passkey the user must
 * type on the remote device, reset the entered-digit counter, and
 * notify userspace so it can display the passkey.
 */
4521 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4522 struct sk_buff *skb)
4524 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4525 struct hci_conn *conn;
4527 BT_DBG("%s", hdev->name);
4529 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4533 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4534 conn->passkey_entered = 0;
4536 if (hci_dev_test_flag(hdev, HCI_MGMT))
4537 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4538 conn->dst_type, conn->passkey_notify,
4539 conn->passkey_entered);
/* HCI Keypress Notification event: track how many passkey digits the
 * remote user has entered/erased so far and relay the updated count to
 * userspace so the UI can mirror the remote side's progress.
 */
4542 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4544 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4545 struct hci_conn *conn;
4547 BT_DBG("%s", hdev->name);
4549 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4554 case HCI_KEYPRESS_STARTED:
4555 conn->passkey_entered = 0;
4558 case HCI_KEYPRESS_ENTERED:
4559 conn->passkey_entered++;
4562 case HCI_KEYPRESS_ERASED:
4563 conn->passkey_entered--;
4566 case HCI_KEYPRESS_CLEARED:
4567 conn->passkey_entered = 0;
4570 case HCI_KEYPRESS_COMPLETED:
4574 if (hci_dev_test_flag(hdev, HCI_MGMT))
4575 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4576 conn->dst_type, conn->passkey_notify,
4577 conn->passkey_entered);
/* HCI Simple Pairing Complete event: reset the cached remote
 * authentication requirement and, when the pairing failed and we were
 * not the ones who initiated authentication, report the failure to
 * userspace (avoiding a duplicate with the Auth Complete path).
 */
4580 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4581 struct sk_buff *skb)
4583 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4584 struct hci_conn *conn;
4586 BT_DBG("%s", hdev->name);
4590 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4594 /* Reset the authentication requirement to unknown */
4595 conn->remote_auth = 0xff;
4597 /* To avoid duplicate auth_failed events to user space we check
4598 * the HCI_CONN_AUTH_PEND flag which will be set if we
4599 * initiated the authentication. A traditional auth_complete
4600 * event gets always produced as initiator and is also mapped to
4601 * the mgmt_auth_failed event */
4602 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4603 mgmt_auth_failed(conn, ev->status);
/* Drop the reference taken when the pairing was started */
4605 hci_conn_drop(conn);
4608 hci_dev_unlock(hdev);
/* HCI Remote Host Supported Features Notification event: store the
 * remote host's feature page on the connection and mirror its SSP host
 * support bit into the inquiry cache entry, if one exists.
 */
4611 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4612 struct sk_buff *skb)
4614 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4615 struct inquiry_entry *ie;
4616 struct hci_conn *conn;
4618 BT_DBG("%s", hdev->name);
4622 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4624 memcpy(conn->features[1], ev->features, 8);
4626 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4628 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4630 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event: the controller asks for the OOB
 * hash/randomizer values previously received out-of-band from this
 * peer. Replies with the extended (P-192 + P-256) variant when BR/EDR
 * Secure Connections is enabled, the legacy P-192 reply otherwise, or
 * a negative reply when no stored OOB data is found.
 */
4633 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4634 struct sk_buff *skb)
4636 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4637 struct oob_data *data;
4639 BT_DBG("%s", hdev->name);
4643 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4646 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4648 struct hci_cp_remote_oob_data_neg_reply cp;
4650 bacpy(&cp.bdaddr, &ev->bdaddr);
4651 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4656 if (bredr_sc_enabled(hdev)) {
4657 struct hci_cp_remote_oob_ext_data_reply cp;
4659 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-only mode the legacy P-192 values must not be used,
 * so they are zeroed out of the reply.
 */
4660 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4661 memset(cp.hash192, 0, sizeof(cp.hash192));
4662 memset(cp.rand192, 0, sizeof(cp.rand192));
4664 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4665 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4667 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4668 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4670 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4673 struct hci_cp_remote_oob_data_reply cp;
4675 bacpy(&cp.bdaddr, &ev->bdaddr);
4676 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4677 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4679 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4684 hci_dev_unlock(hdev);
/* AMP (High Speed) handlers below are only built with CONFIG_BT_HS */
4687 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event: look up the physical link by handle and
 * kick off reading the final local AMP association data.
 */
4688 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4690 struct hci_ev_channel_selected *ev = (void *)skb->data;
4691 struct hci_conn *hcon;
4693 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4695 skb_pull(skb, sizeof(*ev));
4697 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4701 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: mark the AMP physical link as
 * connected, copy the peer address from the underlying BR/EDR
 * connection, register it with debugfs/sysfs and confirm the physical
 * link to the AMP manager.
 */
4704 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4705 struct sk_buff *skb)
4707 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4708 struct hci_conn *hcon, *bredr_hcon;
4710 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4715 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4717 hci_dev_unlock(hdev);
4723 hci_dev_unlock(hdev);
/* The AMP link is managed through the BR/EDR connection's AMP mgr */
4727 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4729 hcon->state = BT_CONNECTED;
4730 bacpy(&hcon->dst, &bredr_hcon->dst);
/* hold+drop pattern arms the disconnect timeout on the new link */
4732 hci_conn_hold(hcon);
4733 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4734 hci_conn_drop(hcon);
4736 hci_debugfs_create_conn(hcon);
4737 hci_conn_add_sysfs(hcon);
4739 amp_physical_cfm(bredr_hcon, hcon);
4741 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create an hci_chan for the new
 * logical link on the physical connection, then confirm the logical
 * link to the L2CAP channel tracked by the AMP manager (adjusting the
 * connection MTU to the controller's block MTU).
 */
4744 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4746 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4747 struct hci_conn *hcon;
4748 struct hci_chan *hchan;
4749 struct amp_mgr *mgr;
4751 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4752 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4755 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4759 /* Create AMP hchan */
4760 hchan = hci_chan_create(hcon);
4764 hchan->handle = le16_to_cpu(ev->handle);
4766 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4768 mgr = hcon->amp_mgr;
4769 if (mgr && mgr->bredr_chan) {
4770 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4772 l2cap_chan_lock(bredr_chan);
4774 bredr_chan->conn->mtu = hdev->block_mtu;
4775 l2cap_logical_cfm(bredr_chan, hchan, 0);
4776 hci_conn_hold(hcon);
4778 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event: find the hci_chan by
 * logical link handle and tear it down with the reported reason.
 */
4782 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4783 struct sk_buff *skb)
4785 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4786 struct hci_chan *hchan;
4788 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4789 le16_to_cpu(ev->handle), ev->status);
4796 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4800 amp_destroy_logical_link(hchan, ev->reason);
4803 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: mark the physical
 * link connection object as closed.
 */
4806 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4807 struct sk_buff *skb)
4809 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4810 struct hci_conn *hcon;
4812 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4819 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4821 hcon->state = BT_CLOSED;
4825 hci_dev_unlock(hdev);
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events. Locates (or creates) the hci_conn, works out the
 * initiator/responder address pair based on role and privacy settings,
 * resolves RPAs to identity addresses via the IRK store, rejects
 * blacklisted peers, transitions the connection to BT_CONFIG/
 * BT_CONNECTED (optionally after a remote-features exchange), and
 * cleans up any pending connection parameters entry that triggered it.
 */
4829 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
4830 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
4831 u16 interval, u16 latency, u16 supervision_timeout)
4833 struct hci_conn_params *params;
4834 struct hci_conn *conn;
4835 struct smp_irk *irk;
4840 /* All controllers implicitly stop advertising in the event of a
4841 * connection, so ensure that the state bit is cleared.
4843 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4845 conn = hci_lookup_le_connect(hdev);
4847 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
4849 bt_dev_err(hdev, "no memory for new connection");
4853 conn->dst_type = bdaddr_type;
4855 /* If we didn't have a hci_conn object previously
4856 * but we're in master role this must be something
4857 * initiated using a white list. Since white list based
4858 * connections are not "first class citizens" we don't
4859 * have full tracking of them. Therefore, we go ahead
4860 * with a "best effort" approach of determining the
4861 * initiator address based on the HCI_PRIVACY flag.
4864 conn->resp_addr_type = bdaddr_type;
4865 bacpy(&conn->resp_addr, bdaddr);
4866 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4867 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4868 bacpy(&conn->init_addr, &hdev->rpa);
4870 hci_copy_identity_address(hdev,
4872 &conn->init_addr_type);
/* A connection arrived, so the LE connection timeout is moot */
4876 cancel_delayed_work(&conn->le_conn_timeout);
4880 /* Set the responder (our side) address type based on
4881 * the advertising address type.
4883 conn->resp_addr_type = hdev->adv_addr_type;
4884 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
4885 /* In case of ext adv, resp_addr will be updated in
4886 * Adv Terminated event.
4888 if (!ext_adv_capable(hdev))
4889 bacpy(&conn->resp_addr, &hdev->random_addr);
4891 bacpy(&conn->resp_addr, &hdev->bdaddr);
4894 conn->init_addr_type = bdaddr_type;
4895 bacpy(&conn->init_addr, bdaddr);
4897 /* For incoming connections, set the default minimum
4898 * and maximum connection interval. They will be used
4899 * to check if the parameters are in range and if not
4900 * trigger the connection update procedure.
4902 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4903 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4906 /* Lookup the identity address from the stored connection
4907 * address and address type.
4909 * When establishing connections to an identity address, the
4910 * connection procedure will store the resolvable random
4911 * address first. Now if it can be converted back into the
4912 * identity address, start using the identity address from
4915 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4917 bacpy(&conn->dst, &irk->bdaddr);
4918 conn->dst_type = irk->addr_type;
/* Non-zero HCI status: report the failed connection and bail out */
4922 hci_le_conn_failed(conn, status);
4926 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4927 addr_type = BDADDR_LE_PUBLIC;
4929 addr_type = BDADDR_LE_RANDOM;
4931 /* Drop the connection if the device is blocked */
4932 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4933 hci_conn_drop(conn);
4937 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4938 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4940 conn->sec_level = BT_SECURITY_LOW;
4941 conn->handle = handle;
4942 conn->state = BT_CONFIG;
4944 conn->le_conn_interval = interval;
4945 conn->le_conn_latency = latency;
4946 conn->le_supv_timeout = supervision_timeout;
4948 hci_debugfs_create_conn(conn);
4949 hci_conn_add_sysfs(conn);
4952 /* The remote features procedure is defined for master
4953 * role only. So only in case of an initiated connection
4954 * request the remote features.
4956 * If the local controller supports slave-initiated features
4957 * exchange, then requesting the remote features in slave
4958 * role is possible. Otherwise just transition into the
4959 * connected state without requesting the remote features.
4962 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4963 struct hci_cp_le_read_remote_features cp;
4965 cp.handle = __cpu_to_le16(conn->handle);
4967 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4970 hci_conn_hold(conn);
4972 conn->state = BT_CONNECTED;
4973 hci_connect_cfm(conn, status);
4976 hci_connect_cfm(conn, status);
/* If an auto-connect params entry triggered this connection, release
 * the reference it kept on the hci_conn.
 */
4979 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4982 list_del_init(&params->action);
4984 hci_conn_drop(params->conn);
4985 hci_conn_put(params->conn);
4986 params->conn = NULL;
4991 hci_update_background_scan(hdev);
4992 hci_dev_unlock(hdev);
/* HCI LE Connection Complete event: unpack the little-endian event
 * fields and hand off to the shared le_conn_complete_evt() handler.
 */
4995 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4997 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4999 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5001 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5002 ev->role, le16_to_cpu(ev->handle),
5003 le16_to_cpu(ev->interval),
5004 le16_to_cpu(ev->latency),
5005 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Enhanced Connection Complete event: same as the legacy event
 * but with additional address fields; delegates the common fields to
 * le_conn_complete_evt().
 */
5008 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5009 struct sk_buff *skb)
5011 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5013 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5015 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5016 ev->role, le16_to_cpu(ev->handle),
5017 le16_to_cpu(ev->interval),
5018 le16_to_cpu(ev->latency),
5019 le16_to_cpu(ev->supervision_timeout));
/* HCI LE Advertising Set Terminated event: when an extended
 * advertising set was stopped by an incoming connection, fix up the
 * connection's responder address using the advertising set's random
 * address (deferred here because it is not known at conn-complete
 * time for extended advertising).
 */
5022 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5024 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5025 struct hci_conn *conn;
5027 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5032 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5034 struct adv_info *adv_instance;
5036 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
/* Instance 0 advertises with the controller's random address */
5039 if (!hdev->cur_adv_instance) {
5040 bacpy(&conn->resp_addr, &hdev->random_addr);
5044 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5046 bacpy(&conn->resp_addr, &adv_instance->random_addr);
/* HCI LE Connection Update Complete event: on failure notify userspace
 * of the failed update; on success record the new connection interval,
 * latency and supervision timeout and report the updated parameters.
 */
5050 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5051 struct sk_buff *skb)
5053 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5054 struct hci_conn *conn;
5056 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5063 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5067 hci_dev_unlock(hdev);
5068 mgmt_le_conn_update_failed(hdev, &conn->dst,
5069 conn->type, conn->dst_type, ev->status);
5073 conn->le_conn_interval = le16_to_cpu(ev->interval);
5074 conn->le_conn_latency = le16_to_cpu(ev->latency);
5075 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5078 hci_dev_unlock(hdev);
5081 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
5082 conn->dst_type, conn->le_conn_interval,
5083 conn->le_conn_latency, conn->le_supv_timeout);
5087 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertising report should trigger an LE
 * connection attempt. Only connectable advertising from non-blocked
 * devices present in the pend_le_conns list qualifies, and only while
 * no slave-role LE connection exists. Returns the hci_conn on success
 * (storing a reference in the params entry for non-explicit connects)
 * or NULL/failure otherwise.
 */
5088 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5090 u8 addr_type, u8 adv_type,
5091 bdaddr_t *direct_rpa)
5093 struct hci_conn *conn;
5094 struct hci_conn_params *params;
5096 /* If the event is not connectable don't proceed further */
5097 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5100 /* Ignore if the device is blocked */
5101 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5104 /* Most controller will fail if we try to create new connections
5105 * while we have an existing one in slave role.
5107 if (hdev->conn_hash.le_num_slave > 0)
5110 /* If we're not connectable only connect devices that we have in
5111 * our pend_le_conns list.
5113 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5118 if (!params->explicit_connect) {
5119 switch (params->auto_connect) {
5120 case HCI_AUTO_CONN_DIRECT:
5121 /* Only devices advertising with ADV_DIRECT_IND are
5122 * triggering a connection attempt. This is allowing
5123 * incoming connections from slave devices.
5125 if (adv_type != LE_ADV_DIRECT_IND)
5128 case HCI_AUTO_CONN_ALWAYS:
5129 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5130 * are triggering a connection attempt. This means
5131 * that incoming connectioms from slave device are
5132 * accepted and also outgoing connections to slave
5133 * devices are established when found.
5141 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5142 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
5144 if (!IS_ERR(conn)) {
5145 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5146 * by higher layer that tried to connect, if no then
5147 * store the pointer since we don't really have any
5148 * other owner of the object besides the params that
5149 * triggered it. This way we can abort the connection if
5150 * the parameters get removed and keep the reference
5151 * count consistent once the connection is established.
5154 if (!params->explicit_connect)
5155 params->conn = hci_conn_get(conn);
5160 switch (PTR_ERR(conn)) {
5162 /* If hci_connect() returns -EBUSY it means there is already
5163 * an LE connection attempt going on. Since controllers don't
5164 * support more than one connection attempt at the time, we
5165 * don't consider this an error case.
5169 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising-report processing shared by the legacy, extended
 * and directed report handlers. Validates the PDU type and AD data
 * length, filters directed reports against our own RPA/IRK, resolves
 * the advertiser's RPA to an identity address, optionally triggers a
 * pending auto-connection, and finally generates mgmt Device Found
 * events — merging ADV_IND/ADV_SCAN_IND reports with their following
 * SCAN_RSP where possible.
 */
5176 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5177 u8 bdaddr_type, bdaddr_t *direct_addr,
5178 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
5180 struct discovery_state *d = &hdev->discovery;
5181 struct smp_irk *irk;
5182 struct hci_conn *conn;
5189 case LE_ADV_DIRECT_IND:
5190 case LE_ADV_SCAN_IND:
5191 case LE_ADV_NONCONN_IND:
5192 case LE_ADV_SCAN_RSP:
5195 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5196 "type: 0x%02x", type);
5200 /* Find the end of the data in case the report contains padded zero
5201 * bytes at the end causing an invalid length value.
5203 * When data is NULL, len is 0 so there is no need for extra ptr
5204 * check as 'ptr < data + 0' is already false in such case.
5206 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5207 if (ptr + 1 + *ptr > data + len)
5211 real_len = ptr - data;
5213 /* Adjust for actual length */
5214 if (len != real_len) {
5215 bt_dev_err_ratelimited(hdev, "advertising data len corrected")
;
5219 /* If the direct address is present, then this report is from
5220 * a LE Direct Advertising Report event. In that case it is
5221 * important to see if the address is matching the local
5222 * controller address.
5225 /* Only resolvable random addresses are valid for these
5226 * kind of reports and others can be ignored.
5228 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5231 /* If the controller is not using resolvable random
5232 * addresses, then this report can be ignored.
5234 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5237 /* If the local IRK of the controller does not match
5238 * with the resolvable random address provided, then
5239 * this report can be ignored.
5241 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5245 /* Check if we need to convert to identity address */
5246 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5248 bdaddr = &irk->bdaddr;
5249 bdaddr_type = irk->addr_type;
5252 /* Check if we have been requested to connect to this device.
5254 * direct_addr is set only for directed advertising reports (it is NULL
5255 * for advertising reports) and is already verified to be RPA above.
5257 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5259 if (conn && type == LE_ADV_IND) {
5260 /* Store report for later inclusion by
5261 * mgmt_device_connected
5263 memcpy(conn->le_adv_data, data, len);
5264 conn->le_adv_data_len = len;
5267 /* Passive scanning shouldn't trigger any device found events,
5268 * except for devices marked as CONN_REPORT for which we do send
5269 * device found events.
5271 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5272 if (type == LE_ADV_DIRECT_IND)
5275 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5276 bdaddr, bdaddr_type))
5279 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5280 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5283 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5284 rssi, flags, data, len, NULL, 0);
5288 /* When receiving non-connectable or scannable undirected
5289 * advertising reports, this means that the remote device is
5290 * not connectable and then clearly indicate this in the
5291 * device found event.
5293 * When receiving a scan response, then there is no way to
5294 * know if the remote device is connectable or not. However
5295 * since scan responses are merged with a previously seen
5296 * advertising report, the flags field from that report
5299 * In the really unlikely case that a controller get confused
5300 * and just sends a scan response event, then it is marked as
5301 * not connectable as well.
5303 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5304 type == LE_ADV_SCAN_RSP)
5305 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5309 /* If there's nothing pending either store the data from this
5310 * event or send an immediate device found event if the data
5311 * should not be stored for later.
5313 if (!has_pending_adv_report(hdev)) {
5314 /* If the report will trigger a SCAN_REQ store it for
5317 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5318 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5319 rssi, flags, data, len);
5323 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5324 rssi, flags, data, len, NULL, 0);
5328 /* Check if the pending report is for the same device as the new one */
5329 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5330 bdaddr_type == d->last_adv_addr_type);
5332 /* If the pending data doesn't match this report or this isn't a
5333 * scan response (e.g. we got a duplicate ADV_IND) then force
5334 * sending of the pending data.
5336 if (type != LE_ADV_SCAN_RSP || !match) {
5337 /* Send out whatever is in the cache, but skip duplicates */
5339 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5340 d->last_adv_addr_type, NULL,
5341 d->last_adv_rssi, d->last_adv_flags,
5343 d->last_adv_data_len, NULL, 0);
5345 /* If the new report will trigger a SCAN_REQ store it for
5348 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5349 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5350 rssi, flags, data, len);
5354 /* The advertising reports cannot be merged, so clear
5355 * the pending report and send out a device found event.
5357 clear_pending_adv_report(hdev);
5358 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5359 rssi, flags, data, len, NULL, 0);
5363 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5364 * the new event is a SCAN_RSP. We can therefore proceed with
5365 * sending a merged device found event.
5367 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5368 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5369 d->last_adv_data, d->last_adv_data_len, data, len);
5370 clear_pending_adv_report(hdev);
/* HCI LE Advertising Report event: iterate over the packed reports in
 * the event, validate each AD data length against HCI_MAX_AD_LENGTH,
 * extract the trailing RSSI byte and feed every valid report to
 * process_adv_report().
 */
5373 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5375 u8 num_reports = skb->data[0];
5376 void *ptr = &skb->data[1];
5380 while (num_reports--) {
5381 struct hci_ev_le_advertising_info *ev = ptr;
5384 if (ev->length <= HCI_MAX_AD_LENGTH) {
/* RSSI is a single byte immediately after the AD data */
5385 rssi = ev->data[ev->length];
5386 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5387 ev->bdaddr_type, NULL, 0, rssi,
5388 ev->data, ev->length);
5390 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past header, AD data and the RSSI byte */
5393 ptr += sizeof(*ev) + ev->length + 1;
5396 hci_dev_unlock(hdev);
/* Map an extended advertising event type bitfield to the equivalent
 * legacy advertising PDU type, so extended reports can be funneled
 * through the common process_adv_report() path. Returns LE_ADV_INVALID
 * for combinations that do not correspond to any legacy type.
 */
5399 static u8 ext_evt_type_to_legacy(u16 evt_type)
/* Legacy-PDU reports carry the legacy type directly */
5401 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5403 case LE_LEGACY_ADV_IND:
5405 case LE_LEGACY_ADV_DIRECT_IND:
5406 return LE_ADV_DIRECT_IND;
5407 case LE_LEGACY_ADV_SCAN_IND:
5408 return LE_ADV_SCAN_IND;
5409 case LE_LEGACY_NONCONN_IND:
5410 return LE_ADV_NONCONN_IND;
5411 case LE_LEGACY_SCAN_RSP_ADV:
5412 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5413 return LE_ADV_SCAN_RSP;
5416 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5419 return LE_ADV_INVALID;
/* True extended PDUs: derive the closest legacy type from the
 * connectable/scannable/directed property bits.
 */
5422 if (evt_type & LE_EXT_ADV_CONN_IND) {
5423 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5424 return LE_ADV_DIRECT_IND;
5429 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5430 return LE_ADV_SCAN_RSP;
5432 if (evt_type & LE_EXT_ADV_SCAN_IND)
5433 return LE_ADV_SCAN_IND;
5435 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5436 evt_type & LE_EXT_ADV_DIRECT_IND)
5437 return LE_ADV_NONCONN_IND;
5439 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5442 return LE_ADV_INVALID;
/* HCI LE Extended Advertising Report event: translate each report's
 * extended event type to a legacy PDU type and, when the mapping is
 * valid, process it through the common process_adv_report() path.
 */
5445 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5447 u8 num_reports = skb->data[0];
5448 void *ptr = &skb->data[1];
5452 while (num_reports--) {
5453 struct hci_ev_le_ext_adv_report *ev = ptr;
5457 evt_type = __le16_to_cpu(ev->evt_type);
5458 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5459 if (legacy_evt_type != LE_ADV_INVALID) {
5460 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5461 ev->bdaddr_type, NULL, 0, ev->rssi,
5462 ev->data, ev->length);
5465 ptr += sizeof(*ev) + ev->length + 1;
5468 hci_dev_unlock(hdev);
/* HCI LE Read Remote Features Complete event: store the remote LE
 * feature page and, when the connection was waiting in BT_CONFIG for
 * this exchange, transition it to BT_CONNECTED. Error 0x1a
 * (unsupported remote feature) from a slave-initiated exchange is
 * tolerated and treated as success.
 */
5471 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5472 struct sk_buff *skb)
5474 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5475 struct hci_conn *conn;
5477 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5481 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5484 memcpy(conn->features[0], ev->features, 8);
5486 if (conn->state == BT_CONFIG) {
5489 /* If the local controller supports slave-initiated
5490 * features exchange, but the remote controller does
5491 * not, then it is possible that the error code 0x1a
5492 * for unsupported remote feature gets returned.
5494 * In this specific case, allow the connection to
5495 * transition into connected state and mark it as
5498 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5499 !conn->out && ev->status == 0x1a)
5502 status = ev->status;
5504 conn->state = BT_CONNECTED;
5505 hci_connect_cfm(conn, status);
5506 hci_conn_drop(conn);
5510 hci_dev_unlock(hdev);
/* HCI LE Long Term Key Request event: the controller needs the LTK to
 * start or resume encryption. Look up the key for this peer, verify
 * EDiv/Rand consistency (both must be zero for Secure Connections
 * keys), reply with the key padded to 16 bytes, and handle the
 * one-time nature of STKs by removing them after use. Falls back to a
 * negative reply when no usable key exists.
 */
5513 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5515 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5516 struct hci_cp_le_ltk_reply cp;
5517 struct hci_cp_le_ltk_neg_reply neg;
5518 struct hci_conn *conn;
5519 struct smp_ltk *ltk;
5521 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5525 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5529 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5533 if (smp_ltk_is_sc(ltk)) {
5534 /* With SC both EDiv and Rand are set to zero */
5535 if (ev->ediv || ev->rand)
5538 /* For non-SC keys check that EDiv and Rand match */
5539 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Pad short keys with zeros up to the 16-byte LTK field */
5543 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5544 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5545 cp.handle = cpu_to_le16(conn->handle);
5547 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5549 conn->enc_key_size = ltk->enc_size;
5551 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5553 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5554 * temporary key used to encrypt a connection following
5555 * pairing. It is used during the Encrypted Session Setup to
5556 * distribute the keys. Later, security can be re-established
5557 * using a distributed LTK.
5559 if (ltk->type == SMP_STK) {
5560 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5561 list_del_rcu(&ltk->list);
5562 kfree_rcu(ltk, rcu);
5564 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5567 hci_dev_unlock(hdev);
5572 neg.handle = ev->handle;
5573 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5574 hci_dev_unlock(hdev);
/* Helper: send an LE Remote Connection Parameter Request Negative
 * Reply for the given connection handle with the given reason.
 */
5577 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5580 struct hci_cp_le_conn_param_req_neg_reply cp;
5582 cp.handle = cpu_to_le16(handle);
5585 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* HCI LE Remote Connection Parameter Request event: the peer proposes
 * new connection parameters. Rejects the request when the connection
 * is unknown/not connected or the parameters fail validation; when we
 * are master, the accepted values are also stored in the per-device
 * connection parameters and userspace is informed of the new values.
 * Finally, a positive reply with the proposed parameters is sent.
 */
5589 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5590 struct sk_buff *skb)
5592 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5593 struct hci_cp_le_conn_param_req_reply cp;
5594 struct hci_conn *hcon;
5595 u16 handle, min, max, latency, timeout;
5597 handle = le16_to_cpu(ev->handle);
5598 min = le16_to_cpu(ev->interval_min);
5599 max = le16_to_cpu(ev->interval_max);
5600 latency = le16_to_cpu(ev->latency);
5601 timeout = le16_to_cpu(ev->timeout);
5603 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5604 if (!hcon || hcon->state != BT_CONNECTED)
5605 return send_conn_param_neg_reply(hdev, handle,
5606 HCI_ERROR_UNKNOWN_CONN_ID);
5608 if (hci_check_conn_params(min, max, latency, timeout))
5609 return send_conn_param_neg_reply(hdev, handle,
5610 HCI_ERROR_INVALID_LL_PARAMS);
5612 if (hcon->role == HCI_ROLE_MASTER) {
5613 struct hci_conn_params *params;
5618 params = hci_conn_params_lookup(hdev, &hcon->dst,
5621 params->conn_min_interval = min;
5622 params->conn_max_interval = max;
5623 params->conn_latency = latency;
5624 params->supervision_timeout = timeout;
5630 hci_dev_unlock(hdev);
5632 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5633 store_hint, min, max, latency, timeout);
/* Echo the peer's proposed values back in the positive reply */
5636 cp.handle = ev->handle;
5637 cp.interval_min = ev->interval_min;
5638 cp.interval_max = ev->interval_max;
5639 cp.latency = ev->latency;
5640 cp.timeout = ev->timeout;
5644 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* HCI LE Direct Advertising Report event: iterate over the packed
 * reports and hand each to process_adv_report() with the direct
 * (target) address set, and no AD data (directed PDUs carry none).
 */
5647 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5648 struct sk_buff *skb)
5650 u8 num_reports = skb->data[0];
5651 void *ptr = &skb->data[1];
5655 while (num_reports--) {
5656 struct hci_ev_le_direct_adv_info *ev = ptr;
5658 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5659 ev->bdaddr_type, &ev->direct_addr,
5660 ev->direct_addr_type, ev->rssi, NULL, 0);
5665 hci_dev_unlock(hdev);
/* HCI LE Meta event demultiplexer: strip the meta-event header and
 * dispatch to the handler matching the subevent code.
 */
5668 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5670 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5672 skb_pull(skb, sizeof(*le_ev));
5674 switch (le_ev->subevent) {
5675 case HCI_EV_LE_CONN_COMPLETE:
5676 hci_le_conn_complete_evt(hdev, skb);
5679 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5680 hci_le_conn_update_complete_evt(hdev, skb);
5683 case HCI_EV_LE_ADVERTISING_REPORT:
5684 hci_le_adv_report_evt(hdev, skb);
5687 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5688 hci_le_remote_feat_complete_evt(hdev, skb);
5691 case HCI_EV_LE_LTK_REQ:
5692 hci_le_ltk_request_evt(hdev, skb);
5695 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5696 hci_le_remote_conn_param_req_evt(hdev, skb);
5699 case HCI_EV_LE_DIRECT_ADV_REPORT:
5700 hci_le_direct_adv_report_evt(hdev, skb);
5703 case HCI_EV_LE_EXT_ADV_REPORT:
5704 hci_le_ext_adv_report_evt(hdev, skb);
5707 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5708 hci_le_enh_conn_complete_evt(hdev, skb);
5711 case HCI_EV_LE_EXT_ADV_SET_TERM:
5712 hci_le_ext_adv_term_evt(hdev, skb);
5720 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5721 u8 event, struct sk_buff *skb)
5723 struct hci_ev_cmd_complete *ev;
5724 struct hci_event_hdr *hdr;
5729 if (skb->len < sizeof(*hdr)) {
5730 bt_dev_err(hdev, "too short HCI event");
5734 hdr = (void *) skb->data;
5735 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5738 if (hdr->evt != event)
5743 /* Check if request ended in Command Status - no way to retreive
5744 * any extra parameters in this case.
5746 if (hdr->evt == HCI_EV_CMD_STATUS)
5749 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5750 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5755 if (skb->len < sizeof(*ev)) {
5756 bt_dev_err(hdev, "too short cmd_complete event");
5760 ev = (void *) skb->data;
5761 skb_pull(skb, sizeof(*ev));
5763 if (opcode != __le16_to_cpu(ev->opcode)) {
5764 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5765 __le16_to_cpu(ev->opcode));
5772 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5774 struct hci_event_hdr *hdr = (void *) skb->data;
5775 hci_req_complete_t req_complete = NULL;
5776 hci_req_complete_skb_t req_complete_skb = NULL;
5777 struct sk_buff *orig_skb = NULL;
5778 u8 status = 0, event = hdr->evt, req_evt = 0;
5779 u16 opcode = HCI_OP_NOP;
5781 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5782 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5783 opcode = __le16_to_cpu(cmd_hdr->opcode);
5784 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5789 /* If it looks like we might end up having to call
5790 * req_complete_skb, store a pristine copy of the skb since the
5791 * various handlers may modify the original one through
5792 * skb_pull() calls, etc.
5794 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5795 event == HCI_EV_CMD_COMPLETE)
5796 orig_skb = skb_clone(skb, GFP_KERNEL);
5798 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5801 case HCI_EV_INQUIRY_COMPLETE:
5802 hci_inquiry_complete_evt(hdev, skb);
5805 case HCI_EV_INQUIRY_RESULT:
5806 hci_inquiry_result_evt(hdev, skb);
5809 case HCI_EV_CONN_COMPLETE:
5810 hci_conn_complete_evt(hdev, skb);
5813 case HCI_EV_CONN_REQUEST:
5814 hci_conn_request_evt(hdev, skb);
5817 case HCI_EV_DISCONN_COMPLETE:
5818 hci_disconn_complete_evt(hdev, skb);
5821 case HCI_EV_AUTH_COMPLETE:
5822 hci_auth_complete_evt(hdev, skb);
5825 case HCI_EV_REMOTE_NAME:
5826 hci_remote_name_evt(hdev, skb);
5829 case HCI_EV_ENCRYPT_CHANGE:
5830 hci_encrypt_change_evt(hdev, skb);
5833 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5834 hci_change_link_key_complete_evt(hdev, skb);
5837 case HCI_EV_REMOTE_FEATURES:
5838 hci_remote_features_evt(hdev, skb);
5841 case HCI_EV_CMD_COMPLETE:
5842 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5843 &req_complete, &req_complete_skb);
5846 case HCI_EV_CMD_STATUS:
5847 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5851 case HCI_EV_HARDWARE_ERROR:
5852 hci_hardware_error_evt(hdev, skb);
5855 case HCI_EV_ROLE_CHANGE:
5856 hci_role_change_evt(hdev, skb);
5859 case HCI_EV_NUM_COMP_PKTS:
5860 hci_num_comp_pkts_evt(hdev, skb);
5863 case HCI_EV_MODE_CHANGE:
5864 hci_mode_change_evt(hdev, skb);
5867 case HCI_EV_PIN_CODE_REQ:
5868 hci_pin_code_request_evt(hdev, skb);
5871 case HCI_EV_LINK_KEY_REQ:
5872 hci_link_key_request_evt(hdev, skb);
5875 case HCI_EV_LINK_KEY_NOTIFY:
5876 hci_link_key_notify_evt(hdev, skb);
5879 case HCI_EV_CLOCK_OFFSET:
5880 hci_clock_offset_evt(hdev, skb);
5883 case HCI_EV_PKT_TYPE_CHANGE:
5884 hci_pkt_type_change_evt(hdev, skb);
5887 case HCI_EV_PSCAN_REP_MODE:
5888 hci_pscan_rep_mode_evt(hdev, skb);
5891 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5892 hci_inquiry_result_with_rssi_evt(hdev, skb);
5895 case HCI_EV_REMOTE_EXT_FEATURES:
5896 hci_remote_ext_features_evt(hdev, skb);
5899 case HCI_EV_SYNC_CONN_COMPLETE:
5900 hci_sync_conn_complete_evt(hdev, skb);
5903 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5904 hci_extended_inquiry_result_evt(hdev, skb);
5907 case HCI_EV_KEY_REFRESH_COMPLETE:
5908 hci_key_refresh_complete_evt(hdev, skb);
5911 case HCI_EV_IO_CAPA_REQUEST:
5912 hci_io_capa_request_evt(hdev, skb);
5915 case HCI_EV_IO_CAPA_REPLY:
5916 hci_io_capa_reply_evt(hdev, skb);
5919 case HCI_EV_USER_CONFIRM_REQUEST:
5920 hci_user_confirm_request_evt(hdev, skb);
5923 case HCI_EV_USER_PASSKEY_REQUEST:
5924 hci_user_passkey_request_evt(hdev, skb);
5927 case HCI_EV_USER_PASSKEY_NOTIFY:
5928 hci_user_passkey_notify_evt(hdev, skb);
5931 case HCI_EV_KEYPRESS_NOTIFY:
5932 hci_keypress_notify_evt(hdev, skb);
5935 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5936 hci_simple_pair_complete_evt(hdev, skb);
5939 case HCI_EV_REMOTE_HOST_FEATURES:
5940 hci_remote_host_features_evt(hdev, skb);
5943 case HCI_EV_LE_META:
5944 hci_le_meta_evt(hdev, skb);
5947 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5948 hci_remote_oob_data_request_evt(hdev, skb);
5951 #if IS_ENABLED(CONFIG_BT_HS)
5952 case HCI_EV_CHANNEL_SELECTED:
5953 hci_chan_selected_evt(hdev, skb);
5956 case HCI_EV_PHY_LINK_COMPLETE:
5957 hci_phy_link_complete_evt(hdev, skb);
5960 case HCI_EV_LOGICAL_LINK_COMPLETE:
5961 hci_loglink_complete_evt(hdev, skb);
5964 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5965 hci_disconn_loglink_complete_evt(hdev, skb);
5968 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5969 hci_disconn_phylink_complete_evt(hdev, skb);
5973 case HCI_EV_NUM_COMP_BLOCKS:
5974 hci_num_comp_blocks_evt(hdev, skb);
5978 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5983 req_complete(hdev, status, opcode);
5984 } else if (req_complete_skb) {
5985 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5986 kfree_skb(orig_skb);
5989 req_complete_skb(hdev, status, opcode, orig_skb);
5992 kfree_skb(orig_skb);
5994 hdev->stat.evt_rx++;