2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 /* Set discovery state to stopped if we're not doing LE active
61 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 hdev->le_scan_type != LE_SCAN_ACTIVE)
63 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
66 hci_conn_check_pending(hdev);
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 __u8 status = *((__u8 *) skb->data);
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
83 __u8 status = *((__u8 *) skb->data);
85 BT_DBG("%s status 0x%2.2x", hdev->name, status);
90 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
92 hci_conn_check_pending(hdev);
95 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
98 BT_DBG("%s", hdev->name);
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
103 struct hci_rp_role_discovery *rp = (void *) skb->data;
104 struct hci_conn *conn;
106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
115 conn->role = rp->role;
117 hci_dev_unlock(hdev);
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
122 struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 struct hci_conn *conn;
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
134 conn->link_policy = __le16_to_cpu(rp->policy);
136 hci_dev_unlock(hdev);
139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
141 struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 struct hci_conn *conn;
145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
156 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
158 conn->link_policy = get_unaligned_le16(sent + 2);
160 hci_dev_unlock(hdev);
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
166 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
168 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
173 hdev->link_policy = __le16_to_cpu(rp->policy);
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
179 __u8 status = *((__u8 *) skb->data);
182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
191 hdev->link_policy = get_unaligned_le16(sent);
194 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
196 __u8 status = *((__u8 *) skb->data);
198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
200 clear_bit(HCI_RESET, &hdev->flags);
205 /* Reset all non-persistent flags */
206 hci_dev_clear_volatile_flags(hdev);
208 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
210 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
211 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
213 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
214 hdev->adv_data_len = 0;
216 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
217 hdev->scan_rsp_data_len = 0;
219 hdev->le_scan_type = LE_SCAN_PASSIVE;
221 hdev->ssp_debug_mode = 0;
223 hci_bdaddr_list_clear(&hdev->le_white_list);
224 hci_bdaddr_list_clear(&hdev->le_resolv_list);
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
230 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 struct hci_cp_read_stored_link_key *sent;
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
235 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
239 if (!rp->status && sent->read_all == 0x01) {
240 hdev->stored_max_keys = rp->max_keys;
241 hdev->stored_num_keys = rp->num_keys;
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
250 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
255 if (rp->num_keys <= hdev->stored_num_keys)
256 hdev->stored_num_keys -= rp->num_keys;
258 hdev->stored_num_keys = 0;
261 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
263 __u8 status = *((__u8 *) skb->data);
266 BT_DBG("%s status 0x%2.2x", hdev->name, status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
274 if (hci_dev_test_flag(hdev, HCI_MGMT))
275 mgmt_set_local_name_complete(hdev, sent, status);
277 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
279 hci_dev_unlock(hdev);
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
284 struct hci_rp_read_local_name *rp = (void *) skb->data;
286 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
291 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 hci_dev_test_flag(hdev, HCI_CONFIG))
293 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
296 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
298 __u8 status = *((__u8 *) skb->data);
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
310 __u8 param = *((__u8 *) sent);
312 if (param == AUTH_ENABLED)
313 set_bit(HCI_AUTH, &hdev->flags);
315 clear_bit(HCI_AUTH, &hdev->flags);
318 if (hci_dev_test_flag(hdev, HCI_MGMT))
319 mgmt_auth_enable_complete(hdev, status);
321 hci_dev_unlock(hdev);
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
326 __u8 status = *((__u8 *) skb->data);
330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
335 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
339 param = *((__u8 *) sent);
342 set_bit(HCI_ENCRYPT, &hdev->flags);
344 clear_bit(HCI_ENCRYPT, &hdev->flags);
347 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
349 __u8 status = *((__u8 *) skb->data);
353 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
359 param = *((__u8 *) sent);
364 hdev->discov_timeout = 0;
368 if (param & SCAN_INQUIRY)
369 set_bit(HCI_ISCAN, &hdev->flags);
371 clear_bit(HCI_ISCAN, &hdev->flags);
373 if (param & SCAN_PAGE)
374 set_bit(HCI_PSCAN, &hdev->flags);
376 clear_bit(HCI_PSCAN, &hdev->flags);
379 hci_dev_unlock(hdev);
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
384 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
391 memcpy(hdev->dev_class, rp->dev_class, 3);
393 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 __u8 status = *((__u8 *) skb->data);
402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
411 memcpy(hdev->dev_class, sent, 3);
413 if (hci_dev_test_flag(hdev, HCI_MGMT))
414 mgmt_set_class_of_dev_complete(hdev, sent, status);
416 hci_dev_unlock(hdev);
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
421 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429 setting = __le16_to_cpu(rp->voice_setting);
431 if (hdev->voice_setting == setting)
434 hdev->voice_setting = setting;
436 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
439 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
445 __u8 status = *((__u8 *) skb->data);
449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
458 setting = get_unaligned_le16(sent);
460 if (hdev->voice_setting == setting)
463 hdev->voice_setting = setting;
465 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
468 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
474 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
476 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
481 hdev->num_iac = rp->num_iac;
483 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
486 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
488 __u8 status = *((__u8 *) skb->data);
489 struct hci_cp_write_ssp_mode *sent;
491 BT_DBG("%s status 0x%2.2x", hdev->name, status);
493 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
501 hdev->features[1][0] |= LMP_HOST_SSP;
503 hdev->features[1][0] &= ~LMP_HOST_SSP;
506 if (hci_dev_test_flag(hdev, HCI_MGMT))
507 mgmt_ssp_enable_complete(hdev, sent->mode, status);
510 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
512 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
515 hci_dev_unlock(hdev);
518 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
520 u8 status = *((u8 *) skb->data);
521 struct hci_cp_write_sc_support *sent;
523 BT_DBG("%s status 0x%2.2x", hdev->name, status);
525 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
533 hdev->features[1][0] |= LMP_HOST_SC;
535 hdev->features[1][0] &= ~LMP_HOST_SC;
538 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
540 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
542 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
545 hci_dev_unlock(hdev);
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
550 struct hci_rp_read_local_version *rp = (void *) skb->data;
552 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
557 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 hdev->hci_ver = rp->hci_ver;
560 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 hdev->lmp_ver = rp->lmp_ver;
562 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
570 struct hci_rp_read_local_commands *rp = (void *) skb->data;
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
577 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 hci_dev_test_flag(hdev, HCI_CONFIG))
579 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
582 static void hci_cc_read_local_features(struct hci_dev *hdev,
585 struct hci_rp_read_local_features *rp = (void *) skb->data;
587 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592 memcpy(hdev->features, rp->features, 8);
594 /* Adjust default settings according to features
595 * supported by device. */
597 if (hdev->features[0][0] & LMP_3SLOT)
598 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
600 if (hdev->features[0][0] & LMP_5SLOT)
601 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
603 if (hdev->features[0][1] & LMP_HV2) {
604 hdev->pkt_type |= (HCI_HV2);
605 hdev->esco_type |= (ESCO_HV2);
608 if (hdev->features[0][1] & LMP_HV3) {
609 hdev->pkt_type |= (HCI_HV3);
610 hdev->esco_type |= (ESCO_HV3);
613 if (lmp_esco_capable(hdev))
614 hdev->esco_type |= (ESCO_EV3);
616 if (hdev->features[0][4] & LMP_EV4)
617 hdev->esco_type |= (ESCO_EV4);
619 if (hdev->features[0][4] & LMP_EV5)
620 hdev->esco_type |= (ESCO_EV5);
622 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
623 hdev->esco_type |= (ESCO_2EV3);
625 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
626 hdev->esco_type |= (ESCO_3EV3);
628 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
629 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
632 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
635 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
637 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
642 if (hdev->max_page < rp->max_page)
643 hdev->max_page = rp->max_page;
645 if (rp->page < HCI_MAX_PAGES)
646 memcpy(hdev->features[rp->page], rp->features, 8);
649 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
652 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
654 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
659 hdev->flow_ctl_mode = rp->mode;
662 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
664 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
666 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
671 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
672 hdev->sco_mtu = rp->sco_mtu;
673 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
674 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
676 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
681 hdev->acl_cnt = hdev->acl_pkts;
682 hdev->sco_cnt = hdev->sco_pkts;
684 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
685 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
688 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
690 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
692 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
697 if (test_bit(HCI_INIT, &hdev->flags))
698 bacpy(&hdev->bdaddr, &rp->bdaddr);
700 if (hci_dev_test_flag(hdev, HCI_SETUP))
701 bacpy(&hdev->setup_addr, &rp->bdaddr);
704 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
707 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
709 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
714 if (test_bit(HCI_INIT, &hdev->flags)) {
715 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
716 hdev->page_scan_window = __le16_to_cpu(rp->window);
720 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
723 u8 status = *((u8 *) skb->data);
724 struct hci_cp_write_page_scan_activity *sent;
726 BT_DBG("%s status 0x%2.2x", hdev->name, status);
731 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
735 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
736 hdev->page_scan_window = __le16_to_cpu(sent->window);
739 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
742 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
744 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
749 if (test_bit(HCI_INIT, &hdev->flags))
750 hdev->page_scan_type = rp->type;
753 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
756 u8 status = *((u8 *) skb->data);
759 BT_DBG("%s status 0x%2.2x", hdev->name, status);
764 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
766 hdev->page_scan_type = *type;
769 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
772 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
774 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
779 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
780 hdev->block_len = __le16_to_cpu(rp->block_len);
781 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
783 hdev->block_cnt = hdev->num_blocks;
785 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
786 hdev->block_cnt, hdev->block_len);
789 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
791 struct hci_rp_read_clock *rp = (void *) skb->data;
792 struct hci_cp_read_clock *cp;
793 struct hci_conn *conn;
795 BT_DBG("%s", hdev->name);
797 if (skb->len < sizeof(*rp))
805 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
809 if (cp->which == 0x00) {
810 hdev->clock = le32_to_cpu(rp->clock);
814 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
816 conn->clock = le32_to_cpu(rp->clock);
817 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
821 hci_dev_unlock(hdev);
824 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
827 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
829 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
834 hdev->amp_status = rp->amp_status;
835 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
836 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
837 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
838 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
839 hdev->amp_type = rp->amp_type;
840 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
841 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
842 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
843 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
846 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
849 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
851 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
856 hdev->inq_tx_power = rp->tx_power;
859 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
861 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
862 struct hci_cp_pin_code_reply *cp;
863 struct hci_conn *conn;
865 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
869 if (hci_dev_test_flag(hdev, HCI_MGMT))
870 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
875 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
879 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
881 conn->pin_length = cp->pin_len;
884 hci_dev_unlock(hdev);
887 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
889 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
891 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
895 if (hci_dev_test_flag(hdev, HCI_MGMT))
896 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
899 hci_dev_unlock(hdev);
902 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
905 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
907 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
912 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
913 hdev->le_pkts = rp->le_max_pkt;
915 hdev->le_cnt = hdev->le_pkts;
917 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
920 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
923 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
925 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 memcpy(hdev->le_features, rp->features, 8);
933 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
936 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
938 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943 hdev->adv_tx_power = rp->tx_power;
946 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
948 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
950 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
954 if (hci_dev_test_flag(hdev, HCI_MGMT))
955 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
958 hci_dev_unlock(hdev);
961 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
964 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
966 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
970 if (hci_dev_test_flag(hdev, HCI_MGMT))
971 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
972 ACL_LINK, 0, rp->status);
974 hci_dev_unlock(hdev);
977 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
985 if (hci_dev_test_flag(hdev, HCI_MGMT))
986 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
989 hci_dev_unlock(hdev);
992 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1001 if (hci_dev_test_flag(hdev, HCI_MGMT))
1002 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1003 ACL_LINK, 0, rp->status);
1005 hci_dev_unlock(hdev);
1008 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1011 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1016 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1017 struct sk_buff *skb)
1019 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1021 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1024 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1026 __u8 status = *((__u8 *) skb->data);
1029 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1034 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1040 bacpy(&hdev->random_addr, sent);
1042 hci_dev_unlock(hdev);
1045 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1047 __u8 status = *((__u8 *) skb->data);
1048 struct hci_cp_le_set_default_phy *cp;
1050 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1055 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1061 hdev->le_tx_def_phys = cp->tx_phys;
1062 hdev->le_rx_def_phys = cp->rx_phys;
1064 hci_dev_unlock(hdev);
1067 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1068 struct sk_buff *skb)
1070 __u8 status = *((__u8 *) skb->data);
1071 struct hci_cp_le_set_adv_set_rand_addr *cp;
1072 struct adv_info *adv_instance;
1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1083 if (!hdev->cur_adv_instance) {
1084 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1085 bacpy(&hdev->random_addr, &cp->bdaddr);
1087 adv_instance = hci_find_adv_instance(hdev,
1088 hdev->cur_adv_instance);
1090 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1093 hci_dev_unlock(hdev);
1096 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1098 __u8 *sent, status = *((__u8 *) skb->data);
1100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1105 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1111 /* If we're doing connection initiation as peripheral. Set a
1112 * timeout in case something goes wrong.
1115 struct hci_conn *conn;
1117 hci_dev_set_flag(hdev, HCI_LE_ADV);
1119 conn = hci_lookup_le_connect(hdev);
1121 queue_delayed_work(hdev->workqueue,
1122 &conn->le_conn_timeout,
1123 conn->conn_timeout);
1125 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1128 hci_dev_unlock(hdev);
1131 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1132 struct sk_buff *skb)
1134 struct hci_cp_le_set_ext_adv_enable *cp;
1135 __u8 status = *((__u8 *) skb->data);
1137 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1142 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1149 struct hci_conn *conn;
1151 hci_dev_set_flag(hdev, HCI_LE_ADV);
1153 conn = hci_lookup_le_connect(hdev);
1155 queue_delayed_work(hdev->workqueue,
1156 &conn->le_conn_timeout,
1157 conn->conn_timeout);
1159 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1162 hci_dev_unlock(hdev);
1165 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1167 struct hci_cp_le_set_scan_param *cp;
1168 __u8 status = *((__u8 *) skb->data);
1170 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1175 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1181 hdev->le_scan_type = cp->type;
1183 hci_dev_unlock(hdev);
1186 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1187 struct sk_buff *skb)
1189 struct hci_cp_le_set_ext_scan_params *cp;
1190 __u8 status = *((__u8 *) skb->data);
1191 struct hci_cp_le_scan_phy_params *phy_param;
1193 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1198 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1202 phy_param = (void *)cp->data;
1206 hdev->le_scan_type = phy_param->type;
1208 hci_dev_unlock(hdev);
1211 static bool has_pending_adv_report(struct hci_dev *hdev)
1213 struct discovery_state *d = &hdev->discovery;
1215 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1218 static void clear_pending_adv_report(struct hci_dev *hdev)
1220 struct discovery_state *d = &hdev->discovery;
1222 bacpy(&d->last_adv_addr, BDADDR_ANY);
1223 d->last_adv_data_len = 0;
1226 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1227 u8 bdaddr_type, s8 rssi, u32 flags,
1230 struct discovery_state *d = &hdev->discovery;
1232 bacpy(&d->last_adv_addr, bdaddr);
1233 d->last_adv_addr_type = bdaddr_type;
1234 d->last_adv_rssi = rssi;
1235 d->last_adv_flags = flags;
1236 memcpy(d->last_adv_data, data, len);
1237 d->last_adv_data_len = len;
1240 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1245 case LE_SCAN_ENABLE:
1246 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1247 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1248 clear_pending_adv_report(hdev);
1251 case LE_SCAN_DISABLE:
1252 /* We do this here instead of when setting DISCOVERY_STOPPED
1253 * since the latter would potentially require waiting for
1254 * inquiry to stop too.
1256 if (has_pending_adv_report(hdev)) {
1257 struct discovery_state *d = &hdev->discovery;
1259 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1260 d->last_adv_addr_type, NULL,
1261 d->last_adv_rssi, d->last_adv_flags,
1263 d->last_adv_data_len, NULL, 0);
1266 /* Cancel this timer so that we don't try to disable scanning
1267 * when it's already disabled.
1269 cancel_delayed_work(&hdev->le_scan_disable);
1271 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1273 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1274 * interrupted scanning due to a connect request. Mark
1275 * therefore discovery as stopped. If this was not
1276 * because of a connect request advertising might have
1277 * been disabled because of active scanning, so
1278 * re-enable it again if necessary.
1280 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1281 #ifndef TIZEN_BT /* The below line is kernel bug. */
1282 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1284 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1286 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1287 hdev->discovery.state == DISCOVERY_FINDING)
1288 hci_req_reenable_advertising(hdev);
1293 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1298 hci_dev_unlock(hdev);
1301 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1302 struct sk_buff *skb)
1304 struct hci_cp_le_set_scan_enable *cp;
1305 __u8 status = *((__u8 *) skb->data);
1307 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1312 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1316 le_set_scan_enable_complete(hdev, cp->enable);
1319 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1320 struct sk_buff *skb)
1322 struct hci_cp_le_set_ext_scan_enable *cp;
1323 __u8 status = *((__u8 *) skb->data);
1325 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1330 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1334 le_set_scan_enable_complete(hdev, cp->enable);
1337 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1338 struct sk_buff *skb)
1340 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1342 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1348 hdev->le_num_of_adv_sets = rp->num_of_sets;
1351 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1352 struct sk_buff *skb)
1354 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1356 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1361 hdev->le_white_list_size = rp->size;
1364 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1365 struct sk_buff *skb)
1367 __u8 status = *((__u8 *) skb->data);
1369 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1374 hci_bdaddr_list_clear(&hdev->le_white_list);
1377 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1378 struct sk_buff *skb)
1380 struct hci_cp_le_add_to_white_list *sent;
1381 __u8 status = *((__u8 *) skb->data);
1383 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1388 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1392 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1396 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1397 struct sk_buff *skb)
1399 struct hci_cp_le_del_from_white_list *sent;
1400 __u8 status = *((__u8 *) skb->data);
1402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1407 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1411 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1415 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1416 struct sk_buff *skb)
1418 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1420 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1425 memcpy(hdev->le_states, rp->le_states, 8);
1428 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1429 struct sk_buff *skb)
1431 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1433 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1438 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1439 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1442 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1443 struct sk_buff *skb)
1445 struct hci_cp_le_write_def_data_len *sent;
1446 __u8 status = *((__u8 *) skb->data);
1448 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1453 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1457 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1458 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1461 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1462 struct sk_buff *skb)
1464 __u8 status = *((__u8 *) skb->data);
1466 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1471 hci_bdaddr_list_clear(&hdev->le_resolv_list);
1474 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1475 struct sk_buff *skb)
1477 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1479 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1484 hdev->le_resolv_list_size = rp->size;
1487 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1488 struct sk_buff *skb)
1490 __u8 *sent, status = *((__u8 *) skb->data);
1492 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1497 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1504 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1506 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1508 hci_dev_unlock(hdev);
1511 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1512 struct sk_buff *skb)
1514 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1516 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1521 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1522 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1523 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1524 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1527 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1528 struct sk_buff *skb)
1530 struct hci_cp_write_le_host_supported *sent;
1531 __u8 status = *((__u8 *) skb->data);
1533 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1538 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1545 hdev->features[1][0] |= LMP_HOST_LE;
1546 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1548 hdev->features[1][0] &= ~LMP_HOST_LE;
1549 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1550 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1554 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1556 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1558 hci_dev_unlock(hdev);
1561 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1563 struct hci_cp_le_set_adv_param *cp;
1564 u8 status = *((u8 *) skb->data);
1566 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1571 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1576 hdev->adv_addr_type = cp->own_address_type;
1577 hci_dev_unlock(hdev);
1580 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1582 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1583 struct hci_cp_le_set_ext_adv_params *cp;
1584 struct adv_info *adv_instance;
1586 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1591 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1596 hdev->adv_addr_type = cp->own_addr_type;
1597 if (!hdev->cur_adv_instance) {
1598 /* Store in hdev for instance 0 */
1599 hdev->adv_tx_power = rp->tx_power;
1601 adv_instance = hci_find_adv_instance(hdev,
1602 hdev->cur_adv_instance);
1604 adv_instance->tx_power = rp->tx_power;
1606 /* Update adv data as tx power is known now */
1607 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1608 hci_dev_unlock(hdev);
1612 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1613 struct sk_buff *skb)
1615 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1617 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1618 hdev->name, rp->status, rp->le_ext_opcode);
1620 mgmt_enable_rssi_cc(hdev, rp, rp->status);
1623 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1624 struct sk_buff *skb)
1626 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1628 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1629 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1631 mgmt_raw_rssi_response(hdev, rp, rp->status);
1634 static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
1635 struct sk_buff *skb)
1637 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
1639 BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);
1641 mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
1645 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
1646 struct sk_buff *skb)
1648 struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
1649 __u8 event_le_ext_sub_code;
1651 BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
1652 LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
1654 skb_pull(skb, sizeof(*ev));
1655 event_le_ext_sub_code = ev->event_le_ext_sub_code;
1657 switch (event_le_ext_sub_code) {
1658 case LE_RSSI_LINK_ALERT:
1659 hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
1667 static void hci_vendor_specific_evt(struct hci_dev *hdev, struct sk_buff *skb)
1669 struct hci_ev_vendor_specific *ev = (void *)skb->data;
1670 __u8 event_sub_code;
1672 BT_DBG("hci_vendor_specific_evt");
1674 skb_pull(skb, sizeof(*ev));
1675 event_sub_code = ev->event_sub_code;
1677 switch (event_sub_code) {
1678 case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
1679 hci_vendor_specific_group_ext_evt(hdev, skb);
1688 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1690 struct hci_rp_read_rssi *rp = (void *) skb->data;
1691 struct hci_conn *conn;
1693 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1700 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1702 conn->rssi = rp->rssi;
1704 hci_dev_unlock(hdev);
1707 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1709 struct hci_cp_read_tx_power *sent;
1710 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1711 struct hci_conn *conn;
1713 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1718 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1724 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1728 switch (sent->type) {
1730 conn->tx_power = rp->tx_power;
1733 conn->max_tx_power = rp->tx_power;
1738 hci_dev_unlock(hdev);
1741 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1743 u8 status = *((u8 *) skb->data);
1746 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1751 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1753 hdev->ssp_debug_mode = *mode;
1756 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1758 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1761 hci_conn_check_pending(hdev);
1765 set_bit(HCI_INQUIRY, &hdev->flags);
1768 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1770 struct hci_cp_create_conn *cp;
1771 struct hci_conn *conn;
1773 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1775 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1781 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1783 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1786 if (conn && conn->state == BT_CONNECT) {
1787 if (status != 0x0c || conn->attempt > 2) {
1788 conn->state = BT_CLOSED;
1789 hci_connect_cfm(conn, status);
1792 conn->state = BT_CONNECT2;
1796 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1799 bt_dev_err(hdev, "no memory for new connection");
1803 hci_dev_unlock(hdev);
1806 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1808 struct hci_cp_add_sco *cp;
1809 struct hci_conn *acl, *sco;
1812 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1817 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1821 handle = __le16_to_cpu(cp->handle);
1823 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1827 acl = hci_conn_hash_lookup_handle(hdev, handle);
1831 sco->state = BT_CLOSED;
1833 hci_connect_cfm(sco, status);
1838 hci_dev_unlock(hdev);
1841 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1843 struct hci_cp_auth_requested *cp;
1844 struct hci_conn *conn;
1846 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1851 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1857 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1859 if (conn->state == BT_CONFIG) {
1860 hci_connect_cfm(conn, status);
1861 hci_conn_drop(conn);
1865 hci_dev_unlock(hdev);
1868 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1870 struct hci_cp_set_conn_encrypt *cp;
1871 struct hci_conn *conn;
1873 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1878 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1884 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1886 if (conn->state == BT_CONFIG) {
1887 hci_connect_cfm(conn, status);
1888 hci_conn_drop(conn);
1892 hci_dev_unlock(hdev);
1895 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1896 struct hci_conn *conn)
1898 if (conn->state != BT_CONFIG || !conn->out)
1901 if (conn->pending_sec_level == BT_SECURITY_SDP)
1904 /* Only request authentication for SSP connections or non-SSP
1905 * devices with sec_level MEDIUM or HIGH or if MITM protection
1908 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1909 conn->pending_sec_level != BT_SECURITY_FIPS &&
1910 conn->pending_sec_level != BT_SECURITY_HIGH &&
1911 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1917 static int hci_resolve_name(struct hci_dev *hdev,
1918 struct inquiry_entry *e)
1920 struct hci_cp_remote_name_req cp;
1922 memset(&cp, 0, sizeof(cp));
1924 bacpy(&cp.bdaddr, &e->data.bdaddr);
1925 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1926 cp.pscan_mode = e->data.pscan_mode;
1927 cp.clock_offset = e->data.clock_offset;
1929 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1932 static bool hci_resolve_next_name(struct hci_dev *hdev)
1934 struct discovery_state *discov = &hdev->discovery;
1935 struct inquiry_entry *e;
1937 if (list_empty(&discov->resolve))
1940 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1944 if (hci_resolve_name(hdev, e) == 0) {
1945 e->name_state = NAME_PENDING;
1952 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1953 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1955 struct discovery_state *discov = &hdev->discovery;
1956 struct inquiry_entry *e;
1959 /* Update the mgmt connected state if necessary. Be careful with
1960 * conn objects that exist but are not (yet) connected however.
1961 * Only those in BT_CONFIG or BT_CONNECTED states can be
1962 * considered connected.
1965 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
1966 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1967 mgmt_device_connected(hdev, conn, 0, name, name_len);
1969 mgmt_device_name_update(hdev, bdaddr, name, name_len);
1973 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1974 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1975 mgmt_device_connected(hdev, conn, 0, name, name_len);
1978 if (discov->state == DISCOVERY_STOPPED)
1981 if (discov->state == DISCOVERY_STOPPING)
1982 goto discov_complete;
1984 if (discov->state != DISCOVERY_RESOLVING)
1987 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1988 /* If the device was not found in a list of found devices names of which
1989 * are pending. there is no need to continue resolving a next name as it
1990 * will be done upon receiving another Remote Name Request Complete
1997 e->name_state = NAME_KNOWN;
1998 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1999 e->data.rssi, name, name_len);
2001 e->name_state = NAME_NOT_KNOWN;
2004 if (hci_resolve_next_name(hdev))
2008 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2011 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2013 struct hci_cp_remote_name_req *cp;
2014 struct hci_conn *conn;
2016 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2018 /* If successful wait for the name req complete event before
2019 * checking for the need to do authentication */
2023 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2029 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2031 if (hci_dev_test_flag(hdev, HCI_MGMT))
2032 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2037 if (!hci_outgoing_auth_needed(hdev, conn))
2040 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2041 struct hci_cp_auth_requested auth_cp;
2043 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2045 auth_cp.handle = __cpu_to_le16(conn->handle);
2046 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2047 sizeof(auth_cp), &auth_cp);
2051 hci_dev_unlock(hdev);
2054 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2056 struct hci_cp_read_remote_features *cp;
2057 struct hci_conn *conn;
2059 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2064 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2070 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2072 if (conn->state == BT_CONFIG) {
2073 hci_connect_cfm(conn, status);
2074 hci_conn_drop(conn);
2078 hci_dev_unlock(hdev);
2081 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2083 struct hci_cp_read_remote_ext_features *cp;
2084 struct hci_conn *conn;
2086 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2091 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2097 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2099 if (conn->state == BT_CONFIG) {
2100 hci_connect_cfm(conn, status);
2101 hci_conn_drop(conn);
2105 hci_dev_unlock(hdev);
2108 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2110 struct hci_cp_setup_sync_conn *cp;
2111 struct hci_conn *acl, *sco;
2114 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2119 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2123 handle = __le16_to_cpu(cp->handle);
2125 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2129 acl = hci_conn_hash_lookup_handle(hdev, handle);
2133 sco->state = BT_CLOSED;
2135 hci_connect_cfm(sco, status);
2140 hci_dev_unlock(hdev);
2143 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2145 struct hci_cp_sniff_mode *cp;
2146 struct hci_conn *conn;
2148 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2153 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2159 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2161 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2163 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2164 hci_sco_setup(conn, status);
2167 hci_dev_unlock(hdev);
2170 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2172 struct hci_cp_exit_sniff_mode *cp;
2173 struct hci_conn *conn;
2175 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2180 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2186 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2188 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2190 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2191 hci_sco_setup(conn, status);
2194 hci_dev_unlock(hdev);
2197 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2199 struct hci_cp_disconnect *cp;
2200 struct hci_conn *conn;
2205 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2211 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2213 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2214 conn->dst_type, status);
2216 hci_dev_unlock(hdev);
2219 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2220 u8 peer_addr_type, u8 own_address_type,
2223 struct hci_conn *conn;
2225 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2230 /* Store the initiator and responder address information which
2231 * is needed for SMP. These values will not change during the
2232 * lifetime of the connection.
2234 conn->init_addr_type = own_address_type;
2235 if (own_address_type == ADDR_LE_DEV_RANDOM)
2236 bacpy(&conn->init_addr, &hdev->random_addr);
2238 bacpy(&conn->init_addr, &hdev->bdaddr);
2240 conn->resp_addr_type = peer_addr_type;
2241 bacpy(&conn->resp_addr, peer_addr);
2243 /* We don't want the connection attempt to stick around
2244 * indefinitely since LE doesn't have a page timeout concept
2245 * like BR/EDR. Set a timer for any connection that doesn't use
2246 * the white list for connecting.
2248 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2249 queue_delayed_work(conn->hdev->workqueue,
2250 &conn->le_conn_timeout,
2251 conn->conn_timeout);
2254 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2256 struct hci_cp_le_create_conn *cp;
2258 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2260 /* All connection failure handling is taken care of by the
2261 * hci_le_conn_failed function which is triggered by the HCI
2262 * request completion callbacks used for connecting.
2267 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2273 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2274 cp->own_address_type, cp->filter_policy);
2276 hci_dev_unlock(hdev);
2279 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2281 struct hci_cp_le_ext_create_conn *cp;
2283 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2285 /* All connection failure handling is taken care of by the
2286 * hci_le_conn_failed function which is triggered by the HCI
2287 * request completion callbacks used for connecting.
2292 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2298 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2299 cp->own_addr_type, cp->filter_policy);
2301 hci_dev_unlock(hdev);
2304 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2306 struct hci_cp_le_read_remote_features *cp;
2307 struct hci_conn *conn;
2309 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2314 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2320 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2322 if (conn->state == BT_CONFIG) {
2323 hci_connect_cfm(conn, status);
2324 hci_conn_drop(conn);
2328 hci_dev_unlock(hdev);
2331 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2333 struct hci_cp_le_start_enc *cp;
2334 struct hci_conn *conn;
2336 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2343 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2347 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2351 if (conn->state != BT_CONNECTED)
2354 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2355 hci_conn_drop(conn);
2358 hci_dev_unlock(hdev);
2361 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2363 struct hci_cp_switch_role *cp;
2364 struct hci_conn *conn;
2366 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2371 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2377 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2379 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2381 hci_dev_unlock(hdev);
2384 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2386 __u8 status = *((__u8 *) skb->data);
2387 struct discovery_state *discov = &hdev->discovery;
2388 struct inquiry_entry *e;
2390 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2392 hci_conn_check_pending(hdev);
2394 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2397 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2398 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2400 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2405 if (discov->state != DISCOVERY_FINDING)
2408 if (list_empty(&discov->resolve)) {
2409 /* When BR/EDR inquiry is active and no LE scanning is in
2410 * progress, then change discovery state to indicate completion.
2412 * When running LE scanning and BR/EDR inquiry simultaneously
2413 * and the LE scan already finished, then change the discovery
2414 * state to indicate completion.
2416 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2417 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2418 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2422 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2423 if (e && hci_resolve_name(hdev, e) == 0) {
2424 e->name_state = NAME_PENDING;
2425 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2427 /* When BR/EDR inquiry is active and no LE scanning is in
2428 * progress, then change discovery state to indicate completion.
2430 * When running LE scanning and BR/EDR inquiry simultaneously
2431 * and the LE scan already finished, then change the discovery
2432 * state to indicate completion.
2434 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2435 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2436 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2440 hci_dev_unlock(hdev);
2443 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2445 struct inquiry_data data;
2446 struct inquiry_info *info = (void *) (skb->data + 1);
2447 int num_rsp = *((__u8 *) skb->data);
2449 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2454 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2459 for (; num_rsp; num_rsp--, info++) {
2462 bacpy(&data.bdaddr, &info->bdaddr);
2463 data.pscan_rep_mode = info->pscan_rep_mode;
2464 data.pscan_period_mode = info->pscan_period_mode;
2465 data.pscan_mode = info->pscan_mode;
2466 memcpy(data.dev_class, info->dev_class, 3);
2467 data.clock_offset = info->clock_offset;
2468 data.rssi = HCI_RSSI_INVALID;
2469 data.ssp_mode = 0x00;
2471 flags = hci_inquiry_cache_update(hdev, &data, false);
2473 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2474 info->dev_class, HCI_RSSI_INVALID,
2475 flags, NULL, 0, NULL, 0);
2478 hci_dev_unlock(hdev);
2481 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2483 struct hci_ev_conn_complete *ev = (void *) skb->data;
2484 struct hci_conn *conn;
2486 BT_DBG("%s", hdev->name);
2490 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2492 if (ev->link_type != SCO_LINK)
2495 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2499 conn->type = SCO_LINK;
2503 conn->handle = __le16_to_cpu(ev->handle);
2505 if (conn->type == ACL_LINK) {
2506 conn->state = BT_CONFIG;
2507 hci_conn_hold(conn);
2509 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2510 !hci_find_link_key(hdev, &ev->bdaddr))
2511 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2513 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2515 conn->state = BT_CONNECTED;
2517 hci_debugfs_create_conn(conn);
2518 hci_conn_add_sysfs(conn);
2520 if (test_bit(HCI_AUTH, &hdev->flags))
2521 set_bit(HCI_CONN_AUTH, &conn->flags);
2523 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2524 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2526 /* Get remote features */
2527 if (conn->type == ACL_LINK) {
2528 struct hci_cp_read_remote_features cp;
2529 cp.handle = ev->handle;
2530 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2533 hci_req_update_scan(hdev);
2536 /* Set packet type for incoming connection */
2537 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2538 struct hci_cp_change_conn_ptype cp;
2539 cp.handle = ev->handle;
2540 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2541 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2545 conn->state = BT_CLOSED;
2546 if (conn->type == ACL_LINK)
2547 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2548 conn->dst_type, ev->status);
2551 if (conn->type == ACL_LINK)
2552 hci_sco_setup(conn, ev->status);
2555 hci_connect_cfm(conn, ev->status);
2557 } else if (ev->link_type != ACL_LINK)
2558 hci_connect_cfm(conn, ev->status);
2561 hci_dev_unlock(hdev);
2563 hci_conn_check_pending(hdev);
2566 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2568 struct hci_cp_reject_conn_req cp;
2570 bacpy(&cp.bdaddr, bdaddr);
2571 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2572 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2575 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2577 struct hci_ev_conn_request *ev = (void *) skb->data;
2578 int mask = hdev->link_mode;
2579 struct inquiry_entry *ie;
2580 struct hci_conn *conn;
2583 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2586 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2589 if (!(mask & HCI_LM_ACCEPT)) {
2590 hci_reject_conn(hdev, &ev->bdaddr);
2594 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2596 hci_reject_conn(hdev, &ev->bdaddr);
2600 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2601 * connection. These features are only touched through mgmt so
2602 * only do the checks if HCI_MGMT is set.
2604 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2605 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2606 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2608 hci_reject_conn(hdev, &ev->bdaddr);
2612 /* Connection accepted */
2616 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2618 memcpy(ie->data.dev_class, ev->dev_class, 3);
2620 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2623 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2626 bt_dev_err(hdev, "no memory for new connection");
2627 hci_dev_unlock(hdev);
2632 memcpy(conn->dev_class, ev->dev_class, 3);
2634 hci_dev_unlock(hdev);
2636 if (ev->link_type == ACL_LINK ||
2637 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2638 struct hci_cp_accept_conn_req cp;
2639 conn->state = BT_CONNECT;
2641 bacpy(&cp.bdaddr, &ev->bdaddr);
2643 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2644 cp.role = 0x00; /* Become master */
2646 cp.role = 0x01; /* Remain slave */
2648 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2649 } else if (!(flags & HCI_PROTO_DEFER)) {
2650 struct hci_cp_accept_sync_conn_req cp;
2651 conn->state = BT_CONNECT;
2653 bacpy(&cp.bdaddr, &ev->bdaddr);
2654 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2656 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2657 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2658 cp.max_latency = cpu_to_le16(0xffff);
2659 cp.content_format = cpu_to_le16(hdev->voice_setting);
2660 cp.retrans_effort = 0xff;
2662 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2665 conn->state = BT_CONNECT2;
2666 hci_connect_cfm(conn, 0);
2670 static u8 hci_to_mgmt_reason(u8 err)
2673 case HCI_ERROR_CONNECTION_TIMEOUT:
2674 return MGMT_DEV_DISCONN_TIMEOUT;
2675 case HCI_ERROR_REMOTE_USER_TERM:
2676 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2677 case HCI_ERROR_REMOTE_POWER_OFF:
2678 return MGMT_DEV_DISCONN_REMOTE;
2679 case HCI_ERROR_LOCAL_HOST_TERM:
2680 return MGMT_DEV_DISCONN_LOCAL_HOST;
2682 return MGMT_DEV_DISCONN_UNKNOWN;
2686 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2688 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2690 struct hci_conn_params *params;
2691 struct hci_conn *conn;
2692 bool mgmt_connected;
2695 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2699 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2704 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2705 conn->dst_type, ev->status);
2709 conn->state = BT_CLOSED;
2711 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2713 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2714 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2716 reason = hci_to_mgmt_reason(ev->reason);
2718 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2719 reason, mgmt_connected);
2721 if (conn->type == ACL_LINK) {
2722 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2723 hci_remove_link_key(hdev, &conn->dst);
2725 hci_req_update_scan(hdev);
2728 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2730 switch (params->auto_connect) {
2731 case HCI_AUTO_CONN_LINK_LOSS:
2732 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2736 case HCI_AUTO_CONN_DIRECT:
2737 case HCI_AUTO_CONN_ALWAYS:
2738 list_del_init(¶ms->action);
2739 list_add(¶ms->action, &hdev->pend_le_conns);
2740 hci_update_background_scan(hdev);
2750 hci_disconn_cfm(conn, ev->reason);
2753 /* Re-enable advertising if necessary, since it might
2754 * have been disabled by the connection. From the
2755 * HCI_LE_Set_Advertise_Enable command description in
2756 * the core specification (v4.0):
2757 * "The Controller shall continue advertising until the Host
2758 * issues an LE_Set_Advertise_Enable command with
2759 * Advertising_Enable set to 0x00 (Advertising is disabled)
2760 * or until a connection is created or until the Advertising
2761 * is timed out due to Directed Advertising."
2763 if (type == LE_LINK)
2764 hci_req_reenable_advertising(hdev);
2767 hci_dev_unlock(hdev);
2770 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2772 struct hci_ev_auth_complete *ev = (void *) skb->data;
2773 struct hci_conn *conn;
2775 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2779 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2784 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2786 if (!hci_conn_ssp_enabled(conn) &&
2787 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2788 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2790 set_bit(HCI_CONN_AUTH, &conn->flags);
2791 conn->sec_level = conn->pending_sec_level;
2794 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2795 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2797 mgmt_auth_failed(conn, ev->status);
2800 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2801 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2803 if (conn->state == BT_CONFIG) {
2804 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2805 struct hci_cp_set_conn_encrypt cp;
2806 cp.handle = ev->handle;
2808 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2811 conn->state = BT_CONNECTED;
2812 hci_connect_cfm(conn, ev->status);
2813 hci_conn_drop(conn);
2816 hci_auth_cfm(conn, ev->status);
2818 hci_conn_hold(conn);
2819 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2820 hci_conn_drop(conn);
2823 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2825 struct hci_cp_set_conn_encrypt cp;
2826 cp.handle = ev->handle;
2828 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2831 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2832 hci_encrypt_cfm(conn, ev->status, 0x00);
2837 hci_dev_unlock(hdev);
2840 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2842 struct hci_ev_remote_name *ev = (void *) skb->data;
2843 struct hci_conn *conn;
2845 BT_DBG("%s", hdev->name);
2847 hci_conn_check_pending(hdev);
2851 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2853 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2856 if (ev->status == 0)
2857 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2858 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2860 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2866 if (!hci_outgoing_auth_needed(hdev, conn))
2869 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2870 struct hci_cp_auth_requested cp;
2872 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2874 cp.handle = __cpu_to_le16(conn->handle);
2875 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2879 hci_dev_unlock(hdev);
2882 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2883 u16 opcode, struct sk_buff *skb)
2885 const struct hci_rp_read_enc_key_size *rp;
2886 struct hci_conn *conn;
2889 BT_DBG("%s status 0x%02x", hdev->name, status);
2891 if (!skb || skb->len < sizeof(*rp)) {
2892 bt_dev_err(hdev, "invalid read key size response");
2896 rp = (void *)skb->data;
2897 handle = le16_to_cpu(rp->handle);
2901 conn = hci_conn_hash_lookup_handle(hdev, handle);
2905 /* If we fail to read the encryption key size, assume maximum
2906 * (which is the same we do also when this HCI command isn't
2910 bt_dev_err(hdev, "failed to read key size for handle %u",
2912 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2914 conn->enc_key_size = rp->key_size;
2917 if (conn->state == BT_CONFIG) {
2918 conn->state = BT_CONNECTED;
2919 hci_connect_cfm(conn, 0);
2920 hci_conn_drop(conn);
2924 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2926 else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2931 hci_encrypt_cfm(conn, 0, encrypt);
2935 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Change event.
 *
 * Looks up the connection by handle, updates its security flags on
 * success (authentication, encryption, AES-CCM, FIPS), tears the link
 * down on encryption failure, and for encrypted ACL links asks the
 * controller for the negotiated encryption key size. Runs under
 * hci_dev lock (unlock visible at the end of this block).
 */
2938 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2940 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2941 struct hci_conn *conn;
2943 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2947 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2953 /* Encryption implies authentication */
2954 set_bit(HCI_CONN_AUTH, &conn->flags);
2955 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2956 conn->sec_level = conn->pending_sec_level;
2958 /* P-256 authentication key implies FIPS */
2959 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2960 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links always use AES-CCM */
2962 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2963 conn->type == LE_LINK)
2964 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2966 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2967 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2971 /* We should disregard the current RPA and generate a new one
2972 * whenever the encryption procedure fails.
2974 if (ev->status && conn->type == LE_LINK) {
2975 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2976 hci_adv_instances_set_rpa_expired(hdev, true);
2979 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* On failure, flag key-missing separately so userspace can re-pair,
 * then drop the link with an authentication failure reason.
 */
2981 if (ev->status && conn->state == BT_CONNECTED) {
2982 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2983 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2985 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2986 hci_conn_drop(conn);
2990 /* In Secure Connections Only mode, do not allow any connections
2991 * that are not encrypted with AES-CCM using a P-256 authenticated
2994 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
2995 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2996 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2997 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2998 hci_conn_drop(conn);
3002 /* Try reading the encryption key size for encrypted ACL links */
3003 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3004 struct hci_cp_read_enc_key_size cp;
3005 struct hci_request req;
3007 /* Only send HCI_Read_Encryption_Key_Size if the
3008 * controller really supports it. If it doesn't, assume
3009 * the default size (16).
/* commands[20] bit 0x10 is the supported-commands bit for
 * HCI_Read_Encryption_Key_Size.
 */
3011 if (!(hdev->commands[20] & 0x10)) {
3012 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3016 hci_req_init(&req, hdev);
3018 cp.handle = cpu_to_le16(conn->handle);
3019 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
/* If the request cannot be queued, fall back to the default size */
3021 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3022 bt_dev_err(hdev, "sending read key size failed");
3023 conn->enc_key_size = HCI_LINK_KEY_SIZE;
/* Connection setup finishing under encryption: promote to BT_CONNECTED
 * and notify; otherwise report the encryption change to upper layers.
 */
3031 if (conn->state == BT_CONFIG) {
3033 conn->state = BT_CONNECTED;
3035 hci_connect_cfm(conn, ev->status);
3036 hci_conn_drop(conn);
3038 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
3041 hci_dev_unlock(hdev);
/* Handle the HCI Change Connection Link Key Complete event: mark the
 * connection secure, clear the pending-authentication flag and notify
 * upper layers of the key change result.
 */
3044 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3045 struct sk_buff *skb)
3047 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3048 struct hci_conn *conn;
3050 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3054 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3057 set_bit(HCI_CONN_SECURE, &conn->flags);
3059 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3061 hci_key_change_cfm(conn, ev->status);
3064 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote features, then continues connection
 * setup: request extended features if both sides support them,
 * otherwise request the remote name (or notify mgmt that the device
 * is connected) and finish setup when no outgoing authentication is
 * needed.
 */
3067 static void hci_remote_features_evt(struct hci_dev *hdev,
3068 struct sk_buff *skb)
3070 struct hci_ev_remote_features *ev = (void *) skb->data;
3071 struct hci_conn *conn;
3073 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3077 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3082 memcpy(conn->features[0], ev->features, 8);
/* Only drive further setup while the link is still being configured */
3084 if (conn->state != BT_CONFIG)
3087 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3088 lmp_ext_feat_capable(conn)) {
3089 struct hci_cp_read_remote_ext_features cp;
/* ev->handle is already little-endian; passed through unchanged */
3090 cp.handle = ev->handle;
3092 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* No extended features: resolve the remote name before telling mgmt,
 * unless mgmt already knows about this connection.
 */
3097 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3098 struct hci_cp_remote_name_req cp;
3099 memset(&cp, 0, sizeof(cp));
3100 bacpy(&cp.bdaddr, &conn->dst);
3101 cp.pscan_rep_mode = 0x02;
3102 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3103 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3104 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3106 if (!hci_outgoing_auth_needed(hdev, conn)) {
3107 conn->state = BT_CONNECTED;
3108 hci_connect_cfm(conn, ev->status);
3109 hci_conn_drop(conn);
3113 hci_dev_unlock(hdev);
/* Handle the HCI Command Complete event.
 *
 * Extracts the completed opcode and its status byte, strips the event
 * header, and dispatches to the matching hci_cc_* completion handler.
 * Afterwards it cancels the command timeout, re-arms the command
 * credit counter from ev->ncmd, resolves any pending hci_request and
 * kicks the command queue. The opcode/status and request-completion
 * callbacks are returned to the caller via the out parameters.
 */
3116 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3117 u16 *opcode, u8 *status,
3118 hci_req_complete_t *req_complete,
3119 hci_req_complete_skb_t *req_complete_skb)
3121 struct hci_ev_cmd_complete *ev = (void *) skb->data;
3123 *opcode = __le16_to_cpu(ev->opcode);
/* The first byte of the return parameters is always the status */
3124 *status = skb->data[sizeof(*ev)];
3126 skb_pull(skb, sizeof(*ev));
/* Dispatch on the completed command opcode */
3129 case HCI_OP_INQUIRY_CANCEL:
3130 hci_cc_inquiry_cancel(hdev, skb);
3133 case HCI_OP_PERIODIC_INQ:
3134 hci_cc_periodic_inq(hdev, skb);
3137 case HCI_OP_EXIT_PERIODIC_INQ:
3138 hci_cc_exit_periodic_inq(hdev, skb);
3141 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3142 hci_cc_remote_name_req_cancel(hdev, skb);
3145 case HCI_OP_ROLE_DISCOVERY:
3146 hci_cc_role_discovery(hdev, skb);
3149 case HCI_OP_READ_LINK_POLICY:
3150 hci_cc_read_link_policy(hdev, skb);
3153 case HCI_OP_WRITE_LINK_POLICY:
3154 hci_cc_write_link_policy(hdev, skb);
3157 case HCI_OP_READ_DEF_LINK_POLICY:
3158 hci_cc_read_def_link_policy(hdev, skb);
3161 case HCI_OP_WRITE_DEF_LINK_POLICY:
3162 hci_cc_write_def_link_policy(hdev, skb);
3166 hci_cc_reset(hdev, skb);
3169 case HCI_OP_READ_STORED_LINK_KEY:
3170 hci_cc_read_stored_link_key(hdev, skb);
3173 case HCI_OP_DELETE_STORED_LINK_KEY:
3174 hci_cc_delete_stored_link_key(hdev, skb);
3177 case HCI_OP_WRITE_LOCAL_NAME:
3178 hci_cc_write_local_name(hdev, skb);
3181 case HCI_OP_READ_LOCAL_NAME:
3182 hci_cc_read_local_name(hdev, skb);
3185 case HCI_OP_WRITE_AUTH_ENABLE:
3186 hci_cc_write_auth_enable(hdev, skb);
3189 case HCI_OP_WRITE_ENCRYPT_MODE:
3190 hci_cc_write_encrypt_mode(hdev, skb);
3193 case HCI_OP_WRITE_SCAN_ENABLE:
3194 hci_cc_write_scan_enable(hdev, skb);
3197 case HCI_OP_READ_CLASS_OF_DEV:
3198 hci_cc_read_class_of_dev(hdev, skb);
3201 case HCI_OP_WRITE_CLASS_OF_DEV:
3202 hci_cc_write_class_of_dev(hdev, skb);
3205 case HCI_OP_READ_VOICE_SETTING:
3206 hci_cc_read_voice_setting(hdev, skb);
3209 case HCI_OP_WRITE_VOICE_SETTING:
3210 hci_cc_write_voice_setting(hdev, skb);
3213 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3214 hci_cc_read_num_supported_iac(hdev, skb);
3217 case HCI_OP_WRITE_SSP_MODE:
3218 hci_cc_write_ssp_mode(hdev, skb);
3221 case HCI_OP_WRITE_SC_SUPPORT:
3222 hci_cc_write_sc_support(hdev, skb);
3225 case HCI_OP_READ_LOCAL_VERSION:
3226 hci_cc_read_local_version(hdev, skb);
3229 case HCI_OP_READ_LOCAL_COMMANDS:
3230 hci_cc_read_local_commands(hdev, skb);
3233 case HCI_OP_READ_LOCAL_FEATURES:
3234 hci_cc_read_local_features(hdev, skb);
3237 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3238 hci_cc_read_local_ext_features(hdev, skb);
3241 case HCI_OP_READ_BUFFER_SIZE:
3242 hci_cc_read_buffer_size(hdev, skb);
3245 case HCI_OP_READ_BD_ADDR:
3246 hci_cc_read_bd_addr(hdev, skb);
3249 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3250 hci_cc_read_page_scan_activity(hdev, skb);
3253 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3254 hci_cc_write_page_scan_activity(hdev, skb);
3257 case HCI_OP_READ_PAGE_SCAN_TYPE:
3258 hci_cc_read_page_scan_type(hdev, skb);
3261 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3262 hci_cc_write_page_scan_type(hdev, skb);
3265 case HCI_OP_READ_DATA_BLOCK_SIZE:
3266 hci_cc_read_data_block_size(hdev, skb);
3269 case HCI_OP_READ_FLOW_CONTROL_MODE:
3270 hci_cc_read_flow_control_mode(hdev, skb);
3273 case HCI_OP_READ_LOCAL_AMP_INFO:
3274 hci_cc_read_local_amp_info(hdev, skb);
3277 case HCI_OP_READ_CLOCK:
3278 hci_cc_read_clock(hdev, skb);
3281 case HCI_OP_READ_INQ_RSP_TX_POWER:
3282 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3285 case HCI_OP_PIN_CODE_REPLY:
3286 hci_cc_pin_code_reply(hdev, skb);
3289 case HCI_OP_PIN_CODE_NEG_REPLY:
3290 hci_cc_pin_code_neg_reply(hdev, skb);
3293 case HCI_OP_READ_LOCAL_OOB_DATA:
3294 hci_cc_read_local_oob_data(hdev, skb);
3297 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3298 hci_cc_read_local_oob_ext_data(hdev, skb);
3301 case HCI_OP_LE_READ_BUFFER_SIZE:
3302 hci_cc_le_read_buffer_size(hdev, skb);
3305 case HCI_OP_LE_READ_LOCAL_FEATURES:
3306 hci_cc_le_read_local_features(hdev, skb);
3309 case HCI_OP_LE_READ_ADV_TX_POWER:
3310 hci_cc_le_read_adv_tx_power(hdev, skb);
3313 case HCI_OP_USER_CONFIRM_REPLY:
3314 hci_cc_user_confirm_reply(hdev, skb);
3317 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3318 hci_cc_user_confirm_neg_reply(hdev, skb);
3321 case HCI_OP_USER_PASSKEY_REPLY:
3322 hci_cc_user_passkey_reply(hdev, skb);
3325 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3326 hci_cc_user_passkey_neg_reply(hdev, skb);
3329 case HCI_OP_LE_SET_RANDOM_ADDR:
3330 hci_cc_le_set_random_addr(hdev, skb);
3333 case HCI_OP_LE_SET_ADV_ENABLE:
3334 hci_cc_le_set_adv_enable(hdev, skb);
3337 case HCI_OP_LE_SET_SCAN_PARAM:
3338 hci_cc_le_set_scan_param(hdev, skb);
3341 case HCI_OP_LE_SET_SCAN_ENABLE:
3342 hci_cc_le_set_scan_enable(hdev, skb);
3345 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3346 hci_cc_le_read_white_list_size(hdev, skb);
3349 case HCI_OP_LE_CLEAR_WHITE_LIST:
3350 hci_cc_le_clear_white_list(hdev, skb);
3353 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3354 hci_cc_le_add_to_white_list(hdev, skb);
3357 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3358 hci_cc_le_del_from_white_list(hdev, skb);
3361 case HCI_OP_LE_READ_SUPPORTED_STATES:
3362 hci_cc_le_read_supported_states(hdev, skb);
3365 case HCI_OP_LE_READ_DEF_DATA_LEN:
3366 hci_cc_le_read_def_data_len(hdev, skb);
3369 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3370 hci_cc_le_write_def_data_len(hdev, skb);
3373 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3374 hci_cc_le_clear_resolv_list(hdev, skb);
3377 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3378 hci_cc_le_read_resolv_list_size(hdev, skb);
3381 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3382 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3385 case HCI_OP_LE_READ_MAX_DATA_LEN:
3386 hci_cc_le_read_max_data_len(hdev, skb);
3389 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3390 hci_cc_write_le_host_supported(hdev, skb);
3393 case HCI_OP_LE_SET_ADV_PARAM:
3394 hci_cc_set_adv_param(hdev, skb);
3397 case HCI_OP_READ_RSSI:
3398 hci_cc_read_rssi(hdev, skb);
3401 case HCI_OP_READ_TX_POWER:
3402 hci_cc_read_tx_power(hdev, skb);
3405 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3406 hci_cc_write_ssp_debug_mode(hdev, skb);
3409 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3410 hci_cc_le_set_ext_scan_param(hdev, skb);
3413 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3414 hci_cc_le_set_ext_scan_enable(hdev, skb);
3417 case HCI_OP_LE_SET_DEFAULT_PHY:
3418 hci_cc_le_set_default_phy(hdev, skb);
3421 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3422 hci_cc_le_read_num_adv_sets(hdev, skb);
3425 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3426 hci_cc_set_ext_adv_param(hdev, skb);
3429 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3430 hci_cc_le_set_ext_adv_enable(hdev, skb);
3433 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3434 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3437 case HCI_OP_ENABLE_RSSI:
3438 hci_cc_enable_rssi(hdev, skb);
3441 case HCI_OP_GET_RAW_RSSI:
3442 hci_cc_get_raw_rssi(hdev, skb);
/* Unhandled opcode: just log it */
3446 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* A real command completed, so the watchdog timeout can be cancelled */
3450 if (*opcode != HCI_OP_NOP)
3451 cancel_delayed_work(&hdev->cmd_timer);
/* Re-credit the command queue unless a reset is in progress */
3453 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3454 atomic_set(&hdev->cmd_cnt, 1);
3456 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3459 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3461 "unexpected event for opcode 0x%4.4x", *opcode);
/* Push the next queued command if we have credits */
3465 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3466 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event.
 *
 * Mirrors hci_cmd_complete_evt but for commands that report progress
 * via Command Status: dispatches to the matching hci_cs_* handler with
 * the status byte, cancels the command timeout, re-arms the command
 * credit counter and kicks the command queue.
 */
3469 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3470 u16 *opcode, u8 *status,
3471 hci_req_complete_t *req_complete,
3472 hci_req_complete_skb_t *req_complete_skb)
3474 struct hci_ev_cmd_status *ev = (void *) skb->data;
3476 skb_pull(skb, sizeof(*ev));
3478 *opcode = __le16_to_cpu(ev->opcode);
3479 *status = ev->status;
/* Dispatch on the opcode of the command whose status arrived */
3482 case HCI_OP_INQUIRY:
3483 hci_cs_inquiry(hdev, ev->status);
3486 case HCI_OP_CREATE_CONN:
3487 hci_cs_create_conn(hdev, ev->status);
3490 case HCI_OP_DISCONNECT:
3491 hci_cs_disconnect(hdev, ev->status);
3494 case HCI_OP_ADD_SCO:
3495 hci_cs_add_sco(hdev, ev->status);
3498 case HCI_OP_AUTH_REQUESTED:
3499 hci_cs_auth_requested(hdev, ev->status);
3502 case HCI_OP_SET_CONN_ENCRYPT:
3503 hci_cs_set_conn_encrypt(hdev, ev->status);
3506 case HCI_OP_REMOTE_NAME_REQ:
3507 hci_cs_remote_name_req(hdev, ev->status);
3510 case HCI_OP_READ_REMOTE_FEATURES:
3511 hci_cs_read_remote_features(hdev, ev->status);
3514 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3515 hci_cs_read_remote_ext_features(hdev, ev->status);
3518 case HCI_OP_SETUP_SYNC_CONN:
3519 hci_cs_setup_sync_conn(hdev, ev->status);
3522 case HCI_OP_SNIFF_MODE:
3523 hci_cs_sniff_mode(hdev, ev->status);
3526 case HCI_OP_EXIT_SNIFF_MODE:
3527 hci_cs_exit_sniff_mode(hdev, ev->status);
3530 case HCI_OP_SWITCH_ROLE:
3531 hci_cs_switch_role(hdev, ev->status);
3534 case HCI_OP_LE_CREATE_CONN:
3535 hci_cs_le_create_conn(hdev, ev->status);
3538 case HCI_OP_LE_READ_REMOTE_FEATURES:
3539 hci_cs_le_read_remote_features(hdev, ev->status);
3542 case HCI_OP_LE_START_ENC:
3543 hci_cs_le_start_enc(hdev, ev->status);
3546 case HCI_OP_LE_EXT_CREATE_CONN:
3547 hci_cs_le_ext_create_conn(hdev, ev->status);
/* Unhandled opcode: just log it */
3551 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3555 if (*opcode != HCI_OP_NOP)
3556 cancel_delayed_work(&hdev->cmd_timer);
/* Re-credit the command queue unless a reset is in progress */
3558 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3559 atomic_set(&hdev->cmd_cnt, 1);
3561 /* Indicate request completion if the command failed. Also, if
3562 * we're not waiting for a special event and we get a success
3563 * command status we should try to flag the request as completed
3564 * (since for this kind of commands there will not be a command
3568 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3569 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
3572 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3574 "unexpected event for opcode 0x%4.4x", *opcode);
/* Push the next queued command if we have credits */
3578 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3579 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: report the error code to mgmt,
 * record it on the device and schedule an error-driven reset.
 */
3582 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3584 struct hci_ev_hardware_error *ev = (void *) skb->data;
3588 mgmt_hardware_error(hdev, ev->code);
3589 hci_dev_unlock(hdev);
3591 hdev->hw_error_code = ev->code;
/* Reset runs from the request workqueue, outside this event context */
3593 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: update the connection role on
 * success, clear the pending role-switch flag and notify upper layers.
 */
3596 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3598 struct hci_ev_role_change *ev = (void *) skb->data;
3599 struct hci_conn *conn;
3601 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3605 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3608 conn->role = ev->role;
3610 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3612 hci_role_switch_cfm(conn, ev->status, ev->role);
3615 hci_dev_unlock(hdev);
/* Handle the HCI Number Of Completed Packets event (packet-based flow
 * control). For each (handle, count) pair, credit the per-connection
 * sent counter and the per-device quota for the link type, clamped to
 * the controller-advertised maximum, then kick the TX work.
 */
3618 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3620 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow control mode */
3623 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3624 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb actually holds num_hndl handle entries */
3628 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3629 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3630 BT_DBG("%s bad parameters", hdev->name);
3634 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3636 for (i = 0; i < ev->num_hndl; i++) {
3637 struct hci_comp_pkts_info *info = &ev->handles[i];
3638 struct hci_conn *conn;
3639 __u16 handle, count;
3641 handle = __le16_to_cpu(info->handle);
3642 count = __le16_to_cpu(info->count);
3644 conn = hci_conn_hash_lookup_handle(hdev, handle);
3648 conn->sent -= count;
3650 switch (conn->type) {
3652 hdev->acl_cnt += count;
3653 if (hdev->acl_cnt > hdev->acl_pkts)
3654 hdev->acl_cnt = hdev->acl_pkts;
/* LE shares the ACL quota when the controller reports no
 * dedicated LE buffers (le_pkts == 0).
 */
3658 if (hdev->le_pkts) {
3659 hdev->le_cnt += count;
3660 if (hdev->le_cnt > hdev->le_pkts)
3661 hdev->le_cnt = hdev->le_pkts;
3663 hdev->acl_cnt += count;
3664 if (hdev->acl_cnt > hdev->acl_pkts)
3665 hdev->acl_cnt = hdev->acl_pkts;
3670 hdev->sco_cnt += count;
3671 if (hdev->sco_cnt > hdev->sco_pkts)
3672 hdev->sco_cnt = hdev->sco_pkts;
3676 bt_dev_err(hdev, "unknown type %d conn %p",
/* Credits freed: let the TX worker push more packets */
3682 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a connection depending on the device type:
 * primary controllers look up the connection hash directly, while AMP
 * controllers resolve the handle via the channel table.
 */
3685 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3688 struct hci_chan *chan;
3690 switch (hdev->dev_type) {
3692 return hci_conn_hash_lookup_handle(hdev, handle);
3694 chan = hci_chan_lookup_handle(hdev, handle);
3699 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle the HCI Number Of Completed Data Blocks event (block-based
 * flow control, AMP). Analogous to hci_num_comp_pkts_evt but credits
 * the shared block counter instead of per-type packet quotas.
 */
3706 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3708 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only valid in block-based flow control mode */
3711 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3712 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb actually holds num_hndl handle entries */
3716 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3717 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3718 BT_DBG("%s bad parameters", hdev->name);
3722 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3725 for (i = 0; i < ev->num_hndl; i++) {
3726 struct hci_comp_blocks_info *info = &ev->handles[i];
3727 struct hci_conn *conn = NULL;
3728 __u16 handle, block_count;
3730 handle = __le16_to_cpu(info->handle);
3731 block_count = __le16_to_cpu(info->blocks);
3733 conn = __hci_conn_lookup_handle(hdev, handle);
3737 conn->sent -= block_count;
3739 switch (conn->type) {
3742 hdev->block_cnt += block_count;
3743 if (hdev->block_cnt > hdev->num_blocks)
3744 hdev->block_cnt = hdev->num_blocks;
3748 bt_dev_err(hdev, "unknown type %d conn %p",
/* Credits freed: let the TX worker push more packets */
3754 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event: record the new mode, maintain the
 * power-save flag for controller-initiated changes, and continue any
 * SCO setup that was deferred until the mode change completed.
 */
3757 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3759 struct hci_ev_mode_change *ev = (void *) skb->data;
3760 struct hci_conn *conn;
3762 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3766 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3768 conn->mode = ev->mode;
/* Only track power-save state for changes we did not request */
3770 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3772 if (conn->mode == HCI_CM_ACTIVE)
3773 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3775 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3778 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3779 hci_sco_setup(conn, ev->status);
3782 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event.
 *
 * Extends the disconnect timeout for the pairing, rejects the request
 * when the device is not bondable and we did not initiate
 * authentication, and otherwise forwards the request to mgmt.
 */
3785 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3787 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3788 struct hci_conn *conn;
3790 BT_DBG("%s", hdev->name);
3794 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* hold+drop pair just refreshes the disconnect timer to the longer
 * pairing timeout without changing the refcount.
 */
3798 if (conn->state == BT_CONNECTED) {
3799 hci_conn_hold(conn);
3800 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3801 hci_conn_drop(conn);
3804 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3805 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3806 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3807 sizeof(ev->bdaddr), &ev->bdaddr);
3808 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* Ask userspace for a 16-digit PIN when high security is required */
3811 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3816 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3820 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type. Changed-combination
 * keys keep the previous type (visible via the early-return guard).
 */
3823 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3825 if (key_type == HCI_LK_CHANGED_COMBINATION)
3828 conn->pin_length = pin_len;
3829 conn->key_type = key_type;
3832 case HCI_LK_LOCAL_UNIT:
3833 case HCI_LK_REMOTE_UNIT:
3834 case HCI_LK_DEBUG_COMBINATION:
/* Legacy combination key: level depends on the PIN length used */
3836 case HCI_LK_COMBINATION:
3838 conn->pending_sec_level = BT_SECURITY_HIGH;
3840 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3842 case HCI_LK_UNAUTH_COMBINATION_P192:
3843 case HCI_LK_UNAUTH_COMBINATION_P256:
3844 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3846 case HCI_LK_AUTH_COMBINATION_P192:
3847 conn->pending_sec_level = BT_SECURITY_HIGH;
/* Authenticated P-256 keys qualify for FIPS-level security */
3849 case HCI_LK_AUTH_COMBINATION_P256:
3850 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it unless
 * the key is too weak for the pending security level (unauthenticated
 * key with MITM required, or a short-PIN combination key for
 * high/FIPS security), in which case a negative reply is sent.
 */
3855 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3857 struct hci_ev_link_key_req *ev = (void *) skb->data;
3858 struct hci_cp_link_key_reply cp;
3859 struct hci_conn *conn;
3860 struct link_key *key;
3862 BT_DBG("%s", hdev->name);
3864 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3869 key = hci_find_link_key(hdev, &ev->bdaddr)
3871 BT_DBG("%s link key not found for %pMR", hdev->name,
3876 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3879 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3881 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Refuse an unauthenticated key when MITM protection is required
 * (auth_type bit 0). 0xff means remote auth is not yet known.
 */
3883 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3884 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3885 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3886 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Combination keys from PINs shorter than 16 digits are not strong
 * enough for high or FIPS security.
 */
3890 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3891 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3892 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3893 BT_DBG("%s ignoring key unauthenticated for high security",
3898 conn_set_key(conn, key->type, key->pin_len);
3901 bacpy(&cp.bdaddr, &ev->bdaddr);
3902 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3904 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3906 hci_dev_unlock(hdev);
/* No usable key: tell the controller so pairing can be triggered */
3911 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3912 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event.
 *
 * Records the new key on the connection, stores it in the key list,
 * notifies mgmt, discards debug-combination keys unless explicitly
 * kept, and updates the flush-key flag from the persistence decision.
 */
3915 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3917 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3918 struct hci_conn *conn;
3919 struct link_key *key;
3923 BT_DBG("%s", hdev->name);
3927 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* hold+drop pair resets the disconnect timer to the normal timeout */
3931 hci_conn_hold(conn);
3932 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3933 hci_conn_drop(conn);
3935 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3936 conn_set_key(conn, ev->key_type, conn->pin_length);
3938 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3941 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3942 ev->key_type, pin_len, &persistent);
3946 /* Update connection information since adding the key will have
3947 * fixed up the type in the case of changed combination keys.
3949 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3950 conn_set_key(conn, key->type, key->pin_len);
3952 mgmt_new_link_key(hdev, key, persistent);
3954 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3955 * is set. If it's not set simply remove the key from the kernel
3956 * list (we've still notified user space about it but with
3957 * store_hint being 0).
3959 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3960 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3961 list_del_rcu(&key->list);
3962 kfree_rcu(key, rcu);
/* Non-persistent keys must be flushed from the controller on disconnect */
3967 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3969 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3972 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: cache the reported
 * clock offset in the inquiry cache entry for the peer, if present.
 */
3975 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3977 struct hci_ev_clock_offset *ev = (void *) skb->data;
3978 struct hci_conn *conn;
3980 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3984 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3985 if (conn && !ev->status) {
3986 struct inquiry_entry *ie;
3988 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3990 ie->data.clock_offset = ev->clock_offset;
3991 ie->timestamp = jiffies;
3995 hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: record the new
 * packet type on the connection on success.
 */
3998 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4000 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4001 struct hci_conn *conn;
4003 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4007 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4008 if (conn && !ev->status)
4009 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4011 hci_dev_unlock(hdev);
/* Handle the HCI Page Scan Repetition Mode Change event: refresh the
 * cached page-scan repetition mode in the peer's inquiry cache entry.
 */
4014 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4016 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4017 struct inquiry_entry *ie;
4019 BT_DBG("%s", hdev->name);
4023 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4025 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4026 ie->timestamp = jiffies;
4029 hci_dev_unlock(hdev);
/* Handle the HCI Inquiry Result With RSSI event.
 *
 * The controller may use either of two response layouts (with or
 * without a pscan_mode byte); the layout is inferred by dividing the
 * payload length by the response count. Each response updates the
 * inquiry cache and is reported to mgmt as a found device.
 *
 * NOTE(review): num_rsp comes from the controller and the per-entry
 * reads are not obviously bounded against skb->len in the visible
 * code — upstream later added explicit length validation here; verify
 * this copy is not vulnerable to an out-of-bounds read.
 */
4032 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4033 struct sk_buff *skb)
4035 struct inquiry_data data;
4036 int num_rsp = *((__u8 *) skb->data);
4038 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Periodic inquiry results are not forwarded */
4043 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Distinguish the two response layouts by entry size */
4048 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4049 struct inquiry_info_with_rssi_and_pscan_mode *info;
4050 info = (void *) (skb->data + 1);
4052 for (; num_rsp; num_rsp--, info++) {
4055 bacpy(&data.bdaddr, &info->bdaddr);
4056 data.pscan_rep_mode = info->pscan_rep_mode;
4057 data.pscan_period_mode = info->pscan_period_mode;
4058 data.pscan_mode = info->pscan_mode;
4059 memcpy(data.dev_class, info->dev_class, 3);
4060 data.clock_offset = info->clock_offset;
4061 data.rssi = info->rssi;
4062 data.ssp_mode = 0x00;
4064 flags = hci_inquiry_cache_update(hdev, &data, false);
4066 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4067 info->dev_class, info->rssi,
4068 flags, NULL, 0, NULL, 0);
4071 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4073 for (; num_rsp; num_rsp--, info++) {
4076 bacpy(&data.bdaddr, &info->bdaddr);
4077 data.pscan_rep_mode = info->pscan_rep_mode;
4078 data.pscan_period_mode = info->pscan_period_mode;
4079 data.pscan_mode = 0x00;
4080 memcpy(data.dev_class, info->dev_class, 3);
4081 data.clock_offset = info->clock_offset;
4082 data.rssi = info->rssi;
4083 data.ssp_mode = 0x00;
4085 flags = hci_inquiry_cache_update(hdev, &data, false);
4087 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4088 info->dev_class, info->rssi,
4089 flags, NULL, 0, NULL, 0);
4093 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Stores the reported feature page, derives SSP/SC enablement from
 * the host features on page 1, then continues connection setup the
 * same way as hci_remote_features_evt (name request / mgmt notify /
 * finish when no outgoing authentication is needed).
 */
4096 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4097 struct sk_buff *skb)
4099 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4100 struct hci_conn *conn;
4102 BT_DBG("%s", hdev->name);
4106 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Bound the page index before writing into conn->features */
4110 if (ev->page < HCI_MAX_PAGES)
4111 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote host's SSP/SC support bits */
4113 if (!ev->status && ev->page == 0x01) {
4114 struct inquiry_entry *ie;
4116 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4118 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4120 if (ev->features[0] & LMP_HOST_SSP) {
4121 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4123 /* It is mandatory by the Bluetooth specification that
4124 * Extended Inquiry Results are only used when Secure
4125 * Simple Pairing is enabled, but some devices violate
4128 * To make these devices work, the internal SSP
4129 * enabled flag needs to be cleared if the remote host
4130 * features do not indicate SSP support */
4131 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4134 if (ev->features[0] & LMP_HOST_SC)
4135 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4138 if (conn->state != BT_CONFIG)
4141 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4142 struct hci_cp_remote_name_req cp;
4143 memset(&cp, 0, sizeof(cp));
4144 bacpy(&cp.bdaddr, &conn->dst);
4145 cp.pscan_rep_mode = 0x02;
4146 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4147 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4148 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4150 if (!hci_outgoing_auth_needed(hdev, conn)) {
4151 conn->state = BT_CONNECTED;
4152 hci_connect_cfm(conn, ev->status);
4153 hci_conn_drop(conn);
4157 hci_dev_unlock(hdev);
/* Handle the HCI Synchronous Connection Complete event.
 *
 * Resolves the connection object (falling back from SCO to the eSCO
 * object that was used during setup), finalizes it on success, and on
 * a known set of eSCO setup failures retries with a SCO-compatible
 * packet type before giving up.
 */
4160 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4161 struct sk_buff *skb)
4163 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4164 struct hci_conn *conn;
4166 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4170 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4172 if (ev->link_type == ESCO_LINK)
4175 /* When the link type in the event indicates SCO connection
4176 * and lookup of the connection object fails, then check
4177 * if an eSCO connection object exists.
4179 * The core limits the synchronous connections to either
4180 * SCO or eSCO. The eSCO connection is preferred and tried
4181 * to be setup first and until successfully established,
4182 * the link type will be hinted as eSCO.
4184 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4189 switch (ev->status) {
4191 conn->handle = __le16_to_cpu(ev->handle);
4192 conn->state = BT_CONNECTED;
/* Adopt the link type the controller actually established */
4193 conn->type = ev->link_type;
4195 hci_debugfs_create_conn(conn);
4196 hci_conn_add_sysfs(conn);
4199 case 0x10: /* Connection Accept Timeout */
4200 case 0x0d: /* Connection Rejected due to Limited Resources */
4201 case 0x11: /* Unsupported Feature or Parameter Value */
4202 case 0x1c: /* SCO interval rejected */
4203 case 0x1a: /* Unsupported Remote Feature */
4204 case 0x1f: /* Unspecified error */
4205 case 0x20: /* Unsupported LMP Parameter value */
/* Retry setup with a reduced, SCO-compatible packet type set */
4207 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4208 (hdev->esco_type & EDR_ESCO_MASK);
4209 if (hci_setup_sync(conn, conn->link->handle))
4215 conn->state = BT_CLOSED;
4219 hci_connect_cfm(conn, ev->status);
4224 hci_dev_unlock(hdev);
/* Return the number of bytes of meaningful EIR data in the buffer by
 * walking the length-prefixed fields, capped at eir_len.
 */
4227 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4231 while (parsed < eir_len) {
/* Each field starts with its length byte (field data follows) */
4232 u8 field_len = eir[0];
4237 parsed += field_len + 1;
4238 eir += field_len + 1;
/* Handle the HCI Extended Inquiry Result event.
 *
 * Each response carries EIR data; the name-known state is derived from
 * a complete-name EIR field when mgmt is active, the inquiry cache is
 * updated and the device is reported to mgmt with its EIR payload.
 *
 * NOTE(review): num_rsp is controller-supplied and the loop reads
 * num_rsp fixed-size entries from the skb; upstream later added an
 * explicit skb->len bound check here — verify this copy validates the
 * payload length before iterating.
 */
4244 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4245 struct sk_buff *skb)
4247 struct inquiry_data data;
4248 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4249 int num_rsp = *((__u8 *) skb->data);
4252 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Periodic inquiry results are not forwarded */
4257 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4262 for (; num_rsp; num_rsp--, info++) {
4266 bacpy(&data.bdaddr, &info->bdaddr);
4267 data.pscan_rep_mode = info->pscan_rep_mode;
4268 data.pscan_period_mode = info->pscan_period_mode;
4269 data.pscan_mode = 0x00;
4270 memcpy(data.dev_class, info->dev_class, 3);
4271 data.clock_offset = info->clock_offset;
4272 data.rssi = info->rssi;
/* Extended inquiry results imply SSP support */
4273 data.ssp_mode = 0x01;
4275 if (hci_dev_test_flag(hdev, HCI_MGMT))
4276 name_known = eir_get_data(info->data,
4278 EIR_NAME_COMPLETE, NULL);
4282 flags = hci_inquiry_cache_update(hdev, &data, name_known);
4284 eir_len = eir_get_length(info->data, sizeof(info->data));
4286 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4287 info->dev_class, info->rssi,
4288 flags, info->data, eir_len, NULL, 0);
4291 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Key Refresh Complete event.
 *
 * Only acted on for LE links (BR/EDR is handled via auth_complete):
 * promotes the security level, disconnects on failure, and completes
 * either connection setup or an authentication request.
 */
4294 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4295 struct sk_buff *skb)
4297 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4298 struct hci_conn *conn;
4300 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4301 __le16_to_cpu(ev->handle));
4305 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4309 /* For BR/EDR the necessary steps are taken through the
4310 * auth_complete event.
4312 if (conn->type != LE_LINK)
4316 conn->sec_level = conn->pending_sec_level;
4318 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Key refresh failed on an established link: tear it down */
4320 if (ev->status && conn->state == BT_CONNECTED) {
4321 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4322 hci_conn_drop(conn);
4326 if (conn->state == BT_CONFIG) {
4328 conn->state = BT_CONNECTED;
4330 hci_connect_cfm(conn, ev->status);
4331 hci_conn_drop(conn);
4333 hci_auth_cfm(conn, ev->status);
/* hold+drop pair resets the disconnect timer to the normal timeout */
4335 hci_conn_hold(conn);
4336 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4337 hci_conn_drop(conn);
4341 hci_dev_unlock(hdev);
/* Compute the authentication requirements to send in an IO Capability
 * Reply, combining the remote requirement with our own MITM bit
 * (bit 0 of auth_type) and downgrading MITM when either side has no
 * input/output capability.
 */
4344 static u8 hci_get_auth_req(struct hci_conn *conn)
4346 /* If remote requests no-bonding follow that lead */
4347 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4348 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4349 return conn->remote_auth | (conn->auth_type & 0x01);
4351 /* If both remote and local have enough IO capabilities, require
4354 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4355 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4356 return conn->remote_auth | 0x01;
4358 /* No MITM protection possible so ignore remote requirement */
4359 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Determine the OOB-data-present value for an IO Capability Reply on a
 * BR/EDR link, based on the stored remote OOB data and whether Secure
 * Connections (and SC-Only mode) are in effect.
 */
4362 static u8 bredr_oob_data_present(struct hci_conn *conn)
4364 struct hci_dev *hdev = conn->hdev;
4365 struct oob_data *data;
4367 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4371 if (bredr_sc_enabled(hdev)) {
4372 /* When Secure Connections is enabled, then just
4373 * return the present value stored with the OOB
4374 * data. The stored value contains the right present
4375 * information. However it can only be trusted when
4376 * not in Secure Connection Only mode.
4378 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4379 return data->present;
4381 /* When Secure Connections Only mode is enabled, then
4382 * the P-256 values are required. If they are not
4383 * available, then do not declare that OOB data is
4386 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4387 !memcmp(data->hash256, ZERO_KEY, 16))
4393 /* When Secure Connections is not enabled or actually
4394 * not supported by the hardware, then check that if
4395 * P-192 data values are present.
4397 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4398 !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle the HCI IO Capability Request event.
 *
 * If pairing is permitted (bondable, we initiated authentication, or
 * the remote requests no-bonding), replies with our IO capability,
 * derived authentication requirements and OOB-data presence;
 * otherwise sends a negative reply with "pairing not allowed".
 */
4404 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4406 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4407 struct hci_conn *conn;
4409 BT_DBG("%s", hdev->name);
4413 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4417 hci_conn_hold(conn);
4419 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4422 /* Allow pairing if we're pairable, the initiators of the
4423 * pairing or if the remote is not requesting bonding.
4425 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4426 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4427 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4428 struct hci_cp_io_capability_reply cp;
4430 bacpy(&cp.bdaddr, &ev->bdaddr);
4431 /* Change the IO capability from KeyboardDisplay
4432 * to DisplayYesNo as it is not supported by BT spec. */
4433 cp.capability = (conn->io_capability == 0x04) ?
4434 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4436 /* If we are initiators, there is no remote information yet */
4437 if (conn->remote_auth == 0xff) {
4438 /* Request MITM protection if our IO caps allow it
4439 * except for the no-bonding case.
4441 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4442 conn->auth_type != HCI_AT_NO_BONDING)
4443 conn->auth_type |= 0x01;
4445 conn->auth_type = hci_get_auth_req(conn);
4448 /* If we're not bondable, force one of the non-bondable
4449 * authentication requirement values.
4451 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4452 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4454 cp.authentication = conn->auth_type;
4455 cp.oob_data = bredr_oob_data_present(conn);
4457 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4460 struct hci_cp_io_capability_neg_reply cp;
4462 bacpy(&cp.bdaddr, &ev->bdaddr);
4463 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4465 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4470 hci_dev_unlock(hdev);
/* Handle the HCI IO Capability Response event: cache the remote device's
 * IO capability and authentication requirements on the connection so later
 * pairing events (user confirm, etc.) can consult them.
 */
4473 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4475	struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4476	struct hci_conn *conn;
4478	BT_DBG("%s", hdev->name);
4482	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4486	conn->remote_cap = ev->capability;
4487	conn->remote_auth = ev->authentication;
4490	hci_dev_unlock(hdev);
/* Handle the HCI User Confirmation Request event (SSP numeric comparison).
 * Rejects when required MITM protection is impossible, auto-accepts (with
 * an optional configurable delay) when neither side needs MITM, and
 * otherwise forwards the request to user space via mgmt.
 *
 * NOTE(review): lock/unlock calls, goto labels and closing braces are
 * elided in this extract.
 */
4493 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4494					 struct sk_buff *skb)
4496	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4497	int loc_mitm, rem_mitm, confirm_hint = 0;
4498	struct hci_conn *conn;
4500	BT_DBG("%s", hdev->name);
4504	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4507	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM-protection flag */
4511	loc_mitm = (conn->auth_type & 0x01);
4512	rem_mitm = (conn->remote_auth & 0x01);
4514	/* If we require MITM but the remote device can't provide that
4515	 * (it has NoInputNoOutput) then reject the confirmation
4516	 * request. We check the security level here since it doesn't
4517	 * necessarily match conn->auth_type.
4519	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4520	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4521		BT_DBG("Rejecting request: remote device can't provide MITM");
4522		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4523			     sizeof(ev->bdaddr), &ev->bdaddr);
4527	/* If no side requires MITM protection; auto-accept */
4528	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4529	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4531		/* If we're not the initiators request authorization to
4532		 * proceed from user space (mgmt_user_confirm with
4533		 * confirm_hint set to 1). The exception is if neither
4534		 * side had MITM or if the local IO capability is
4535		 * NoInputNoOutput, in which case we do auto-accept
4537		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4538		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4539		    (loc_mitm || rem_mitm)) {
4540			BT_DBG("Confirming auto-accept as acceptor");
4545		BT_DBG("Auto-accept of user confirmation with %ums delay",
4546		       hdev->auto_accept_delay);
/* Delay the auto-accept when configured (gives user a cancel window) */
4548		if (hdev->auto_accept_delay > 0) {
4549			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4550			queue_delayed_work(conn->hdev->workqueue,
4551					   &conn->auto_accept_work, delay);
4555		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4556			     sizeof(ev->bdaddr), &ev->bdaddr);
/* Otherwise let user space decide */
4561	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4562				  le32_to_cpu(ev->passkey), confirm_hint);
4565	hci_dev_unlock(hdev);
/* Handle the HCI User Passkey Request event by forwarding it to user
 * space via mgmt (only when the mgmt interface is active).
 */
4568 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4569					 struct sk_buff *skb)
4571	struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4573	BT_DBG("%s", hdev->name);
4575	if (hci_dev_test_flag(hdev, HCI_MGMT))
4576		mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the HCI User Passkey Notification event: cache the passkey to
 * display on the connection, reset the entered-digit counter, and notify
 * user space via mgmt so it can show the passkey.
 */
4579 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4580					struct sk_buff *skb)
4582	struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4583	struct hci_conn *conn;
4585	BT_DBG("%s", hdev->name);
4587	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4591	conn->passkey_notify = __le32_to_cpu(ev->passkey);
4592	conn->passkey_entered = 0;
4594	if (hci_dev_test_flag(hdev, HCI_MGMT))
4595		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4596					 conn->dst_type, conn->passkey_notify,
4597					 conn->passkey_entered);
/* Handle the HCI Keypress Notification event: track how many passkey
 * digits the remote user has entered/erased so far and relay the updated
 * count to user space via mgmt.
 *
 * NOTE(review): the switch statement's opening line, break statements and
 * closing braces are elided in this extract.
 */
4600 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4602	struct hci_ev_keypress_notify *ev = (void *) skb->data;
4603	struct hci_conn *conn;
4605	BT_DBG("%s", hdev->name);
4607	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4612	case HCI_KEYPRESS_STARTED:
4613		conn->passkey_entered = 0;
4616	case HCI_KEYPRESS_ENTERED:
4617		conn->passkey_entered++;
4620	case HCI_KEYPRESS_ERASED:
4621		conn->passkey_entered--;
4624	case HCI_KEYPRESS_CLEARED:
4625		conn->passkey_entered = 0;
4628	case HCI_KEYPRESS_COMPLETED:
4632	if (hci_dev_test_flag(hdev, HCI_MGMT))
4633		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4634					 conn->dst_type, conn->passkey_notify,
4635					 conn->passkey_entered);
/* Handle the HCI Simple Pairing Complete event: reset the cached remote
 * auth requirement, report a pairing failure to user space when we were
 * not the authentication initiator, and drop the pairing reference taken
 * in hci_io_capa_request_evt().
 */
4638 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4639					 struct sk_buff *skb)
4641	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4642	struct hci_conn *conn;
4644	BT_DBG("%s", hdev->name);
4648	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4652	/* Reset the authentication requirement to unknown */
4653	conn->remote_auth = 0xff;
4655	/* To avoid duplicate auth_failed events to user space we check
4656	 * the HCI_CONN_AUTH_PEND flag which will be set if we
4657	 * initiated the authentication. A traditional auth_complete
4658	 * event gets always produced as initiator and is also mapped to
4659	 * the mgmt_auth_failed event */
4660	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4661		mgmt_auth_failed(conn, ev->status);
4663	hci_conn_drop(conn);
4666	hci_dev_unlock(hdev);
/* Handle the HCI Remote Host Supported Features Notification event:
 * cache the remote host feature page (page 1) on the connection and
 * record the remote's SSP support in the inquiry cache entry.
 */
4669 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4670					 struct sk_buff *skb)
4672	struct hci_ev_remote_host_features *ev = (void *) skb->data;
4673	struct inquiry_entry *ie;
4674	struct hci_conn *conn;
4676	BT_DBG("%s", hdev->name);
4680	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4682		memcpy(conn->features[1], ev->features, 8);
4684	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4686		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4688	hci_dev_unlock(hdev);
/* Handle the HCI Remote OOB Data Request event: reply with the locally
 * stored OOB data for the peer (extended P-192+P-256 form when Secure
 * Connections is enabled, legacy P-192 form otherwise), or send a
 * negative reply when no data is stored.
 *
 * NOTE(review): the no-data branch opening, goto labels and closing
 * braces are elided in this extract.
 */
4691 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4692					    struct sk_buff *skb)
4694	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4695	struct oob_data *data;
4697	BT_DBG("%s", hdev->name);
4701	if (!hci_dev_test_flag(hdev, HCI_MGMT))
4704	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4706		struct hci_cp_remote_oob_data_neg_reply cp;
4708		bacpy(&cp.bdaddr, &ev->bdaddr);
4709		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4714	if (bredr_sc_enabled(hdev)) {
4715		struct hci_cp_remote_oob_ext_data_reply cp;
4717		bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-only mode the P-192 values must not be used: send zeros */
4718		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4719			memset(cp.hash192, 0, sizeof(cp.hash192));
4720			memset(cp.rand192, 0, sizeof(cp.rand192));
4722			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4723			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4725		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4726		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4728		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy (non-SC) reply carries only the P-192 hash and randomizer */
4731		struct hci_cp_remote_oob_data_reply cp;
4733		bacpy(&cp.bdaddr, &ev->bdaddr);
4734		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4735		memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4737		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4742	hci_dev_unlock(hdev);
4745 #if IS_ENABLED(CONFIG_BT_HS)
/* Handle the AMP Channel Selected event (Bluetooth High Speed): look up
 * the physical-link connection by its handle and kick off reading the
 * final local AMP assoc data for it.
 */
4746 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4748	struct hci_ev_channel_selected *ev = (void *)skb->data;
4749	struct hci_conn *hcon;
4751	BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4753	skb_pull(skb, sizeof(*ev));
4755	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4759	amp_read_loc_assoc_final_data(hdev, hcon);
/* Handle the AMP Physical Link Complete event: on success, mark the AMP
 * connection as connected, mirror the peer address from the underlying
 * BR/EDR connection, register it with debugfs/sysfs and confirm the
 * physical link to the AMP manager.
 *
 * NOTE(review): the lookup-failure and error-status paths (between the
 * two early hci_dev_unlock() calls) are partly elided in this extract.
 */
4762 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4763				      struct sk_buff *skb)
4765	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4766	struct hci_conn *hcon, *bredr_hcon;
4768	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4773	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4775		hci_dev_unlock(hdev);
4781		hci_dev_unlock(hdev);
/* The AMP link is layered on top of an existing BR/EDR connection */
4785	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4787	hcon->state = BT_CONNECTED;
4788	bacpy(&hcon->dst, &bredr_hcon->dst);
/* hold+drop arms the disconnect timeout accounting for this link */
4790	hci_conn_hold(hcon);
4791	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4792	hci_conn_drop(hcon);
4794	hci_debugfs_create_conn(hcon);
4795	hci_conn_add_sysfs(hcon);
4797	amp_physical_cfm(bredr_hcon, hcon);
4799	hci_dev_unlock(hdev);
/* Handle the AMP Logical Link Complete event: create an hci_chan for the
 * new logical link on the physical connection and, when an L2CAP channel
 * is waiting on the AMP manager, confirm the logical link to L2CAP using
 * the controller's block MTU.
 *
 * NOTE(review): lock/unlock calls, failure checks and closing braces are
 * elided in this extract.
 */
4802 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4804	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4805	struct hci_conn *hcon;
4806	struct hci_chan *hchan;
4807	struct amp_mgr *mgr;
4809	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4810	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4813	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4817	/* Create AMP hchan */
4818	hchan = hci_chan_create(hcon);
4822	hchan->handle = le16_to_cpu(ev->handle);
4824	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4826	mgr = hcon->amp_mgr;
4827	if (mgr && mgr->bredr_chan) {
4828		struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4830		l2cap_chan_lock(bredr_chan);
/* AMP data path uses block-based flow control, hence block_mtu */
4832		bredr_chan->conn->mtu = hdev->block_mtu;
4833		l2cap_logical_cfm(bredr_chan, hchan, 0);
4834		hci_conn_hold(hcon);
4836		l2cap_chan_unlock(bredr_chan);
/* Handle the AMP Disconnection Logical Link Complete event: find the
 * hci_chan for the logical-link handle and tear it down, passing the
 * disconnect reason on to the AMP layer.
 */
4840 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4841					     struct sk_buff *skb)
4843	struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4844	struct hci_chan *hchan;
4846	BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4847	       le16_to_cpu(ev->handle), ev->status);
4854	hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4858	amp_destroy_logical_link(hchan, ev->reason);
4861	hci_dev_unlock(hdev);
/* Handle the AMP Disconnection Physical Link Complete event: mark the
 * corresponding physical-link connection as closed.
 */
4864 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4865					     struct sk_buff *skb)
4867	struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4868	struct hci_conn *hcon;
4870	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4877	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4879		hcon->state = BT_CLOSED;
4883	hci_dev_unlock(hdev);
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete events. Resolves or creates the hci_conn, fills in initiator/
 * responder addresses (handling RPA/privacy), converts the peer address
 * to its identity address via the IRK store, rejects blocked devices,
 * records the connection parameters, optionally starts the remote
 * features exchange, and cleans up any pending connection parameters.
 *
 * NOTE(review): this extract elides many lines (role checks such as the
 * master/outgoing-connection conditionals, error paths, goto labels and
 * closing braces) — the visible flow is not complete.
 */
4887 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
4888			bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
4889			u16 interval, u16 latency, u16 supervision_timeout)
4891	struct hci_conn_params *params;
4892	struct hci_conn *conn;
4893	struct smp_irk *irk;
4898	/* All controllers implicitly stop advertising in the event of a
4899	 * connection, so ensure that the state bit is cleared.
4901	hci_dev_clear_flag(hdev, HCI_LE_ADV);
/* Reuse the in-progress outgoing connection object if there is one */
4903	conn = hci_lookup_le_connect(hdev);
4905		conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
4907			bt_dev_err(hdev, "no memory for new connection");
4911		conn->dst_type = bdaddr_type;
4913	/* If we didn't have a hci_conn object previously
4914	 * but we're in master role this must be something
4915	 * initiated using a white list. Since white list based
4916	 * connections are not "first class citizens" we don't
4917	 * have full tracking of them. Therefore, we go ahead
4918	 * with a "best effort" approach of determining the
4919	 * initiator address based on the HCI_PRIVACY flag.
4922		conn->resp_addr_type = bdaddr_type;
4923		bacpy(&conn->resp_addr, bdaddr);
4924		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4925			conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4926			bacpy(&conn->init_addr, &hdev->rpa);
4928			hci_copy_identity_address(hdev,
4930						  &conn->init_addr_type);
4934		cancel_delayed_work(&conn->le_conn_timeout);
4938		/* Set the responder (our side) address type based on
4939		 * the advertising address type.
4941		conn->resp_addr_type = hdev->adv_addr_type;
4942		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
4943			/* In case of ext adv, resp_addr will be updated in
4944			 * Adv Terminated event.
4946			if (!ext_adv_capable(hdev))
4947				bacpy(&conn->resp_addr, &hdev->random_addr);
4949			bacpy(&conn->resp_addr, &hdev->bdaddr);
4952		conn->init_addr_type = bdaddr_type;
4953		bacpy(&conn->init_addr, bdaddr);
4955		/* For incoming connections, set the default minimum
4956		 * and maximum connection interval. They will be used
4957		 * to check if the parameters are in range and if not
4958		 * trigger the connection update procedure.
4960		conn->le_conn_min_interval = hdev->le_conn_min_interval;
4961		conn->le_conn_max_interval = hdev->le_conn_max_interval;
4964	/* Lookup the identity address from the stored connection
4965	 * address and address type.
4967	 * When establishing connections to an identity address, the
4968	 * connection procedure will store the resolvable random
4969	 * address first. Now if it can be converted back into the
4970	 * identity address, start using the identity address from
4973	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4975		bacpy(&conn->dst, &irk->bdaddr);
4976		conn->dst_type = irk->addr_type;
4980		hci_le_conn_failed(conn, status);
4984	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4985		addr_type = BDADDR_LE_PUBLIC;
4987		addr_type = BDADDR_LE_RANDOM;
4989	/* Drop the connection if the device is blocked */
4990	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4991		hci_conn_drop(conn);
4995	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4996		mgmt_device_connected(hdev, conn, 0, NULL, 0);
4998	conn->sec_level = BT_SECURITY_LOW;
4999	conn->handle = handle;
5000	conn->state = BT_CONFIG;
5002	conn->le_conn_interval = interval;
5003	conn->le_conn_latency = latency;
5004	conn->le_supv_timeout = supervision_timeout;
5006	hci_debugfs_create_conn(conn);
5007	hci_conn_add_sysfs(conn);
5010	/* The remote features procedure is defined for master
5011	 * role only. So only in case of an initiated connection
5012	 * request the remote features.
5014	 * If the local controller supports slave-initiated features
5015	 * exchange, then requesting the remote features in slave
5016	 * role is possible. Otherwise just transition into the
5017	 * connected state without requesting the remote features.
5020	    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5021		struct hci_cp_le_read_remote_features cp;
5023		cp.handle = __cpu_to_le16(conn->handle);
5025		hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
/* Reference is dropped in the remote-features completion path */
5028		hci_conn_hold(conn);
5030		conn->state = BT_CONNECTED;
5031		hci_connect_cfm(conn, status);
5034	hci_connect_cfm(conn, status);
/* Connection established: the pending auto-connect entry is done */
5037	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5040		list_del_init(&params->action);
5042			hci_conn_drop(params->conn);
5043			hci_conn_put(params->conn);
5044			params->conn = NULL;
5049	hci_update_background_scan(hdev);
5050	hci_dev_unlock(hdev);
/* Handle the LE Connection Complete meta event: unpack the event fields
 * (converting from little-endian) and delegate to le_conn_complete_evt().
 */
5053 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5055	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5057	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5059	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5060			     ev->role, le16_to_cpu(ev->handle),
5061			     le16_to_cpu(ev->interval),
5062			     le16_to_cpu(ev->latency),
5063			     le16_to_cpu(ev->supervision_timeout));
/* Handle the LE Enhanced Connection Complete meta event: same common
 * handling as the legacy event, via le_conn_complete_evt().
 */
5066 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5067					 struct sk_buff *skb)
5069	struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5071	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5073	le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5074			     ev->role, le16_to_cpu(ev->handle),
5075			     le16_to_cpu(ev->interval),
5076			     le16_to_cpu(ev->latency),
5077			     le16_to_cpu(ev->supervision_timeout));
/* Handle the LE Advertising Set Terminated event: when the set was
 * terminated by an incoming connection and we advertise with a random
 * address, fix up conn->resp_addr to the random address of the set
 * (the device-wide one, or the instance's own random address).
 *
 * NOTE(review): goto labels and closing braces are elided in this
 * extract.
 */
5080 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5082	struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5083	struct hci_conn *conn;
5085	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5090	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5092		struct adv_info *adv_instance;
5094		if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
/* Instance 0 is the non-instance advertising set */
5097		if (!hdev->cur_adv_instance) {
5098			bacpy(&conn->resp_addr, &hdev->random_addr);
5102		adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5104			bacpy(&conn->resp_addr, &adv_instance->random_addr);
/* Handle the LE Connection Update Complete event: on failure notify mgmt
 * of the failed update; on success store the new interval/latency/
 * supervision timeout on the connection and report the updated values.
 *
 * NOTE(review): several lines (lock call, status check, closing braces)
 * are elided in this extract.
 */
5108 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5109					    struct sk_buff *skb)
5111	struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5112	struct hci_conn *conn;
5114	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5121	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5125		hci_dev_unlock(hdev);
5126		mgmt_le_conn_update_failed(hdev, &conn->dst,
5127					conn->type, conn->dst_type, ev->status);
5131	conn->le_conn_interval = le16_to_cpu(ev->interval);
5132	conn->le_conn_latency = le16_to_cpu(ev->latency);
5133	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5136	hci_dev_unlock(hdev);
5139	mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
5140				conn->dst_type, conn->le_conn_interval,
5141				conn->le_conn_latency, conn->le_supv_timeout);
5145 /* This function requires the caller holds hdev->lock */
5146 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5148 u8 addr_type, u8 adv_type,
5149 bdaddr_t *direct_rpa)
5151 struct hci_conn *conn;
5152 struct hci_conn_params *params;
5154 /* If the event is not connectable don't proceed further */
5155 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5158 /* Ignore if the device is blocked */
5159 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5162 /* Most controller will fail if we try to create new connections
5163 * while we have an existing one in slave role.
5165 if (hdev->conn_hash.le_num_slave > 0)
5168 /* If we're not connectable only connect devices that we have in
5169 * our pend_le_conns list.
5171 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5176 if (!params->explicit_connect) {
5177 switch (params->auto_connect) {
5178 case HCI_AUTO_CONN_DIRECT:
5179 /* Only devices advertising with ADV_DIRECT_IND are
5180 * triggering a connection attempt. This is allowing
5181 * incoming connections from slave devices.
5183 if (adv_type != LE_ADV_DIRECT_IND)
5186 case HCI_AUTO_CONN_ALWAYS:
5187 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5188 * are triggering a connection attempt. This means
5189 * that incoming connectioms from slave device are
5190 * accepted and also outgoing connections to slave
5191 * devices are established when found.
5199 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5200 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
5202 if (!IS_ERR(conn)) {
5203 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5204 * by higher layer that tried to connect, if no then
5205 * store the pointer since we don't really have any
5206 * other owner of the object besides the params that
5207 * triggered it. This way we can abort the connection if
5208 * the parameters get removed and keep the reference
5209 * count consistent once the connection is established.
5212 if (!params->explicit_connect)
5213 params->conn = hci_conn_get(conn);
5218 switch (PTR_ERR(conn)) {
5220 /* If hci_connect() returns -EBUSY it means there is already
5221 * an LE connection attempt going on. Since controllers don't
5222 * support more than one connection attempt at the time, we
5223 * don't consider this an error case.
5227 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process a single LE advertising (or directed advertising) report:
 * validate the PDU type, trim padded data, filter directed reports by
 * our own RPA, resolve the advertiser's identity address, trigger any
 * pending auto-connection, and deliver/merge device-found events toward
 * mgmt (handling the ADV_IND + SCAN_RSP merge cache).
 *
 * NOTE(review): local variable declarations (ptr, real_len, flags, match)
 * and several control-flow lines (returns, closing braces) are elided in
 * this extract.
 */
5234 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5235			       u8 bdaddr_type, bdaddr_t *direct_addr,
5236			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
5238	struct discovery_state *d = &hdev->discovery;
5239	struct smp_irk *irk;
5240	struct hci_conn *conn;
/* Only the standard legacy PDU types are accepted */
5247	case LE_ADV_DIRECT_IND:
5248	case LE_ADV_SCAN_IND:
5249	case LE_ADV_NONCONN_IND:
5250	case LE_ADV_SCAN_RSP:
5253		bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5254				       "type: 0x%02x", type);
5258	/* Find the end of the data in case the report contains padded zero
5259	 * bytes at the end causing an invalid length value.
5261	 * When data is NULL, len is 0 so there is no need for extra ptr
5262	 * check as 'ptr < data + 0' is already false in such case.
5264	for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5265		if (ptr + 1 + *ptr > data + len)
5269	real_len = ptr - data;
5271	/* Adjust for actual length */
5272	if (len != real_len) {
5273		bt_dev_err_ratelimited(hdev, "advertising data len corrected");
5277	/* If the direct address is present, then this report is from
5278	 * a LE Direct Advertising Report event. In that case it is
5279	 * important to see if the address is matching the local
5280	 * controller address.
5283		/* Only resolvable random addresses are valid for these
5284		 * kind of reports and others can be ignored.
5286		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5289		/* If the controller is not using resolvable random
5290		 * addresses, then this report can be ignored.
5292		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5295		/* If the local IRK of the controller does not match
5296		 * with the resolvable random address provided, then
5297		 * this report can be ignored.
5299		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5303	/* Check if we need to convert to identity address */
5304	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5306		bdaddr = &irk->bdaddr;
5307		bdaddr_type = irk->addr_type;
5310	/* Check if we have been requested to connect to this device.
5312	 * direct_addr is set only for directed advertising reports (it is NULL
5313	 * for advertising reports) and is already verified to be RPA above.
5315	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5317	if (conn && type == LE_ADV_IND) {
5318		/* Store report for later inclusion by
5319		 * mgmt_device_connected
5321		memcpy(conn->le_adv_data, data, len);
5322		conn->le_adv_data_len = len;
5325	/* Passive scanning shouldn't trigger any device found events,
5326	 * except for devices marked as CONN_REPORT for which we do send
5327	 * device found events.
5329	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5330		if (type == LE_ADV_DIRECT_IND)
5333		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5334					       bdaddr, bdaddr_type))
5337		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5338			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5341		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5342				  rssi, flags, data, len, NULL, 0);
5346	/* When receiving non-connectable or scannable undirected
5347	 * advertising reports, this means that the remote device is
5348	 * not connectable and then clearly indicate this in the
5349	 * device found event.
5351	 * When receiving a scan response, then there is no way to
5352	 * know if the remote device is connectable or not. However
5353	 * since scan responses are merged with a previously seen
5354	 * advertising report, the flags field from that report
5357	 * In the really unlikely case that a controller get confused
5358	 * and just sends a scan response event, then it is marked as
5359	 * not connectable as well.
5361	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5362	    type == LE_ADV_SCAN_RSP)
5363		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5367	/* If there's nothing pending either store the data from this
5368	 * event or send an immediate device found event if the data
5369	 * should not be stored for later.
5371	if (!has_pending_adv_report(hdev)) {
5372		/* If the report will trigger a SCAN_REQ store it for
5375		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5376			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5377						 rssi, flags, data, len);
5381		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5382				  rssi, flags, data, len, NULL, 0);
5386	/* Check if the pending report is for the same device as the new one */
5387	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5388		 bdaddr_type == d->last_adv_addr_type);
5390	/* If the pending data doesn't match this report or this isn't a
5391	 * scan response (e.g. we got a duplicate ADV_IND) then force
5392	 * sending of the pending data.
5394	if (type != LE_ADV_SCAN_RSP || !match) {
5395		/* Send out whatever is in the cache, but skip duplicates */
5397			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5398					  d->last_adv_addr_type, NULL,
5399					  d->last_adv_rssi, d->last_adv_flags,
5401					  d->last_adv_data_len, NULL, 0);
5403		/* If the new report will trigger a SCAN_REQ store it for
5406		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5407			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5408						 rssi, flags, data, len);
5412		/* The advertising reports cannot be merged, so clear
5413		 * the pending report and send out a device found event.
5415		clear_pending_adv_report(hdev);
5416		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5417				  rssi, flags, data, len, NULL, 0);
5421	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5422	 * the new event is a SCAN_RSP. We can therefore proceed with
5423	 * sending a merged device found event.
5425	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5426			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5427			  d->last_adv_data, d->last_adv_data_len, data, len);
5428	clear_pending_adv_report(hdev);
/* Handle the LE Advertising Report meta event: iterate over the packed
 * reports in the event payload, validate each report's data length, and
 * feed each one to process_adv_report() (the RSSI byte follows the data).
 */
5431 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5433	u8 num_reports = skb->data[0];
5434	void *ptr = &skb->data[1];
5438	while (num_reports--) {
5439		struct hci_ev_le_advertising_info *ev = ptr;
/* Reject reports claiming more AD data than the spec allows */
5442		if (ev->length <= HCI_MAX_AD_LENGTH) {
5443			rssi = ev->data[ev->length];
5444			process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5445					   ev->bdaddr_type, NULL, 0, rssi,
5446					   ev->data, ev->length);
5448			bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past this report: header + data + trailing RSSI byte */
5451		ptr += sizeof(*ev) + ev->length + 1;
5454	hci_dev_unlock(hdev);
/* Map an extended advertising report event type (bit field) to the
 * equivalent legacy advertising PDU type, or LE_ADV_INVALID when the
 * combination is unknown.
 *
 * NOTE(review): the switch opening line, some return statements and
 * closing braces are elided in this extract.
 */
5457 static u8 ext_evt_type_to_legacy(u16 evt_type)
/* Legacy-PDU reports carry an exact legacy type encoding */
5459	if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5461		case LE_LEGACY_ADV_IND:
5463		case LE_LEGACY_ADV_DIRECT_IND:
5464			return LE_ADV_DIRECT_IND;
5465		case LE_LEGACY_ADV_SCAN_IND:
5466			return LE_ADV_SCAN_IND;
5467		case LE_LEGACY_NONCONN_IND:
5468			return LE_ADV_NONCONN_IND;
5469		case LE_LEGACY_SCAN_RSP_ADV:
5470		case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5471			return LE_ADV_SCAN_RSP;
5474		BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5477		return LE_ADV_INVALID;
/* True extended PDUs: derive the closest legacy type from the bits */
5480	if (evt_type & LE_EXT_ADV_CONN_IND) {
5481		if (evt_type & LE_EXT_ADV_DIRECT_IND)
5482			return LE_ADV_DIRECT_IND;
5487	if (evt_type & LE_EXT_ADV_SCAN_RSP)
5488		return LE_ADV_SCAN_RSP;
5490	if (evt_type & LE_EXT_ADV_SCAN_IND)
5491		return LE_ADV_SCAN_IND;
5493	if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5494	    evt_type & LE_EXT_ADV_DIRECT_IND)
5495		return LE_ADV_NONCONN_IND;
5497	BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5500	return LE_ADV_INVALID;
/* Handle the LE Extended Advertising Report meta event: for each packed
 * report, translate the extended event type to a legacy PDU type and,
 * when the translation succeeds, hand it to process_adv_report().
 */
5503 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5505	u8 num_reports = skb->data[0];
5506	void *ptr = &skb->data[1];
5510	while (num_reports--) {
5511		struct hci_ev_le_ext_adv_report *ev = ptr;
5515		evt_type = __le16_to_cpu(ev->evt_type);
5516		legacy_evt_type = ext_evt_type_to_legacy(evt_type);
/* Unknown type combinations are silently skipped */
5517		if (legacy_evt_type != LE_ADV_INVALID) {
5518			process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5519					   ev->bdaddr_type, NULL, 0, ev->rssi,
5520					   ev->data, ev->length);
5523		ptr += sizeof(*ev) + ev->length + 1;
5526	hci_dev_unlock(hdev);
/* Handle the LE Read Remote Features Complete event: cache the remote LE
 * feature page on the connection and, if the connection was still in
 * BT_CONFIG, complete the connection setup — tolerating the "unsupported
 * remote feature" error (0x1a) when the remote lacks slave-initiated
 * features exchange.
 *
 * NOTE(review): the status local declaration, goto labels and closing
 * braces are elided in this extract.
 */
5529 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5530					    struct sk_buff *skb)
5532	struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5533	struct hci_conn *conn;
5535	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5539	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5542		memcpy(conn->features[0], ev->features, 8);
5544		if (conn->state == BT_CONFIG) {
5547			/* If the local controller supports slave-initiated
5548			 * features exchange, but the remote controller does
5549			 * not, then it is possible that the error code 0x1a
5550			 * for unsupported remote feature gets returned.
5552			 * In this specific case, allow the connection to
5553			 * transition into connected state and mark it as
5556			if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5557			    !conn->out && ev->status == 0x1a)
5560				status = ev->status;
5562			conn->state = BT_CONNECTED;
5563			hci_connect_cfm(conn, status);
5564			hci_conn_drop(conn);
5568	hci_dev_unlock(hdev);
/* Handle the LE Long Term Key Request event: look up the stored LTK for
 * the peer, validate EDiv/Rand (both must be zero for SC keys, must match
 * for legacy keys), reply with the key, and handle STK one-shot semantics
 * by deleting the key after use. Sends a negative reply when no valid key
 * is available.
 *
 * NOTE(review): goto labels (not_found), lock calls and closing braces
 * are elided in this extract.
 */
5571 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5573	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5574	struct hci_cp_le_ltk_reply cp;
5575	struct hci_cp_le_ltk_neg_reply neg;
5576	struct hci_conn *conn;
5577	struct smp_ltk *ltk;
5579	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5583	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5587	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5591	if (smp_ltk_is_sc(ltk)) {
5592		/* With SC both EDiv and Rand are set to zero */
5593		if (ev->ediv || ev->rand)
5596		/* For non-SC keys check that EDiv and Rand match */
5597		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key, zero-padding beyond the negotiated encryption size */
5601	memcpy(cp.ltk, ltk->val, ltk->enc_size);
5602	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5603	cp.handle = cpu_to_le16(conn->handle);
5605	conn->pending_sec_level = smp_ltk_sec_level(ltk);
5607	conn->enc_key_size = ltk->enc_size;
5609	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5611	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5612	 * temporary key used to encrypt a connection following
5613	 * pairing. It is used during the Encrypted Session Setup to
5614	 * distribute the keys. Later, security can be re-established
5615	 * using a distributed LTK.
5617	if (ltk->type == SMP_STK) {
5618		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
/* STK is single-use: remove it from the key list once consumed */
5619		list_del_rcu(&ltk->list);
5620		kfree_rcu(ltk, rcu);
5622		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5625	hci_dev_unlock(hdev);
/* Negative reply path: no connection or no usable key found */
5630	neg.handle = ev->handle;
5631	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5632	hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for the
 * given connection handle (reason assignment line is elided in this
 * extract — NOTE(review): confirm against the full source).
 */
5635 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5638	struct hci_cp_le_conn_param_req_neg_reply cp;
5640	cp.handle = cpu_to_le16(handle);
5643	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the LE Remote Connection Parameter Request event: reject when
 * the connection is unknown/not connected or the parameters are out of
 * range; otherwise (as master) record the requested parameters in the
 * stored connection params and notify mgmt, then accept with a positive
 * reply echoing the requested values.
 *
 * NOTE(review): the store_hint declaration/assignment and several braces
 * are elided in this extract.
 */
5647 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5648					     struct sk_buff *skb)
5650	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5651	struct hci_cp_le_conn_param_req_reply cp;
5652	struct hci_conn *hcon;
5653	u16 handle, min, max, latency, timeout;
5655	handle = le16_to_cpu(ev->handle);
5656	min = le16_to_cpu(ev->interval_min);
5657	max = le16_to_cpu(ev->interval_max);
5658	latency = le16_to_cpu(ev->latency);
5659	timeout = le16_to_cpu(ev->timeout);
5661	hcon = hci_conn_hash_lookup_handle(hdev, handle);
5662	if (!hcon || hcon->state != BT_CONNECTED)
5663		return send_conn_param_neg_reply(hdev, handle,
5664						 HCI_ERROR_UNKNOWN_CONN_ID);
5666	if (hci_check_conn_params(min, max, latency, timeout))
5667		return send_conn_param_neg_reply(hdev, handle,
5668						 HCI_ERROR_INVALID_LL_PARAMS);
5670	if (hcon->role == HCI_ROLE_MASTER) {
5671		struct hci_conn_params *params;
5676		params = hci_conn_params_lookup(hdev, &hcon->dst,
/* Remember the peer's preferred parameters for future connections */
5679			params->conn_min_interval = min;
5680			params->conn_max_interval = max;
5681			params->conn_latency = latency;
5682			params->supervision_timeout = timeout;
5688		hci_dev_unlock(hdev);
5690		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5691				    store_hint, min, max, latency, timeout);
/* Accept: echo the requested values back (still little-endian) */
5694	cp.handle = ev->handle;
5695	cp.interval_min = ev->interval_min;
5696	cp.interval_max = ev->interval_max;
5697	cp.latency = ev->latency;
5698	cp.timeout = ev->timeout;
5702	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5705 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5706 struct sk_buff *skb)
5708 u8 num_reports = skb->data[0];
5709 void *ptr = &skb->data[1];
5713 while (num_reports--) {
5714 struct hci_ev_le_direct_adv_info *ev = ptr;
5716 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5717 ev->bdaddr_type, &ev->direct_addr,
5718 ev->direct_addr_type, ev->rssi, NULL, 0);
5723 hci_dev_unlock(hdev);
5726 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5728 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5730 skb_pull(skb, sizeof(*le_ev));
5732 switch (le_ev->subevent) {
5733 case HCI_EV_LE_CONN_COMPLETE:
5734 hci_le_conn_complete_evt(hdev, skb);
5737 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5738 hci_le_conn_update_complete_evt(hdev, skb);
5741 case HCI_EV_LE_ADVERTISING_REPORT:
5742 hci_le_adv_report_evt(hdev, skb);
5745 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5746 hci_le_remote_feat_complete_evt(hdev, skb);
5749 case HCI_EV_LE_LTK_REQ:
5750 hci_le_ltk_request_evt(hdev, skb);
5753 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5754 hci_le_remote_conn_param_req_evt(hdev, skb);
5757 case HCI_EV_LE_DIRECT_ADV_REPORT:
5758 hci_le_direct_adv_report_evt(hdev, skb);
5761 case HCI_EV_LE_EXT_ADV_REPORT:
5762 hci_le_ext_adv_report_evt(hdev, skb);
5765 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5766 hci_le_enh_conn_complete_evt(hdev, skb);
5769 case HCI_EV_LE_EXT_ADV_SET_TERM:
5770 hci_le_ext_adv_term_evt(hdev, skb);
5778 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5779 u8 event, struct sk_buff *skb)
5781 struct hci_ev_cmd_complete *ev;
5782 struct hci_event_hdr *hdr;
5787 if (skb->len < sizeof(*hdr)) {
5788 bt_dev_err(hdev, "too short HCI event");
5792 hdr = (void *) skb->data;
5793 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5796 if (hdr->evt != event)
5801 /* Check if request ended in Command Status - no way to retreive
5802 * any extra parameters in this case.
5804 if (hdr->evt == HCI_EV_CMD_STATUS)
5807 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5808 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5813 if (skb->len < sizeof(*ev)) {
5814 bt_dev_err(hdev, "too short cmd_complete event");
5818 ev = (void *) skb->data;
5819 skb_pull(skb, sizeof(*ev));
5821 if (opcode != __le16_to_cpu(ev->opcode)) {
5822 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5823 __le16_to_cpu(ev->opcode));
5830 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5832 struct hci_event_hdr *hdr = (void *) skb->data;
5833 hci_req_complete_t req_complete = NULL;
5834 hci_req_complete_skb_t req_complete_skb = NULL;
5835 struct sk_buff *orig_skb = NULL;
5836 u8 status = 0, event = hdr->evt, req_evt = 0;
5837 u16 opcode = HCI_OP_NOP;
5839 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5840 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5841 opcode = __le16_to_cpu(cmd_hdr->opcode);
5842 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5847 /* If it looks like we might end up having to call
5848 * req_complete_skb, store a pristine copy of the skb since the
5849 * various handlers may modify the original one through
5850 * skb_pull() calls, etc.
5852 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5853 event == HCI_EV_CMD_COMPLETE)
5854 orig_skb = skb_clone(skb, GFP_KERNEL);
5856 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5859 case HCI_EV_INQUIRY_COMPLETE:
5860 hci_inquiry_complete_evt(hdev, skb);
5863 case HCI_EV_INQUIRY_RESULT:
5864 hci_inquiry_result_evt(hdev, skb);
5867 case HCI_EV_CONN_COMPLETE:
5868 hci_conn_complete_evt(hdev, skb);
5871 case HCI_EV_CONN_REQUEST:
5872 hci_conn_request_evt(hdev, skb);
5875 case HCI_EV_DISCONN_COMPLETE:
5876 hci_disconn_complete_evt(hdev, skb);
5879 case HCI_EV_AUTH_COMPLETE:
5880 hci_auth_complete_evt(hdev, skb);
5883 case HCI_EV_REMOTE_NAME:
5884 hci_remote_name_evt(hdev, skb);
5887 case HCI_EV_ENCRYPT_CHANGE:
5888 hci_encrypt_change_evt(hdev, skb);
5891 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5892 hci_change_link_key_complete_evt(hdev, skb);
5895 case HCI_EV_REMOTE_FEATURES:
5896 hci_remote_features_evt(hdev, skb);
5899 case HCI_EV_CMD_COMPLETE:
5900 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5901 &req_complete, &req_complete_skb);
5904 case HCI_EV_CMD_STATUS:
5905 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5909 case HCI_EV_HARDWARE_ERROR:
5910 hci_hardware_error_evt(hdev, skb);
5913 case HCI_EV_ROLE_CHANGE:
5914 hci_role_change_evt(hdev, skb);
5917 case HCI_EV_NUM_COMP_PKTS:
5918 hci_num_comp_pkts_evt(hdev, skb);
5921 case HCI_EV_MODE_CHANGE:
5922 hci_mode_change_evt(hdev, skb);
5925 case HCI_EV_PIN_CODE_REQ:
5926 hci_pin_code_request_evt(hdev, skb);
5929 case HCI_EV_LINK_KEY_REQ:
5930 hci_link_key_request_evt(hdev, skb);
5933 case HCI_EV_LINK_KEY_NOTIFY:
5934 hci_link_key_notify_evt(hdev, skb);
5937 case HCI_EV_CLOCK_OFFSET:
5938 hci_clock_offset_evt(hdev, skb);
5941 case HCI_EV_PKT_TYPE_CHANGE:
5942 hci_pkt_type_change_evt(hdev, skb);
5945 case HCI_EV_PSCAN_REP_MODE:
5946 hci_pscan_rep_mode_evt(hdev, skb);
5949 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5950 hci_inquiry_result_with_rssi_evt(hdev, skb);
5953 case HCI_EV_REMOTE_EXT_FEATURES:
5954 hci_remote_ext_features_evt(hdev, skb);
5957 case HCI_EV_SYNC_CONN_COMPLETE:
5958 hci_sync_conn_complete_evt(hdev, skb);
5961 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5962 hci_extended_inquiry_result_evt(hdev, skb);
5965 case HCI_EV_KEY_REFRESH_COMPLETE:
5966 hci_key_refresh_complete_evt(hdev, skb);
5969 case HCI_EV_IO_CAPA_REQUEST:
5970 hci_io_capa_request_evt(hdev, skb);
5973 case HCI_EV_IO_CAPA_REPLY:
5974 hci_io_capa_reply_evt(hdev, skb);
5977 case HCI_EV_USER_CONFIRM_REQUEST:
5978 hci_user_confirm_request_evt(hdev, skb);
5981 case HCI_EV_USER_PASSKEY_REQUEST:
5982 hci_user_passkey_request_evt(hdev, skb);
5985 case HCI_EV_USER_PASSKEY_NOTIFY:
5986 hci_user_passkey_notify_evt(hdev, skb);
5989 case HCI_EV_KEYPRESS_NOTIFY:
5990 hci_keypress_notify_evt(hdev, skb);
5993 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5994 hci_simple_pair_complete_evt(hdev, skb);
5997 case HCI_EV_REMOTE_HOST_FEATURES:
5998 hci_remote_host_features_evt(hdev, skb);
6001 case HCI_EV_LE_META:
6002 hci_le_meta_evt(hdev, skb);
6005 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6006 hci_remote_oob_data_request_evt(hdev, skb);
6009 #if IS_ENABLED(CONFIG_BT_HS)
6010 case HCI_EV_CHANNEL_SELECTED:
6011 hci_chan_selected_evt(hdev, skb);
6014 case HCI_EV_PHY_LINK_COMPLETE:
6015 hci_phy_link_complete_evt(hdev, skb);
6018 case HCI_EV_LOGICAL_LINK_COMPLETE:
6019 hci_loglink_complete_evt(hdev, skb);
6022 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6023 hci_disconn_loglink_complete_evt(hdev, skb);
6026 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6027 hci_disconn_phylink_complete_evt(hdev, skb);
6031 case HCI_EV_NUM_COMP_BLOCKS:
6032 hci_num_comp_blocks_evt(hdev, skb);
6036 case HCI_EV_VENDOR_SPECIFIC:
6037 hci_vendor_specific_evt(hdev, skb);
6042 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6047 req_complete(hdev, status, opcode);
6048 } else if (req_complete_skb) {
6049 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6050 kfree_skb(orig_skb);
6053 req_complete_skb(hdev, status, opcode, orig_skb);
6056 kfree_skb(orig_skb);
6058 hdev->stat.evt_rx++;