/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
47 __u8 status = *((__u8 *) skb->data);
49 BT_DBG("%s status 0x%2.2x", hdev->name, status);
51 /* It is possible that we receive Inquiry Complete event right
52 * before we receive Inquiry Cancel Command Complete event, in
53 * which case the latter event should have status of Command
54 * Disallowed (0x0c). This should not be treated as error, since
55 * we actually achieve what Inquiry Cancel wants to achieve,
56 * which is to end the last Inquiry session.
58 if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
59 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
68 clear_bit(HCI_INQUIRY, &hdev->flags);
69 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
70 wake_up_bit(&hdev->flags, HCI_INQUIRY);
73 /* Set discovery state to stopped if we're not doing LE active
76 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
77 hdev->le_scan_type != LE_SCAN_ACTIVE)
78 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
81 hci_conn_check_pending(hdev);
84 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86 __u8 status = *((__u8 *) skb->data);
88 BT_DBG("%s status 0x%2.2x", hdev->name, status);
93 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
96 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
98 __u8 status = *((__u8 *) skb->data);
100 BT_DBG("%s status 0x%2.2x", hdev->name, status);
105 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
107 hci_conn_check_pending(hdev);
110 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
113 BT_DBG("%s", hdev->name);
116 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
118 struct hci_rp_role_discovery *rp = (void *) skb->data;
119 struct hci_conn *conn;
121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 conn->role = rp->role;
132 hci_dev_unlock(hdev);
135 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 struct hci_rp_read_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
140 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
147 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
149 conn->link_policy = __le16_to_cpu(rp->policy);
151 hci_dev_unlock(hdev);
154 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
156 struct hci_rp_write_link_policy *rp = (void *) skb->data;
157 struct hci_conn *conn;
160 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
165 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
171 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
173 conn->link_policy = get_unaligned_le16(sent + 2);
175 hci_dev_unlock(hdev);
178 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
181 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
183 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
188 hdev->link_policy = __le16_to_cpu(rp->policy);
191 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
194 __u8 status = *((__u8 *) skb->data);
197 BT_DBG("%s status 0x%2.2x", hdev->name, status);
202 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
206 hdev->link_policy = get_unaligned_le16(sent);
209 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
211 __u8 status = *((__u8 *) skb->data);
213 BT_DBG("%s status 0x%2.2x", hdev->name, status);
215 clear_bit(HCI_RESET, &hdev->flags);
220 /* Reset all non-persistent flags */
221 hci_dev_clear_volatile_flags(hdev);
223 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
225 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
226 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
228 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
229 hdev->adv_data_len = 0;
231 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
232 hdev->scan_rsp_data_len = 0;
234 hdev->le_scan_type = LE_SCAN_PASSIVE;
236 hdev->ssp_debug_mode = 0;
238 hci_bdaddr_list_clear(&hdev->le_white_list);
241 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
244 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
245 struct hci_cp_read_stored_link_key *sent;
247 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
249 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
253 if (!rp->status && sent->read_all == 0x01) {
254 hdev->stored_max_keys = rp->max_keys;
255 hdev->stored_num_keys = rp->num_keys;
259 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
262 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
264 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
269 if (rp->num_keys <= hdev->stored_num_keys)
270 hdev->stored_num_keys -= rp->num_keys;
272 hdev->stored_num_keys = 0;
275 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
277 __u8 status = *((__u8 *) skb->data);
280 BT_DBG("%s status 0x%2.2x", hdev->name, status);
282 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
288 if (hci_dev_test_flag(hdev, HCI_MGMT))
289 mgmt_set_local_name_complete(hdev, sent, status);
291 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
293 hci_dev_unlock(hdev);
296 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
298 struct hci_rp_read_local_name *rp = (void *) skb->data;
300 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
305 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
306 hci_dev_test_flag(hdev, HCI_CONFIG))
307 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
310 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
312 __u8 status = *((__u8 *) skb->data);
315 BT_DBG("%s status 0x%2.2x", hdev->name, status);
317 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
324 __u8 param = *((__u8 *) sent);
326 if (param == AUTH_ENABLED)
327 set_bit(HCI_AUTH, &hdev->flags);
329 clear_bit(HCI_AUTH, &hdev->flags);
332 if (hci_dev_test_flag(hdev, HCI_MGMT))
333 mgmt_auth_enable_complete(hdev, status);
335 hci_dev_unlock(hdev);
338 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
340 __u8 status = *((__u8 *) skb->data);
344 BT_DBG("%s status 0x%2.2x", hdev->name, status);
349 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
353 param = *((__u8 *) sent);
356 set_bit(HCI_ENCRYPT, &hdev->flags);
358 clear_bit(HCI_ENCRYPT, &hdev->flags);
361 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
363 __u8 status = *((__u8 *) skb->data);
367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
369 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
373 param = *((__u8 *) sent);
378 hdev->discov_timeout = 0;
382 if (param & SCAN_INQUIRY)
383 set_bit(HCI_ISCAN, &hdev->flags);
385 clear_bit(HCI_ISCAN, &hdev->flags);
387 if (param & SCAN_PAGE)
388 set_bit(HCI_PSCAN, &hdev->flags);
390 clear_bit(HCI_PSCAN, &hdev->flags);
393 hci_dev_unlock(hdev);
396 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
398 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
400 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
405 memcpy(hdev->dev_class, rp->dev_class, 3);
407 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
408 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
411 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
413 __u8 status = *((__u8 *) skb->data);
416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
418 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
425 memcpy(hdev->dev_class, sent, 3);
427 if (hci_dev_test_flag(hdev, HCI_MGMT))
428 mgmt_set_class_of_dev_complete(hdev, sent, status);
430 hci_dev_unlock(hdev);
433 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
435 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
438 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
443 setting = __le16_to_cpu(rp->voice_setting);
445 if (hdev->voice_setting == setting)
448 hdev->voice_setting = setting;
450 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
453 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
456 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
459 __u8 status = *((__u8 *) skb->data);
463 BT_DBG("%s status 0x%2.2x", hdev->name, status);
468 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
472 setting = get_unaligned_le16(sent);
474 if (hdev->voice_setting == setting)
477 hdev->voice_setting = setting;
479 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
482 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
485 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
488 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
490 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
495 hdev->num_iac = rp->num_iac;
497 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
500 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
502 __u8 status = *((__u8 *) skb->data);
503 struct hci_cp_write_ssp_mode *sent;
505 BT_DBG("%s status 0x%2.2x", hdev->name, status);
507 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
515 hdev->features[1][0] |= LMP_HOST_SSP;
517 hdev->features[1][0] &= ~LMP_HOST_SSP;
520 if (hci_dev_test_flag(hdev, HCI_MGMT))
521 mgmt_ssp_enable_complete(hdev, sent->mode, status);
524 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
526 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
529 hci_dev_unlock(hdev);
532 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
534 u8 status = *((u8 *) skb->data);
535 struct hci_cp_write_sc_support *sent;
537 BT_DBG("%s status 0x%2.2x", hdev->name, status);
539 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
547 hdev->features[1][0] |= LMP_HOST_SC;
549 hdev->features[1][0] &= ~LMP_HOST_SC;
552 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
554 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
556 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
559 hci_dev_unlock(hdev);
562 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
564 struct hci_rp_read_local_version *rp = (void *) skb->data;
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG)) {
573 hdev->hci_ver = rp->hci_ver;
574 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
575 hdev->lmp_ver = rp->lmp_ver;
576 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
577 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
581 static void hci_cc_read_local_commands(struct hci_dev *hdev,
584 struct hci_rp_read_local_commands *rp = (void *) skb->data;
586 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
591 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
592 hci_dev_test_flag(hdev, HCI_CONFIG))
593 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
596 static void hci_cc_read_local_features(struct hci_dev *hdev,
599 struct hci_rp_read_local_features *rp = (void *) skb->data;
601 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
606 memcpy(hdev->features, rp->features, 8);
608 /* Adjust default settings according to features
609 * supported by device. */
611 if (hdev->features[0][0] & LMP_3SLOT)
612 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
614 if (hdev->features[0][0] & LMP_5SLOT)
615 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
617 if (hdev->features[0][1] & LMP_HV2) {
618 hdev->pkt_type |= (HCI_HV2);
619 hdev->esco_type |= (ESCO_HV2);
622 if (hdev->features[0][1] & LMP_HV3) {
623 hdev->pkt_type |= (HCI_HV3);
624 hdev->esco_type |= (ESCO_HV3);
627 if (lmp_esco_capable(hdev))
628 hdev->esco_type |= (ESCO_EV3);
630 if (hdev->features[0][4] & LMP_EV4)
631 hdev->esco_type |= (ESCO_EV4);
633 if (hdev->features[0][4] & LMP_EV5)
634 hdev->esco_type |= (ESCO_EV5);
636 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
637 hdev->esco_type |= (ESCO_2EV3);
639 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
640 hdev->esco_type |= (ESCO_3EV3);
642 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
643 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
646 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
649 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
651 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
656 if (hdev->max_page < rp->max_page)
657 hdev->max_page = rp->max_page;
659 if (rp->page < HCI_MAX_PAGES)
660 memcpy(hdev->features[rp->page], rp->features, 8);
663 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
666 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
668 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
673 hdev->flow_ctl_mode = rp->mode;
676 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
678 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
680 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
685 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
686 hdev->sco_mtu = rp->sco_mtu;
687 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
688 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
690 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
695 hdev->acl_cnt = hdev->acl_pkts;
696 hdev->sco_cnt = hdev->sco_pkts;
698 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
699 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
702 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
704 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
706 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
711 if (test_bit(HCI_INIT, &hdev->flags))
712 bacpy(&hdev->bdaddr, &rp->bdaddr);
714 if (hci_dev_test_flag(hdev, HCI_SETUP))
715 bacpy(&hdev->setup_addr, &rp->bdaddr);
718 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
721 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
723 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
728 if (test_bit(HCI_INIT, &hdev->flags)) {
729 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
730 hdev->page_scan_window = __le16_to_cpu(rp->window);
734 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
737 u8 status = *((u8 *) skb->data);
738 struct hci_cp_write_page_scan_activity *sent;
740 BT_DBG("%s status 0x%2.2x", hdev->name, status);
745 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
749 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
750 hdev->page_scan_window = __le16_to_cpu(sent->window);
753 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
756 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
758 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
763 if (test_bit(HCI_INIT, &hdev->flags))
764 hdev->page_scan_type = rp->type;
767 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
770 u8 status = *((u8 *) skb->data);
773 BT_DBG("%s status 0x%2.2x", hdev->name, status);
778 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
780 hdev->page_scan_type = *type;
783 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
786 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
788 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
793 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
794 hdev->block_len = __le16_to_cpu(rp->block_len);
795 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
797 hdev->block_cnt = hdev->num_blocks;
799 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
800 hdev->block_cnt, hdev->block_len);
803 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
805 struct hci_rp_read_clock *rp = (void *) skb->data;
806 struct hci_cp_read_clock *cp;
807 struct hci_conn *conn;
809 BT_DBG("%s", hdev->name);
811 if (skb->len < sizeof(*rp))
819 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
823 if (cp->which == 0x00) {
824 hdev->clock = le32_to_cpu(rp->clock);
828 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
830 conn->clock = le32_to_cpu(rp->clock);
831 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
835 hci_dev_unlock(hdev);
838 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
841 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
843 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
848 hdev->amp_status = rp->amp_status;
849 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
850 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
851 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
852 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
853 hdev->amp_type = rp->amp_type;
854 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
855 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
856 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
857 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
860 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
863 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
865 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
870 hdev->inq_tx_power = rp->tx_power;
873 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
875 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
876 struct hci_cp_pin_code_reply *cp;
877 struct hci_conn *conn;
879 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
883 if (hci_dev_test_flag(hdev, HCI_MGMT))
884 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
889 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
893 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
895 conn->pin_length = cp->pin_len;
898 hci_dev_unlock(hdev);
901 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
903 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
905 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
909 if (hci_dev_test_flag(hdev, HCI_MGMT))
910 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
913 hci_dev_unlock(hdev);
916 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
919 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
921 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
926 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
927 hdev->le_pkts = rp->le_max_pkt;
929 hdev->le_cnt = hdev->le_pkts;
931 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
934 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
937 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
939 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
944 memcpy(hdev->le_features, rp->features, 8);
947 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
950 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
952 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
957 hdev->adv_tx_power = rp->tx_power;
960 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
962 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
964 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
968 if (hci_dev_test_flag(hdev, HCI_MGMT))
969 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
972 hci_dev_unlock(hdev);
975 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
978 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
980 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
984 if (hci_dev_test_flag(hdev, HCI_MGMT))
985 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
986 ACL_LINK, 0, rp->status);
988 hci_dev_unlock(hdev);
991 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
993 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
995 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999 if (hci_dev_test_flag(hdev, HCI_MGMT))
1000 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1003 hci_dev_unlock(hdev);
1006 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1007 struct sk_buff *skb)
1009 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1011 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1015 if (hci_dev_test_flag(hdev, HCI_MGMT))
1016 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1017 ACL_LINK, 0, rp->status);
1019 hci_dev_unlock(hdev);
1022 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1023 struct sk_buff *skb)
1025 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1027 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1031 struct sk_buff *skb)
1033 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1035 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1038 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1040 __u8 status = *((__u8 *) skb->data);
1043 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1048 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1054 bacpy(&hdev->random_addr, sent);
1056 hci_dev_unlock(hdev);
1059 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1061 __u8 *sent, status = *((__u8 *) skb->data);
1063 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1068 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1074 /* If we're doing connection initiation as peripheral. Set a
1075 * timeout in case something goes wrong.
1078 struct hci_conn *conn;
1080 hci_dev_set_flag(hdev, HCI_LE_ADV);
1082 conn = hci_lookup_le_connect(hdev);
1084 queue_delayed_work(hdev->workqueue,
1085 &conn->le_conn_timeout,
1086 conn->conn_timeout);
1088 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1091 hci_dev_unlock(hdev);
1094 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1096 struct hci_cp_le_set_scan_param *cp;
1097 __u8 status = *((__u8 *) skb->data);
1099 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1104 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1110 hdev->le_scan_type = cp->type;
1112 hci_dev_unlock(hdev);
1115 static bool has_pending_adv_report(struct hci_dev *hdev)
1117 struct discovery_state *d = &hdev->discovery;
1119 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1122 static void clear_pending_adv_report(struct hci_dev *hdev)
1124 struct discovery_state *d = &hdev->discovery;
1126 bacpy(&d->last_adv_addr, BDADDR_ANY);
1127 d->last_adv_data_len = 0;
1130 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1131 u8 bdaddr_type, s8 rssi, u32 flags,
1134 struct discovery_state *d = &hdev->discovery;
1136 if (len > HCI_MAX_AD_LENGTH)
1139 bacpy(&d->last_adv_addr, bdaddr);
1140 d->last_adv_addr_type = bdaddr_type;
1141 d->last_adv_rssi = rssi;
1142 d->last_adv_flags = flags;
1143 memcpy(d->last_adv_data, data, len);
1144 d->last_adv_data_len = len;
1147 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1148 struct sk_buff *skb)
1150 struct hci_cp_le_set_scan_enable *cp;
1151 __u8 status = *((__u8 *) skb->data);
1153 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1158 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1164 switch (cp->enable) {
1165 case LE_SCAN_ENABLE:
1166 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1167 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1168 clear_pending_adv_report(hdev);
1171 case LE_SCAN_DISABLE:
1172 /* We do this here instead of when setting DISCOVERY_STOPPED
1173 * since the latter would potentially require waiting for
1174 * inquiry to stop too.
1176 if (has_pending_adv_report(hdev)) {
1177 struct discovery_state *d = &hdev->discovery;
1179 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1180 d->last_adv_addr_type, NULL,
1181 d->last_adv_rssi, d->last_adv_flags,
1183 d->last_adv_data_len, NULL, 0);
1186 /* Cancel this timer so that we don't try to disable scanning
1187 * when it's already disabled.
1189 cancel_delayed_work(&hdev->le_scan_disable);
1191 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1193 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1194 * interrupted scanning due to a connect request. Mark
1195 * therefore discovery as stopped. If this was not
1196 * because of a connect request advertising might have
1197 * been disabled because of active scanning, so
1198 * re-enable it again if necessary.
1200 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1201 #ifndef TIZEN_BT /* The below line is kernel bug. */
1202 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1204 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1206 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1207 hdev->discovery.state == DISCOVERY_FINDING)
1208 hci_req_reenable_advertising(hdev);
1213 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1217 hci_dev_unlock(hdev);
1220 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1221 struct sk_buff *skb)
1223 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1225 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1230 hdev->le_white_list_size = rp->size;
1233 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1234 struct sk_buff *skb)
1236 __u8 status = *((__u8 *) skb->data);
1238 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1243 hci_bdaddr_list_clear(&hdev->le_white_list);
1246 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1247 struct sk_buff *skb)
1249 struct hci_cp_le_add_to_white_list *sent;
1250 __u8 status = *((__u8 *) skb->data);
1252 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1257 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1261 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1265 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1266 struct sk_buff *skb)
1268 struct hci_cp_le_del_from_white_list *sent;
1269 __u8 status = *((__u8 *) skb->data);
1271 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1276 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1280 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1284 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1285 struct sk_buff *skb)
1287 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1289 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1294 memcpy(hdev->le_states, rp->le_states, 8);
1297 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1298 struct sk_buff *skb)
1300 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1302 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1307 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1308 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1311 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1312 struct sk_buff *skb)
1314 struct hci_cp_le_write_def_data_len *sent;
1315 __u8 status = *((__u8 *) skb->data);
1317 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1322 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1326 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1327 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1330 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1331 struct sk_buff *skb)
1333 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1335 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1340 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1341 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1342 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1343 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1346 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1347 struct sk_buff *skb)
1349 struct hci_cp_write_le_host_supported *sent;
1350 __u8 status = *((__u8 *) skb->data);
1352 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1357 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1364 hdev->features[1][0] |= LMP_HOST_LE;
1365 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
1367 hdev->features[1][0] &= ~LMP_HOST_LE;
1368 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1369 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1373 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1375 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1377 hci_dev_unlock(hdev);
1380 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1382 struct hci_cp_le_set_adv_param *cp;
1383 u8 status = *((u8 *) skb->data);
1385 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1390 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1395 hdev->adv_addr_type = cp->own_address_type;
1396 hci_dev_unlock(hdev);
1400 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1401 struct sk_buff *skb)
1403 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1405 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1406 hdev->name, rp->status, rp->le_ext_opcode);
1408 mgmt_enable_rssi_cc(hdev, rp, rp->status);
1411 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1412 struct sk_buff *skb)
1414 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1416 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1417 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1419 mgmt_raw_rssi_response(hdev, rp, rp->status);
1422 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
1423 struct sk_buff *skb)
1425 struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
1426 __u8 event_le_ext_sub_code;
1428 BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
1429 LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
1431 skb_pull(skb, sizeof(*ev));
1432 event_le_ext_sub_code = ev->event_le_ext_sub_code;
1434 switch (event_le_ext_sub_code) {
1435 case LE_RSSI_LINK_ALERT:
1436 BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X",
1437 LE_RSSI_LINK_ALERT);
1438 mgmt_rssi_alert_evt(hdev, skb);
1446 static void hci_vendor_specific_evt(struct hci_dev *hdev, struct sk_buff *skb)
1448 struct hci_ev_vendor_specific *ev = (void *)skb->data;
1449 __u8 event_sub_code;
1451 BT_DBG("hci_vendor_specific_evt");
1453 skb_pull(skb, sizeof(*ev));
1454 event_sub_code = ev->event_sub_code;
1456 switch (event_sub_code) {
1457 case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
1458 hci_vendor_specific_group_ext_evt(hdev, skb);
1467 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1469 struct hci_rp_read_rssi *rp = (void *) skb->data;
1470 struct hci_conn *conn;
1472 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1479 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1481 conn->rssi = rp->rssi;
1483 hci_dev_unlock(hdev);
1486 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1488 struct hci_cp_read_tx_power *sent;
1489 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1490 struct hci_conn *conn;
1492 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1497 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1503 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1507 switch (sent->type) {
1509 conn->tx_power = rp->tx_power;
1512 conn->max_tx_power = rp->tx_power;
1517 hci_dev_unlock(hdev);
1520 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1522 u8 status = *((u8 *) skb->data);
1525 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1530 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1532 hdev->ssp_debug_mode = *mode;
1535 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1537 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1540 hci_conn_check_pending(hdev);
1544 set_bit(HCI_INQUIRY, &hdev->flags);
1547 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1549 struct hci_cp_create_conn *cp;
1550 struct hci_conn *conn;
1552 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1554 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1560 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1562 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1565 if (conn && conn->state == BT_CONNECT) {
1566 if (status != 0x0c || conn->attempt > 2) {
1567 conn->state = BT_CLOSED;
1568 hci_connect_cfm(conn, status);
1571 conn->state = BT_CONNECT2;
1575 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1578 BT_ERR("No memory for new connection");
1582 hci_dev_unlock(hdev);
1585 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1587 struct hci_cp_add_sco *cp;
1588 struct hci_conn *acl, *sco;
1591 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1596 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1600 handle = __le16_to_cpu(cp->handle);
1602 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1606 acl = hci_conn_hash_lookup_handle(hdev, handle);
1610 sco->state = BT_CLOSED;
1612 hci_connect_cfm(sco, status);
1617 hci_dev_unlock(hdev);
1620 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1622 struct hci_cp_auth_requested *cp;
1623 struct hci_conn *conn;
1625 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1630 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1636 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1638 if (conn->state == BT_CONFIG) {
1639 hci_connect_cfm(conn, status);
1640 hci_conn_drop(conn);
1644 hci_dev_unlock(hdev);
1647 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1649 struct hci_cp_set_conn_encrypt *cp;
1650 struct hci_conn *conn;
1652 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1657 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1663 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1665 if (conn->state == BT_CONFIG) {
1666 hci_connect_cfm(conn, status);
1667 hci_conn_drop(conn);
1671 hci_dev_unlock(hdev);
1674 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1675 struct hci_conn *conn)
1677 if (conn->state != BT_CONFIG || !conn->out)
1680 if (conn->pending_sec_level == BT_SECURITY_SDP)
1683 /* Only request authentication for SSP connections or non-SSP
1684 * devices with sec_level MEDIUM or HIGH or if MITM protection
1687 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1688 conn->pending_sec_level != BT_SECURITY_FIPS &&
1689 conn->pending_sec_level != BT_SECURITY_HIGH &&
1690 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1696 static int hci_resolve_name(struct hci_dev *hdev,
1697 struct inquiry_entry *e)
1699 struct hci_cp_remote_name_req cp;
1701 memset(&cp, 0, sizeof(cp));
1703 bacpy(&cp.bdaddr, &e->data.bdaddr);
1704 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1705 cp.pscan_mode = e->data.pscan_mode;
1706 cp.clock_offset = e->data.clock_offset;
1708 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1711 static bool hci_resolve_next_name(struct hci_dev *hdev)
1713 struct discovery_state *discov = &hdev->discovery;
1714 struct inquiry_entry *e;
1716 if (list_empty(&discov->resolve))
1719 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1723 if (hci_resolve_name(hdev, e) == 0) {
1724 e->name_state = NAME_PENDING;
1731 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1732 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1734 struct discovery_state *discov = &hdev->discovery;
1735 struct inquiry_entry *e;
1738 /* Update the mgmt connected state if necessary. Be careful with
1739 * conn objects that exist but are not (yet) connected however.
1740 * Only those in BT_CONFIG or BT_CONNECTED states can be
1741 * considered connected.
1744 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
1745 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1746 mgmt_device_connected(hdev, conn, 0, name, name_len);
1748 mgmt_device_name_update(hdev, bdaddr, name, name_len);
1752 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
1753 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1754 mgmt_device_connected(hdev, conn, 0, name, name_len);
1757 if (discov->state == DISCOVERY_STOPPED)
1760 if (discov->state == DISCOVERY_STOPPING)
1761 goto discov_complete;
1763 if (discov->state != DISCOVERY_RESOLVING)
1766 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1767 /* If the device was not found in a list of found devices names of which
1768 * are pending. there is no need to continue resolving a next name as it
1769 * will be done upon receiving another Remote Name Request Complete
1776 e->name_state = NAME_KNOWN;
1777 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1778 e->data.rssi, name, name_len);
1780 e->name_state = NAME_NOT_KNOWN;
1783 if (hci_resolve_next_name(hdev))
1787 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1790 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1792 struct hci_cp_remote_name_req *cp;
1793 struct hci_conn *conn;
1795 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1797 /* If successful wait for the name req complete event before
1798 * checking for the need to do authentication */
1802 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1808 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1810 if (hci_dev_test_flag(hdev, HCI_MGMT))
1811 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1816 if (!hci_outgoing_auth_needed(hdev, conn))
1819 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1820 struct hci_cp_auth_requested auth_cp;
1822 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1824 auth_cp.handle = __cpu_to_le16(conn->handle);
1825 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1826 sizeof(auth_cp), &auth_cp);
1830 hci_dev_unlock(hdev);
1833 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1835 struct hci_cp_read_remote_features *cp;
1836 struct hci_conn *conn;
1838 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1843 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1849 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1851 if (conn->state == BT_CONFIG) {
1852 hci_connect_cfm(conn, status);
1853 hci_conn_drop(conn);
1857 hci_dev_unlock(hdev);
1860 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1862 struct hci_cp_read_remote_ext_features *cp;
1863 struct hci_conn *conn;
1865 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1870 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1876 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1878 if (conn->state == BT_CONFIG) {
1879 hci_connect_cfm(conn, status);
1880 hci_conn_drop(conn);
1884 hci_dev_unlock(hdev);
1887 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1889 struct hci_cp_setup_sync_conn *cp;
1890 struct hci_conn *acl, *sco;
1893 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1898 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1902 handle = __le16_to_cpu(cp->handle);
1904 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1908 acl = hci_conn_hash_lookup_handle(hdev, handle);
1912 sco->state = BT_CLOSED;
1914 hci_connect_cfm(sco, status);
1919 hci_dev_unlock(hdev);
1922 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1924 struct hci_cp_sniff_mode *cp;
1925 struct hci_conn *conn;
1927 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1932 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1938 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1940 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1942 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1943 hci_sco_setup(conn, status);
1946 hci_dev_unlock(hdev);
1949 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1951 struct hci_cp_exit_sniff_mode *cp;
1952 struct hci_conn *conn;
1954 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1959 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1965 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1967 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1969 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1970 hci_sco_setup(conn, status);
1973 hci_dev_unlock(hdev);
1976 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1978 struct hci_cp_disconnect *cp;
1979 struct hci_conn *conn;
1984 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1990 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1992 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1993 conn->dst_type, status);
1995 hci_dev_unlock(hdev);
1998 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2000 struct hci_cp_le_create_conn *cp;
2001 struct hci_conn *conn;
2003 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2005 /* All connection failure handling is taken care of by the
2006 * hci_le_conn_failed function which is triggered by the HCI
2007 * request completion callbacks used for connecting.
2012 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2018 conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
2019 cp->peer_addr_type);
2023 /* Store the initiator and responder address information which
2024 * is needed for SMP. These values will not change during the
2025 * lifetime of the connection.
2027 conn->init_addr_type = cp->own_address_type;
2028 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
2029 bacpy(&conn->init_addr, &hdev->random_addr);
2031 bacpy(&conn->init_addr, &hdev->bdaddr);
2033 conn->resp_addr_type = cp->peer_addr_type;
2034 bacpy(&conn->resp_addr, &cp->peer_addr);
2036 /* We don't want the connection attempt to stick around
2037 * indefinitely since LE doesn't have a page timeout concept
2038 * like BR/EDR. Set a timer for any connection that doesn't use
2039 * the white list for connecting.
2041 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
2042 queue_delayed_work(conn->hdev->workqueue,
2043 &conn->le_conn_timeout,
2044 conn->conn_timeout);
2047 hci_dev_unlock(hdev);
2050 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2052 struct hci_cp_le_read_remote_features *cp;
2053 struct hci_conn *conn;
2055 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2060 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2066 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2068 if (conn->state == BT_CONFIG) {
2069 hci_connect_cfm(conn, status);
2070 hci_conn_drop(conn);
2074 hci_dev_unlock(hdev);
2077 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2079 struct hci_cp_le_start_enc *cp;
2080 struct hci_conn *conn;
2082 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2089 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2093 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2097 if (conn->state != BT_CONNECTED)
2100 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2101 hci_conn_drop(conn);
2104 hci_dev_unlock(hdev);
2107 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2109 struct hci_cp_switch_role *cp;
2110 struct hci_conn *conn;
2112 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2117 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2123 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2125 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2127 hci_dev_unlock(hdev);
2130 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2132 __u8 status = *((__u8 *) skb->data);
2133 struct discovery_state *discov = &hdev->discovery;
2134 struct inquiry_entry *e;
2136 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2138 hci_conn_check_pending(hdev);
2140 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2143 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2144 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2146 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2151 if (discov->state != DISCOVERY_FINDING)
2154 if (list_empty(&discov->resolve)) {
2155 /* When BR/EDR inquiry is active and no LE scanning is in
2156 * progress, then change discovery state to indicate completion.
2158 * When running LE scanning and BR/EDR inquiry simultaneously
2159 * and the LE scan already finished, then change the discovery
2160 * state to indicate completion.
2162 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2163 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2164 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2168 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2169 if (e && hci_resolve_name(hdev, e) == 0) {
2170 e->name_state = NAME_PENDING;
2171 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2173 /* When BR/EDR inquiry is active and no LE scanning is in
2174 * progress, then change discovery state to indicate completion.
2176 * When running LE scanning and BR/EDR inquiry simultaneously
2177 * and the LE scan already finished, then change the discovery
2178 * state to indicate completion.
2180 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2181 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2182 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2186 hci_dev_unlock(hdev);
2189 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2191 struct inquiry_data data;
2192 struct inquiry_info *info = (void *) (skb->data + 1);
2193 int num_rsp = *((__u8 *) skb->data);
2195 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2197 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
2200 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2205 for (; num_rsp; num_rsp--, info++) {
2208 bacpy(&data.bdaddr, &info->bdaddr);
2209 data.pscan_rep_mode = info->pscan_rep_mode;
2210 data.pscan_period_mode = info->pscan_period_mode;
2211 data.pscan_mode = info->pscan_mode;
2212 memcpy(data.dev_class, info->dev_class, 3);
2213 data.clock_offset = info->clock_offset;
2214 data.rssi = HCI_RSSI_INVALID;
2215 data.ssp_mode = 0x00;
2217 flags = hci_inquiry_cache_update(hdev, &data, false);
2219 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2220 info->dev_class, HCI_RSSI_INVALID,
2221 flags, NULL, 0, NULL, 0);
2224 hci_dev_unlock(hdev);
2227 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2229 struct hci_ev_conn_complete *ev = (void *) skb->data;
2230 struct hci_conn *conn;
2232 BT_DBG("%s", hdev->name);
2236 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2238 if (ev->link_type != SCO_LINK)
2241 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2245 conn->type = SCO_LINK;
2249 conn->handle = __le16_to_cpu(ev->handle);
2251 if (conn->type == ACL_LINK) {
2252 conn->state = BT_CONFIG;
2253 hci_conn_hold(conn);
2255 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2256 !hci_find_link_key(hdev, &ev->bdaddr))
2257 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2259 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2261 conn->state = BT_CONNECTED;
2263 hci_debugfs_create_conn(conn);
2264 hci_conn_add_sysfs(conn);
2266 if (test_bit(HCI_AUTH, &hdev->flags))
2267 set_bit(HCI_CONN_AUTH, &conn->flags);
2269 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2270 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2272 /* Get remote features */
2273 if (conn->type == ACL_LINK) {
2274 struct hci_cp_read_remote_features cp;
2275 cp.handle = ev->handle;
2276 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2279 hci_req_update_scan(hdev);
2282 /* Set packet type for incoming connection */
2283 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2284 struct hci_cp_change_conn_ptype cp;
2285 cp.handle = ev->handle;
2286 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2287 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2291 conn->state = BT_CLOSED;
2292 if (conn->type == ACL_LINK)
2293 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2294 conn->dst_type, ev->status);
2297 if (conn->type == ACL_LINK)
2298 hci_sco_setup(conn, ev->status);
2301 hci_connect_cfm(conn, ev->status);
2303 } else if (ev->link_type != ACL_LINK)
2304 hci_connect_cfm(conn, ev->status);
2307 hci_dev_unlock(hdev);
2309 hci_conn_check_pending(hdev);
2312 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2314 struct hci_cp_reject_conn_req cp;
2316 bacpy(&cp.bdaddr, bdaddr);
2317 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2318 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2321 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2323 struct hci_ev_conn_request *ev = (void *) skb->data;
2324 int mask = hdev->link_mode;
2325 struct inquiry_entry *ie;
2326 struct hci_conn *conn;
2329 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2332 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2335 if (!(mask & HCI_LM_ACCEPT)) {
2336 hci_reject_conn(hdev, &ev->bdaddr);
2340 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2342 hci_reject_conn(hdev, &ev->bdaddr);
2346 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2347 * connection. These features are only touched through mgmt so
2348 * only do the checks if HCI_MGMT is set.
2350 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2351 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2352 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2354 hci_reject_conn(hdev, &ev->bdaddr);
2358 /* Connection accepted */
2362 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2364 memcpy(ie->data.dev_class, ev->dev_class, 3);
2366 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2369 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2372 BT_ERR("No memory for new connection");
2373 hci_dev_unlock(hdev);
2378 memcpy(conn->dev_class, ev->dev_class, 3);
2380 hci_dev_unlock(hdev);
2382 if (ev->link_type == ACL_LINK ||
2383 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2384 struct hci_cp_accept_conn_req cp;
2385 conn->state = BT_CONNECT;
2387 bacpy(&cp.bdaddr, &ev->bdaddr);
2389 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2390 cp.role = 0x00; /* Become master */
2392 cp.role = 0x01; /* Remain slave */
2394 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2395 } else if (!(flags & HCI_PROTO_DEFER)) {
2396 struct hci_cp_accept_sync_conn_req cp;
2397 conn->state = BT_CONNECT;
2399 bacpy(&cp.bdaddr, &ev->bdaddr);
2400 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2402 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2403 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2404 cp.max_latency = cpu_to_le16(0xffff);
2405 cp.content_format = cpu_to_le16(hdev->voice_setting);
2406 cp.retrans_effort = 0xff;
2408 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2411 conn->state = BT_CONNECT2;
2412 hci_connect_cfm(conn, 0);
2416 static u8 hci_to_mgmt_reason(u8 err)
2419 case HCI_ERROR_CONNECTION_TIMEOUT:
2420 return MGMT_DEV_DISCONN_TIMEOUT;
2421 case HCI_ERROR_REMOTE_USER_TERM:
2422 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2423 case HCI_ERROR_REMOTE_POWER_OFF:
2424 return MGMT_DEV_DISCONN_REMOTE;
2425 case HCI_ERROR_LOCAL_HOST_TERM:
2426 return MGMT_DEV_DISCONN_LOCAL_HOST;
2428 return MGMT_DEV_DISCONN_UNKNOWN;
2432 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2434 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2436 struct hci_conn_params *params;
2437 struct hci_conn *conn;
2438 bool mgmt_connected;
2441 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2445 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2450 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2451 conn->dst_type, ev->status);
2455 conn->state = BT_CLOSED;
2457 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2459 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2460 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2462 reason = hci_to_mgmt_reason(ev->reason);
2464 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2465 reason, mgmt_connected);
2467 if (conn->type == ACL_LINK) {
2468 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2469 hci_remove_link_key(hdev, &conn->dst);
2471 hci_req_update_scan(hdev);
2474 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2476 switch (params->auto_connect) {
2477 case HCI_AUTO_CONN_LINK_LOSS:
2478 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2482 case HCI_AUTO_CONN_DIRECT:
2483 case HCI_AUTO_CONN_ALWAYS:
2484 list_del_init(¶ms->action);
2485 list_add(¶ms->action, &hdev->pend_le_conns);
2486 hci_update_background_scan(hdev);
2496 hci_disconn_cfm(conn, ev->reason);
2499 /* Re-enable advertising if necessary, since it might
2500 * have been disabled by the connection. From the
2501 * HCI_LE_Set_Advertise_Enable command description in
2502 * the core specification (v4.0):
2503 * "The Controller shall continue advertising until the Host
2504 * issues an LE_Set_Advertise_Enable command with
2505 * Advertising_Enable set to 0x00 (Advertising is disabled)
2506 * or until a connection is created or until the Advertising
2507 * is timed out due to Directed Advertising."
2509 if (type == LE_LINK)
2510 hci_req_reenable_advertising(hdev);
2513 hci_dev_unlock(hdev);
2516 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2518 struct hci_ev_auth_complete *ev = (void *) skb->data;
2519 struct hci_conn *conn;
2521 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2525 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2530 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2532 if (!hci_conn_ssp_enabled(conn) &&
2533 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2534 BT_INFO("re-auth of legacy device is not possible.");
2536 set_bit(HCI_CONN_AUTH, &conn->flags);
2537 conn->sec_level = conn->pending_sec_level;
2540 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2541 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2543 mgmt_auth_failed(conn, ev->status);
2546 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2547 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2549 if (conn->state == BT_CONFIG) {
2550 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2551 struct hci_cp_set_conn_encrypt cp;
2552 cp.handle = ev->handle;
2554 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2557 conn->state = BT_CONNECTED;
2558 hci_connect_cfm(conn, ev->status);
2559 hci_conn_drop(conn);
2562 hci_auth_cfm(conn, ev->status);
2564 hci_conn_hold(conn);
2565 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2566 hci_conn_drop(conn);
2569 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2571 struct hci_cp_set_conn_encrypt cp;
2572 cp.handle = ev->handle;
2574 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2577 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2578 hci_encrypt_cfm(conn, ev->status);
2583 hci_dev_unlock(hdev);
2586 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2588 struct hci_ev_remote_name *ev = (void *) skb->data;
2589 struct hci_conn *conn;
2591 BT_DBG("%s", hdev->name);
2593 hci_conn_check_pending(hdev);
2597 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2599 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2602 if (ev->status == 0)
2603 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2604 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2606 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2612 if (!hci_outgoing_auth_needed(hdev, conn))
2615 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2616 struct hci_cp_auth_requested cp;
2618 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2620 cp.handle = __cpu_to_le16(conn->handle);
2621 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2625 hci_dev_unlock(hdev);
2628 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2629 u16 opcode, struct sk_buff *skb)
2631 const struct hci_rp_read_enc_key_size *rp;
2632 struct hci_conn *conn;
2635 BT_DBG("%s status 0x%02x", hdev->name, status);
2637 if (!skb || skb->len < sizeof(*rp)) {
2638 BT_ERR("%s invalid HCI Read Encryption Key Size response",
2643 rp = (void *)skb->data;
2644 handle = le16_to_cpu(rp->handle);
2648 conn = hci_conn_hash_lookup_handle(hdev, handle);
2652 /* If we fail to read the encryption key size, assume maximum
2653 * (which is the same we do also when this HCI command isn't
2657 BT_ERR("%s failed to read key size for handle %u", hdev->name,
2659 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2661 conn->enc_key_size = rp->key_size;
2664 hci_encrypt_cfm(conn, 0);
2667 hci_dev_unlock(hdev);
2670 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2672 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2673 struct hci_conn *conn;
2675 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2679 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2685 /* Encryption implies authentication */
2686 set_bit(HCI_CONN_AUTH, &conn->flags);
2687 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2688 conn->sec_level = conn->pending_sec_level;
2690 /* P-256 authentication key implies FIPS */
2691 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2692 set_bit(HCI_CONN_FIPS, &conn->flags);
2694 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2695 conn->type == LE_LINK)
2696 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2698 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2699 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2703 /* We should disregard the current RPA and generate a new one
2704 * whenever the encryption procedure fails.
2706 if (ev->status && conn->type == LE_LINK)
2707 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
2709 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2711 /* Check link security requirements are met */
2712 if (!hci_conn_check_link_mode(conn))
2713 ev->status = HCI_ERROR_AUTH_FAILURE;
2715 if (ev->status && conn->state == BT_CONNECTED) {
2716 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2717 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2719 /* Notify upper layers so they can cleanup before
2722 hci_encrypt_cfm(conn, ev->status);
2723 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2724 hci_conn_drop(conn);
2728 /* Try reading the encryption key size for encrypted ACL links */
2729 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
2730 struct hci_cp_read_enc_key_size cp;
2731 struct hci_request req;
2733 /* Only send HCI_Read_Encryption_Key_Size if the
2734 * controller really supports it. If it doesn't, assume
2735 * the default size (16).
2737 if (!(hdev->commands[20] & 0x10)) {
2738 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2742 hci_req_init(&req, hdev);
2744 cp.handle = cpu_to_le16(conn->handle);
2745 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
2747 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
2748 BT_ERR("Sending HCI Read Encryption Key Size failed");
2749 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2757 hci_encrypt_cfm(conn, ev->status);
2760 hci_dev_unlock(hdev);
2763 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2764 struct sk_buff *skb)
2766 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2767 struct hci_conn *conn;
2769 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2773 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2776 set_bit(HCI_CONN_SECURE, &conn->flags);
2778 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2780 hci_key_change_cfm(conn, ev->status);
2783 hci_dev_unlock(hdev);
2786 static void hci_remote_features_evt(struct hci_dev *hdev,
2787 struct sk_buff *skb)
2789 struct hci_ev_remote_features *ev = (void *) skb->data;
2790 struct hci_conn *conn;
2792 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2796 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2801 memcpy(conn->features[0], ev->features, 8);
2803 if (conn->state != BT_CONFIG)
2806 if (!ev->status && lmp_ext_feat_capable(hdev) &&
2807 lmp_ext_feat_capable(conn)) {
2808 struct hci_cp_read_remote_ext_features cp;
2809 cp.handle = ev->handle;
2811 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2816 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2817 struct hci_cp_remote_name_req cp;
2818 memset(&cp, 0, sizeof(cp));
2819 bacpy(&cp.bdaddr, &conn->dst);
2820 cp.pscan_rep_mode = 0x02;
2821 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2822 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2823 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2825 if (!hci_outgoing_auth_needed(hdev, conn)) {
2826 conn->state = BT_CONNECTED;
2827 hci_connect_cfm(conn, ev->status);
2828 hci_conn_drop(conn);
2832 hci_dev_unlock(hdev);
2835 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
2836 u16 *opcode, u8 *status,
2837 hci_req_complete_t *req_complete,
2838 hci_req_complete_skb_t *req_complete_skb)
2840 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2842 *opcode = __le16_to_cpu(ev->opcode);
2843 *status = skb->data[sizeof(*ev)];
2845 skb_pull(skb, sizeof(*ev));
2848 case HCI_OP_INQUIRY_CANCEL:
2849 hci_cc_inquiry_cancel(hdev, skb, status);
2852 case HCI_OP_PERIODIC_INQ:
2853 hci_cc_periodic_inq(hdev, skb);
2856 case HCI_OP_EXIT_PERIODIC_INQ:
2857 hci_cc_exit_periodic_inq(hdev, skb);
2860 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2861 hci_cc_remote_name_req_cancel(hdev, skb);
2864 case HCI_OP_ROLE_DISCOVERY:
2865 hci_cc_role_discovery(hdev, skb);
2868 case HCI_OP_READ_LINK_POLICY:
2869 hci_cc_read_link_policy(hdev, skb);
2872 case HCI_OP_WRITE_LINK_POLICY:
2873 hci_cc_write_link_policy(hdev, skb);
2876 case HCI_OP_READ_DEF_LINK_POLICY:
2877 hci_cc_read_def_link_policy(hdev, skb);
2880 case HCI_OP_WRITE_DEF_LINK_POLICY:
2881 hci_cc_write_def_link_policy(hdev, skb);
2885 hci_cc_reset(hdev, skb);
2888 case HCI_OP_READ_STORED_LINK_KEY:
2889 hci_cc_read_stored_link_key(hdev, skb);
2892 case HCI_OP_DELETE_STORED_LINK_KEY:
2893 hci_cc_delete_stored_link_key(hdev, skb);
2896 case HCI_OP_WRITE_LOCAL_NAME:
2897 hci_cc_write_local_name(hdev, skb);
2900 case HCI_OP_READ_LOCAL_NAME:
2901 hci_cc_read_local_name(hdev, skb);
2904 case HCI_OP_WRITE_AUTH_ENABLE:
2905 hci_cc_write_auth_enable(hdev, skb);
2908 case HCI_OP_WRITE_ENCRYPT_MODE:
2909 hci_cc_write_encrypt_mode(hdev, skb);
2912 case HCI_OP_WRITE_SCAN_ENABLE:
2913 hci_cc_write_scan_enable(hdev, skb);
2916 case HCI_OP_READ_CLASS_OF_DEV:
2917 hci_cc_read_class_of_dev(hdev, skb);
2920 case HCI_OP_WRITE_CLASS_OF_DEV:
2921 hci_cc_write_class_of_dev(hdev, skb);
2924 case HCI_OP_READ_VOICE_SETTING:
2925 hci_cc_read_voice_setting(hdev, skb);
2928 case HCI_OP_WRITE_VOICE_SETTING:
2929 hci_cc_write_voice_setting(hdev, skb);
2932 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2933 hci_cc_read_num_supported_iac(hdev, skb);
2936 case HCI_OP_WRITE_SSP_MODE:
2937 hci_cc_write_ssp_mode(hdev, skb);
2940 case HCI_OP_WRITE_SC_SUPPORT:
2941 hci_cc_write_sc_support(hdev, skb);
2944 case HCI_OP_READ_LOCAL_VERSION:
2945 hci_cc_read_local_version(hdev, skb);
2948 case HCI_OP_READ_LOCAL_COMMANDS:
2949 hci_cc_read_local_commands(hdev, skb);
2952 case HCI_OP_READ_LOCAL_FEATURES:
2953 hci_cc_read_local_features(hdev, skb);
2956 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2957 hci_cc_read_local_ext_features(hdev, skb);
2960 case HCI_OP_READ_BUFFER_SIZE:
2961 hci_cc_read_buffer_size(hdev, skb);
2964 case HCI_OP_READ_BD_ADDR:
2965 hci_cc_read_bd_addr(hdev, skb);
2968 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2969 hci_cc_read_page_scan_activity(hdev, skb);
2972 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2973 hci_cc_write_page_scan_activity(hdev, skb);
2976 case HCI_OP_READ_PAGE_SCAN_TYPE:
2977 hci_cc_read_page_scan_type(hdev, skb);
2980 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2981 hci_cc_write_page_scan_type(hdev, skb);
2984 case HCI_OP_READ_DATA_BLOCK_SIZE:
2985 hci_cc_read_data_block_size(hdev, skb);
2988 case HCI_OP_READ_FLOW_CONTROL_MODE:
2989 hci_cc_read_flow_control_mode(hdev, skb);
2992 case HCI_OP_READ_LOCAL_AMP_INFO:
2993 hci_cc_read_local_amp_info(hdev, skb);
2996 case HCI_OP_READ_CLOCK:
2997 hci_cc_read_clock(hdev, skb);
3000 case HCI_OP_READ_INQ_RSP_TX_POWER:
3001 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3004 case HCI_OP_PIN_CODE_REPLY:
3005 hci_cc_pin_code_reply(hdev, skb);
3008 case HCI_OP_PIN_CODE_NEG_REPLY:
3009 hci_cc_pin_code_neg_reply(hdev, skb);
3012 case HCI_OP_READ_LOCAL_OOB_DATA:
3013 hci_cc_read_local_oob_data(hdev, skb);
3016 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3017 hci_cc_read_local_oob_ext_data(hdev, skb);
3020 case HCI_OP_LE_READ_BUFFER_SIZE:
3021 hci_cc_le_read_buffer_size(hdev, skb);
3024 case HCI_OP_LE_READ_LOCAL_FEATURES:
3025 hci_cc_le_read_local_features(hdev, skb);
3028 case HCI_OP_LE_READ_ADV_TX_POWER:
3029 hci_cc_le_read_adv_tx_power(hdev, skb);
3032 case HCI_OP_USER_CONFIRM_REPLY:
3033 hci_cc_user_confirm_reply(hdev, skb);
3036 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3037 hci_cc_user_confirm_neg_reply(hdev, skb);
3040 case HCI_OP_USER_PASSKEY_REPLY:
3041 hci_cc_user_passkey_reply(hdev, skb);
3044 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3045 hci_cc_user_passkey_neg_reply(hdev, skb);
3048 case HCI_OP_LE_SET_RANDOM_ADDR:
3049 hci_cc_le_set_random_addr(hdev, skb);
3052 case HCI_OP_LE_SET_ADV_ENABLE:
3053 hci_cc_le_set_adv_enable(hdev, skb);
3056 case HCI_OP_LE_SET_SCAN_PARAM:
3057 hci_cc_le_set_scan_param(hdev, skb);
3060 case HCI_OP_LE_SET_SCAN_ENABLE:
3061 hci_cc_le_set_scan_enable(hdev, skb);
3064 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3065 hci_cc_le_read_white_list_size(hdev, skb);
3068 case HCI_OP_LE_CLEAR_WHITE_LIST:
3069 hci_cc_le_clear_white_list(hdev, skb);
3072 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3073 hci_cc_le_add_to_white_list(hdev, skb);
3076 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3077 hci_cc_le_del_from_white_list(hdev, skb);
3080 case HCI_OP_LE_READ_SUPPORTED_STATES:
3081 hci_cc_le_read_supported_states(hdev, skb);
3084 case HCI_OP_LE_READ_DEF_DATA_LEN:
3085 hci_cc_le_read_def_data_len(hdev, skb);
3088 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3089 hci_cc_le_write_def_data_len(hdev, skb);
3092 case HCI_OP_LE_READ_MAX_DATA_LEN:
3093 hci_cc_le_read_max_data_len(hdev, skb);
3096 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3097 hci_cc_write_le_host_supported(hdev, skb);
3100 case HCI_OP_LE_SET_ADV_PARAM:
3101 hci_cc_set_adv_param(hdev, skb);
3104 case HCI_OP_READ_RSSI:
3105 hci_cc_read_rssi(hdev, skb);
3108 case HCI_OP_READ_TX_POWER:
3109 hci_cc_read_tx_power(hdev, skb);
3112 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3113 hci_cc_write_ssp_debug_mode(hdev, skb);
3116 case HCI_OP_ENABLE_RSSI:
3117 hci_cc_enable_rssi(hdev, skb);
3120 case HCI_OP_GET_RAW_RSSI:
3121 hci_cc_get_raw_rssi(hdev, skb);
3125 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
3129 if (*opcode != HCI_OP_NOP)
3130 cancel_delayed_work(&hdev->cmd_timer);
3132 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3133 atomic_set(&hdev->cmd_cnt, 1);
3135 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
3138 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3139 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event: record the opcode/status for the
 * request framework, dispatch the per-opcode hci_cs_* command-status
 * handler, refill the command credit counter, and kick the command queue.
 * NOTE(review): the numeric left column is the original file's line
 * numbering and is non-contiguous — braces, break statements and the
 * switch header are missing from this extract; do not edit logic here
 * without the full file.
 */
3142 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3143 u16 *opcode, u8 *status,
3144 hci_req_complete_t *req_complete,
3145 hci_req_complete_skb_t *req_complete_skb)
3147 struct hci_ev_cmd_status *ev = (void *) skb->data;
/* Strip the event header; opcode/status are reported back to the caller
 * through the out-parameters. */
3149 skb_pull(skb, sizeof(*ev));
3151 *opcode = __le16_to_cpu(ev->opcode);
3152 *status = ev->status;
/* Per-opcode command-status handlers (switch body; breaks elided in
 * this listing). */
3155 case HCI_OP_INQUIRY:
3156 hci_cs_inquiry(hdev, ev->status);
3159 case HCI_OP_CREATE_CONN:
3160 hci_cs_create_conn(hdev, ev->status);
3163 case HCI_OP_DISCONNECT:
3164 hci_cs_disconnect(hdev, ev->status);
3167 case HCI_OP_ADD_SCO:
3168 hci_cs_add_sco(hdev, ev->status);
3171 case HCI_OP_AUTH_REQUESTED:
3172 hci_cs_auth_requested(hdev, ev->status);
3175 case HCI_OP_SET_CONN_ENCRYPT:
3176 hci_cs_set_conn_encrypt(hdev, ev->status);
3179 case HCI_OP_REMOTE_NAME_REQ:
3180 hci_cs_remote_name_req(hdev, ev->status);
3183 case HCI_OP_READ_REMOTE_FEATURES:
3184 hci_cs_read_remote_features(hdev, ev->status);
3187 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3188 hci_cs_read_remote_ext_features(hdev, ev->status);
3191 case HCI_OP_SETUP_SYNC_CONN:
3192 hci_cs_setup_sync_conn(hdev, ev->status);
3195 case HCI_OP_SNIFF_MODE:
3196 hci_cs_sniff_mode(hdev, ev->status);
3199 case HCI_OP_EXIT_SNIFF_MODE:
3200 hci_cs_exit_sniff_mode(hdev, ev->status);
3203 case HCI_OP_SWITCH_ROLE:
3204 hci_cs_switch_role(hdev, ev->status);
3207 case HCI_OP_LE_CREATE_CONN:
3208 hci_cs_le_create_conn(hdev, ev->status);
3211 case HCI_OP_LE_READ_REMOTE_FEATURES:
3212 hci_cs_le_read_remote_features(hdev, ev->status);
3215 case HCI_OP_LE_START_ENC:
3216 hci_cs_le_start_enc(hdev, ev->status);
3220 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* Any real command answered means the controller is alive: stop the
 * command timeout watchdog (HCI_OP_NOP is the idle placeholder). */
3224 if (*opcode != HCI_OP_NOP)
3225 cancel_delayed_work(&hdev->cmd_timer);
/* Grant a fresh command credit unless we are mid-reset. */
3227 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3228 atomic_set(&hdev->cmd_cnt, 1);
3230 /* Indicate request completion if the command failed. Also, if
3231 * we're not waiting for a special event and we get a success
3232 * command status we should try to flag the request as completed
3233 * (since for this kind of commands there will not be a command
3237 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3238 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
/* Resume sending queued commands if credits and work are available. */
3241 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3242 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: remember the controller-reported
 * error code and schedule the error_reset work to recover the device. */
3245 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3247 struct hci_ev_hardware_error *ev = (void *) skb->data;
3249 hdev->hw_error_code = ev->code;
/* Recovery (typically a controller reset) runs from process context. */
3251 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle the HCI Role Change event: update the connection's
 * central/peripheral role (on success), clear the pending role-switch
 * flag and notify upper layers via hci_role_switch_cfm(). */
3254 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3256 struct hci_ev_role_change *ev = (void *) skb->data;
3257 struct hci_conn *conn;
3259 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
/* Role changes only apply to BR/EDR ACL links. */
3263 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3266 conn->role = ev->role;
3268 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3270 hci_role_switch_cfm(conn, ev->status, ev->role);
3273 hci_dev_unlock(hdev);
/* Handle Number of Completed Packets: return per-connection TX credits
 * to the ACL/LE/SCO flow-control counters and restart the TX work.
 * Only valid in packet-based flow control mode.
 */
3276 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3278 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
3281 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3282 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate the claimed handle count against the actual skb length
 * before walking the variable-length handle array. */
3286 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3287 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3288 BT_DBG("%s bad parameters", hdev->name);
3292 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3294 for (i = 0; i < ev->num_hndl; i++) {
3295 struct hci_comp_pkts_info *info = &ev->handles[i];
3296 struct hci_conn *conn;
3297 __u16 handle, count;
3299 handle = __le16_to_cpu(info->handle);
3300 count = __le16_to_cpu(info->count);
3302 conn = hci_conn_hash_lookup_handle(hdev, handle);
3306 conn->sent -= count;
/* Credit the right pool for this link type, clamping to the
 * controller-advertised buffer counts. */
3308 switch (conn->type) {
3310 hdev->acl_cnt += count;
3311 if (hdev->acl_cnt > hdev->acl_pkts)
3312 hdev->acl_cnt = hdev->acl_pkts;
/* LE: use the dedicated LE pool when the controller has one,
 * otherwise LE traffic shares the ACL buffer pool. */
3316 if (hdev->le_pkts) {
3317 hdev->le_cnt += count;
3318 if (hdev->le_cnt > hdev->le_pkts)
3319 hdev->le_cnt = hdev->le_pkts;
3321 hdev->acl_cnt += count;
3322 if (hdev->acl_cnt > hdev->acl_pkts)
3323 hdev->acl_cnt = hdev->acl_pkts;
3328 hdev->sco_cnt += count;
3329 if (hdev->sco_cnt > hdev->sco_pkts)
3330 hdev->sco_cnt = hdev->sco_pkts;
3334 BT_ERR("Unknown type %d conn %p", conn->type, conn);
/* Freed credits may unblock queued traffic. */
3339 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a handle to a hci_conn in a dev_type-aware way: BR/EDR
 * controllers map handles to connections directly, AMP controllers map
 * them through logical channels (chan->conn). Returns NULL for unknown
 * device types. */
3342 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3345 struct hci_chan *chan;
3347 switch (hdev->dev_type) {
3349 return hci_conn_hash_lookup_handle(hdev, handle);
3351 chan = hci_chan_lookup_handle(hdev, handle);
3356 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Handle Number of Completed Data Blocks: block-based flow control
 * counterpart of hci_num_comp_pkts_evt() (used by AMP controllers).
 * Returns data-block credits to hdev->block_cnt and restarts TX work.
 */
3363 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3365 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3368 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3369 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Bounds-check the variable-length handle array against skb->len. */
3373 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3374 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3375 BT_DBG("%s bad parameters", hdev->name);
3379 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3382 for (i = 0; i < ev->num_hndl; i++) {
3383 struct hci_comp_blocks_info *info = &ev->handles[i];
3384 struct hci_conn *conn = NULL;
3385 __u16 handle, block_count;
3387 handle = __le16_to_cpu(info->handle);
3388 block_count = __le16_to_cpu(info->blocks);
/* dev_type-aware lookup: handles may refer to AMP logical links. */
3390 conn = __hci_conn_lookup_handle(hdev, handle);
3394 conn->sent -= block_count;
3396 switch (conn->type) {
3399 hdev->block_cnt += block_count;
3400 if (hdev->block_cnt > hdev->num_blocks)
3401 hdev->block_cnt = hdev->num_blocks;
3405 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3410 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event: track active/sniff mode per
 * connection, maintain the POWER_SAVE flag, and finish any SCO setup
 * that was deferred until the ACL link left sniff mode. */
3413 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3415 struct hci_ev_mode_change *ev = (void *) skb->data;
3416 struct hci_conn *conn;
3418 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3422 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3424 conn->mode = ev->mode;
/* Only track power-save state for mode changes we did not initiate
 * ourselves (no MODE_CHANGE_PEND outstanding). */
3426 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3428 if (conn->mode == HCI_CM_ACTIVE)
3429 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3431 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3434 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3435 hci_sco_setup(conn, ev->status);
3438 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event: auto-reject when we are not
 * bondable and did not initiate authentication, otherwise forward the
 * request to user space via mgmt (flagging whether a 16-digit secure
 * PIN is required). */
3441 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3443 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3444 struct hci_conn *conn;
3446 BT_DBG("%s", hdev->name);
3450 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Re-pairing on a live link: extend the disconnect timeout so the
 * connection survives the pairing dialogue. */
3454 if (conn->state == BT_CONNECTED) {
3455 hci_conn_hold(conn);
3456 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3457 hci_conn_drop(conn);
3460 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3461 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3462 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3463 sizeof(ev->bdaddr), &ev->bdaddr);
3464 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High security requires a full-length (secure) PIN. */
3467 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3472 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3476 hci_dev_unlock(hdev);
/* Record the link key type and PIN length on the connection and derive
 * the resulting pending security level from the key type:
 * unauthenticated keys cap at MEDIUM, authenticated P-192 gives HIGH,
 * authenticated P-256 gives FIPS. A "changed combination" key keeps the
 * previously stored type. */
3479 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3481 if (key_type == HCI_LK_CHANGED_COMBINATION)
3484 conn->pin_length = pin_len;
3485 conn->key_type = key_type;
3488 case HCI_LK_LOCAL_UNIT:
3489 case HCI_LK_REMOTE_UNIT:
3490 case HCI_LK_DEBUG_COMBINATION:
3492 case HCI_LK_COMBINATION:
/* Legacy combination key: only a full 16-digit PIN earns HIGH
 * (the pin_len check sits on an elided line of this listing). */
3494 conn->pending_sec_level = BT_SECURITY_HIGH;
3496 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3498 case HCI_LK_UNAUTH_COMBINATION_P192:
3499 case HCI_LK_UNAUTH_COMBINATION_P256:
3500 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3502 case HCI_LK_AUTH_COMBINATION_P192:
3503 conn->pending_sec_level = BT_SECURITY_HIGH;
3505 case HCI_LK_AUTH_COMBINATION_P256:
3506 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event: look up a stored link key for
 * the peer and reply with it, unless the key's strength is insufficient
 * for the connection's pending security level — in that case (or when
 * no key is stored) send a negative reply so pairing is redone. */
3511 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3513 struct hci_ev_link_key_req *ev = (void *) skb->data;
3514 struct hci_cp_link_key_reply cp;
3515 struct hci_conn *conn;
3516 struct link_key *key;
3518 BT_DBG("%s", hdev->name);
/* Without mgmt the kernel keeps no key store; let the controller /
 * legacy stack handle it. */
3520 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3525 key = hci_find_link_key(hdev, &ev->bdaddr);
3527 BT_DBG("%s link key not found for %pMR", hdev->name,
3532 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3535 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3537 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Refuse an unauthenticated key when the requested auth type
 * demands MITM protection. */
3539 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3540 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3541 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3542 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* A short-PIN combination key is too weak for HIGH/FIPS. */
3546 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3547 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3548 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3549 BT_DBG("%s ignoring key unauthenticated for high security",
3554 conn_set_key(conn, key->type, key->pin_len);
3557 bacpy(&cp.bdaddr, &ev->bdaddr);
3558 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3560 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3562 hci_dev_unlock(hdev);
/* not_found path: tell the controller we have no key. */
3567 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3568 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event: store the newly created
 * link key, notify user space (mgmt_new_link_key), and maintain the
 * per-connection key/flush flags. Debug keys are dropped from the
 * kernel list unless HCI_KEEP_DEBUG_KEYS is set. */
3571 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3573 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3574 struct hci_conn *conn;
3575 struct link_key *key;
3579 BT_DBG("%s", hdev->name);
3583 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive long enough for any follow-up
 * encryption/config steps. */
3587 hci_conn_hold(conn);
3588 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3589 hci_conn_drop(conn);
3591 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3592 conn_set_key(conn, ev->key_type, conn->pin_length);
3594 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3597 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3598 ev->key_type, pin_len, &persistent);
3602 /* Update connection information since adding the key will have
3603 * fixed up the type in the case of changed combination keys.
3605 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3606 conn_set_key(conn, key->type, key->pin_len);
3608 mgmt_new_link_key(hdev, key, persistent);
3610 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3611 * is set. If it's not set simply remove the key from the kernel
3612 * list (we've still notified user space about it but with
3613 * store_hint being 0).
3615 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3616 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
3617 list_del_rcu(&key->list);
3618 kfree_rcu(key, rcu);
/* Persistent keys must survive the connection: clear FLUSH_KEY;
 * non-persistent ones are flushed on disconnect. */
3623 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3625 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3628 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: refresh the cached
 * clock offset in the inquiry cache entry for this peer (used to speed
 * up later paging). */
3631 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3633 struct hci_ev_clock_offset *ev = (void *) skb->data;
3634 struct hci_conn *conn;
3636 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3640 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3641 if (conn && !ev->status) {
3642 struct inquiry_entry *ie;
3644 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3646 ie->data.clock_offset = ev->clock_offset;
3647 ie->timestamp = jiffies;
3651 hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: mirror the new
 * negotiated packet type mask onto the connection object. */
3654 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3656 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3657 struct hci_conn *conn;
3659 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3663 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3664 if (conn && !ev->status)
3665 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3667 hci_dev_unlock(hdev);
/* Handle the HCI Page Scan Repetition Mode Change event: update the
 * peer's cached page-scan repetition mode in the inquiry cache. */
3670 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3672 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3673 struct inquiry_entry *ie;
3675 BT_DBG("%s", hdev->name);
3679 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3681 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3682 ie->timestamp = jiffies;
3685 hci_dev_unlock(hdev);
/* Handle Inquiry Result with RSSI: the event exists in two wire
 * formats (with and without the pscan_mode field); the format is
 * detected by dividing the payload length by num_rsp. Each response is
 * folded into the inquiry cache and reported via mgmt_device_found().
 * Results are ignored during periodic inquiry.
 */
3688 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3689 struct sk_buff *skb)
3691 struct inquiry_data data;
3692 int num_rsp = *((__u8 *) skb->data);
3694 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3699 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Per-entry size != sizeof(inquiry_info_with_rssi) implies the
 * legacy variant that also carries pscan_mode. */
3704 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3705 struct inquiry_info_with_rssi_and_pscan_mode *info;
3706 info = (void *) (skb->data + 1);
3708 if (skb->len < num_rsp * sizeof(*info) + 1)
3711 for (; num_rsp; num_rsp--, info++) {
3714 bacpy(&data.bdaddr, &info->bdaddr);
3715 data.pscan_rep_mode = info->pscan_rep_mode;
3716 data.pscan_period_mode = info->pscan_period_mode;
3717 data.pscan_mode = info->pscan_mode;
3718 memcpy(data.dev_class, info->dev_class, 3);
3719 data.clock_offset = info->clock_offset;
3720 data.rssi = info->rssi;
3721 data.ssp_mode = 0x00;
3723 flags = hci_inquiry_cache_update(hdev, &data, false);
3725 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3726 info->dev_class, info->rssi,
3727 flags, NULL, 0, NULL, 0);
3730 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3732 if (skb->len < num_rsp * sizeof(*info) + 1)
3735 for (; num_rsp; num_rsp--, info++) {
3738 bacpy(&data.bdaddr, &info->bdaddr);
3739 data.pscan_rep_mode = info->pscan_rep_mode;
3740 data.pscan_period_mode = info->pscan_period_mode;
3741 data.pscan_mode = 0x00;
3742 memcpy(data.dev_class, info->dev_class, 3);
3743 data.clock_offset = info->clock_offset;
3744 data.rssi = info->rssi;
3745 data.ssp_mode = 0x00;
3747 flags = hci_inquiry_cache_update(hdev, &data, false);
3749 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3750 info->dev_class, info->rssi,
3751 flags, NULL, 0, NULL, 0);
3756 hci_dev_unlock(hdev);
/* Handle Read Remote Extended Features Complete: cache the remote
 * feature page, derive SSP/SC enablement from host features page 1,
 * then finish BT_CONFIG processing (remote name request, mgmt connect
 * notification, or straight to BT_CONNECTED). */
3759 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3760 struct sk_buff *skb)
3762 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3763 struct hci_conn *conn;
3765 BT_DBG("%s", hdev->name);
3769 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3773 if (ev->page < HCI_MAX_PAGES)
3774 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote *host* feature bits (SSP/SC support). */
3776 if (!ev->status && ev->page == 0x01) {
3777 struct inquiry_entry *ie;
3779 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3781 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3783 if (ev->features[0] & LMP_HOST_SSP) {
3784 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3786 /* It is mandatory by the Bluetooth specification that
3787 * Extended Inquiry Results are only used when Secure
3788 * Simple Pairing is enabled, but some devices violate
3791 * To make these devices work, the internal SSP
3792 * enabled flag needs to be cleared if the remote host
3793 * features do not indicate SSP support */
3794 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3797 if (ev->features[0] & LMP_HOST_SC)
3798 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3801 if (conn->state != BT_CONFIG)
/* Name still unknown to mgmt: ask for it before reporting the
 * device as connected. */
3804 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3805 struct hci_cp_remote_name_req cp;
3806 memset(&cp, 0, sizeof(cp));
3807 bacpy(&cp.bdaddr, &conn->dst);
3808 cp.pscan_rep_mode = 0x02;
3809 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3810 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3811 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3813 if (!hci_outgoing_auth_needed(hdev, conn)) {
3814 conn->state = BT_CONNECTED;
3815 hci_connect_cfm(conn, ev->status);
3816 hci_conn_drop(conn);
3820 hci_dev_unlock(hdev);
3823 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3824 struct sk_buff *skb)
3826 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3827 struct hci_conn *conn;
3829 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3833 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3835 if (ev->link_type == ESCO_LINK)
3838 /* When the link type in the event indicates SCO connection
3839 * and lookup of the connection object fails, then check
3840 * if an eSCO connection object exists.
3842 * The core limits the synchronous connections to either
3843 * SCO or eSCO. The eSCO connection is preferred and tried
3844 * to be setup first and until successfully established,
3845 * the link type will be hinted as eSCO.
3847 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3852 switch (ev->status) {
3854 conn->handle = __le16_to_cpu(ev->handle);
3855 conn->state = BT_CONNECTED;
3856 conn->type = ev->link_type;
3858 hci_debugfs_create_conn(conn);
3859 hci_conn_add_sysfs(conn);
3862 case 0x10: /* Connection Accept Timeout */
3863 case 0x0d: /* Connection Rejected due to Limited Resources */
3864 case 0x11: /* Unsupported Feature or Parameter Value */
3865 case 0x1c: /* SCO interval rejected */
3866 case 0x1a: /* Unsupported Remote Feature */
3867 case 0x1e: /* Invalid LMP Parameters */
3868 case 0x1f: /* Unspecified error */
3869 case 0x20: /* Unsupported LMP Parameter value */
3871 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3872 (hdev->esco_type & EDR_ESCO_MASK);
3873 if (hci_setup_sync(conn, conn->link->handle))
3879 conn->state = BT_CLOSED;
3883 hci_connect_cfm(conn, ev->status);
3888 hci_dev_unlock(hdev);
/* Compute the meaningful length of an EIR blob by walking its
 * length-prefixed fields (a zero length field terminates the data;
 * that check sits on an elided line of this listing). Never reads past
 * eir_len. */
3891 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3895 while (parsed < eir_len) {
3896 u8 field_len = eir[0];
/* Each field occupies field_len bytes plus its length byte. */
3901 parsed += field_len + 1;
3902 eir += field_len + 1;
/* Handle Extended Inquiry Result: for each response, update the
 * inquiry cache (name_known derived from an EIR complete-name field
 * when mgmt is active) and forward the device plus its EIR data to
 * mgmt_device_found(). Skipped during periodic inquiry. */
3908 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3909 struct sk_buff *skb)
3911 struct inquiry_data data;
3912 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3913 int num_rsp = *((__u8 *) skb->data);
3916 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
/* Reject zero counts and truncated payloads before parsing. */
3918 if (!num_rsp || skb->len < num_rsp * sizeof(*info) + 1)
3921 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3926 for (; num_rsp; num_rsp--, info++) {
3930 bacpy(&data.bdaddr, &info->bdaddr);
3931 data.pscan_rep_mode = info->pscan_rep_mode;
3932 data.pscan_period_mode = info->pscan_period_mode;
3933 data.pscan_mode = 0x00;
3934 memcpy(data.dev_class, info->dev_class, 3);
3935 data.clock_offset = info->clock_offset;
3936 data.rssi = info->rssi;
/* EIR implies SSP per the spec; mark the cache entry accordingly. */
3937 data.ssp_mode = 0x01;
3939 if (hci_dev_test_flag(hdev, HCI_MGMT))
3940 name_known = eir_get_data(info->data,
3942 EIR_NAME_COMPLETE, NULL);
3946 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3948 eir_len = eir_get_length(info->data, sizeof(info->data));
3950 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3951 info->dev_class, info->rssi,
3952 flags, info->data, eir_len, NULL, 0);
3955 hci_dev_unlock(hdev);
/* Handle Encryption Key Refresh Complete (LE only here — BR/EDR is
 * finished via the auth_complete event): promote the pending security
 * level, disconnect on failure, and confirm connection/authentication
 * to upper layers depending on connection state. */
3958 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3959 struct sk_buff *skb)
3961 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3962 struct hci_conn *conn;
3964 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3965 __le16_to_cpu(ev->handle));
3969 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3973 /* For BR/EDR the necessary steps are taken through the
3974 * auth_complete event.
3976 if (conn->type != LE_LINK)
3980 conn->sec_level = conn->pending_sec_level;
3982 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Failed refresh on an established link: drop it with an
 * authentication-failure reason. */
3984 if (ev->status && conn->state == BT_CONNECTED) {
3985 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3986 hci_conn_drop(conn);
3990 if (conn->state == BT_CONFIG) {
3992 conn->state = BT_CONNECTED;
3994 hci_connect_cfm(conn, ev->status);
3995 hci_conn_drop(conn);
3997 hci_auth_cfm(conn, ev->status);
3999 hci_conn_hold(conn);
4000 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4001 hci_conn_drop(conn);
4005 hci_dev_unlock(hdev);
/* Derive the authentication requirements to send in an IO Capability
 * Reply, combining the remote requirements with our own MITM policy
 * (bit 0 of the auth value is the MITM flag). */
4008 static u8 hci_get_auth_req(struct hci_conn *conn)
4010 /* If remote requests no-bonding follow that lead */
4011 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4012 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4013 return conn->remote_auth | (conn->auth_type & 0x01);
4015 /* If both remote and local have enough IO capabilities, require
4018 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4019 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4020 return conn->remote_auth | 0x01;
4022 /* No MITM protection possible so ignore remote requirement */
4023 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Report the OOB-data-present value for an IO Capability Reply: based
 * on stored remote OOB data, honoring Secure Connections and SC-Only
 * mode (SC-Only requires valid, non-zero P-256 hash/randomizer; legacy
 * requires P-192). Return values for the no-data/zero-key branches sit
 * on elided lines of this listing. */
4026 static u8 bredr_oob_data_present(struct hci_conn *conn)
4028 struct hci_dev *hdev = conn->hdev;
4029 struct oob_data *data;
4031 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4035 if (bredr_sc_enabled(hdev)) {
4036 /* When Secure Connections is enabled, then just
4037 * return the present value stored with the OOB
4038 * data. The stored value contains the right present
4039 * information. However it can only be trusted when
4040 * not in Secure Connection Only mode.
4042 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4043 return data->present;
4045 /* When Secure Connections Only mode is enabled, then
4046 * the P-256 values are required. If they are not
4047 * available, then do not declare that OOB data is
4050 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4051 !memcmp(data->hash256, ZERO_KEY, 16))
4057 /* When Secure Connections is not enabled or actually
4058 * not supported by the hardware, then check that if
4059 * P-192 data values are present.
4061 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4062 !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle the HCI IO Capability Request event: if pairing is permitted
 * (bondable, we initiated, or remote wants no bonding) reply with our
 * IO capability, derived auth requirements and OOB-present flag;
 * otherwise send a negative reply with PAIRING_NOT_ALLOWED. */
4068 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4070 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4071 struct hci_conn *conn;
4073 BT_DBG("%s", hdev->name);
4077 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4081 hci_conn_hold(conn);
4083 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4086 /* Allow pairing if we're pairable, the initiators of the
4087 * pairing or if the remote is not requesting bonding.
4089 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4090 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4091 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4092 struct hci_cp_io_capability_reply cp;
4094 bacpy(&cp.bdaddr, &ev->bdaddr);
4095 /* Change the IO capability from KeyboardDisplay
4096 * to DisplayYesNo as it is not supported by BT spec. */
4097 cp.capability = (conn->io_capability == 0x04) ?
4098 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4100 /* If we are initiators, there is no remote information yet */
4101 if (conn->remote_auth == 0xff) {
4102 /* Request MITM protection if our IO caps allow it
4103 * except for the no-bonding case.
4105 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4106 conn->auth_type != HCI_AT_NO_BONDING)
4107 conn->auth_type |= 0x01;
4109 conn->auth_type = hci_get_auth_req(conn);
4112 /* If we're not bondable, force one of the non-bondable
4113 * authentication requirement values.
4115 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4116 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4118 cp.authentication = conn->auth_type;
4119 cp.oob_data = bredr_oob_data_present(conn);
4121 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4124 struct hci_cp_io_capability_neg_reply cp;
4126 bacpy(&cp.bdaddr, &ev->bdaddr);
4127 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4129 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4134 hci_dev_unlock(hdev);
/* Handle the HCI IO Capability Response event: cache the remote side's
 * IO capability and authentication requirements on the connection for
 * the upcoming pairing decisions. */
4137 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4139 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4140 struct hci_conn *conn;
4142 BT_DBG("%s", hdev->name);
4146 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4150 conn->remote_cap = ev->capability;
4151 conn->remote_auth = ev->authentication;
4154 hci_dev_unlock(hdev);
/* Handle the HCI User Confirmation Request event (SSP numeric
 * comparison): reject when MITM is required but the peer cannot
 * provide it, auto-accept when neither side needs MITM (optionally
 * after auto_accept_delay), otherwise forward the passkey to user
 * space via mgmt_user_confirm_request(). */
4157 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4158 struct sk_buff *skb)
4160 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4161 int loc_mitm, rem_mitm, confirm_hint = 0;
4162 struct hci_conn *conn;
4164 BT_DBG("%s", hdev->name);
4168 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4171 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag. */
4175 loc_mitm = (conn->auth_type & 0x01);
4176 rem_mitm = (conn->remote_auth & 0x01);
4178 /* If we require MITM but the remote device can't provide that
4179 * (it has NoInputNoOutput) then reject the confirmation
4180 * request. We check the security level here since it doesn't
4181 * necessarily match conn->auth_type.
4183 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4184 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4185 BT_DBG("Rejecting request: remote device can't provide MITM");
4186 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4187 sizeof(ev->bdaddr), &ev->bdaddr);
4191 /* If no side requires MITM protection; auto-accept */
4192 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4193 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4195 /* If we're not the initiators request authorization to
4196 * proceed from user space (mgmt_user_confirm with
4197 * confirm_hint set to 1). The exception is if neither
4198 * side had MITM or if the local IO capability is
4199 * NoInputNoOutput, in which case we do auto-accept
4201 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4202 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4203 (loc_mitm || rem_mitm)) {
4204 BT_DBG("Confirming auto-accept as acceptor");
4209 BT_DBG("Auto-accept of user confirmation with %ums delay",
4210 hdev->auto_accept_delay);
4212 if (hdev->auto_accept_delay > 0) {
4213 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4214 queue_delayed_work(conn->hdev->workqueue,
4215 &conn->auto_accept_work, delay);
4219 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4220 sizeof(ev->bdaddr), &ev->bdaddr);
4225 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4226 le32_to_cpu(ev->passkey), confirm_hint);
4229 hci_dev_unlock(hdev);
/* Handle the HCI User Passkey Request event: simply forward it to user
 * space via mgmt when the mgmt interface is active. */
4232 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4233 struct sk_buff *skb)
4235 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4237 BT_DBG("%s", hdev->name);
4239 if (hci_dev_test_flag(hdev, HCI_MGMT))
4240 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the HCI User Passkey Notification event: record the passkey
 * to display, reset the entered-digit counter and notify user space. */
4243 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4244 struct sk_buff *skb)
4246 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4247 struct hci_conn *conn;
4249 BT_DBG("%s", hdev->name);
4251 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4255 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4256 conn->passkey_entered = 0;
4258 if (hci_dev_test_flag(hdev, HCI_MGMT))
4259 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4260 conn->dst_type, conn->passkey_notify,
4261 conn->passkey_entered);
/* Handle the HCI Keypress Notification event: track how many passkey
 * digits the remote device has entered/erased and relay the updated
 * count to user space for display. */
4264 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4266 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4267 struct hci_conn *conn;
4269 BT_DBG("%s", hdev->name);
4271 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* switch on ev->type; breaks elided in this listing. */
4276 case HCI_KEYPRESS_STARTED:
4277 conn->passkey_entered = 0;
4280 case HCI_KEYPRESS_ENTERED:
4281 conn->passkey_entered++;
4284 case HCI_KEYPRESS_ERASED:
4285 conn->passkey_entered--;
4288 case HCI_KEYPRESS_CLEARED:
4289 conn->passkey_entered = 0;
4292 case HCI_KEYPRESS_COMPLETED:
4296 if (hci_dev_test_flag(hdev, HCI_MGMT))
4297 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4298 conn->dst_type, conn->passkey_notify,
4299 conn->passkey_entered);
/* Handle the HCI Simple Pairing Complete event: reset the cached remote
 * auth requirement and report pairing failure to mgmt — but only when
 * we were not the authentication initiator, to avoid a duplicate
 * auth_failed from the later auth_complete event. */
4302 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4303 struct sk_buff *skb)
4305 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4306 struct hci_conn *conn;
4308 BT_DBG("%s", hdev->name);
4312 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4316 /* Reset the authentication requirement to unknown */
4317 conn->remote_auth = 0xff;
4319 /* To avoid duplicate auth_failed events to user space we check
4320 * the HCI_CONN_AUTH_PEND flag which will be set if we
4321 * initiated the authentication. A traditional auth_complete
4322 * event gets always produced as initiator and is also mapped to
4323 * the mgmt_auth_failed event */
4324 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4325 mgmt_auth_failed(conn, ev->status);
4327 hci_conn_drop(conn);
4330 hci_dev_unlock(hdev);
/* Handle the HCI Remote Host Supported Features Notification event:
 * cache the remote host feature page on any existing connection and
 * refresh the SSP-mode bit in the inquiry cache entry for the peer. */
4333 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4334 struct sk_buff *skb)
4336 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4337 struct inquiry_entry *ie;
4338 struct hci_conn *conn;
4340 BT_DBG("%s", hdev->name);
4344 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Host features are stored as feature page 1. */
4346 memcpy(conn->features[1], ev->features, 8);
4348 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4350 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4352 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event: the controller asks for the
 * out-of-band pairing data previously collected for this peer.  Reply
 * with stored hash/randomizer values (extended form when BR/EDR Secure
 * Connections is enabled), or send a negative reply when no OOB data
 * is stored or mgmt is not active.
 *
 * NOTE(review): excerpt with elided lines (hci_dev_lock, the data
 * null-check branch structure and several braces are missing from the
 * listing); comments only added.
 */
4355 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4356 struct sk_buff *skb)
4358 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4359 struct oob_data *data;
4361 BT_DBG("%s", hdev->name);
4365 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4368 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4370 struct hci_cp_remote_oob_data_neg_reply cp;
4372 bacpy(&cp.bdaddr, &ev->bdaddr);
4373 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4378 if (bredr_sc_enabled(hdev)) {
4379 struct hci_cp_remote_oob_ext_data_reply cp;
4381 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-only mode the P-192 values are zeroed out so only the
 * P-256 (hash256/rand256) values can be used by the peer.
 */
4382 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4383 memset(cp.hash192, 0, sizeof(cp.hash192));
4384 memset(cp.rand192, 0, sizeof(cp.rand192));
4386 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4387 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4389 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4390 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4392 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4395 struct hci_cp_remote_oob_data_reply cp;
4397 bacpy(&cp.bdaddr, &ev->bdaddr);
4398 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4399 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4401 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4406 hci_dev_unlock(hdev);
4409 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (CONFIG_BT_HS only): an AMP channel was
 * chosen for the physical link; kick off reading the local AMP assoc
 * data for the corresponding connection.
 *
 * NOTE(review): excerpt with elided lines (hcon null-check missing
 * from the listing); comments only added.
 */
4410 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4412 struct hci_ev_channel_selected *ev = (void *)skb->data;
4413 struct hci_conn *hcon;
4415 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4417 skb_pull(skb, sizeof(*ev));
4419 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4423 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: the AMP physical link has been
 * established (or failed).  On success, inherit the peer address from
 * the BR/EDR connection that anchors the AMP manager, register the
 * connection in debugfs/sysfs and confirm to the AMP layer.
 *
 * NOTE(review): excerpt with elided lines (status/error branches,
 * amp_mgr null-check and braces are missing from the listing);
 * comments only added.
 */
4426 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4427 struct sk_buff *skb)
4429 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4430 struct hci_conn *hcon, *bredr_hcon;
4432 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4437 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4439 hci_dev_unlock(hdev);
4445 hci_dev_unlock(hdev);
4449 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4451 hcon->state = BT_CONNECTED;
4452 bacpy(&hcon->dst, &bredr_hcon->dst);
/* hold + drop with a disc_timeout arms the delayed-disconnect timer
 * instead of keeping a permanent reference.
 */
4454 hci_conn_hold(hcon);
4455 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4456 hci_conn_drop(hcon);
4458 hci_debugfs_create_conn(hcon);
4459 hci_conn_add_sysfs(hcon);
4461 amp_physical_cfm(bredr_hcon, hcon);
4463 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: create an hci_chan for the new
 * logical link, record its handle and, when a BR/EDR L2CAP channel is
 * waiting on the AMP manager, confirm the logical link to L2CAP with
 * the AMP controller's block MTU.
 *
 * NOTE(review): excerpt with elided lines (status handling, hcon and
 * hchan null-checks are missing from the listing); comments only.
 */
4466 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4468 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4469 struct hci_conn *hcon;
4470 struct hci_chan *hchan;
4471 struct amp_mgr *mgr;
4473 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4474 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4477 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4481 /* Create AMP hchan */
4482 hchan = hci_chan_create(hcon);
4486 hchan->handle = le16_to_cpu(ev->handle);
4488 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4490 mgr = hcon->amp_mgr;
4491 if (mgr && mgr->bredr_chan) {
4492 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4494 l2cap_chan_lock(bredr_chan);
/* AMP data path uses block-based flow control; propagate MTU. */
4496 bredr_chan->conn->mtu = hdev->block_mtu;
4497 l2cap_logical_cfm(bredr_chan, hchan, 0);
4498 hci_conn_hold(hcon);
4500 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event: look up the hci_chan
 * by logical-link handle and tear down the logical link in the AMP
 * layer with the reported reason.
 *
 * NOTE(review): excerpt with elided lines (status check and hchan
 * null-check are missing from the listing); comments only added.
 */
4504 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4505 struct sk_buff *skb)
4507 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4508 struct hci_chan *hchan;
4510 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4511 le16_to_cpu(ev->handle), ev->status);
4518 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4522 amp_destroy_logical_link(hchan, ev->reason);
4525 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: mark the physical
 * link's hci_conn as closed.
 *
 * NOTE(review): excerpt with elided lines (status check, hci_dev_lock
 * and the hcon null-check are missing from the listing); comments only.
 */
4528 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4529 struct sk_buff *skb)
4531 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4532 struct hci_conn *hcon;
4534 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4541 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4543 hcon->state = BT_CLOSED;
4547 hci_dev_unlock(hdev);
/* LE Connection Complete event: finalize (or create) the hci_conn for
 * a new LE link.  Handles both roles: fills in initiator/responder
 * address tracking, resolves RPAs back to identity addresses via the
 * IRK store, drops blacklisted peers, notifies mgmt, reads remote
 * features when permitted, and clears any pending auto-connect params.
 *
 * NOTE(review): excerpt with elided lines (role checks, braces,
 * error-status branch structure are missing from the listing);
 * comments only added.  "¶ms" below looks like an encoding-mangled
 * "&params" — verify against the upstream source.
 */
4551 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4553 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4554 struct hci_conn_params *params;
4555 struct hci_conn *conn;
4556 struct smp_irk *irk;
4559 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4563 /* All controllers implicitly stop advertising in the event of a
4564 * connection, so ensure that the state bit is cleared.
4566 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4568 conn = hci_lookup_le_connect(hdev);
4570 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4572 BT_ERR("No memory for new connection");
4576 conn->dst_type = ev->bdaddr_type;
4578 /* If we didn't have a hci_conn object previously
4579 * but we're in master role this must be something
4580 * initiated using a white list. Since white list based
4581 * connections are not "first class citizens" we don't
4582 * have full tracking of them. Therefore, we go ahead
4583 * with a "best effort" approach of determining the
4584 * initiator address based on the HCI_PRIVACY flag.
4587 conn->resp_addr_type = ev->bdaddr_type;
4588 bacpy(&conn->resp_addr, &ev->bdaddr);
4589 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4590 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4591 bacpy(&conn->init_addr, &hdev->rpa);
4593 hci_copy_identity_address(hdev,
4595 &conn->init_addr_type);
/* Outgoing connection attempt succeeded or failed; either way the
 * connect-timeout watchdog is no longer needed.
 */
4599 cancel_delayed_work(&conn->le_conn_timeout);
4603 /* Set the responder (our side) address type based on
4604 * the advertising address type.
4606 conn->resp_addr_type = hdev->adv_addr_type;
4607 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4608 bacpy(&conn->resp_addr, &hdev->random_addr);
4610 bacpy(&conn->resp_addr, &hdev->bdaddr);
4612 conn->init_addr_type = ev->bdaddr_type;
4613 bacpy(&conn->init_addr, &ev->bdaddr);
4615 /* For incoming connections, set the default minimum
4616 * and maximum connection interval. They will be used
4617 * to check if the parameters are in range and if not
4618 * trigger the connection update procedure.
4620 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4621 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4624 /* Lookup the identity address from the stored connection
4625 * address and address type.
4627 * When establishing connections to an identity address, the
4628 * connection procedure will store the resolvable random
4629 * address first. Now if it can be converted back into the
4630 * identity address, start using the identity address from
4633 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4635 bacpy(&conn->dst, &irk->bdaddr);
4636 conn->dst_type = irk->addr_type;
4640 hci_le_conn_failed(conn, ev->status);
4644 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4645 addr_type = BDADDR_LE_PUBLIC;
4647 addr_type = BDADDR_LE_RANDOM;
4649 /* Drop the connection if the device is blocked */
4650 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4651 hci_conn_drop(conn);
/* test_and_set avoids a duplicate mgmt "connected" notification. */
4655 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4656 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4658 conn->sec_level = BT_SECURITY_LOW;
4659 conn->handle = __le16_to_cpu(ev->handle);
4660 conn->state = BT_CONFIG;
4662 conn->le_conn_interval = le16_to_cpu(ev->interval);
4663 conn->le_conn_latency = le16_to_cpu(ev->latency);
4664 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4666 hci_debugfs_create_conn(conn);
4667 hci_conn_add_sysfs(conn);
4670 /* The remote features procedure is defined for master
4671 * role only. So only in case of an initiated connection
4672 * request the remote features.
4674 * If the local controller supports slave-initiated features
4675 * exchange, then requesting the remote features in slave
4676 * role is possible. Otherwise just transition into the
4677 * connected state without requesting the remote features.
4680 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
4681 struct hci_cp_le_read_remote_features cp;
4683 cp.handle = __cpu_to_le16(conn->handle);
4685 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
4688 hci_conn_hold(conn);
4690 conn->state = BT_CONNECTED;
4691 hci_connect_cfm(conn, ev->status);
4694 hci_connect_cfm(conn, ev->status);
/* A successful connect consumes any pending auto-connect entry. */
4697 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4700 list_del_init(¶ms->action);
4702 hci_conn_drop(params->conn);
4703 hci_conn_put(params->conn);
4704 params->conn = NULL;
4709 hci_update_background_scan(hdev);
4710 hci_dev_unlock(hdev);
/* LE Connection Update Complete event: store the negotiated connection
 * interval/latency/supervision-timeout on the hci_conn and notify mgmt
 * of success or failure.
 *
 * NOTE(review): excerpt with elided lines (hci_dev_lock, status branch
 * structure and the conn null-check are missing from the listing);
 * comments only added.
 */
4713 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4714 struct sk_buff *skb)
4716 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4717 struct hci_conn *conn;
4719 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4726 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4730 hci_dev_unlock(hdev);
4731 mgmt_le_conn_update_failed(hdev, &conn->dst,
4732 conn->type, conn->dst_type, ev->status);
4736 conn->le_conn_interval = le16_to_cpu(ev->interval);
4737 conn->le_conn_latency = le16_to_cpu(ev->latency);
4738 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4741 hci_dev_unlock(hdev);
4744 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
4745 conn->dst_type, conn->le_conn_interval,
4746 conn->le_conn_latency, conn->le_supv_timeout);
/* Decide whether a connectable advertising report should trigger an
 * outgoing LE connection attempt, and start one if so.  Returns the
 * new hci_conn on success, NULL when no attempt is warranted (the
 * error path logs and returns NULL as well).
 *
 * NOTE(review): excerpt with elided lines (several returns, braces and
 * switch tail are missing from the listing); comments only added.
 */
4750 /* This function requires the caller holds hdev->lock */
4751 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4753 u8 addr_type, u8 adv_type,
4754 bdaddr_t *direct_rpa)
4756 struct hci_conn *conn;
4757 struct hci_conn_params *params;
4759 /* If the event is not connectable don't proceed further */
4760 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4763 /* Ignore if the device is blocked */
4764 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4767 /* Most controller will fail if we try to create new connections
4768 * while we have an existing one in slave role.
4770 if (hdev->conn_hash.le_num_slave > 0)
4773 /* If we're not connectable only connect devices that we have in
4774 * our pend_le_conns list.
4776 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
4781 if (!params->explicit_connect) {
4782 switch (params->auto_connect) {
4783 case HCI_AUTO_CONN_DIRECT:
4784 /* Only devices advertising with ADV_DIRECT_IND are
4785 * triggering a connection attempt. This is allowing
4786 * incoming connections from slave devices.
4788 if (adv_type != LE_ADV_DIRECT_IND)
4791 case HCI_AUTO_CONN_ALWAYS:
4792 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4793 * are triggering a connection attempt. This means
4794 * that incoming connectioms from slave device are
4795 * accepted and also outgoing connections to slave
4796 * devices are established when found.
4804 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4805 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
4807 if (!IS_ERR(conn)) {
4808 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
4809 * by higher layer that tried to connect, if no then
4810 * store the pointer since we don't really have any
4811 * other owner of the object besides the params that
4812 * triggered it. This way we can abort the connection if
4813 * the parameters get removed and keep the reference
4814 * count consistent once the connection is established.
4817 if (!params->explicit_connect)
4818 params->conn = hci_conn_get(conn);
4823 switch (PTR_ERR(conn)) {
4825 /* If hci_connect() returns -EBUSY it means there is already
4826 * an LE connection attempt going on. Since controllers don't
4827 * support more than one connection attempt at the time, we
4828 * don't consider this an error case.
4832 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core handler for one LE advertising (or directed advertising) report:
 * validates the PDU type and length, trims zero padding, filters
 * directed reports against the local IRK, resolves RPAs to identity
 * addresses, kicks off pending auto-connections, and finally generates
 * (or batches/merges with a pending SCAN_RSP) mgmt device_found events.
 *
 * NOTE(review): excerpt with elided lines (switch header, several
 * returns, braces and local declarations like ptr/real_len/flags/match
 * are missing from the listing); comments only added.  The log string
 * "advetising" typo is runtime text and deliberately left untouched.
 */
4839 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4840 u8 bdaddr_type, bdaddr_t *direct_addr,
4841 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4843 struct discovery_state *d = &hdev->discovery;
4844 struct smp_irk *irk;
4845 struct hci_conn *conn;
/* Only the legacy advertising PDU types are accepted. */
4852 case LE_ADV_DIRECT_IND:
4853 case LE_ADV_SCAN_IND:
4854 case LE_ADV_NONCONN_IND:
4855 case LE_ADV_SCAN_RSP:
4858 BT_ERR_RATELIMITED("Unknown advetising packet type: 0x%02x",
/* Legacy advertising data is capped at 31 octets. */
4863 if (len > HCI_MAX_AD_LENGTH) {
4864 pr_err_ratelimited("legacy adv larger than 31 bytes");
4868 /* Find the end of the data in case the report contains padded zero
4869 * bytes at the end causing an invalid length value.
4871 * When data is NULL, len is 0 so there is no need for extra ptr
4872 * check as 'ptr < data + 0' is already false in such case.
4874 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
4875 if (ptr + 1 + *ptr > data + len)
4879 real_len = ptr - data;
4881 /* Adjust for actual length */
4882 if (len != real_len) {
4883 BT_ERR_RATELIMITED("%s advertising data length corrected",
4888 /* If the direct address is present, then this report is from
4889 * a LE Direct Advertising Report event. In that case it is
4890 * important to see if the address is matching the local
4891 * controller address.
4894 /* Only resolvable random addresses are valid for these
4895 * kind of reports and others can be ignored.
4897 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4900 /* If the controller is not using resolvable random
4901 * addresses, then this report can be ignored.
4903 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
4906 /* If the local IRK of the controller does not match
4907 * with the resolvable random address provided, then
4908 * this report can be ignored.
4910 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4914 /* Check if we need to convert to identity address */
4915 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4917 bdaddr = &irk->bdaddr;
4918 bdaddr_type = irk->addr_type;
4921 /* Check if we have been requested to connect to this device.
4923 * direct_addr is set only for directed advertising reports (it is NULL
4924 * for advertising reports) and is already verified to be RPA above.
4926 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
4928 if (conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
4929 /* Store report for later inclusion by
4930 * mgmt_device_connected
4932 memcpy(conn->le_adv_data, data, len);
4933 conn->le_adv_data_len = len;
4936 /* Passive scanning shouldn't trigger any device found events,
4937 * except for devices marked as CONN_REPORT for which we do send
4938 * device found events.
4940 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4941 if (type == LE_ADV_DIRECT_IND)
4944 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4945 bdaddr, bdaddr_type))
4948 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4949 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4952 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4953 rssi, flags, data, len, NULL, 0);
4957 /* When receiving non-connectable or scannable undirected
4958 * advertising reports, this means that the remote device is
4959 * not connectable and then clearly indicate this in the
4960 * device found event.
4962 * When receiving a scan response, then there is no way to
4963 * know if the remote device is connectable or not. However
4964 * since scan responses are merged with a previously seen
4965 * advertising report, the flags field from that report
4968 * In the really unlikely case that a controller get confused
4969 * and just sends a scan response event, then it is marked as
4970 * not connectable as well.
4972 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4973 type == LE_ADV_SCAN_RSP)
4974 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4978 /* If there's nothing pending either store the data from this
4979 * event or send an immediate device found event if the data
4980 * should not be stored for later.
4982 if (!has_pending_adv_report(hdev)) {
4983 /* If the report will trigger a SCAN_REQ store it for
4986 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4987 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4988 rssi, flags, data, len);
4992 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4993 rssi, flags, data, len, NULL, 0);
4997 /* Check if the pending report is for the same device as the new one */
4998 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4999 bdaddr_type == d->last_adv_addr_type);
5001 /* If the pending data doesn't match this report or this isn't a
5002 * scan response (e.g. we got a duplicate ADV_IND) then force
5003 * sending of the pending data.
5005 if (type != LE_ADV_SCAN_RSP || !match) {
5006 /* Send out whatever is in the cache, but skip duplicates */
5008 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5009 d->last_adv_addr_type, NULL,
5010 d->last_adv_rssi, d->last_adv_flags,
5012 d->last_adv_data_len, NULL, 0);
5014 /* If the new report will trigger a SCAN_REQ store it for
5017 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5018 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5019 rssi, flags, data, len);
5023 /* The advertising reports cannot be merged, so clear
5024 * the pending report and send out a device found event.
5026 clear_pending_adv_report(hdev);
5027 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5028 rssi, flags, data, len, NULL, 0);
5032 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5033 * the new event is a SCAN_RSP. We can therefore proceed with
5034 * sending a merged device found event.
5036 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5037 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5038 d->last_adv_data, d->last_adv_data_len, data, len);
5039 clear_pending_adv_report(hdev);
/* LE Advertising Report event: iterate over the batched reports in the
 * event payload.  Each report has an RSSI byte appended after its data;
 * reports whose length exceeds the legacy 31-byte cap are dropped
 * before process_adv_report() (prevents reading past the buffer).
 *
 * NOTE(review): excerpt with elided lines (hci_dev_lock, rssi local
 * declaration and braces are missing from the listing); comments only.
 */
5042 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5044 u8 num_reports = skb->data[0];
5045 void *ptr = &skb->data[1];
5049 while (num_reports--) {
5050 struct hci_ev_le_advertising_info *ev = ptr;
5053 if (ev->length <= HCI_MAX_AD_LENGTH) {
/* The RSSI octet immediately follows the variable-length data. */
5054 rssi = ev->data[ev->length];
5055 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5056 ev->bdaddr_type, NULL, 0, rssi,
5057 ev->data, ev->length);
5059 bt_dev_err(hdev, "Dropping invalid advertising data");
5062 ptr += sizeof(*ev) + ev->length + 1;
5065 hci_dev_unlock(hdev);
/* LE Read Remote Features Complete event: cache the peer's LE feature
 * bits and, if the connection was still in BT_CONFIG, transition it to
 * connected — tolerating error 0x1a (Unsupported Remote Feature) from
 * peers that lack slave-initiated feature exchange.
 *
 * NOTE(review): excerpt with elided lines (conn null-check, status
 * local declaration and braces are missing from the listing);
 * comments only added.
 */
5068 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5069 struct sk_buff *skb)
5071 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5072 struct hci_conn *conn;
5074 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5078 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5081 memcpy(conn->features[0], ev->features, 8);
5083 if (conn->state == BT_CONFIG) {
5086 /* If the local controller supports slave-initiated
5087 * features exchange, but the remote controller does
5088 * not, then it is possible that the error code 0x1a
5089 * for unsupported remote feature gets returned.
5091 * In this specific case, allow the connection to
5092 * transition into connected state and mark it as
5095 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5096 !conn->out && ev->status == 0x1a)
5099 status = ev->status;
5101 conn->state = BT_CONNECTED;
5102 hci_connect_cfm(conn, status);
5103 hci_conn_drop(conn);
5107 hci_dev_unlock(hdev);
/* LE Long Term Key Request event: the controller asks for the LTK to
 * start/resume encryption.  Look the key up by peer address and role,
 * validate EDiv/Rand (must be zero for SC keys, must match for legacy
 * keys), reply with the key padded to 16 bytes, and purge one-time STKs
 * after use.  A negative reply is sent when no suitable key exists.
 *
 * NOTE(review): excerpt with elided lines (null-check branches, goto
 * labels for the negative-reply path and braces are missing from the
 * listing); comments only added.  "<k->list" below looks like an
 * encoding-mangled "&ltk->list" — verify against the upstream source.
 */
5110 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5112 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5113 struct hci_cp_le_ltk_reply cp;
5114 struct hci_cp_le_ltk_neg_reply neg;
5115 struct hci_conn *conn;
5116 struct smp_ltk *ltk;
5118 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5122 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5126 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5130 if (smp_ltk_is_sc(ltk)) {
5131 /* With SC both EDiv and Rand are set to zero */
5132 if (ev->ediv || ev->rand)
5135 /* For non-SC keys check that EDiv and Rand match */
5136 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Keys shorter than 16 bytes are zero-padded to the full size. */
5140 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5141 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5142 cp.handle = cpu_to_le16(conn->handle);
5144 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5146 conn->enc_key_size = ltk->enc_size;
5148 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5150 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5151 * temporary key used to encrypt a connection following
5152 * pairing. It is used during the Encrypted Session Setup to
5153 * distribute the keys. Later, security can be re-established
5154 * using a distributed LTK.
5156 if (ltk->type == SMP_STK) {
5157 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5158 list_del_rcu(<k->list);
5159 kfree_rcu(ltk, rcu);
5161 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5164 hci_dev_unlock(hdev);
/* Negative-reply path: no key found or EDiv/Rand mismatch. */
5169 neg.handle = ev->handle;
5170 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5171 hci_dev_unlock(hdev);
5174 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5177 struct hci_cp_le_conn_param_req_neg_reply cp;
5179 cp.handle = cpu_to_le16(handle);
5182 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* LE Remote Connection Parameter Request event: the peer proposes new
 * connection parameters.  Reject requests for unknown/not-connected
 * handles and out-of-range values; otherwise (as master) remember the
 * values in the stored connection params, tell mgmt about them, and
 * accept the request verbatim.
 *
 * NOTE(review): excerpt with elided lines (hci_dev_lock, store_hint
 * handling and braces are missing from the listing); comments only.
 */
5186 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5187 struct sk_buff *skb)
5189 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5190 struct hci_cp_le_conn_param_req_reply cp;
5191 struct hci_conn *hcon;
5192 u16 handle, min, max, latency, timeout;
5194 handle = le16_to_cpu(ev->handle);
5195 min = le16_to_cpu(ev->interval_min);
5196 max = le16_to_cpu(ev->interval_max);
5197 latency = le16_to_cpu(ev->latency);
5198 timeout = le16_to_cpu(ev->timeout);
/* Reject requests on unknown handles or links not yet connected. */
5200 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5201 if (!hcon || hcon->state != BT_CONNECTED)
5202 return send_conn_param_neg_reply(hdev, handle,
5203 HCI_ERROR_UNKNOWN_CONN_ID);
5205 if (hci_check_conn_params(min, max, latency, timeout))
5206 return send_conn_param_neg_reply(hdev, handle,
5207 HCI_ERROR_INVALID_LL_PARAMS);
5209 if (hcon->role == HCI_ROLE_MASTER) {
5210 struct hci_conn_params *params;
5215 params = hci_conn_params_lookup(hdev, &hcon->dst,
5218 params->conn_min_interval = min;
5219 params->conn_max_interval = max;
5220 params->conn_latency = latency;
5221 params->supervision_timeout = timeout;
5227 hci_dev_unlock(hdev);
5229 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5230 store_hint, min, max, latency, timeout);
/* Accept the request by echoing the proposed values back. */
5233 cp.handle = ev->handle;
5234 cp.interval_min = ev->interval_min;
5235 cp.interval_max = ev->interval_max;
5236 cp.latency = ev->latency;
5237 cp.timeout = ev->timeout;
5241 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* LE Direct Advertising Report event: iterate the batched directed
 * reports and hand each to process_adv_report() with the direct
 * (initiator) address so it can be matched against our own RPA.
 * Directed reports carry no AD payload (data NULL, len 0).
 *
 * NOTE(review): excerpt with elided lines (hci_dev_lock and the
 * per-report pointer advance are missing from the listing);
 * comments only added.
 */
5244 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5245 struct sk_buff *skb)
5247 u8 num_reports = skb->data[0];
5248 void *ptr = &skb->data[1];
5252 while (num_reports--) {
5253 struct hci_ev_le_direct_adv_info *ev = ptr;
5255 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5256 ev->bdaddr_type, &ev->direct_addr,
5257 ev->direct_addr_type, ev->rssi, NULL, 0);
5262 hci_dev_unlock(hdev);
/* LE Meta event demultiplexer: strip the meta header and dispatch on
 * the subevent code to the specific LE event handler.
 *
 * NOTE(review): excerpt with elided lines (break statements and the
 * closing braces are missing from the listing); comments only added.
 */
5265 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5267 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5269 skb_pull(skb, sizeof(*le_ev));
5271 switch (le_ev->subevent) {
5272 case HCI_EV_LE_CONN_COMPLETE:
5273 hci_le_conn_complete_evt(hdev, skb);
5276 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5277 hci_le_conn_update_complete_evt(hdev, skb);
5280 case HCI_EV_LE_ADVERTISING_REPORT:
5281 hci_le_adv_report_evt(hdev, skb);
5284 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5285 hci_le_remote_feat_complete_evt(hdev, skb);
5288 case HCI_EV_LE_LTK_REQ:
5289 hci_le_ltk_request_evt(hdev, skb);
5292 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5293 hci_le_remote_conn_param_req_evt(hdev, skb);
5296 case HCI_EV_LE_DIRECT_ADV_REPORT:
5297 hci_le_direct_adv_report_evt(hdev, skb);
/* Check whether @skb (a pristine clone of the received event) is the
 * Command Complete event matching @opcode, peeling off headers so the
 * caller's req_complete_skb callback sees only the return parameters.
 * Returns true when the skb is usable for the callback; a Command
 * Status terminator also counts, but with no extra parameters.
 *
 * NOTE(review): excerpt with elided lines (return statements and
 * braces are missing from the listing); comments only added.
 */
5305 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5306 u8 event, struct sk_buff *skb)
5308 struct hci_ev_cmd_complete *ev;
5309 struct hci_event_hdr *hdr;
5314 if (skb->len < sizeof(*hdr)) {
5315 BT_ERR("Too short HCI event");
5319 hdr = (void *) skb->data;
5320 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5323 if (hdr->evt != event)
5328 /* Check if request ended in Command Status - no way to retrieve
5329 * any extra parameters in this case.
5331 if (hdr->evt == HCI_EV_CMD_STATUS)
5334 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5335 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5339 if (skb->len < sizeof(*ev)) {
5340 BT_ERR("Too short cmd_complete event");
5344 ev = (void *) skb->data;
5345 skb_pull(skb, sizeof(*ev));
5347 if (opcode != __le16_to_cpu(ev->opcode)) {
5348 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5349 __le16_to_cpu(ev->opcode));
5356 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5358 struct hci_event_hdr *hdr = (void *) skb->data;
5359 hci_req_complete_t req_complete = NULL;
5360 hci_req_complete_skb_t req_complete_skb = NULL;
5361 struct sk_buff *orig_skb = NULL;
5362 u8 status = 0, event = hdr->evt, req_evt = 0;
5363 u16 opcode = HCI_OP_NOP;
5366 bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
5370 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5371 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5372 opcode = __le16_to_cpu(cmd_hdr->opcode);
5373 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5378 /* If it looks like we might end up having to call
5379 * req_complete_skb, store a pristine copy of the skb since the
5380 * various handlers may modify the original one through
5381 * skb_pull() calls, etc.
5383 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5384 event == HCI_EV_CMD_COMPLETE)
5385 orig_skb = skb_clone(skb, GFP_KERNEL);
5387 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5390 case HCI_EV_INQUIRY_COMPLETE:
5391 hci_inquiry_complete_evt(hdev, skb);
5394 case HCI_EV_INQUIRY_RESULT:
5395 hci_inquiry_result_evt(hdev, skb);
5398 case HCI_EV_CONN_COMPLETE:
5399 hci_conn_complete_evt(hdev, skb);
5402 case HCI_EV_CONN_REQUEST:
5403 hci_conn_request_evt(hdev, skb);
5406 case HCI_EV_DISCONN_COMPLETE:
5407 hci_disconn_complete_evt(hdev, skb);
5410 case HCI_EV_AUTH_COMPLETE:
5411 hci_auth_complete_evt(hdev, skb);
5414 case HCI_EV_REMOTE_NAME:
5415 hci_remote_name_evt(hdev, skb);
5418 case HCI_EV_ENCRYPT_CHANGE:
5419 hci_encrypt_change_evt(hdev, skb);
5422 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5423 hci_change_link_key_complete_evt(hdev, skb);
5426 case HCI_EV_REMOTE_FEATURES:
5427 hci_remote_features_evt(hdev, skb);
5430 case HCI_EV_CMD_COMPLETE:
5431 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5432 &req_complete, &req_complete_skb);
5435 case HCI_EV_CMD_STATUS:
5436 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5440 case HCI_EV_HARDWARE_ERROR:
5441 hci_hardware_error_evt(hdev, skb);
5444 case HCI_EV_ROLE_CHANGE:
5445 hci_role_change_evt(hdev, skb);
5448 case HCI_EV_NUM_COMP_PKTS:
5449 hci_num_comp_pkts_evt(hdev, skb);
5452 case HCI_EV_MODE_CHANGE:
5453 hci_mode_change_evt(hdev, skb);
5456 case HCI_EV_PIN_CODE_REQ:
5457 hci_pin_code_request_evt(hdev, skb);
5460 case HCI_EV_LINK_KEY_REQ:
5461 hci_link_key_request_evt(hdev, skb);
5464 case HCI_EV_LINK_KEY_NOTIFY:
5465 hci_link_key_notify_evt(hdev, skb);
5468 case HCI_EV_CLOCK_OFFSET:
5469 hci_clock_offset_evt(hdev, skb);
5472 case HCI_EV_PKT_TYPE_CHANGE:
5473 hci_pkt_type_change_evt(hdev, skb);
5476 case HCI_EV_PSCAN_REP_MODE:
5477 hci_pscan_rep_mode_evt(hdev, skb);
5480 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5481 hci_inquiry_result_with_rssi_evt(hdev, skb);
5484 case HCI_EV_REMOTE_EXT_FEATURES:
5485 hci_remote_ext_features_evt(hdev, skb);
5488 case HCI_EV_SYNC_CONN_COMPLETE:
5489 hci_sync_conn_complete_evt(hdev, skb);
5492 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5493 hci_extended_inquiry_result_evt(hdev, skb);
5496 case HCI_EV_KEY_REFRESH_COMPLETE:
5497 hci_key_refresh_complete_evt(hdev, skb);
5500 case HCI_EV_IO_CAPA_REQUEST:
5501 hci_io_capa_request_evt(hdev, skb);
5504 case HCI_EV_IO_CAPA_REPLY:
5505 hci_io_capa_reply_evt(hdev, skb);
5508 case HCI_EV_USER_CONFIRM_REQUEST:
5509 hci_user_confirm_request_evt(hdev, skb);
5512 case HCI_EV_USER_PASSKEY_REQUEST:
5513 hci_user_passkey_request_evt(hdev, skb);
5516 case HCI_EV_USER_PASSKEY_NOTIFY:
5517 hci_user_passkey_notify_evt(hdev, skb);
5520 case HCI_EV_KEYPRESS_NOTIFY:
5521 hci_keypress_notify_evt(hdev, skb);
5524 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5525 hci_simple_pair_complete_evt(hdev, skb);
5528 case HCI_EV_REMOTE_HOST_FEATURES:
5529 hci_remote_host_features_evt(hdev, skb);
5532 case HCI_EV_LE_META:
5533 hci_le_meta_evt(hdev, skb);
5536 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5537 hci_remote_oob_data_request_evt(hdev, skb);
5540 #if IS_ENABLED(CONFIG_BT_HS)
5541 case HCI_EV_CHANNEL_SELECTED:
5542 hci_chan_selected_evt(hdev, skb);
5545 case HCI_EV_PHY_LINK_COMPLETE:
5546 hci_phy_link_complete_evt(hdev, skb);
5549 case HCI_EV_LOGICAL_LINK_COMPLETE:
5550 hci_loglink_complete_evt(hdev, skb);
5553 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5554 hci_disconn_loglink_complete_evt(hdev, skb);
5557 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5558 hci_disconn_phylink_complete_evt(hdev, skb);
5562 case HCI_EV_NUM_COMP_BLOCKS:
5563 hci_num_comp_blocks_evt(hdev, skb);
5567 case HCI_EV_VENDOR_SPECIFIC:
5568 hci_vendor_specific_evt(hdev, skb);
5573 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5578 req_complete(hdev, status, opcode);
5579 } else if (req_complete_skb) {
5580 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
5581 kfree_skb(orig_skb);
5584 req_complete_skb(hdev, status, opcode, orig_skb);
5588 kfree_skb(orig_skb);
5590 hdev->stat.evt_rx++;