2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
55 wake_up_bit(&hdev->flags, HCI_INQUIRY);
58 /* Set discovery state to stopped if we're not doing LE active
61 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
62 hdev->le_scan_type != LE_SCAN_ACTIVE)
63 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
66 hci_conn_check_pending(hdev);
69 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
71 __u8 status = *((__u8 *) skb->data);
73 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
81 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
83 __u8 status = *((__u8 *) skb->data);
85 BT_DBG("%s status 0x%2.2x", hdev->name, status);
90 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
92 hci_conn_check_pending(hdev);
95 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
98 BT_DBG("%s", hdev->name);
101 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
103 struct hci_rp_role_discovery *rp = (void *) skb->data;
104 struct hci_conn *conn;
106 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
113 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
115 conn->role = rp->role;
117 hci_dev_unlock(hdev);
120 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
122 struct hci_rp_read_link_policy *rp = (void *) skb->data;
123 struct hci_conn *conn;
125 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
132 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
134 conn->link_policy = __le16_to_cpu(rp->policy);
136 hci_dev_unlock(hdev);
139 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
141 struct hci_rp_write_link_policy *rp = (void *) skb->data;
142 struct hci_conn *conn;
145 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
150 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
156 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
158 conn->link_policy = get_unaligned_le16(sent + 2);
160 hci_dev_unlock(hdev);
163 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
166 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
168 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
173 hdev->link_policy = __le16_to_cpu(rp->policy);
176 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
179 __u8 status = *((__u8 *) skb->data);
182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
187 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
191 hdev->link_policy = get_unaligned_le16(sent);
194 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
196 __u8 status = *((__u8 *) skb->data);
198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
200 clear_bit(HCI_RESET, &hdev->flags);
205 /* Reset all non-persistent flags */
206 hci_dev_clear_volatile_flags(hdev);
208 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
210 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
211 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
213 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
214 hdev->adv_data_len = 0;
216 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
217 hdev->scan_rsp_data_len = 0;
219 hdev->le_scan_type = LE_SCAN_PASSIVE;
221 hdev->ssp_debug_mode = 0;
223 hci_bdaddr_list_clear(&hdev->le_white_list);
224 hci_bdaddr_list_clear(&hdev->le_resolv_list);
227 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
230 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
231 struct hci_cp_read_stored_link_key *sent;
233 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
235 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
239 if (!rp->status && sent->read_all == 0x01) {
240 hdev->stored_max_keys = rp->max_keys;
241 hdev->stored_num_keys = rp->num_keys;
245 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
248 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
250 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
255 if (rp->num_keys <= hdev->stored_num_keys)
256 hdev->stored_num_keys -= rp->num_keys;
258 hdev->stored_num_keys = 0;
261 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
263 __u8 status = *((__u8 *) skb->data);
266 BT_DBG("%s status 0x%2.2x", hdev->name, status);
268 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
274 if (hci_dev_test_flag(hdev, HCI_MGMT))
275 mgmt_set_local_name_complete(hdev, sent, status);
277 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
279 hci_dev_unlock(hdev);
282 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
284 struct hci_rp_read_local_name *rp = (void *) skb->data;
286 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
291 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
292 hci_dev_test_flag(hdev, HCI_CONFIG))
293 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
296 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
298 __u8 status = *((__u8 *) skb->data);
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
310 __u8 param = *((__u8 *) sent);
312 if (param == AUTH_ENABLED)
313 set_bit(HCI_AUTH, &hdev->flags);
315 clear_bit(HCI_AUTH, &hdev->flags);
318 if (hci_dev_test_flag(hdev, HCI_MGMT))
319 mgmt_auth_enable_complete(hdev, status);
321 hci_dev_unlock(hdev);
324 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
326 __u8 status = *((__u8 *) skb->data);
330 BT_DBG("%s status 0x%2.2x", hdev->name, status);
335 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
339 param = *((__u8 *) sent);
342 set_bit(HCI_ENCRYPT, &hdev->flags);
344 clear_bit(HCI_ENCRYPT, &hdev->flags);
347 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
349 __u8 status = *((__u8 *) skb->data);
353 BT_DBG("%s status 0x%2.2x", hdev->name, status);
355 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
359 param = *((__u8 *) sent);
364 hdev->discov_timeout = 0;
368 if (param & SCAN_INQUIRY)
369 set_bit(HCI_ISCAN, &hdev->flags);
371 clear_bit(HCI_ISCAN, &hdev->flags);
373 if (param & SCAN_PAGE)
374 set_bit(HCI_PSCAN, &hdev->flags);
376 clear_bit(HCI_PSCAN, &hdev->flags);
379 hci_dev_unlock(hdev);
382 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
384 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
386 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
391 memcpy(hdev->dev_class, rp->dev_class, 3);
393 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
394 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
397 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 __u8 status = *((__u8 *) skb->data);
402 BT_DBG("%s status 0x%2.2x", hdev->name, status);
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
411 memcpy(hdev->dev_class, sent, 3);
413 if (hci_dev_test_flag(hdev, HCI_MGMT))
414 mgmt_set_class_of_dev_complete(hdev, sent, status);
416 hci_dev_unlock(hdev);
419 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
421 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
429 setting = __le16_to_cpu(rp->voice_setting);
431 if (hdev->voice_setting == setting)
434 hdev->voice_setting = setting;
436 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
439 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
442 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
445 __u8 status = *((__u8 *) skb->data);
449 BT_DBG("%s status 0x%2.2x", hdev->name, status);
454 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
458 setting = get_unaligned_le16(sent);
460 if (hdev->voice_setting == setting)
463 hdev->voice_setting = setting;
465 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
468 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
471 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
474 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
476 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
481 hdev->num_iac = rp->num_iac;
483 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
486 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
488 __u8 status = *((__u8 *) skb->data);
489 struct hci_cp_write_ssp_mode *sent;
491 BT_DBG("%s status 0x%2.2x", hdev->name, status);
493 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
501 hdev->features[1][0] |= LMP_HOST_SSP;
503 hdev->features[1][0] &= ~LMP_HOST_SSP;
506 if (hci_dev_test_flag(hdev, HCI_MGMT))
507 mgmt_ssp_enable_complete(hdev, sent->mode, status);
510 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
512 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
515 hci_dev_unlock(hdev);
518 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
520 u8 status = *((u8 *) skb->data);
521 struct hci_cp_write_sc_support *sent;
523 BT_DBG("%s status 0x%2.2x", hdev->name, status);
525 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
533 hdev->features[1][0] |= LMP_HOST_SC;
535 hdev->features[1][0] &= ~LMP_HOST_SC;
538 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
540 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
542 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
545 hci_dev_unlock(hdev);
548 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
550 struct hci_rp_read_local_version *rp = (void *) skb->data;
552 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
557 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
558 hci_dev_test_flag(hdev, HCI_CONFIG)) {
559 hdev->hci_ver = rp->hci_ver;
560 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
561 hdev->lmp_ver = rp->lmp_ver;
562 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
563 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
567 static void hci_cc_read_local_commands(struct hci_dev *hdev,
570 struct hci_rp_read_local_commands *rp = (void *) skb->data;
572 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
577 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
578 hci_dev_test_flag(hdev, HCI_CONFIG))
579 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
582 static void hci_cc_read_auth_payload_timeout(struct hci_dev *hdev,
585 struct hci_rp_read_auth_payload_to *rp = (void *)skb->data;
586 struct hci_conn *conn;
588 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
595 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
597 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
599 hci_dev_unlock(hdev);
602 static void hci_cc_write_auth_payload_timeout(struct hci_dev *hdev,
605 struct hci_rp_write_auth_payload_to *rp = (void *)skb->data;
606 struct hci_conn *conn;
609 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
614 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
620 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
622 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
624 hci_dev_unlock(hdev);
627 static void hci_cc_read_local_features(struct hci_dev *hdev,
630 struct hci_rp_read_local_features *rp = (void *) skb->data;
632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
637 memcpy(hdev->features, rp->features, 8);
639 /* Adjust default settings according to features
640 * supported by device. */
642 if (hdev->features[0][0] & LMP_3SLOT)
643 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
645 if (hdev->features[0][0] & LMP_5SLOT)
646 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
648 if (hdev->features[0][1] & LMP_HV2) {
649 hdev->pkt_type |= (HCI_HV2);
650 hdev->esco_type |= (ESCO_HV2);
653 if (hdev->features[0][1] & LMP_HV3) {
654 hdev->pkt_type |= (HCI_HV3);
655 hdev->esco_type |= (ESCO_HV3);
658 if (lmp_esco_capable(hdev))
659 hdev->esco_type |= (ESCO_EV3);
661 if (hdev->features[0][4] & LMP_EV4)
662 hdev->esco_type |= (ESCO_EV4);
664 if (hdev->features[0][4] & LMP_EV5)
665 hdev->esco_type |= (ESCO_EV5);
667 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
668 hdev->esco_type |= (ESCO_2EV3);
670 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
671 hdev->esco_type |= (ESCO_3EV3);
673 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
674 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
677 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
680 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
682 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
687 if (hdev->max_page < rp->max_page)
688 hdev->max_page = rp->max_page;
690 if (rp->page < HCI_MAX_PAGES)
691 memcpy(hdev->features[rp->page], rp->features, 8);
694 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
697 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
699 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
704 hdev->flow_ctl_mode = rp->mode;
707 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
709 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
711 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
717 hdev->sco_mtu = rp->sco_mtu;
718 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
719 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
721 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
726 hdev->acl_cnt = hdev->acl_pkts;
727 hdev->sco_cnt = hdev->sco_pkts;
729 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
730 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
733 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
735 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
737 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
742 if (test_bit(HCI_INIT, &hdev->flags))
743 bacpy(&hdev->bdaddr, &rp->bdaddr);
745 if (hci_dev_test_flag(hdev, HCI_SETUP))
746 bacpy(&hdev->setup_addr, &rp->bdaddr);
749 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
752 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
754 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
759 if (test_bit(HCI_INIT, &hdev->flags)) {
760 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
761 hdev->page_scan_window = __le16_to_cpu(rp->window);
765 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
768 u8 status = *((u8 *) skb->data);
769 struct hci_cp_write_page_scan_activity *sent;
771 BT_DBG("%s status 0x%2.2x", hdev->name, status);
776 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
780 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
781 hdev->page_scan_window = __le16_to_cpu(sent->window);
784 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
787 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
789 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
794 if (test_bit(HCI_INIT, &hdev->flags))
795 hdev->page_scan_type = rp->type;
798 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
801 u8 status = *((u8 *) skb->data);
804 BT_DBG("%s status 0x%2.2x", hdev->name, status);
809 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
811 hdev->page_scan_type = *type;
814 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
817 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
819 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
824 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
825 hdev->block_len = __le16_to_cpu(rp->block_len);
826 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
828 hdev->block_cnt = hdev->num_blocks;
830 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
831 hdev->block_cnt, hdev->block_len);
834 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
836 struct hci_rp_read_clock *rp = (void *) skb->data;
837 struct hci_cp_read_clock *cp;
838 struct hci_conn *conn;
840 BT_DBG("%s", hdev->name);
842 if (skb->len < sizeof(*rp))
850 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
854 if (cp->which == 0x00) {
855 hdev->clock = le32_to_cpu(rp->clock);
859 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
861 conn->clock = le32_to_cpu(rp->clock);
862 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
866 hci_dev_unlock(hdev);
869 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
872 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
874 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
879 hdev->amp_status = rp->amp_status;
880 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
881 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
882 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
883 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
884 hdev->amp_type = rp->amp_type;
885 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
886 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
887 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
888 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
891 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
894 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
896 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
901 hdev->inq_tx_power = rp->tx_power;
904 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
906 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
907 struct hci_cp_pin_code_reply *cp;
908 struct hci_conn *conn;
910 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
914 if (hci_dev_test_flag(hdev, HCI_MGMT))
915 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
920 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
924 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
926 conn->pin_length = cp->pin_len;
929 hci_dev_unlock(hdev);
932 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
934 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
936 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
940 if (hci_dev_test_flag(hdev, HCI_MGMT))
941 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
944 hci_dev_unlock(hdev);
947 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
950 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
952 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
957 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
958 hdev->le_pkts = rp->le_max_pkt;
960 hdev->le_cnt = hdev->le_pkts;
962 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
965 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
968 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
970 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
975 memcpy(hdev->le_features, rp->features, 8);
978 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
981 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
983 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
988 hdev->adv_tx_power = rp->tx_power;
991 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
993 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
995 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999 if (hci_dev_test_flag(hdev, HCI_MGMT))
1000 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1003 hci_dev_unlock(hdev);
1006 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1007 struct sk_buff *skb)
1009 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1011 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1015 if (hci_dev_test_flag(hdev, HCI_MGMT))
1016 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1017 ACL_LINK, 0, rp->status);
1019 hci_dev_unlock(hdev);
1022 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1024 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1026 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030 if (hci_dev_test_flag(hdev, HCI_MGMT))
1031 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1034 hci_dev_unlock(hdev);
1037 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1038 struct sk_buff *skb)
1040 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1042 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046 if (hci_dev_test_flag(hdev, HCI_MGMT))
1047 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1048 ACL_LINK, 0, rp->status);
1050 hci_dev_unlock(hdev);
1053 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1054 struct sk_buff *skb)
1056 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1058 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1061 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1062 struct sk_buff *skb)
1064 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1066 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1069 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1071 __u8 status = *((__u8 *) skb->data);
1074 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1079 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1085 bacpy(&hdev->random_addr, sent);
1087 hci_dev_unlock(hdev);
1090 static void hci_cc_le_set_default_phy(struct hci_dev *hdev, struct sk_buff *skb)
1092 __u8 status = *((__u8 *) skb->data);
1093 struct hci_cp_le_set_default_phy *cp;
1095 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1100 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1106 hdev->le_tx_def_phys = cp->tx_phys;
1107 hdev->le_rx_def_phys = cp->rx_phys;
1109 hci_dev_unlock(hdev);
1112 static void hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev,
1113 struct sk_buff *skb)
1115 __u8 status = *((__u8 *) skb->data);
1116 struct hci_cp_le_set_adv_set_rand_addr *cp;
1117 struct adv_info *adv_instance;
1122 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1128 if (!hdev->cur_adv_instance) {
1129 /* Store in hdev for instance 0 (Set adv and Directed advs) */
1130 bacpy(&hdev->random_addr, &cp->bdaddr);
1132 adv_instance = hci_find_adv_instance(hdev,
1133 hdev->cur_adv_instance);
1135 bacpy(&adv_instance->random_addr, &cp->bdaddr);
1138 hci_dev_unlock(hdev);
1141 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1143 __u8 *sent, status = *((__u8 *) skb->data);
1145 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1150 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1156 /* If we're doing connection initiation as peripheral. Set a
1157 * timeout in case something goes wrong.
1160 struct hci_conn *conn;
1162 hci_dev_set_flag(hdev, HCI_LE_ADV);
1164 conn = hci_lookup_le_connect(hdev);
1166 queue_delayed_work(hdev->workqueue,
1167 &conn->le_conn_timeout,
1168 conn->conn_timeout);
1170 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1173 hci_dev_unlock(hdev);
1176 static void hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev,
1177 struct sk_buff *skb)
1179 struct hci_cp_le_set_ext_adv_enable *cp;
1180 __u8 status = *((__u8 *) skb->data);
1182 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1187 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1194 struct hci_conn *conn;
1196 hci_dev_set_flag(hdev, HCI_LE_ADV);
1198 conn = hci_lookup_le_connect(hdev);
1200 queue_delayed_work(hdev->workqueue,
1201 &conn->le_conn_timeout,
1202 conn->conn_timeout);
1204 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1207 hci_dev_unlock(hdev);
1210 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1212 struct hci_cp_le_set_scan_param *cp;
1213 __u8 status = *((__u8 *) skb->data);
1215 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1220 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1226 hdev->le_scan_type = cp->type;
1228 hci_dev_unlock(hdev);
1231 static void hci_cc_le_set_ext_scan_param(struct hci_dev *hdev,
1232 struct sk_buff *skb)
1234 struct hci_cp_le_set_ext_scan_params *cp;
1235 __u8 status = *((__u8 *) skb->data);
1236 struct hci_cp_le_scan_phy_params *phy_param;
1238 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1243 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1247 phy_param = (void *)cp->data;
1251 hdev->le_scan_type = phy_param->type;
1253 hci_dev_unlock(hdev);
1256 static bool has_pending_adv_report(struct hci_dev *hdev)
1258 struct discovery_state *d = &hdev->discovery;
1260 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1263 static void clear_pending_adv_report(struct hci_dev *hdev)
1265 struct discovery_state *d = &hdev->discovery;
1267 bacpy(&d->last_adv_addr, BDADDR_ANY);
1268 d->last_adv_data_len = 0;
1271 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1272 u8 bdaddr_type, s8 rssi, u32 flags,
1275 struct discovery_state *d = &hdev->discovery;
1277 bacpy(&d->last_adv_addr, bdaddr);
1278 d->last_adv_addr_type = bdaddr_type;
1279 d->last_adv_rssi = rssi;
1280 d->last_adv_flags = flags;
1281 memcpy(d->last_adv_data, data, len);
1282 d->last_adv_data_len = len;
1285 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1290 case LE_SCAN_ENABLE:
1291 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1292 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1293 clear_pending_adv_report(hdev);
1296 case LE_SCAN_DISABLE:
1297 /* We do this here instead of when setting DISCOVERY_STOPPED
1298 * since the latter would potentially require waiting for
1299 * inquiry to stop too.
1301 if (has_pending_adv_report(hdev)) {
1302 struct discovery_state *d = &hdev->discovery;
1304 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1305 d->last_adv_addr_type, NULL,
1306 d->last_adv_rssi, d->last_adv_flags,
1308 d->last_adv_data_len, NULL, 0);
1311 /* Cancel this timer so that we don't try to disable scanning
1312 * when it's already disabled.
1314 cancel_delayed_work(&hdev->le_scan_disable);
1316 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1318 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1319 * interrupted scanning due to a connect request. Mark
1320 * therefore discovery as stopped. If this was not
1321 * because of a connect request advertising might have
1322 * been disabled because of active scanning, so
1323 * re-enable it again if necessary.
1325 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1326 #ifndef TIZEN_BT /* The below line is kernel bug. */
1327 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1329 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1331 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1332 hdev->discovery.state == DISCOVERY_FINDING)
1333 hci_req_reenable_advertising(hdev);
1338 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1343 hci_dev_unlock(hdev);
1346 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1347 struct sk_buff *skb)
1349 struct hci_cp_le_set_scan_enable *cp;
1350 __u8 status = *((__u8 *) skb->data);
1352 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1357 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1361 le_set_scan_enable_complete(hdev, cp->enable);
1364 static void hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev,
1365 struct sk_buff *skb)
1367 struct hci_cp_le_set_ext_scan_enable *cp;
1368 __u8 status = *((__u8 *) skb->data);
1370 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1375 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1379 le_set_scan_enable_complete(hdev, cp->enable);
1382 static void hci_cc_le_read_num_adv_sets(struct hci_dev *hdev,
1383 struct sk_buff *skb)
1385 struct hci_rp_le_read_num_supported_adv_sets *rp = (void *) skb->data;
1387 BT_DBG("%s status 0x%2.2x No of Adv sets %u", hdev->name, rp->status,
1393 hdev->le_num_of_adv_sets = rp->num_of_sets;
1396 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1397 struct sk_buff *skb)
1399 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1401 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1406 hdev->le_white_list_size = rp->size;
1409 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1410 struct sk_buff *skb)
1412 __u8 status = *((__u8 *) skb->data);
1414 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1419 hci_bdaddr_list_clear(&hdev->le_white_list);
1422 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1423 struct sk_buff *skb)
1425 struct hci_cp_le_add_to_white_list *sent;
1426 __u8 status = *((__u8 *) skb->data);
1428 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1433 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1437 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1441 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1442 struct sk_buff *skb)
1444 struct hci_cp_le_del_from_white_list *sent;
1445 __u8 status = *((__u8 *) skb->data);
1447 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1452 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1456 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1460 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1461 struct sk_buff *skb)
1463 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1465 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1470 memcpy(hdev->le_states, rp->le_states, 8);
1473 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1474 struct sk_buff *skb)
1476 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1478 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1483 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1484 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1487 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1488 struct sk_buff *skb)
1490 struct hci_cp_le_write_def_data_len *sent;
1491 __u8 status = *((__u8 *) skb->data);
1493 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1498 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1502 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1503 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete of LE Add Device To Resolving List: mirror the entry
 * (address, type and peer IRK from the sent command) into the host copy
 * of the controller resolving list.
 */
1506 static void hci_cc_le_add_to_resolv_list(struct hci_dev *hdev,
1507 struct sk_buff *skb)
1509 struct hci_cp_le_add_to_resolv_list *sent;
1510 __u8 status = *((__u8 *) skb->data);
1512 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1517 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
1521 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
1522 sent->bdaddr_type, sent->peer_irk,

/* Command Complete of LE Remove Device From Resolving List: drop the
 * matching entry from the host mirror.
 */
1526 static void hci_cc_le_del_from_resolv_list(struct hci_dev *hdev,
1527 struct sk_buff *skb)
1529 struct hci_cp_le_del_from_resolv_list *sent;
1530 __u8 status = *((__u8 *) skb->data);
1532 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1537 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
1541 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,

/* Command Complete of LE Clear Resolving List: empty the host mirror. */
1545 static void hci_cc_le_clear_resolv_list(struct hci_dev *hdev,
1546 struct sk_buff *skb)
1548 __u8 status = *((__u8 *) skb->data);
1550 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1555 hci_bdaddr_list_clear(&hdev->le_resolv_list);

/* Command Complete of LE Read Resolving List Size: cache capacity. */
1558 static void hci_cc_le_read_resolv_list_size(struct hci_dev *hdev,
1559 struct sk_buff *skb)
1561 struct hci_rp_le_read_resolv_list_size *rp = (void *) skb->data;
1563 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1568 hdev->le_resolv_list_size = rp->size;

/* Command Complete of LE Set Address Resolution Enable: track whether
 * controller-side (link layer) RPA resolution is on via the
 * HCI_LL_RPA_RESOLUTION device flag, under the dev lock.
 */
1571 static void hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev,
1572 struct sk_buff *skb)
1574 __u8 *sent, status = *((__u8 *) skb->data);
1576 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1581 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
1588 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
1590 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
1592 hci_dev_unlock(hdev);
/* Command Complete of LE Read Maximum Data Length: cache the four
 * controller limits (TX/RX octets and time) for data length extension.
 */
1595 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1596 struct sk_buff *skb)
1598 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1600 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1605 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1606 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1607 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1608 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);

/* Command Complete of Write LE Host Supported: keep the cached host
 * feature bits (features[1][0]) and the HCI_LE_ENABLED/HCI_ADVERTISING
 * flags in sync with what the command requested.
 */
1611 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1612 struct sk_buff *skb)
1614 struct hci_cp_write_le_host_supported *sent;
1615 __u8 status = *((__u8 *) skb->data);
1617 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1622 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
/* LE enabled: set host-LE feature bit and flag ... */
1629 hdev->features[1][0] |= LMP_HOST_LE;
1630 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* ... otherwise clear LE-related bits; advertising cannot stay on
 * with LE disabled.
 */
1632 hdev->features[1][0] &= ~LMP_HOST_LE;
1633 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
1634 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
/* Simultaneous LE + BR/EDR host support bit. */
1638 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1640 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1642 hci_dev_unlock(hdev);
/* Command Complete of LE Set Advertising Parameters: remember which own
 * address type the controller will advertise with.
 */
1645 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1647 struct hci_cp_le_set_adv_param *cp;
1648 u8 status = *((u8 *) skb->data);
1650 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1655 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1660 hdev->adv_addr_type = cp->own_address_type;
1661 hci_dev_unlock(hdev);

/* Command Complete of LE Set Extended Advertising Parameters: record the
 * own address type and the selected TX power -- on hdev itself for
 * instance 0, otherwise on the current advertising instance -- then
 * refresh the advertising data now that TX power is known.
 */
1664 static void hci_cc_set_ext_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1666 struct hci_rp_le_set_ext_adv_params *rp = (void *) skb->data;
1667 struct hci_cp_le_set_ext_adv_params *cp;
1668 struct adv_info *adv_instance;
1670 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1675 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
1680 hdev->adv_addr_type = cp->own_addr_type;
1681 if (!hdev->cur_adv_instance) {
1682 /* Store in hdev for instance 0 */
1683 hdev->adv_tx_power = rp->tx_power;
1685 adv_instance = hci_find_adv_instance(hdev,
1686 hdev->cur_adv_instance);
1688 adv_instance->tx_power = rp->tx_power;
1690 /* Update adv data as tx power is known now */
1691 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1692 hci_dev_unlock(hdev);
/* Vendor extension (not in mainline hci_event.c): Command Complete of an
 * "enable RSSI monitoring" vendor command -- forwards the result to the
 * mgmt layer.  NOTE(review): exact vendor opcode semantics not visible
 * here; confirm against the vendor HCI documentation.
 */
1696 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1697 struct sk_buff *skb)
1699 struct hci_cc_rsp_enable_rssi *rp = (void *)skb->data;
1701 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1702 hdev->name, rp->status, rp->le_ext_opcode);
1704 mgmt_enable_rssi_cc(hdev, rp, rp->status);

/* Vendor extension: Command Complete of a "get raw RSSI" vendor command;
 * response carries connection handle and RSSI in dBm, relayed to mgmt.
 */
1707 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1708 struct sk_buff *skb)
1710 struct hci_cc_rp_get_raw_rssi *rp = (void *)skb->data;
1712 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1713 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1715 mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Command Complete of HCI Read RSSI: store the reported RSSI on the
 * connection identified by the response handle.
 */
1719 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1721 struct hci_rp_read_rssi *rp = (void *) skb->data;
1722 struct hci_conn *conn;
1724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1731 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1733 conn->rssi = rp->rssi;
1735 hci_dev_unlock(hdev);

/* Command Complete of HCI Read Transmit Power Level: the sent command's
 * type field selects whether the response is the current or the maximum
 * TX power for the connection.
 */
1738 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1740 struct hci_cp_read_tx_power *sent;
1741 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1742 struct hci_conn *conn;
1744 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1749 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1755 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1759 switch (sent->type) {
1761 conn->tx_power = rp->tx_power;
1764 conn->max_tx_power = rp->tx_power;
1769 hci_dev_unlock(hdev);

/* Command Complete of Write SSP Debug Mode: cache the mode we sent. */
1772 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1774 u8 status = *((u8 *) skb->data);
1777 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1782 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1784 hdev->ssp_debug_mode = *mode;
/* Command Status of HCI Inquiry: kick any pending connection work and,
 * on success, mark the inquiry as in progress.
 */
1787 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1789 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1792 hci_conn_check_pending(hdev);
1796 set_bit(HCI_INQUIRY, &hdev->flags);

/* Command Status of HCI Create Connection.  On failure, close the
 * pending conn (status 0x0c -- Command Disallowed -- allows up to two
 * retries via BT_CONNECT2 before giving up); on success with no conn
 * object yet, create one so the Connection Complete event finds it.
 */
1799 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1801 struct hci_cp_create_conn *cp;
1802 struct hci_conn *conn;
1804 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1806 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1812 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1814 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1817 if (conn && conn->state == BT_CONNECT) {
1818 if (status != 0x0c || conn->attempt > 2) {
1819 conn->state = BT_CLOSED;
1820 hci_connect_cfm(conn, status);
1823 conn->state = BT_CONNECT2;
1827 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1830 bt_dev_err(hdev, "no memory for new connection");
1834 hci_dev_unlock(hdev);

/* Command Status of HCI Add SCO Connection: on failure, close the SCO
 * conn hanging off the ACL link and notify via connect_cfm.
 */
1837 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1839 struct hci_cp_add_sco *cp;
1840 struct hci_conn *acl, *sco;
1843 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1848 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1852 handle = __le16_to_cpu(cp->handle);
1854 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1858 acl = hci_conn_hash_lookup_handle(hdev, handle);
1862 sco->state = BT_CLOSED;
1864 hci_connect_cfm(sco, status);
1869 hci_dev_unlock(hdev);
/* Command Status of HCI Authentication Requested: on failure while the
 * conn is still in BT_CONFIG, deliver the failure and drop the ref.
 */
1872 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1874 struct hci_cp_auth_requested *cp;
1875 struct hci_conn *conn;
1877 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1882 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1888 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1890 if (conn->state == BT_CONFIG) {
1891 hci_connect_cfm(conn, status);
1892 hci_conn_drop(conn);
1896 hci_dev_unlock(hdev);

/* Command Status of HCI Set Connection Encryption: same failure handling
 * pattern as hci_cs_auth_requested above.
 */
1899 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1901 struct hci_cp_set_conn_encrypt *cp;
1902 struct hci_conn *conn;
1904 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1909 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1915 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1917 if (conn->state == BT_CONFIG) {
1918 hci_connect_cfm(conn, status);
1919 hci_conn_drop(conn);
1923 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication:
 * only conns in BT_CONFIG that we initiated, with a security level
 * above SDP, and (for legacy non-SSP links) only for MEDIUM/HIGH/FIPS
 * or when MITM protection was requested (auth_type bit 0).
 */
1926 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1927 struct hci_conn *conn)
1929 if (conn->state != BT_CONFIG || !conn->out)
1932 if (conn->pending_sec_level == BT_SECURITY_SDP)
1935 /* Only request authentication for SSP connections or non-SSP
1936 * devices with sec_level MEDIUM or HIGH or if MITM protection
1939 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1940 conn->pending_sec_level != BT_SECURITY_FIPS &&
1941 conn->pending_sec_level != BT_SECURITY_HIGH &&
1942 conn->pending_sec_level != BT_SECURITY_MEDIUM)

/* Issue an HCI Remote Name Request for an inquiry-cache entry, reusing
 * the page-scan parameters learned during inquiry.  Returns the
 * hci_send_cmd() result (0 on success).
 */
1948 static int hci_resolve_name(struct hci_dev *hdev,
1949 struct inquiry_entry *e)
1951 struct hci_cp_remote_name_req cp;
1953 memset(&cp, 0, sizeof(cp));
1955 bacpy(&cp.bdaddr, &e->data.bdaddr);
1956 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1957 cp.pscan_mode = e->data.pscan_mode;
1958 cp.clock_offset = e->data.clock_offset;
1960 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);

/* Pick the next NAME_NEEDED entry from the discovery resolve list and
 * start resolving it; returns whether a request was started.
 */
1963 static bool hci_resolve_next_name(struct hci_dev *hdev)
1965 struct discovery_state *discov = &hdev->discovery;
1966 struct inquiry_entry *e;
1968 if (list_empty(&discov->resolve))
1971 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1975 if (hci_resolve_name(hdev, e) == 0) {
1976 e->name_state = NAME_PENDING;
/* Remote-name result bookkeeping: push connected/name updates to mgmt,
 * then drive the discovery name-resolution state machine -- record the
 * resolved (or failed) entry, move on to the next pending name, or mark
 * discovery stopped when nothing is left.
 */
1983 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1984 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1986 struct discovery_state *discov = &hdev->discovery;
1987 struct inquiry_entry *e;
1990 /* Update the mgmt connected state if necessary. Be careful with
1991 * conn objects that exist but are not (yet) connected however.
1992 * Only those in BT_CONFIG or BT_CONNECTED states can be
1993 * considered connected.
1996 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
1997 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1998 mgmt_device_connected(hdev, conn, 0, name, name_len);
2000 mgmt_device_name_update(hdev, bdaddr, name, name_len);
2004 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2005 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2006 mgmt_device_connected(hdev, conn, 0, name, name_len);
2009 if (discov->state == DISCOVERY_STOPPED)
2012 if (discov->state == DISCOVERY_STOPPING)
2013 goto discov_complete;
2015 if (discov->state != DISCOVERY_RESOLVING)
2018 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING)
2019 /* If the device was not found in a list of found devices names of which
2020 * are pending. there is no need to continue resolving a next name as it
2021 * will be done upon receiving another Remote Name Request Complete
2028 e->name_state = NAME_KNOWN;
2029 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
2030 e->data.rssi, name, name_len);
2032 e->name_state = NAME_NOT_KNOWN;
2035 if (hci_resolve_next_name(hdev))
2039 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status of Remote Name Request: on failure, let the pending-
 * name machinery (mgmt path) handle it; otherwise, if the link still
 * needs authentication, fire Authentication Requested now instead of
 * waiting for the name to come back.
 */
2042 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2044 struct hci_cp_remote_name_req *cp;
2045 struct hci_conn *conn;
2047 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2049 /* If successful wait for the name req complete event before
2050 * checking for the need to do authentication */
2054 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2060 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2062 if (hci_dev_test_flag(hdev, HCI_MGMT))
2063 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2068 if (!hci_outgoing_auth_needed(hdev, conn))
2071 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2072 struct hci_cp_auth_requested auth_cp;
2074 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2076 auth_cp.handle = __cpu_to_le16(conn->handle);
2077 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2078 sizeof(auth_cp), &auth_cp);
2082 hci_dev_unlock(hdev);

/* Command Status of Read Remote Features: failure while in BT_CONFIG
 * aborts connection setup (connect_cfm + drop).
 */
2085 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2087 struct hci_cp_read_remote_features *cp;
2088 struct hci_conn *conn;
2090 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2095 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2101 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2103 if (conn->state == BT_CONFIG) {
2104 hci_connect_cfm(conn, status);
2105 hci_conn_drop(conn);
2109 hci_dev_unlock(hdev);

/* Command Status of Read Remote Extended Features: same pattern. */
2112 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2114 struct hci_cp_read_remote_ext_features *cp;
2115 struct hci_conn *conn;
2117 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2122 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2130 if (conn->state == BT_CONFIG) {
2131 hci_connect_cfm(conn, status);
2132 hci_conn_drop(conn);
2136 hci_dev_unlock(hdev);
/* Command Status of Setup Synchronous Connection: on failure, close the
 * SCO conn attached to the ACL link identified by the sent handle.
 */
2139 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2141 struct hci_cp_setup_sync_conn *cp;
2142 struct hci_conn *acl, *sco;
2145 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2150 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2154 handle = __le16_to_cpu(cp->handle);
2156 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
2160 acl = hci_conn_hash_lookup_handle(hdev, handle);
2164 sco->state = BT_CLOSED;
2166 hci_connect_cfm(sco, status);
2171 hci_dev_unlock(hdev);

/* Command Status of Sniff Mode: clear the mode-change-pending bit and,
 * if a SCO setup was queued behind the mode change, run it now.
 */
2174 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2176 struct hci_cp_sniff_mode *cp;
2177 struct hci_conn *conn;
2179 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2184 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2190 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2192 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2194 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2195 hci_sco_setup(conn, status);
2198 hci_dev_unlock(hdev);

/* Command Status of Exit Sniff Mode: mirror of hci_cs_sniff_mode. */
2201 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2203 struct hci_cp_exit_sniff_mode *cp;
2204 struct hci_conn *conn;
2206 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2211 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2217 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2219 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2221 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2222 hci_sco_setup(conn, status);
2225 hci_dev_unlock(hdev);

/* Command Status of Disconnect: a failed disconnect is reported to the
 * mgmt layer so user space learns the device was not disconnected.
 */
2228 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2230 struct hci_cp_disconnect *cp;
2231 struct hci_conn *conn;
2236 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2242 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2244 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2245 conn->dst_type, status);
2247 hci_dev_unlock(hdev);
/* Common helper for LE Create Connection command-status handling: stash
 * the initiator/responder addresses needed later by SMP, and arm the LE
 * connection timeout unless the controller is connecting from its white
 * list (which may legitimately take arbitrarily long).
 */
2250 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2251 u8 peer_addr_type, u8 own_address_type,
2254 struct hci_conn *conn;
2256 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2261 /* Store the initiator and responder address information which
2262 * is needed for SMP. These values will not change during the
2263 * lifetime of the connection.
2265 conn->init_addr_type = own_address_type;
2266 if (own_address_type == ADDR_LE_DEV_RANDOM)
2267 bacpy(&conn->init_addr, &hdev->random_addr);
2269 bacpy(&conn->init_addr, &hdev->bdaddr);
2271 conn->resp_addr_type = peer_addr_type;
2272 bacpy(&conn->resp_addr, peer_addr);
2274 /* We don't want the connection attempt to stick around
2275 * indefinitely since LE doesn't have a page timeout concept
2276 * like BR/EDR. Set a timer for any connection that doesn't use
2277 * the white list for connecting.
2279 if (filter_policy == HCI_LE_USE_PEER_ADDR)
2280 queue_delayed_work(conn->hdev->workqueue,
2281 &conn->le_conn_timeout,
2282 conn->conn_timeout);

/* Command Status of LE Create Connection: failures are handled by
 * hci_le_conn_failed via request callbacks, so only the success path
 * needs the address bookkeeping above.
 */
2285 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2287 struct hci_cp_le_create_conn *cp;
2289 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2291 /* All connection failure handling is taken care of by the
2292 * hci_le_conn_failed function which is triggered by the HCI
2293 * request completion callbacks used for connecting.
2298 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2304 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2305 cp->own_address_type, cp->filter_policy);
2307 hci_dev_unlock(hdev);

/* Command Status of LE Extended Create Connection: same as above but the
 * extended command names the own-address field differently.
 */
2310 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2312 struct hci_cp_le_ext_create_conn *cp;
2314 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2316 /* All connection failure handling is taken care of by the
2317 * hci_le_conn_failed function which is triggered by the HCI
2318 * request completion callbacks used for connecting.
2323 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2329 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2330 cp->own_addr_type, cp->filter_policy);
2332 hci_dev_unlock(hdev);
/* Command Status of LE Read Remote Features: failure while in BT_CONFIG
 * aborts connection setup (connect_cfm + drop), matching the BR/EDR
 * read-remote-features handlers.
 */
2335 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2337 struct hci_cp_le_read_remote_features *cp;
2338 struct hci_conn *conn;
2340 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2345 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2351 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2353 if (conn->state == BT_CONFIG) {
2354 hci_connect_cfm(conn, status);
2355 hci_conn_drop(conn);
2359 hci_dev_unlock(hdev);

/* Command Status of LE Start Encryption: if the command failed on a
 * live connection, tear the link down with an authentication failure.
 */
2362 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2364 struct hci_cp_le_start_enc *cp;
2365 struct hci_conn *conn;
2367 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2374 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2378 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2382 if (conn->state != BT_CONNECTED)
2385 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2386 hci_conn_drop(conn);
2389 hci_dev_unlock(hdev);

/* Command Status of Switch Role: on failure, clear the pending role
 * switch so the connection state machine can proceed.
 */
2392 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2394 struct hci_cp_switch_role *cp;
2395 struct hci_conn *conn;
2397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2402 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2408 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2410 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2412 hci_dev_unlock(hdev);
/* Inquiry Complete event: clear HCI_INQUIRY (waking any waiters -- the
 * barrier pairs with wake_up_bit as in hci_cc_inquiry_cancel), then
 * either begin resolving queued names or mark discovery stopped,
 * deferring to a still-running simultaneous LE scan when the quirk says
 * both run at once.
 */
2415 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2417 __u8 status = *((__u8 *) skb->data);
2418 struct discovery_state *discov = &hdev->discovery;
2419 struct inquiry_entry *e;
2421 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2423 hci_conn_check_pending(hdev);
2425 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2428 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2429 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2431 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2436 if (discov->state != DISCOVERY_FINDING)
2439 if (list_empty(&discov->resolve)) {
2440 /* When BR/EDR inquiry is active and no LE scanning is in
2441 * progress, then change discovery state to indicate completion.
2443 * When running LE scanning and BR/EDR inquiry simultaneously
2444 * and the LE scan already finished, then change the discovery
2445 * state to indicate completion.
2447 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2448 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2449 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2453 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2454 if (e && hci_resolve_name(hdev, e) == 0) {
2455 e->name_state = NAME_PENDING;
2456 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2458 /* When BR/EDR inquiry is active and no LE scanning is in
2459 * progress, then change discovery state to indicate completion.
2461 * When running LE scanning and BR/EDR inquiry simultaneously
2462 * and the LE scan already finished, then change the discovery
2463 * state to indicate completion.
2465 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2466 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2467 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2471 hci_dev_unlock(hdev);

/* Inquiry Result event: ignore results from periodic inquiry, update
 * the inquiry cache for each response (RSSI unknown in this legacy
 * event format) and report each device to mgmt.
 */
2474 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2476 struct inquiry_data data;
2477 struct inquiry_info *info = (void *) (skb->data + 1);
2478 int num_rsp = *((__u8 *) skb->data);
2480 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2485 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2490 for (; num_rsp; num_rsp--, info++) {
2493 bacpy(&data.bdaddr, &info->bdaddr);
2494 data.pscan_rep_mode = info->pscan_rep_mode;
2495 data.pscan_period_mode = info->pscan_period_mode;
2496 data.pscan_mode = info->pscan_mode;
2497 memcpy(data.dev_class, info->dev_class, 3);
2498 data.clock_offset = info->clock_offset;
2499 data.rssi = HCI_RSSI_INVALID;
2500 data.ssp_mode = 0x00;
2502 flags = hci_inquiry_cache_update(hdev, &data, false);
2504 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2505 info->dev_class, HCI_RSSI_INVALID,
2506 flags, NULL, 0, NULL, 0);
2509 hci_dev_unlock(hdev);
/* Connection Complete event: finalize an ACL/SCO connection.  On
 * success: store the handle, set auth/encrypt bits from device flags,
 * kick off remote-feature reads for ACL, fix the packet type for pre-2.0
 * controllers, and register sysfs/debugfs entries.  On failure the conn
 * is closed and mgmt notified.  A SCO lookup falls back to ESCO_LINK
 * since some controllers report eSCO attempts as SCO on failure.
 */
2512 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2514 struct hci_ev_conn_complete *ev = (void *) skb->data;
2515 struct hci_conn *conn;
2517 BT_DBG("%s", hdev->name);
2521 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2523 if (ev->link_type != SCO_LINK)
2526 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2530 conn->type = SCO_LINK;
2534 conn->handle = __le16_to_cpu(ev->handle);
2536 if (conn->type == ACL_LINK) {
2537 conn->state = BT_CONFIG;
2538 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) link without a stored key: keep it
 * around long enough for pairing to finish.
 */
2540 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2541 !hci_find_link_key(hdev, &ev->bdaddr))
2542 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2544 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2546 conn->state = BT_CONNECTED;
2548 hci_debugfs_create_conn(conn);
2549 hci_conn_add_sysfs(conn);
2551 if (test_bit(HCI_AUTH, &hdev->flags))
2552 set_bit(HCI_CONN_AUTH, &conn->flags);
2554 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2555 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2557 /* Get remote features */
2558 if (conn->type == ACL_LINK) {
2559 struct hci_cp_read_remote_features cp;
2560 cp.handle = ev->handle;
2561 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2564 hci_req_update_scan(hdev);
2567 /* Set packet type for incoming connection */
2568 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2569 struct hci_cp_change_conn_ptype cp;
2570 cp.handle = ev->handle;
2571 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2572 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2576 conn->state = BT_CLOSED;
2577 if (conn->type == ACL_LINK)
2578 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2579 conn->dst_type, ev->status);
2582 if (conn->type == ACL_LINK)
2583 hci_sco_setup(conn, ev->status);
2586 hci_connect_cfm(conn, ev->status);
2588 } else if (ev->link_type != ACL_LINK)
2589 hci_connect_cfm(conn, ev->status);
2592 hci_dev_unlock(hdev);
2594 hci_conn_check_pending(hdev);

/* Send HCI Reject Connection Request for the given address. */
2597 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2599 struct hci_cp_reject_conn_req cp;
2601 bacpy(&cp.bdaddr, bdaddr);
2602 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2603 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event: decide whether to accept an incoming
 * BR/EDR connection.  Rejections: L2CAP layer veto (connect_ind mask),
 * blacklisted address, or -- under mgmt -- neither HCI_CONNECTABLE nor a
 * whitelist entry.  Accepted ACL links are answered immediately; SCO
 * links either get an (e)SCO accept with default audio parameters or
 * are deferred to the protocol layer via HCI_PROTO_DEFER.
 */
2606 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2608 struct hci_ev_conn_request *ev = (void *) skb->data;
2609 int mask = hdev->link_mode;
2610 struct inquiry_entry *ie;
2611 struct hci_conn *conn;
2614 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2617 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2620 if (!(mask & HCI_LM_ACCEPT)) {
2621 hci_reject_conn(hdev, &ev->bdaddr);
2625 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2627 hci_reject_conn(hdev, &ev->bdaddr);
2631 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2632 * connection. These features are only touched through mgmt so
2633 * only do the checks if HCI_MGMT is set.
2635 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
2636 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
2637 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2639 hci_reject_conn(hdev, &ev->bdaddr);
2643 /* Connection accepted */
2647 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2649 memcpy(ie->data.dev_class, ev->dev_class, 3);
2651 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2654 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2657 bt_dev_err(hdev, "no memory for new connection");
2658 hci_dev_unlock(hdev);
2663 memcpy(conn->dev_class, ev->dev_class, 3);
2665 hci_dev_unlock(hdev);
2667 if (ev->link_type == ACL_LINK ||
2668 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2669 struct hci_cp_accept_conn_req cp;
2670 conn->state = BT_CONNECT;
2672 bacpy(&cp.bdaddr, &ev->bdaddr);
2674 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2675 cp.role = 0x00; /* Become master */
2677 cp.role = 0x01; /* Remain slave */
2679 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2680 } else if (!(flags & HCI_PROTO_DEFER)) {
2681 struct hci_cp_accept_sync_conn_req cp;
2682 conn->state = BT_CONNECT;
2684 bacpy(&cp.bdaddr, &ev->bdaddr);
2685 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* Default eSCO parameters: 8000 B/s both directions, latency and
 * retransmission effort left to the controller ("don't care").
 */
2687 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2688 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2689 cp.max_latency = cpu_to_le16(0xffff);
2690 cp.content_format = cpu_to_le16(hdev->voice_setting);
2691 cp.retrans_effort = 0xff;
2693 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2696 conn->state = BT_CONNECT2;
2697 hci_connect_cfm(conn, 0);

/* Map HCI disconnect reason codes onto mgmt disconnect reasons. */
2701 static u8 hci_to_mgmt_reason(u8 err)
2704 case HCI_ERROR_CONNECTION_TIMEOUT:
2705 return MGMT_DEV_DISCONN_TIMEOUT;
2706 case HCI_ERROR_REMOTE_USER_TERM:
2707 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2708 case HCI_ERROR_REMOTE_POWER_OFF:
2709 return MGMT_DEV_DISCONN_REMOTE;
2710 case HCI_ERROR_LOCAL_HOST_TERM:
2711 return MGMT_DEV_DISCONN_LOCAL_HOST;
2713 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event: close the conn, tell mgmt why (auth
 * failure takes precedence over the HCI reason code), forget the link
 * key when flagged, and re-queue auto-connect params for background
 * scanning.  For LE links, re-enable advertising which the controller
 * stops on connection (per Core Spec v4.0 Set_Advertise_Enable text).
 */
2717 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2719 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2721 struct hci_conn_params *params;
2722 struct hci_conn *conn;
2723 bool mgmt_connected;
2726 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2730 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2735 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2736 conn->dst_type, ev->status);
2740 conn->state = BT_CLOSED;
2742 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2744 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
2745 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
2747 reason = hci_to_mgmt_reason(ev->reason);
2749 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2750 reason, mgmt_connected);
2752 if (conn->type == ACL_LINK) {
2753 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2754 hci_remove_link_key(hdev, &conn->dst);
2756 hci_req_update_scan(hdev);
2759 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2761 switch (params->auto_connect) {
/* LINK_LOSS only reconnects after an actual link loss. */
2762 case HCI_AUTO_CONN_LINK_LOSS:
2763 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2767 case HCI_AUTO_CONN_DIRECT:
2768 case HCI_AUTO_CONN_ALWAYS:
2769 list_del_init(&params->action);
2770 list_add(&params->action, &hdev->pend_le_conns);
2771 hci_update_background_scan(hdev);
2781 hci_disconn_cfm(conn, ev->reason);
2784 /* Re-enable advertising if necessary, since it might
2785 * have been disabled by the connection. From the
2786 * HCI_LE_Set_Advertise_Enable command description in
2787 * the core specification (v4.0):
2788 * "The Controller shall continue advertising until the Host
2789 * issues an LE_Set_Advertise_Enable command with
2790 * Advertising_Enable set to 0x00 (Advertising is disabled)
2791 * or until a connection is created or until the Advertising
2792 * is timed out due to Directed Advertising."
2794 if (type == LE_LINK)
2795 hci_req_reenable_advertising(hdev);
2798 hci_dev_unlock(hdev);
/* Authentication Complete event: record success (sec_level promoted,
 * AUTH bit set -- re-auth of legacy links is impossible and only logged)
 * or failure (AUTH_FAILURE flagged for key-missing, mgmt notified),
 * then continue the connect sequence: SSP links in BT_CONFIG proceed to
 * encryption, and a pending Set Connection Encryption is flushed.
 */
2801 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2803 struct hci_ev_auth_complete *ev = (void *) skb->data;
2804 struct hci_conn *conn;
2806 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2810 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2815 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2817 if (!hci_conn_ssp_enabled(conn) &&
2818 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2819 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
2821 set_bit(HCI_CONN_AUTH, &conn->flags);
2822 conn->sec_level = conn->pending_sec_level;
2825 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
2826 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
2828 mgmt_auth_failed(conn, ev->status);
2831 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2832 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2834 if (conn->state == BT_CONFIG) {
2835 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2836 struct hci_cp_set_conn_encrypt cp;
2837 cp.handle = ev->handle;
2839 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2842 conn->state = BT_CONNECTED;
2843 hci_connect_cfm(conn, ev->status);
2844 hci_conn_drop(conn);
2847 hci_auth_cfm(conn, ev->status);
2849 hci_conn_hold(conn);
2850 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2851 hci_conn_drop(conn);
2854 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2856 struct hci_cp_set_conn_encrypt cp;
2857 cp.handle = ev->handle;
2859 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2862 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2863 hci_encrypt_cfm(conn, ev->status, 0x00);
2868 hci_dev_unlock(hdev);
/* Remote Name Request Complete event: feed the (possibly absent) name
 * into the pending-name/discovery machinery when mgmt is active, then,
 * like hci_cs_remote_name_req, start authentication if the outgoing
 * link still needs it.
 */
2871 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2873 struct hci_ev_remote_name *ev = (void *) skb->data;
2874 struct hci_conn *conn;
2876 BT_DBG("%s", hdev->name);
2878 hci_conn_check_pending(hdev);
2882 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2884 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2887 if (ev->status == 0)
2888 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2889 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2891 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2897 if (!hci_outgoing_auth_needed(hdev, conn))
2900 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2901 struct hci_cp_auth_requested cp;
2903 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2905 cp.handle = __cpu_to_le16(conn->handle);
2906 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2910 hci_dev_unlock(hdev);

/* Completion callback for HCI Read Encryption Key Size (sent from the
 * encrypt-change handler).  On a malformed or failed response the key
 * size is assumed to be the maximum (HCI_LINK_KEY_SIZE); afterwards the
 * connection setup is completed (BT_CONFIG -> BT_CONNECTED) or the
 * encrypt change is confirmed with the effective encryption mode.
 */
2913 static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
2914 u16 opcode, struct sk_buff *skb)
2916 const struct hci_rp_read_enc_key_size *rp;
2917 struct hci_conn *conn;
2920 BT_DBG("%s status 0x%02x", hdev->name, status);
2922 if (!skb || skb->len < sizeof(*rp)) {
2923 bt_dev_err(hdev, "invalid read key size response");
2927 rp = (void *)skb->data;
2928 handle = le16_to_cpu(rp->handle);
2932 conn = hci_conn_hash_lookup_handle(hdev, handle);
2936 /* If we fail to read the encryption key size, assume maximum
2937 * (which is the same we do also when this HCI command isn't
2941 bt_dev_err(hdev, "failed to read key size for handle %u",
2943 conn->enc_key_size = HCI_LINK_KEY_SIZE;
2945 conn->enc_key_size = rp->key_size;
2948 if (conn->state == BT_CONFIG) {
2949 conn->state = BT_CONNECTED;
2950 hci_connect_cfm(conn, 0);
2951 hci_conn_drop(conn);
2955 if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
2957 else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
2962 hci_encrypt_cfm(conn, 0, encrypt);
2966 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Change event.
 *
 * Updates the connection's security state to reflect the new encryption
 * status reported by the controller, enforces Secure Connections Only
 * policy, kicks off a read of the encryption key size for encrypted ACL
 * links, and finally notifies upper layers via connect/encrypt callbacks.
 * Runs with the hdev lock held for the body of the handler.
 */
2969 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2971 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2972 struct hci_conn *conn;
2974 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2978 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Success path: record that the link is now authenticated and encrypted
 * and promote the pending security level to the effective one.
 */
2984 /* Encryption implies authentication */
2985 set_bit(HCI_CONN_AUTH, &conn->flags);
2986 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2987 conn->sec_level = conn->pending_sec_level;
2989 /* P-256 authentication key implies FIPS */
2990 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2991 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 on BR/EDR means AES-CCM; LE links always use AES-CCM. */
2993 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2994 conn->type == LE_LINK)
2995 set_bit(HCI_CONN_AES_CCM, &conn->flags);
/* Encryption was turned off (or never came up): clear both flags. */
2997 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2998 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3002 /* We should disregard the current RPA and generate a new one
3003 * whenever the encryption procedure fails.
3005 if (ev->status && conn->type == LE_LINK) {
3006 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3007 hci_adv_instances_set_rpa_expired(hdev, true);
3010 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Encryption failed on an established link: tear the link down. A
 * missing PIN/key is remembered so user space can distinguish it.
 */
3012 if (ev->status && conn->state == BT_CONNECTED) {
3013 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3014 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3016 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3017 hci_conn_drop(conn);
3021 /* In Secure Connections Only mode, do not allow any connections
3022 * that are not encrypted with AES-CCM using a P-256 authenticated
3025 if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
3026 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
3027 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
3028 hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
3029 hci_conn_drop(conn);
3033 /* Try reading the encryption key size for encrypted ACL links */
3034 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3035 struct hci_cp_read_enc_key_size cp;
3036 struct hci_request req;
3038 /* Only send HCI_Read_Encryption_Key_Size if the
3039 * controller really supports it. If it doesn't, assume
3040 * the default size (16).
/* hdev->commands[20] bit 4 advertises Read Encryption Key Size support. */
3042 if (!(hdev->commands[20] & 0x10)) {
3043 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3047 hci_req_init(&req, hdev);
3049 cp.handle = cpu_to_le16(conn->handle);
3050 hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);
/* On failure to queue the request fall back to the maximum key size. */
3052 if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
3053 bt_dev_err(hdev, "sending read key size failed");
3054 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3061 /* Set the default Authenticated Payload Timeout after
3062 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3063 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3064 * sent when the link is active and Encryption is enabled, the conn
3065 * type can be either LE or ACL and controller must support LMP Ping.
3066 * Ensure for AES-CCM encryption as well.
3068 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3069 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3070 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3071 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3072 struct hci_cp_write_auth_payload_to cp;
3074 cp.handle = cpu_to_le16(conn->handle);
3075 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3076 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
/* A connection still in BT_CONFIG becomes fully connected here and the
 * connect callback fires; otherwise only the encrypt callback is run.
 */
3081 if (conn->state == BT_CONFIG) {
3083 conn->state = BT_CONNECTED;
3085 hci_connect_cfm(conn, ev->status);
3086 hci_conn_drop(conn);
3088 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
3091 hci_dev_unlock(hdev);
3094 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
3095 struct sk_buff *skb)
3097 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
3098 struct hci_conn *conn;
3100 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3104 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3107 set_bit(HCI_CONN_SECURE, &conn->flags);
3109 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3111 hci_key_change_cfm(conn, ev->status);
3114 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote feature bits on the connection. While the
 * connection is still in BT_CONFIG, either continues with a request for
 * the extended feature pages (when both sides support them) or falls
 * back to resolving the remote name / reporting the connection to mgmt,
 * and completes connection setup if no outgoing authentication is needed.
 */
3117 static void hci_remote_features_evt(struct hci_dev *hdev,
3118 struct sk_buff *skb)
3120 struct hci_ev_remote_features *ev = (void *) skb->data;
3121 struct hci_conn *conn;
3123 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* On success, cache page 0 of the remote LMP features. */
3132 memcpy(conn->features[0], ev->features, 8);
3134 if (conn->state != BT_CONFIG)
/* Both local and remote support extended features: read page 1+ next. */
3137 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3138 lmp_ext_feat_capable(conn)) {
3139 struct hci_cp_read_remote_ext_features cp;
3140 cp.handle = ev->handle;
3142 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Not yet announced to mgmt: resolve the remote name first; otherwise
 * report the device as connected now.
 */
3147 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3148 struct hci_cp_remote_name_req cp;
3149 memset(&cp, 0, sizeof(cp));
3150 bacpy(&cp.bdaddr, &conn->dst);
3151 cp.pscan_rep_mode = 0x02;
3152 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3153 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3154 mgmt_device_connected(hdev, conn, 0, NULL, 0);
/* No outgoing authentication required: setup is complete. */
3156 if (!hci_outgoing_auth_needed(hdev, conn)) {
3157 conn->state = BT_CONNECTED;
3158 hci_connect_cfm(conn, ev->status);
3159 hci_conn_drop(conn);
3163 hci_dev_unlock(hdev);
/* Handle the HCI Command Complete event.
 *
 * Extracts the opcode and status from the event, dispatches the
 * remainder of the skb to the opcode-specific hci_cc_* handler, re-arms
 * the command credit counter from ev->ncmd, completes any pending
 * hci_request that was waiting on this opcode, and schedules the next
 * queued command. The resolved opcode/status and request-completion
 * callbacks are returned to the caller through the out parameters.
 */
3166 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
3167 u16 *opcode, u8 *status,
3168 hci_req_complete_t *req_complete,
3169 hci_req_complete_skb_t *req_complete_skb)
3171 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* The first byte after the event header is the command status. */
3173 *opcode = __le16_to_cpu(ev->opcode);
3174 *status = skb->data[sizeof(*ev)];
3176 skb_pull(skb, sizeof(*ev));
/* Dispatch to the per-opcode completion handler. */
3179 case HCI_OP_INQUIRY_CANCEL:
3180 hci_cc_inquiry_cancel(hdev, skb);
3183 case HCI_OP_PERIODIC_INQ:
3184 hci_cc_periodic_inq(hdev, skb);
3187 case HCI_OP_EXIT_PERIODIC_INQ:
3188 hci_cc_exit_periodic_inq(hdev, skb);
3191 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
3192 hci_cc_remote_name_req_cancel(hdev, skb);
3195 case HCI_OP_ROLE_DISCOVERY:
3196 hci_cc_role_discovery(hdev, skb);
3199 case HCI_OP_READ_LINK_POLICY:
3200 hci_cc_read_link_policy(hdev, skb);
3203 case HCI_OP_WRITE_LINK_POLICY:
3204 hci_cc_write_link_policy(hdev, skb);
3207 case HCI_OP_READ_DEF_LINK_POLICY:
3208 hci_cc_read_def_link_policy(hdev, skb);
3211 case HCI_OP_WRITE_DEF_LINK_POLICY:
3212 hci_cc_write_def_link_policy(hdev, skb);
3216 hci_cc_reset(hdev, skb);
3219 case HCI_OP_READ_STORED_LINK_KEY:
3220 hci_cc_read_stored_link_key(hdev, skb);
3223 case HCI_OP_DELETE_STORED_LINK_KEY:
3224 hci_cc_delete_stored_link_key(hdev, skb);
3227 case HCI_OP_WRITE_LOCAL_NAME:
3228 hci_cc_write_local_name(hdev, skb);
3231 case HCI_OP_READ_LOCAL_NAME:
3232 hci_cc_read_local_name(hdev, skb);
3235 case HCI_OP_WRITE_AUTH_ENABLE:
3236 hci_cc_write_auth_enable(hdev, skb);
3239 case HCI_OP_WRITE_ENCRYPT_MODE:
3240 hci_cc_write_encrypt_mode(hdev, skb);
3243 case HCI_OP_WRITE_SCAN_ENABLE:
3244 hci_cc_write_scan_enable(hdev, skb);
3247 case HCI_OP_READ_CLASS_OF_DEV:
3248 hci_cc_read_class_of_dev(hdev, skb);
3251 case HCI_OP_WRITE_CLASS_OF_DEV:
3252 hci_cc_write_class_of_dev(hdev, skb);
3255 case HCI_OP_READ_VOICE_SETTING:
3256 hci_cc_read_voice_setting(hdev, skb);
3259 case HCI_OP_WRITE_VOICE_SETTING:
3260 hci_cc_write_voice_setting(hdev, skb);
3263 case HCI_OP_READ_NUM_SUPPORTED_IAC:
3264 hci_cc_read_num_supported_iac(hdev, skb);
3267 case HCI_OP_WRITE_SSP_MODE:
3268 hci_cc_write_ssp_mode(hdev, skb);
3271 case HCI_OP_WRITE_SC_SUPPORT:
3272 hci_cc_write_sc_support(hdev, skb);
3275 case HCI_OP_READ_AUTH_PAYLOAD_TO:
3276 hci_cc_read_auth_payload_timeout(hdev, skb);
3279 case HCI_OP_WRITE_AUTH_PAYLOAD_TO:
3280 hci_cc_write_auth_payload_timeout(hdev, skb);
3283 case HCI_OP_READ_LOCAL_VERSION:
3284 hci_cc_read_local_version(hdev, skb);
3287 case HCI_OP_READ_LOCAL_COMMANDS:
3288 hci_cc_read_local_commands(hdev, skb);
3291 case HCI_OP_READ_LOCAL_FEATURES:
3292 hci_cc_read_local_features(hdev, skb);
3295 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3296 hci_cc_read_local_ext_features(hdev, skb);
3299 case HCI_OP_READ_BUFFER_SIZE:
3300 hci_cc_read_buffer_size(hdev, skb);
3303 case HCI_OP_READ_BD_ADDR:
3304 hci_cc_read_bd_addr(hdev, skb);
3307 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3308 hci_cc_read_page_scan_activity(hdev, skb);
3311 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3312 hci_cc_write_page_scan_activity(hdev, skb);
3315 case HCI_OP_READ_PAGE_SCAN_TYPE:
3316 hci_cc_read_page_scan_type(hdev, skb);
3319 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3320 hci_cc_write_page_scan_type(hdev, skb);
3323 case HCI_OP_READ_DATA_BLOCK_SIZE:
3324 hci_cc_read_data_block_size(hdev, skb);
3327 case HCI_OP_READ_FLOW_CONTROL_MODE:
3328 hci_cc_read_flow_control_mode(hdev, skb);
3331 case HCI_OP_READ_LOCAL_AMP_INFO:
3332 hci_cc_read_local_amp_info(hdev, skb);
3335 case HCI_OP_READ_CLOCK:
3336 hci_cc_read_clock(hdev, skb);
3339 case HCI_OP_READ_INQ_RSP_TX_POWER:
3340 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3343 case HCI_OP_PIN_CODE_REPLY:
3344 hci_cc_pin_code_reply(hdev, skb);
3347 case HCI_OP_PIN_CODE_NEG_REPLY:
3348 hci_cc_pin_code_neg_reply(hdev, skb);
3351 case HCI_OP_READ_LOCAL_OOB_DATA:
3352 hci_cc_read_local_oob_data(hdev, skb);
3355 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3356 hci_cc_read_local_oob_ext_data(hdev, skb);
3359 case HCI_OP_LE_READ_BUFFER_SIZE:
3360 hci_cc_le_read_buffer_size(hdev, skb);
3363 case HCI_OP_LE_READ_LOCAL_FEATURES:
3364 hci_cc_le_read_local_features(hdev, skb);
3367 case HCI_OP_LE_READ_ADV_TX_POWER:
3368 hci_cc_le_read_adv_tx_power(hdev, skb);
3371 case HCI_OP_USER_CONFIRM_REPLY:
3372 hci_cc_user_confirm_reply(hdev, skb);
3375 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3376 hci_cc_user_confirm_neg_reply(hdev, skb);
3379 case HCI_OP_USER_PASSKEY_REPLY:
3380 hci_cc_user_passkey_reply(hdev, skb);
3383 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3384 hci_cc_user_passkey_neg_reply(hdev, skb);
3387 case HCI_OP_LE_SET_RANDOM_ADDR:
3388 hci_cc_le_set_random_addr(hdev, skb);
3391 case HCI_OP_LE_SET_ADV_ENABLE:
3392 hci_cc_le_set_adv_enable(hdev, skb);
3395 case HCI_OP_LE_SET_SCAN_PARAM:
3396 hci_cc_le_set_scan_param(hdev, skb);
3399 case HCI_OP_LE_SET_SCAN_ENABLE:
3400 hci_cc_le_set_scan_enable(hdev, skb);
3403 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3404 hci_cc_le_read_white_list_size(hdev, skb);
3407 case HCI_OP_LE_CLEAR_WHITE_LIST:
3408 hci_cc_le_clear_white_list(hdev, skb);
3411 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3412 hci_cc_le_add_to_white_list(hdev, skb);
3415 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3416 hci_cc_le_del_from_white_list(hdev, skb);
3419 case HCI_OP_LE_READ_SUPPORTED_STATES:
3420 hci_cc_le_read_supported_states(hdev, skb);
3423 case HCI_OP_LE_READ_DEF_DATA_LEN:
3424 hci_cc_le_read_def_data_len(hdev, skb);
3427 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3428 hci_cc_le_write_def_data_len(hdev, skb);
3431 case HCI_OP_LE_ADD_TO_RESOLV_LIST:
3432 hci_cc_le_add_to_resolv_list(hdev, skb);
3435 case HCI_OP_LE_DEL_FROM_RESOLV_LIST:
3436 hci_cc_le_del_from_resolv_list(hdev, skb);
3439 case HCI_OP_LE_CLEAR_RESOLV_LIST:
3440 hci_cc_le_clear_resolv_list(hdev, skb);
3443 case HCI_OP_LE_READ_RESOLV_LIST_SIZE:
3444 hci_cc_le_read_resolv_list_size(hdev, skb);
3447 case HCI_OP_LE_SET_ADDR_RESOLV_ENABLE:
3448 hci_cc_le_set_addr_resolution_enable(hdev, skb);
3451 case HCI_OP_LE_READ_MAX_DATA_LEN:
3452 hci_cc_le_read_max_data_len(hdev, skb);
3455 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3456 hci_cc_write_le_host_supported(hdev, skb);
3459 case HCI_OP_LE_SET_ADV_PARAM:
3460 hci_cc_set_adv_param(hdev, skb);
3463 case HCI_OP_READ_RSSI:
3464 hci_cc_read_rssi(hdev, skb);
3467 case HCI_OP_READ_TX_POWER:
3468 hci_cc_read_tx_power(hdev, skb);
3471 case HCI_OP_WRITE_SSP_DEBUG_MODE:
3472 hci_cc_write_ssp_debug_mode(hdev, skb);
3475 case HCI_OP_LE_SET_EXT_SCAN_PARAMS:
3476 hci_cc_le_set_ext_scan_param(hdev, skb);
3479 case HCI_OP_LE_SET_EXT_SCAN_ENABLE:
3480 hci_cc_le_set_ext_scan_enable(hdev, skb);
3483 case HCI_OP_LE_SET_DEFAULT_PHY:
3484 hci_cc_le_set_default_phy(hdev, skb);
3487 case HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS:
3488 hci_cc_le_read_num_adv_sets(hdev, skb);
3491 case HCI_OP_LE_SET_EXT_ADV_PARAMS:
3492 hci_cc_set_ext_adv_param(hdev, skb);
3495 case HCI_OP_LE_SET_EXT_ADV_ENABLE:
3496 hci_cc_le_set_ext_adv_enable(hdev, skb);
3499 case HCI_OP_LE_SET_ADV_SET_RAND_ADDR:
3500 hci_cc_le_set_adv_set_random_addr(hdev, skb);
3503 case HCI_OP_ENABLE_RSSI:
3504 hci_cc_enable_rssi(hdev, skb);
3507 case HCI_OP_GET_RAW_RSSI:
3508 hci_cc_get_raw_rssi(hdev, skb);
/* Unhandled opcode: only log it. */
3512 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* A real command was answered, so stop the command timeout timer. */
3516 if (*opcode != HCI_OP_NOP)
3517 cancel_delayed_work(&hdev->cmd_timer);
/* The controller granted new command credits (unless a reset is in
 * progress, which manages the credit count itself).
 */
3519 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3520 atomic_set(&hdev->cmd_cnt, 1);
3522 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
/* A command marked as pending more events should not see a completion
 * here; warn if that happens.
 */
3525 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3527 "unexpected event for opcode 0x%4.4x", *opcode);
/* Kick the command work queue to send the next queued command. */
3531 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3532 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event.
 *
 * Extracts opcode and status, dispatches to the opcode-specific hci_cs_*
 * handler (these commands report their final result through a later
 * event), re-arms the command credit counter and, on failure or when no
 * special completion event is awaited, flags the request as completed.
 */
3535 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
3536 u16 *opcode, u8 *status,
3537 hci_req_complete_t *req_complete,
3538 hci_req_complete_skb_t *req_complete_skb)
3540 struct hci_ev_cmd_status *ev = (void *) skb->data;
3542 skb_pull(skb, sizeof(*ev));
3544 *opcode = __le16_to_cpu(ev->opcode);
3545 *status = ev->status;
/* Dispatch to the per-opcode status handler. */
3548 case HCI_OP_INQUIRY:
3549 hci_cs_inquiry(hdev, ev->status);
3552 case HCI_OP_CREATE_CONN:
3553 hci_cs_create_conn(hdev, ev->status);
3556 case HCI_OP_DISCONNECT:
3557 hci_cs_disconnect(hdev, ev->status);
3560 case HCI_OP_ADD_SCO:
3561 hci_cs_add_sco(hdev, ev->status);
3564 case HCI_OP_AUTH_REQUESTED:
3565 hci_cs_auth_requested(hdev, ev->status);
3568 case HCI_OP_SET_CONN_ENCRYPT:
3569 hci_cs_set_conn_encrypt(hdev, ev->status);
3572 case HCI_OP_REMOTE_NAME_REQ:
3573 hci_cs_remote_name_req(hdev, ev->status);
3576 case HCI_OP_READ_REMOTE_FEATURES:
3577 hci_cs_read_remote_features(hdev, ev->status);
3580 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3581 hci_cs_read_remote_ext_features(hdev, ev->status);
3584 case HCI_OP_SETUP_SYNC_CONN:
3585 hci_cs_setup_sync_conn(hdev, ev->status);
3588 case HCI_OP_SNIFF_MODE:
3589 hci_cs_sniff_mode(hdev, ev->status);
3592 case HCI_OP_EXIT_SNIFF_MODE:
3593 hci_cs_exit_sniff_mode(hdev, ev->status);
3596 case HCI_OP_SWITCH_ROLE:
3597 hci_cs_switch_role(hdev, ev->status);
3600 case HCI_OP_LE_CREATE_CONN:
3601 hci_cs_le_create_conn(hdev, ev->status);
3604 case HCI_OP_LE_READ_REMOTE_FEATURES:
3605 hci_cs_le_read_remote_features(hdev, ev->status);
3608 case HCI_OP_LE_START_ENC:
3609 hci_cs_le_start_enc(hdev, ev->status);
3612 case HCI_OP_LE_EXT_CREATE_CONN:
3613 hci_cs_le_ext_create_conn(hdev, ev->status);
/* Unhandled opcode: only log it. */
3617 BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
/* A real command was acknowledged, so stop the command timeout timer. */
3621 if (*opcode != HCI_OP_NOP)
3622 cancel_delayed_work(&hdev->cmd_timer);
/* The controller granted new command credits (unless resetting). */
3624 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
3625 atomic_set(&hdev->cmd_cnt, 1);
3627 /* Indicate request completion if the command failed. Also, if
3628 * we're not waiting for a special event and we get a success
3629 * command status we should try to flag the request as completed
3630 * (since for this kind of commands there will not be a command
3634 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->hci.req_event))
3635 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
/* A command marked as pending more events should not complete here. */
3638 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
3640 "unexpected event for opcode 0x%4.4x", *opcode);
/* Kick the command work queue to send the next queued command. */
3644 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
3645 queue_work(hdev->workqueue, &hdev->cmd_work);
3648 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3650 struct hci_ev_hardware_error *ev = (void *) skb->data;
3652 hdev->hw_error_code = ev->code;
3654 queue_work(hdev->req_workqueue, &hdev->error_reset);
3657 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3659 struct hci_ev_role_change *ev = (void *) skb->data;
3660 struct hci_conn *conn;
3662 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3666 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3669 conn->role = ev->role;
3671 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3673 hci_role_switch_cfm(conn, ev->status, ev->role);
3676 hci_dev_unlock(hdev);
/* Handle the HCI Number Of Completed Packets event.
 *
 * Returns transmit credits for each listed connection handle: decrements
 * the per-connection in-flight counter and replenishes the per-device
 * packet counters (ACL, LE or SCO), clamping them to the controller's
 * advertised maximum. Finally kicks the TX work queue.
 */
3679 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3681 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow control mode. */
3684 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3685 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb really contains num_hndl entries. */
3689 if (skb->len < sizeof(*ev) ||
3690 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3691 BT_DBG("%s bad parameters", hdev->name);
3695 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3697 for (i = 0; i < ev->num_hndl; i++) {
3698 struct hci_comp_pkts_info *info = &ev->handles[i];
3699 struct hci_conn *conn;
3700 __u16 handle, count;
3702 handle = __le16_to_cpu(info->handle);
3703 count = __le16_to_cpu(info->count);
3705 conn = hci_conn_hash_lookup_handle(hdev, handle);
3709 conn->sent -= count;
/* Credit the pool matching the link type, never exceeding the
 * controller-advertised capacity.
 */
3711 switch (conn->type) {
3713 hdev->acl_cnt += count;
3714 if (hdev->acl_cnt > hdev->acl_pkts)
3715 hdev->acl_cnt = hdev->acl_pkts;
/* LE links use the LE buffer pool when one exists, otherwise they
 * share the ACL pool.
 */
3719 if (hdev->le_pkts) {
3720 hdev->le_cnt += count;
3721 if (hdev->le_cnt > hdev->le_pkts)
3722 hdev->le_cnt = hdev->le_pkts;
3724 hdev->acl_cnt += count;
3725 if (hdev->acl_cnt > hdev->acl_pkts)
3726 hdev->acl_cnt = hdev->acl_pkts;
3731 hdev->sco_cnt += count;
3732 if (hdev->sco_cnt > hdev->sco_pkts)
3733 hdev->sco_cnt = hdev->sco_pkts;
3737 bt_dev_err(hdev, "unknown type %d conn %p",
/* Freed credits may allow more queued packets to be sent. */
3743 queue_work(hdev->workqueue, &hdev->tx_work);
3746 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3749 struct hci_chan *chan;
3751 switch (hdev->dev_type) {
3753 return hci_conn_hash_lookup_handle(hdev, handle);
3755 chan = hci_chan_lookup_handle(hdev, handle);
3760 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle the HCI Number Of Completed Data Blocks event.
 *
 * Block-based counterpart of the completed-packets handler: returns
 * data-block credits for each listed handle, clamps the device block
 * counter to its capacity and kicks the TX work queue.
 */
3767 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3769 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
/* This event is only valid in block-based flow control mode. */
3772 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3773 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb really contains num_hndl entries. */
3777 if (skb->len < sizeof(*ev) ||
3778 skb->len < struct_size(ev, handles, ev->num_hndl)) {
3779 BT_DBG("%s bad parameters", hdev->name);
3783 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3786 for (i = 0; i < ev->num_hndl; i++) {
3787 struct hci_comp_blocks_info *info = &ev->handles[i];
3788 struct hci_conn *conn = NULL;
3789 __u16 handle, block_count;
3791 handle = __le16_to_cpu(info->handle);
3792 block_count = __le16_to_cpu(info->blocks);
/* Handles may map to AMP logical channels, hence the helper. */
3794 conn = __hci_conn_lookup_handle(hdev, handle);
3798 conn->sent -= block_count;
3800 switch (conn->type) {
3803 hdev->block_cnt += block_count;
3804 if (hdev->block_cnt > hdev->num_blocks)
3805 hdev->block_cnt = hdev->num_blocks;
3809 bt_dev_err(hdev, "unknown type %d conn %p",
/* Freed blocks may allow more queued data to be sent. */
3815 queue_work(hdev->workqueue, &hdev->tx_work);
3818 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3820 struct hci_ev_mode_change *ev = (void *) skb->data;
3821 struct hci_conn *conn;
3823 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3827 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3829 conn->mode = ev->mode;
3831 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3833 if (conn->mode == HCI_CM_ACTIVE)
3834 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3836 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3839 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3840 hci_sco_setup(conn, ev->status);
3843 hci_dev_unlock(hdev);
/* Handle the HCI PIN Code Request event.
 *
 * Extends the disconnect timeout while pairing is in progress, rejects
 * the request outright when the device is not bondable and we did not
 * initiate authentication, and otherwise forwards the request to user
 * space through mgmt (indicating whether a 16-digit PIN is required).
 */
3846 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3848 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3849 struct hci_conn *conn;
3851 BT_DBG("%s", hdev->name);
3855 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the link alive for the duration of the pairing dialogue. */
3859 if (conn->state == BT_CONNECTED) {
3860 hci_conn_hold(conn);
3861 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3862 hci_conn_drop(conn);
/* Not bondable and not the authentication initiator: reject. */
3865 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
3866 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3867 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3868 sizeof(ev->bdaddr), &ev->bdaddr);
3869 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
/* High security requires a full 16-digit PIN code. */
3872 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3877 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3881 hci_dev_unlock(hdev);
3884 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3886 if (key_type == HCI_LK_CHANGED_COMBINATION)
3889 conn->pin_length = pin_len;
3890 conn->key_type = key_type;
3893 case HCI_LK_LOCAL_UNIT:
3894 case HCI_LK_REMOTE_UNIT:
3895 case HCI_LK_DEBUG_COMBINATION:
3897 case HCI_LK_COMBINATION:
3899 conn->pending_sec_level = BT_SECURITY_HIGH;
3901 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3903 case HCI_LK_UNAUTH_COMBINATION_P192:
3904 case HCI_LK_UNAUTH_COMBINATION_P256:
3905 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3907 case HCI_LK_AUTH_COMBINATION_P192:
3908 conn->pending_sec_level = BT_SECURITY_HIGH;
3910 case HCI_LK_AUTH_COMBINATION_P256:
3911 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * the key's strength is insufficient for the security level pending on
 * the connection (unauthenticated keys when MITM is required, or short
 * PIN combination keys for high/FIPS security), in which case a
 * negative reply forces fresh pairing.
 */
3916 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3918 struct hci_ev_link_key_req *ev = (void *) skb->data;
3919 struct hci_cp_link_key_reply cp;
3920 struct hci_conn *conn;
3921 struct link_key *key;
3923 BT_DBG("%s", hdev->name);
/* Key storage is handled by user space; without mgmt there is none. */
3925 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3930 key = hci_find_link_key(hdev, &ev->bdaddr);
3932 BT_DBG("%s link key not found for %pMR", hdev->name,
3937 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3940 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3942 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Refuse an unauthenticated key when MITM protection is required. */
3944 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3945 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3946 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3947 BT_DBG("%s ignoring unauthenticated key", hdev->name);
/* Refuse a short-PIN combination key for high/FIPS security. */
3951 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3952 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3953 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3954 BT_DBG("%s ignoring key unauthenticated for high security",
3959 conn_set_key(conn, key->type, key->pin_len);
3962 bacpy(&cp.bdaddr, &ev->bdaddr);
3963 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3965 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3967 hci_dev_unlock(hdev);
/* No usable key: negative reply triggers a new pairing procedure. */
3972 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3973 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event.
 *
 * Stores the newly created link key for the connection, informs user
 * space via mgmt (with a persistence hint), discards debug keys unless
 * explicitly kept, and tracks whether the key should be flushed when
 * the connection ends.
 */
3976 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3978 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3979 struct hci_conn *conn;
3980 struct link_key *key;
3984 BT_DBG("%s", hdev->name);
3988 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the link around briefly in case more traffic follows pairing. */
3992 hci_conn_hold(conn);
3993 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3994 hci_conn_drop(conn);
3996 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3997 conn_set_key(conn, ev->key_type, conn->pin_length);
/* Without mgmt there is no user-space key storage to update. */
3999 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4002 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4003 ev->key_type, pin_len, &persistent);
4007 /* Update connection information since adding the key will have
4008 * fixed up the type in the case of changed combination keys.
4010 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4011 conn_set_key(conn, key->type, key->pin_len);
4013 mgmt_new_link_key(hdev, key, persistent);
4015 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4016 * is set. If it's not set simply remove the key from the kernel
4017 * list (we've still notified user space about it but with
4018 * store_hint being 0).
4020 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4021 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4022 list_del_rcu(&key->list);
4023 kfree_rcu(key, rcu);
/* Non-persistent keys must be flushed when the link goes down. */
4028 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4030 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4033 hci_dev_unlock(hdev);
4036 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
4038 struct hci_ev_clock_offset *ev = (void *) skb->data;
4039 struct hci_conn *conn;
4041 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4045 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4046 if (conn && !ev->status) {
4047 struct inquiry_entry *ie;
4049 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4051 ie->data.clock_offset = ev->clock_offset;
4052 ie->timestamp = jiffies;
4056 hci_dev_unlock(hdev);
4059 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
4061 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
4062 struct hci_conn *conn;
4064 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4068 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4069 if (conn && !ev->status)
4070 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4072 hci_dev_unlock(hdev);
4075 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
4077 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
4078 struct inquiry_entry *ie;
4080 BT_DBG("%s", hdev->name);
4084 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4086 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4087 ie->timestamp = jiffies;
4090 hci_dev_unlock(hdev);
/* Handle the HCI Inquiry Result with RSSI event.
 *
 * This event exists in two wire formats (with and without a pscan_mode
 * field), distinguished by the per-response record size. Each response
 * is added to the inquiry cache and reported to mgmt as a found device.
 * Results are ignored while a periodic inquiry is running.
 */
4093 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
4094 struct sk_buff *skb)
4096 struct inquiry_data data;
4097 int num_rsp = *((__u8 *) skb->data);
4099 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4104 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Record size decides which of the two wire formats was received. */
4109 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
4110 struct inquiry_info_with_rssi_and_pscan_mode *info;
4111 info = (void *) (skb->data + 1);
4113 for (; num_rsp; num_rsp--, info++) {
4116 bacpy(&data.bdaddr, &info->bdaddr);
4117 data.pscan_rep_mode = info->pscan_rep_mode;
4118 data.pscan_period_mode = info->pscan_period_mode;
4119 data.pscan_mode = info->pscan_mode;
4120 memcpy(data.dev_class, info->dev_class, 3);
4121 data.clock_offset = info->clock_offset;
4122 data.rssi = info->rssi;
4123 data.ssp_mode = 0x00;
4125 flags = hci_inquiry_cache_update(hdev, &data, false);
4127 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4128 info->dev_class, info->rssi,
4129 flags, NULL, 0, NULL, 0);
/* Standard format without the pscan_mode field. */
4132 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
4134 for (; num_rsp; num_rsp--, info++) {
4137 bacpy(&data.bdaddr, &info->bdaddr);
4138 data.pscan_rep_mode = info->pscan_rep_mode;
4139 data.pscan_period_mode = info->pscan_period_mode;
4140 data.pscan_mode = 0x00;
4141 memcpy(data.dev_class, info->dev_class, 3);
4142 data.clock_offset = info->clock_offset;
4143 data.rssi = info->rssi;
4144 data.ssp_mode = 0x00;
4146 flags = hci_inquiry_cache_update(hdev, &data, false);
4148 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4149 info->dev_class, info->rssi,
4150 flags, NULL, 0, NULL, 0);
4154 hci_dev_unlock(hdev);
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Caches the reported feature page, derives SSP/SC enablement from the
 * remote host features on page 1 (clearing SSP for spec-violating
 * devices that don't advertise host support), then continues connection
 * setup: remote name resolution or mgmt notification, and completion if
 * no outgoing authentication is needed.
 */
4157 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
4158 struct sk_buff *skb)
4160 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
4161 struct hci_conn *conn;
4163 BT_DBG("%s", hdev->name);
4167 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Cache the feature page, guarding against out-of-range page numbers. */
4171 if (ev->page < HCI_MAX_PAGES)
4172 memcpy(conn->features[ev->page], ev->features, 8);
/* Page 1 carries the remote host's SSP and Secure Connections bits. */
4174 if (!ev->status && ev->page == 0x01) {
4175 struct inquiry_entry *ie;
4177 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4179 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4181 if (ev->features[0] & LMP_HOST_SSP) {
4182 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4184 /* It is mandatory by the Bluetooth specification that
4185 * Extended Inquiry Results are only used when Secure
4186 * Simple Pairing is enabled, but some devices violate
4189 * To make these devices work, the internal SSP
4190 * enabled flag needs to be cleared if the remote host
4191 * features do not indicate SSP support */
4192 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4195 if (ev->features[0] & LMP_HOST_SC)
4196 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4199 if (conn->state != BT_CONFIG)
/* Not yet announced to mgmt: resolve the remote name first; otherwise
 * report the device as connected now.
 */
4202 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4203 struct hci_cp_remote_name_req cp;
4204 memset(&cp, 0, sizeof(cp));
4205 bacpy(&cp.bdaddr, &conn->dst);
4206 cp.pscan_rep_mode = 0x02;
4207 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4208 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4209 mgmt_device_connected(hdev, conn, 0, NULL, 0);
/* No outgoing authentication required: setup is complete. */
4211 if (!hci_outgoing_auth_needed(hdev, conn)) {
4212 conn->state = BT_CONNECTED;
4213 hci_connect_cfm(conn, ev->status);
4214 hci_conn_drop(conn);
4218 hci_dev_unlock(hdev);
/* Handle the HCI Synchronous Connection Complete event (SCO/eSCO).
 *
 * Locates the pending synchronous connection (falling back from SCO to
 * an eSCO object when necessary), finalizes it on success, retries with
 * a downgraded packet-type selection on well-known negotiation
 * failures, and otherwise marks the connection closed. In all cases
 * upper layers get a connect confirmation.
 */
4221 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
4222 struct sk_buff *skb)
4224 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
4225 struct hci_conn *conn;
4227 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4231 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4233 if (ev->link_type == ESCO_LINK)
4236 /* When the link type in the event indicates SCO connection
4237 * and lookup of the connection object fails, then check
4238 * if an eSCO connection object exists.
4240 * The core limits the synchronous connections to either
4241 * SCO or eSCO. The eSCO connection is preferred and tried
4242 * to be setup first and until successfully established,
4243 * the link type will be hinted as eSCO.
4245 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4250 switch (ev->status) {
/* Success: finalize the connection and expose it via debugfs/sysfs. */
4252 conn->handle = __le16_to_cpu(ev->handle);
4253 conn->state = BT_CONNECTED;
4254 conn->type = ev->link_type;
4256 hci_debugfs_create_conn(conn);
4257 hci_conn_add_sysfs(conn);
/* Negotiation failures: retry setup with a reduced packet-type mask. */
4260 case 0x10: /* Connection Accept Timeout */
4261 case 0x0d: /* Connection Rejected due to Limited Resources */
4262 case 0x11: /* Unsupported Feature or Parameter Value */
4263 case 0x1c: /* SCO interval rejected */
4264 case 0x1a: /* Unsupported Remote Feature */
4265 case 0x1e: /* Invalid LMP Parameters */
4266 case 0x1f: /* Unspecified error */
4267 case 0x20: /* Unsupported LMP Parameter value */
4269 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4270 (hdev->esco_type & EDR_ESCO_MASK);
4271 if (hci_setup_sync(conn, conn->link->handle))
/* Any other failure is final. */
4277 conn->state = BT_CLOSED;
4281 hci_connect_cfm(conn, ev->status);
4286 hci_dev_unlock(hdev);
4289 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4293 while (parsed < eir_len) {
4294 u8 field_len = eir[0];
4299 parsed += field_len + 1;
4300 eir += field_len + 1;
/* Handle the HCI Extended Inquiry Result event.
 *
 * For every response, updates the inquiry cache (checking the embedded
 * EIR data for a complete name when mgmt is active) and reports the
 * device to mgmt together with the valid portion of its EIR data.
 * Results are ignored while a periodic inquiry is running.
 */
4306 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4307 struct sk_buff *skb)
4309 struct inquiry_data data;
4310 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4311 int num_rsp = *((__u8 *) skb->data);
4314 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4319 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4324 for (; num_rsp; num_rsp--, info++) {
4328 bacpy(&data.bdaddr, &info->bdaddr);
4329 data.pscan_rep_mode = info->pscan_rep_mode;
4330 data.pscan_period_mode = info->pscan_period_mode;
4331 data.pscan_mode = 0x00;
4332 memcpy(data.dev_class, info->dev_class, 3);
4333 data.clock_offset = info->clock_offset;
4334 data.rssi = info->rssi;
/* EIR results imply the remote supports Secure Simple Pairing. */
4335 data.ssp_mode = 0x01;
/* With mgmt active, skip name resolution when the EIR data already
 * contains the complete device name.
 */
4337 if (hci_dev_test_flag(hdev, HCI_MGMT))
4338 name_known = eir_get_data(info->data,
4340 EIR_NAME_COMPLETE, NULL);
4344 flags = hci_inquiry_cache_update(hdev, &data, name_known);
/* Only forward the well-formed prefix of the EIR data. */
4346 eir_len = eir_get_length(info->data, sizeof(info->data));
4348 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4349 info->dev_class, info->rssi,
4350 flags, info->data, eir_len, NULL, 0);
4353 hci_dev_unlock(hdev);
/* Handle the HCI Encryption Key Refresh Complete event.
 *
 * Only LE links are processed here (BR/EDR key refresh is handled via
 * the auth-complete path). On failure of an established link the
 * connection is torn down; otherwise the connection is promoted to
 * connected (if still configuring) or the auth callback is invoked.
 */
4356 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4357 struct sk_buff *skb)
4359 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4360 struct hci_conn *conn;
4362 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4363 __le16_to_cpu(ev->handle));
4367 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4371 /* For BR/EDR the necessary steps are taken through the
4372 * auth_complete event.
4374 if (conn->type != LE_LINK)
/* Success: the pending security level is now in effect. */
4378 conn->sec_level = conn->pending_sec_level;
4380 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Refresh failed on an established link: disconnect it. */
4382 if (ev->status && conn->state == BT_CONNECTED) {
4383 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4384 hci_conn_drop(conn);
/* Still configuring: finish connection setup; otherwise report the
 * authentication result and extend the disconnect timeout.
 */
4388 if (conn->state == BT_CONFIG) {
4390 conn->state = BT_CONNECTED;
4392 hci_connect_cfm(conn, ev->status);
4393 hci_conn_drop(conn);
4395 hci_auth_cfm(conn, ev->status);
4397 hci_conn_hold(conn);
4398 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4399 hci_conn_drop(conn);
4403 hci_dev_unlock(hdev);
4406 static u8 hci_get_auth_req(struct hci_conn *conn)
4408 /* If remote requests no-bonding follow that lead */
4409 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4410 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4411 return conn->remote_auth | (conn->auth_type & 0x01);
4413 /* If both remote and local have enough IO capabilities, require
4416 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4417 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4418 return conn->remote_auth | 0x01;
4420 /* No MITM protection possible so ignore remote requirement */
4421 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Report whether usable BR/EDR OOB pairing data is stored for this
 * peer, honoring Secure Connections / SC-Only requirements (P-256 vs
 * P-192 values; all-zero keys count as absent).
 * NOTE(review): extraction-damaged fragment — the early-return for a
 * missing lookup result and the final return statements are among the
 * lines missing from this view; code kept byte-identical.
 */
4424 static u8 bredr_oob_data_present(struct hci_conn *conn)
4426 struct hci_dev *hdev = conn->hdev;
4427 struct oob_data *data;
4429 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4433 if (bredr_sc_enabled(hdev)) {
4434 /* When Secure Connections is enabled, then just
4435 * return the present value stored with the OOB
4436 * data. The stored value contains the right present
4437 * information. However it can only be trusted when
4438 * not in Secure Connection Only mode.
4440 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
4441 return data->present;
4443 /* When Secure Connections Only mode is enabled, then
4444 * the P-256 values are required. If they are not
4445 * available, then do not declare that OOB data is
4448 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
4449 !memcmp(data->hash256, ZERO_KEY, 16))
4455 /* When Secure Connections is not enabled or actually
4456 * not supported by the hardware, then check that if
4457 * P-192 data values are present.
4459 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
4460 !memcmp(data->hash192, ZERO_KEY, 16))
/* IO Capability Request event handler: decide whether pairing is
 * allowed and answer with either an IO Capability Reply (capability,
 * auth requirement, OOB presence) or a negative reply.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/NULL-check/brace lines); code kept byte-identical.
 */
4466 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4468 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4469 struct hci_conn *conn;
4471 BT_DBG("%s", hdev->name);
4475 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Keep the connection alive while pairing is in progress. */
4479 hci_conn_hold(conn);
4481 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4484 /* Allow pairing if we're pairable, the initiators of the
4485 * pairing or if the remote is not requesting bonding.
4487 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
4488 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4489 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4490 struct hci_cp_io_capability_reply cp;
4492 bacpy(&cp.bdaddr, &ev->bdaddr);
4493 /* Change the IO capability from KeyboardDisplay
4494 * to DisplayYesNo as it is not supported by BT spec. */
4495 cp.capability = (conn->io_capability == 0x04) ?
4496 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4498 /* If we are initiators, there is no remote information yet */
4499 if (conn->remote_auth == 0xff) {
4500 /* Request MITM protection if our IO caps allow it
4501 * except for the no-bonding case.
4503 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4504 conn->auth_type != HCI_AT_NO_BONDING)
4505 conn->auth_type |= 0x01;
4507 conn->auth_type = hci_get_auth_req(conn);
4510 /* If we're not bondable, force one of the non-bondable
4511 * authentication requirement values.
4513 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
4514 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4516 cp.authentication = conn->auth_type;
4517 cp.oob_data = bredr_oob_data_present(conn);
4519 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
/* Pairing not allowed: send a negative reply with an explicit reason. */
4522 struct hci_cp_io_capability_neg_reply cp;
4524 bacpy(&cp.bdaddr, &ev->bdaddr);
4525 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4527 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4532 hci_dev_unlock(hdev);
/* IO Capability Response event handler: cache the remote device's IO
 * capability and authentication requirement on the connection object.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/NULL-check lines); code kept byte-identical.
 */
4535 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4537 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4538 struct hci_conn *conn;
4540 BT_DBG("%s", hdev->name);
4544 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4548 conn->remote_cap = ev->capability;
4549 conn->remote_auth = ev->authentication;
4552 hci_dev_unlock(hdev);
/* User Confirmation Request event handler (SSP numeric comparison):
 * reject if MITM is required but impossible, auto-accept when neither
 * side needs MITM (optionally after a configured delay), otherwise
 * forward the request to user space via mgmt.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/goto/brace lines); code kept byte-identical.
 */
4555 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4556 struct sk_buff *skb)
4558 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4559 int loc_mitm, rem_mitm, confirm_hint = 0;
4560 struct hci_conn *conn;
4562 BT_DBG("%s", hdev->name);
4566 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4569 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Bit 0 of the auth requirement is the MITM flag on each side. */
4573 loc_mitm = (conn->auth_type & 0x01);
4574 rem_mitm = (conn->remote_auth & 0x01);
4576 /* If we require MITM but the remote device can't provide that
4577 * (it has NoInputNoOutput) then reject the confirmation
4578 * request. We check the security level here since it doesn't
4579 * necessarily match conn->auth_type.
4581 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4582 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4583 BT_DBG("Rejecting request: remote device can't provide MITM");
4584 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4585 sizeof(ev->bdaddr), &ev->bdaddr);
4589 /* If no side requires MITM protection; auto-accept */
4590 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4591 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4593 /* If we're not the initiators request authorization to
4594 * proceed from user space (mgmt_user_confirm with
4595 * confirm_hint set to 1). The exception is if neither
4596 * side had MITM or if the local IO capability is
4597 * NoInputNoOutput, in which case we do auto-accept
4599 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4600 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4601 (loc_mitm || rem_mitm)) {
4602 BT_DBG("Confirming auto-accept as acceptor");
4607 BT_DBG("Auto-accept of user confirmation with %ums delay",
4608 hdev->auto_accept_delay);
/* Optional delayed auto-accept via the connection's work item. */
4610 if (hdev->auto_accept_delay > 0) {
4611 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4612 queue_delayed_work(conn->hdev->workqueue,
4613 &conn->auto_accept_work, delay);
4617 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4618 sizeof(ev->bdaddr), &ev->bdaddr);
/* Hand the decision to user space with the computed hint. */
4623 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4624 le32_to_cpu(ev->passkey), confirm_hint);
4627 hci_dev_unlock(hdev);
/* User Passkey Request event handler: forward the request to user
 * space via mgmt when the management interface is active.
 * NOTE(review): extraction-damaged fragment (fused line numbers);
 * code kept byte-identical.
 */
4630 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4631 struct sk_buff *skb)
4633 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4635 BT_DBG("%s", hdev->name);
4637 if (hci_dev_test_flag(hdev, HCI_MGMT))
4638 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* User Passkey Notification event handler: store the passkey to
 * display, reset the entered-digit counter and notify user space.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing NULL-check line); code kept byte-identical.
 */
4641 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4642 struct sk_buff *skb)
4644 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4645 struct hci_conn *conn;
4647 BT_DBG("%s", hdev->name);
4649 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4653 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4654 conn->passkey_entered = 0;
4656 if (hci_dev_test_flag(hdev, HCI_MGMT))
4657 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4658 conn->dst_type, conn->passkey_notify,
4659 conn->passkey_entered);
/* Keypress Notification event handler: track the number of passkey
 * digits the remote user has entered/erased and relay the updated
 * count to user space.
 * NOTE(review): extraction-damaged fragment — the switch statement
 * header and break lines are missing from this view; code kept
 * byte-identical.
 */
4662 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4664 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4665 struct hci_conn *conn;
4667 BT_DBG("%s", hdev->name);
4669 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4674 case HCI_KEYPRESS_STARTED:
4675 conn->passkey_entered = 0;
4678 case HCI_KEYPRESS_ENTERED:
4679 conn->passkey_entered++;
4682 case HCI_KEYPRESS_ERASED:
4683 conn->passkey_entered--;
4686 case HCI_KEYPRESS_CLEARED:
4687 conn->passkey_entered = 0;
4690 case HCI_KEYPRESS_COMPLETED:
4694 if (hci_dev_test_flag(hdev, HCI_MGMT))
4695 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4696 conn->dst_type, conn->passkey_notify,
4697 conn->passkey_entered);
/* Simple Pairing Complete event handler: reset the cached remote auth
 * requirement and report a pairing failure to mgmt when we were not
 * the authentication initiator.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/NULL-check lines); code kept byte-identical.
 */
4700 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4701 struct sk_buff *skb)
4703 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4704 struct hci_conn *conn;
4706 BT_DBG("%s", hdev->name);
4710 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4714 /* Reset the authentication requirement to unknown */
4715 conn->remote_auth = 0xff;
4717 /* To avoid duplicate auth_failed events to user space we check
4718 * the HCI_CONN_AUTH_PEND flag which will be set if we
4719 * initiated the authentication. A traditional auth_complete
4720 * event gets always produced as initiator and is also mapped to
4721 * the mgmt_auth_failed event */
4722 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4723 mgmt_auth_failed(conn, ev->status);
/* Drop the reference taken when pairing started. */
4725 hci_conn_drop(conn);
4728 hci_dev_unlock(hdev);
/* Remote Host Supported Features event handler: cache the remote host
 * feature page on the connection (if any) and record the peer's SSP
 * support in the inquiry cache.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock and NULL-check lines); code kept byte-identical.
 */
4731 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4732 struct sk_buff *skb)
4734 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4735 struct inquiry_entry *ie;
4736 struct hci_conn *conn;
4738 BT_DBG("%s", hdev->name);
4742 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4744 memcpy(conn->features[1], ev->features, 8);
4746 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4748 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4750 hci_dev_unlock(hdev);
/* Remote OOB Data Request event handler: reply with stored OOB hash
 * and randomizer values (extended P-192+P-256 form when Secure
 * Connections is enabled, P-192 zeroed in SC-Only mode), or a negative
 * reply when nothing is stored.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/branch lines); code kept byte-identical.
 */
4753 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4754 struct sk_buff *skb)
4756 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4757 struct oob_data *data;
4759 BT_DBG("%s", hdev->name);
4763 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4766 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored data for this peer: negative reply. */
4768 struct hci_cp_remote_oob_data_neg_reply cp;
4770 bacpy(&cp.bdaddr, &ev->bdaddr);
4771 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4776 if (bredr_sc_enabled(hdev)) {
4777 struct hci_cp_remote_oob_ext_data_reply cp;
4779 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-Only mode the legacy P-192 values must not be used. */
4780 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4781 memset(cp.hash192, 0, sizeof(cp.hash192));
4782 memset(cp.rand192, 0, sizeof(cp.rand192));
4784 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4785 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4787 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4788 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4790 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy reply carries only the P-192 values. */
4793 struct hci_cp_remote_oob_data_reply cp;
4795 bacpy(&cp.bdaddr, &ev->bdaddr);
4796 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4797 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4799 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4804 hci_dev_unlock(hdev);
4807 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event handler (CONFIG_BT_HS): look up the
 * physical link and kick off reading the local AMP assoc data.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing NULL-check line); code kept byte-identical.
 */
4808 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4810 struct hci_ev_channel_selected *ev = (void *)skb->data;
4811 struct hci_conn *hcon;
4813 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4815 skb_pull(skb, sizeof(*ev));
4817 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4821 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event handler: mark the AMP connection
 * connected, copy the peer address from the associated BR/EDR link,
 * register it with debugfs/sysfs and confirm to the AMP manager.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/NULL-check/error-path lines); code kept byte-identical.
 */
4824 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4825 struct sk_buff *skb)
4827 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4828 struct hci_conn *hcon, *bredr_hcon;
4830 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4835 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4837 hci_dev_unlock(hdev);
4843 hci_dev_unlock(hdev);
/* The AMP link inherits its destination from the BR/EDR connection
 * that the AMP manager is bridging.
 */
4847 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4849 hcon->state = BT_CONNECTED;
4850 bacpy(&hcon->dst, &bredr_hcon->dst);
4852 hci_conn_hold(hcon);
4853 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4854 hci_conn_drop(hcon);
4856 hci_debugfs_create_conn(hcon);
4857 hci_conn_add_sysfs(hcon);
4859 amp_physical_cfm(bredr_hcon, hcon);
4861 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event handler: create an hci_chan for the
 * new logical link and, if an L2CAP channel is waiting on the AMP
 * manager, confirm the logical link to L2CAP.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/NULL-check lines); code kept byte-identical.
 */
4864 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4866 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4867 struct hci_conn *hcon;
4868 struct hci_chan *hchan;
4869 struct amp_mgr *mgr;
4871 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4872 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4875 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4879 /* Create AMP hchan */
4880 hchan = hci_chan_create(hcon);
4884 hchan->handle = le16_to_cpu(ev->handle);
4886 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4888 mgr = hcon->amp_mgr;
4889 if (mgr && mgr->bredr_chan) {
4890 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4892 l2cap_chan_lock(bredr_chan);
/* The AMP controller's block MTU becomes the L2CAP conn MTU. */
4894 bredr_chan->conn->mtu = hdev->block_mtu;
4895 l2cap_logical_cfm(bredr_chan, hchan, 0);
4896 hci_conn_hold(hcon);
4898 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnect Logical Link Complete event handler: find the hci_chan
 * by logical-link handle and destroy it.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/status-check/NULL-check lines); code kept
 * byte-identical.
 */
4902 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4903 struct sk_buff *skb)
4905 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4906 struct hci_chan *hchan;
4908 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4909 le16_to_cpu(ev->handle), ev->status);
4916 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4920 amp_destroy_logical_link(hchan, ev->reason);
4923 hci_dev_unlock(hdev);
/* AMP Disconnect Physical Link Complete event handler: mark the AMP
 * connection closed.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/status-check lines); code kept byte-identical.
 */
4926 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4927 struct sk_buff *skb)
4929 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4930 struct hci_conn *hcon;
4932 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4939 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4941 hcon->state = BT_CLOSED;
4945 hci_dev_unlock(hdev);
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete: create or complete the hci_conn, fix up initiator/responder
 * address bookkeeping, resolve RPAs to identity addresses, reject
 * blocked peers, notify mgmt, and either start the remote-features
 * exchange or transition straight to connected.
 * NOTE(review): extraction-damaged fragment — source line numbers are
 * fused into the text and many lines (locking, role checks, gotos,
 * braces) are missing; code kept byte-identical.
 */
4949 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
4950 bdaddr_t *bdaddr, u8 bdaddr_type, u8 role, u16 handle,
4951 u16 interval, u16 latency, u16 supervision_timeout)
4953 struct hci_conn_params *params;
4954 struct hci_conn *conn;
4955 struct smp_irk *irk;
4960 /* All controllers implicitly stop advertising in the event of a
4961 * connection, so ensure that the state bit is cleared.
4963 hci_dev_clear_flag(hdev, HCI_LE_ADV);
4965 conn = hci_lookup_le_connect(hdev);
4967 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
4969 bt_dev_err(hdev, "no memory for new connection");
4973 conn->dst_type = bdaddr_type;
4975 /* If we didn't have a hci_conn object previously
4976 * but we're in master role this must be something
4977 * initiated using a white list. Since white list based
4978 * connections are not "first class citizens" we don't
4979 * have full tracking of them. Therefore, we go ahead
4980 * with a "best effort" approach of determining the
4981 * initiator address based on the HCI_PRIVACY flag.
4984 conn->resp_addr_type = bdaddr_type;
4985 bacpy(&conn->resp_addr, bdaddr);
4986 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
4987 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4988 bacpy(&conn->init_addr, &hdev->rpa);
4990 hci_copy_identity_address(hdev,
4992 &conn->init_addr_type);
4996 cancel_delayed_work(&conn->le_conn_timeout);
5000 /* Set the responder (our side) address type based on
5001 * the advertising address type.
5003 conn->resp_addr_type = hdev->adv_addr_type;
5004 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5005 /* In case of ext adv, resp_addr will be updated in
5006 * Adv Terminated event.
5008 if (!ext_adv_capable(hdev))
5009 bacpy(&conn->resp_addr, &hdev->random_addr);
5011 bacpy(&conn->resp_addr, &hdev->bdaddr);
5014 conn->init_addr_type = bdaddr_type;
5015 bacpy(&conn->init_addr, bdaddr);
5017 /* For incoming connections, set the default minimum
5018 * and maximum connection interval. They will be used
5019 * to check if the parameters are in range and if not
5020 * trigger the connection update procedure.
5022 conn->le_conn_min_interval = hdev->le_conn_min_interval;
5023 conn->le_conn_max_interval = hdev->le_conn_max_interval;
5026 /* Lookup the identity address from the stored connection
5027 * address and address type.
5029 * When establishing connections to an identity address, the
5030 * connection procedure will store the resolvable random
5031 * address first. Now if it can be converted back into the
5032 * identity address, start using the identity address from
5035 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5037 bacpy(&conn->dst, &irk->bdaddr);
5038 conn->dst_type = irk->addr_type;
5042 hci_le_conn_failed(conn, status);
5046 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5047 addr_type = BDADDR_LE_PUBLIC;
5049 addr_type = BDADDR_LE_RANDOM;
5051 /* Drop the connection if the device is blocked */
5052 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
5053 hci_conn_drop(conn);
5057 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5058 mgmt_device_connected(hdev, conn, 0, NULL, 0);
5060 conn->sec_level = BT_SECURITY_LOW;
5061 conn->handle = handle;
5062 conn->state = BT_CONFIG;
5064 conn->le_conn_interval = interval;
5065 conn->le_conn_latency = latency;
5066 conn->le_supv_timeout = supervision_timeout;
5068 hci_debugfs_create_conn(conn);
5069 hci_conn_add_sysfs(conn);
5071 /* The remote features procedure is defined for master
5072 * role only. So only in case of an initiated connection
5073 * request the remote features.
5075 * If the local controller supports slave-initiated features
5076 * exchange, then requesting the remote features in slave
5077 * role is possible. Otherwise just transition into the
5078 * connected state without requesting the remote features.
5081 (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
5082 struct hci_cp_le_read_remote_features cp;
5084 cp.handle = __cpu_to_le16(conn->handle);
5086 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5089 hci_conn_hold(conn);
5091 conn->state = BT_CONNECTED;
5092 hci_connect_cfm(conn, status);
/* Connection established: clear any pending auto-connect action that
 * triggered it.
 */
5095 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
/* NOTE(review): "¶ms" below is mojibake for "&params" — repair when
 * restoring the file from its pristine source.
 */
5098 list_del_init(¶ms->action);
5100 hci_conn_drop(params->conn);
5101 hci_conn_put(params->conn);
5102 params->conn = NULL;
5107 hci_update_background_scan(hdev);
5108 hci_dev_unlock(hdev);
/* LE Connection Complete event: unpack the legacy event fields and
 * delegate to the common le_conn_complete_evt() handler.
 * NOTE(review): extraction-damaged fragment (fused line numbers);
 * code kept byte-identical.
 */
5111 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
5113 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
5115 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5117 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5118 ev->role, le16_to_cpu(ev->handle),
5119 le16_to_cpu(ev->interval),
5120 le16_to_cpu(ev->latency),
5121 le16_to_cpu(ev->supervision_timeout));
/* LE Enhanced Connection Complete event: same delegation as the legacy
 * handler, unpacking the enhanced event structure.
 * NOTE(review): extraction-damaged fragment (fused line numbers);
 * code kept byte-identical.
 */
5124 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev,
5125 struct sk_buff *skb)
5127 struct hci_ev_le_enh_conn_complete *ev = (void *) skb->data;
5129 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5131 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5132 ev->role, le16_to_cpu(ev->handle),
5133 le16_to_cpu(ev->interval),
5134 le16_to_cpu(ev->latency),
5135 le16_to_cpu(ev->supervision_timeout));
/* LE Advertising Set Terminated event handler: when an advertising set
 * ended because a connection was created, fill in the connection's
 * responder address from the set's random address (deferred from
 * le_conn_complete_evt for extended advertising).
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing status/NULL-check lines); code kept byte-identical.
 */
5138 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, struct sk_buff *skb)
5140 struct hci_evt_le_ext_adv_set_term *ev = (void *) skb->data;
5141 struct hci_conn *conn;
5143 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
5150 struct adv_info *adv_instance;
5152 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM)
/* Instance 0 uses the controller-wide random address; other instances
 * carry their own.
 */
5155 if (!hdev->cur_adv_instance) {
5156 bacpy(&conn->resp_addr, &hdev->random_addr);
5160 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
5162 bacpy(&conn->resp_addr, &adv_instance->random_addr);
/* LE Connection Update Complete event handler: record the newly
 * negotiated interval, latency and supervision timeout on the
 * connection.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/status/NULL-check lines); code kept byte-identical.
 */
5166 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
5167 struct sk_buff *skb)
5169 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
5170 struct hci_conn *conn;
5172 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5179 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5181 conn->le_conn_interval = le16_to_cpu(ev->interval);
5182 conn->le_conn_latency = le16_to_cpu(ev->latency);
5183 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
5186 hci_dev_unlock(hdev);
5189 /* This function requires the caller holds hdev->lock */
/* Decide, based on an incoming advertising report, whether to initiate
 * an LE connection to the advertiser: honors the blocklist, the slave
 * connection limit, and the pend_le_conns auto-connect policy; returns
 * the hci_conn (owned per the comment below) or NULL.
 * NOTE(review): extraction-damaged fragment — source line numbers are
 * fused into the text and several lines (returns, switch header,
 * braces) are missing; code kept byte-identical.
 */
5190 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
5192 u8 addr_type, u8 adv_type,
5193 bdaddr_t *direct_rpa)
5195 struct hci_conn *conn;
5196 struct hci_conn_params *params;
5198 /* If the event is not connectable don't proceed further */
5199 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
5202 /* Ignore if the device is blocked */
5203 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
5206 /* Most controller will fail if we try to create new connections
5207 * while we have an existing one in slave role.
5209 if (hdev->conn_hash.le_num_slave > 0)
5212 /* If we're not connectable only connect devices that we have in
5213 * our pend_le_conns list.
5215 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
5220 if (!params->explicit_connect) {
5221 switch (params->auto_connect) {
5222 case HCI_AUTO_CONN_DIRECT:
5223 /* Only devices advertising with ADV_DIRECT_IND are
5224 * triggering a connection attempt. This is allowing
5225 * incoming connections from slave devices.
5227 if (adv_type != LE_ADV_DIRECT_IND)
5230 case HCI_AUTO_CONN_ALWAYS:
5231 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
5232 * are triggering a connection attempt. This means
5233 * that incoming connectioms from slave device are
5234 * accepted and also outgoing connections to slave
5235 * devices are established when found.
5243 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
5244 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
5246 if (!IS_ERR(conn)) {
5247 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
5248 * by higher layer that tried to connect, if no then
5249 * store the pointer since we don't really have any
5250 * other owner of the object besides the params that
5251 * triggered it. This way we can abort the connection if
5252 * the parameters get removed and keep the reference
5253 * count consistent once the connection is established.
5256 if (!params->explicit_connect)
5257 params->conn = hci_conn_get(conn);
5262 switch (PTR_ERR(conn)) {
5264 /* If hci_connect() returns -EBUSY it means there is already
5265 * an LE connection attempt going on. Since controllers don't
5266 * support more than one connection attempt at the time, we
5267 * don't consider this an error case.
5271 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising-report processor: validates the PDU type and AD
 * data length, handles directed-advertising RPA matching, resolves the
 * advertiser's identity address, may trigger an auto-connection, and
 * generates (possibly merged ADV+SCAN_RSP) mgmt device-found events,
 * with special-casing for passive scans.
 * NOTE(review): extraction-damaged fragment — source line numbers are
 * fused into the text and many lines (switch header, returns, local
 * declarations such as ptr/real_len/flags/match, braces) are missing;
 * code kept byte-identical.
 */
5278 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
5279 u8 bdaddr_type, bdaddr_t *direct_addr,
5280 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
5282 struct discovery_state *d = &hdev->discovery;
5283 struct smp_irk *irk;
5284 struct hci_conn *conn;
5291 case LE_ADV_DIRECT_IND:
5292 case LE_ADV_SCAN_IND:
5293 case LE_ADV_NONCONN_IND:
5294 case LE_ADV_SCAN_RSP:
5297 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
5298 "type: 0x%02x", type);
5302 /* Find the end of the data in case the report contains padded zero
5303 * bytes at the end causing an invalid length value.
5305 * When data is NULL, len is 0 so there is no need for extra ptr
5306 * check as 'ptr < data + 0' is already false in such case.
5308 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
5309 if (ptr + 1 + *ptr > data + len)
5313 real_len = ptr - data;
5315 /* Adjust for actual length */
5316 if (len != real_len) {
5317 bt_dev_err_ratelimited(hdev, "advertising data len corrected");
5321 /* If the direct address is present, then this report is from
5322 * a LE Direct Advertising Report event. In that case it is
5323 * important to see if the address is matching the local
5324 * controller address.
5327 /* Only resolvable random addresses are valid for these
5328 * kind of reports and others can be ignored.
5330 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
5333 /* If the controller is not using resolvable random
5334 * addresses, then this report can be ignored.
5336 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
5339 /* If the local IRK of the controller does not match
5340 * with the resolvable random address provided, then
5341 * this report can be ignored.
5343 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
5347 /* Check if we need to convert to identity address */
5348 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
5350 bdaddr = &irk->bdaddr;
5351 bdaddr_type = irk->addr_type;
5354 /* Check if we have been requested to connect to this device.
5356 * direct_addr is set only for directed advertising reports (it is NULL
5357 * for advertising reports) and is already verified to be RPA above.
5359 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
5361 if (conn && type == LE_ADV_IND) {
5362 /* Store report for later inclusion by
5363 * mgmt_device_connected
5365 memcpy(conn->le_adv_data, data, len);
5366 conn->le_adv_data_len = len;
5369 /* Passive scanning shouldn't trigger any device found events,
5370 * except for devices marked as CONN_REPORT for which we do send
5371 * device found events.
5373 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
5374 if (type == LE_ADV_DIRECT_IND)
5377 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
5378 bdaddr, bdaddr_type))
5381 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
5382 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5385 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5386 rssi, flags, data, len, NULL, 0);
5390 /* When receiving non-connectable or scannable undirected
5391 * advertising reports, this means that the remote device is
5392 * not connectable and then clearly indicate this in the
5393 * device found event.
5395 * When receiving a scan response, then there is no way to
5396 * know if the remote device is connectable or not. However
5397 * since scan responses are merged with a previously seen
5398 * advertising report, the flags field from that report
5401 * In the really unlikely case that a controller get confused
5402 * and just sends a scan response event, then it is marked as
5403 * not connectable as well.
5405 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5406 type == LE_ADV_SCAN_RSP)
5407 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5411 /* If there's nothing pending either store the data from this
5412 * event or send an immediate device found event if the data
5413 * should not be stored for later.
5415 if (!has_pending_adv_report(hdev)) {
5416 /* If the report will trigger a SCAN_REQ store it for
5419 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5420 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5421 rssi, flags, data, len);
5425 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5426 rssi, flags, data, len, NULL, 0);
5430 /* Check if the pending report is for the same device as the new one */
5431 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5432 bdaddr_type == d->last_adv_addr_type);
5434 /* If the pending data doesn't match this report or this isn't a
5435 * scan response (e.g. we got a duplicate ADV_IND) then force
5436 * sending of the pending data.
5438 if (type != LE_ADV_SCAN_RSP || !match) {
5439 /* Send out whatever is in the cache, but skip duplicates */
5441 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5442 d->last_adv_addr_type, NULL,
5443 d->last_adv_rssi, d->last_adv_flags,
5445 d->last_adv_data_len, NULL, 0);
5447 /* If the new report will trigger a SCAN_REQ store it for
5450 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5451 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5452 rssi, flags, data, len);
5456 /* The advertising reports cannot be merged, so clear
5457 * the pending report and send out a device found event.
5459 clear_pending_adv_report(hdev);
5460 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5461 rssi, flags, data, len, NULL, 0);
5465 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5466 * the new event is a SCAN_RSP. We can therefore proceed with
5467 * sending a merged device found event.
5469 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5470 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5471 d->last_adv_data, d->last_adv_data_len, data, len);
5472 clear_pending_adv_report(hdev);
/* LE Advertising Report event handler: iterate over the packed report
 * entries, validate each AD data length (the RSSI byte follows the
 * data) and hand each report to process_adv_report().
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock and local declarations); code kept byte-identical.
 */
5475 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5477 u8 num_reports = skb->data[0];
5478 void *ptr = &skb->data[1];
5482 while (num_reports--) {
5483 struct hci_ev_le_advertising_info *ev = ptr;
5486 if (ev->length <= HCI_MAX_AD_LENGTH) {
5487 rssi = ev->data[ev->length];
5488 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5489 ev->bdaddr_type, NULL, 0, rssi,
5490 ev->data, ev->length);
5492 bt_dev_err(hdev, "Dropping invalid advertising data");
/* Advance past this entry: header + AD data + trailing RSSI byte. */
5495 ptr += sizeof(*ev) + ev->length + 1;
5498 hci_dev_unlock(hdev);
/* Map an extended-advertising event type (bitfield) to the legacy
 * advertising PDU type used by process_adv_report(); returns
 * LE_ADV_INVALID for combinations that have no legacy equivalent.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing switch header and some returns); code kept byte-identical.
 */
5501 static u8 ext_evt_type_to_legacy(u16 evt_type)
5503 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
5505 case LE_LEGACY_ADV_IND:
5507 case LE_LEGACY_ADV_DIRECT_IND:
5508 return LE_ADV_DIRECT_IND;
5509 case LE_LEGACY_ADV_SCAN_IND:
5510 return LE_ADV_SCAN_IND;
5511 case LE_LEGACY_NONCONN_IND:
5512 return LE_ADV_NONCONN_IND;
5513 case LE_LEGACY_SCAN_RSP_ADV:
5514 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
5515 return LE_ADV_SCAN_RSP;
5518 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5521 return LE_ADV_INVALID;
/* Non-legacy PDUs: decode from the individual property bits. */
5524 if (evt_type & LE_EXT_ADV_CONN_IND) {
5525 if (evt_type & LE_EXT_ADV_DIRECT_IND)
5526 return LE_ADV_DIRECT_IND;
5531 if (evt_type & LE_EXT_ADV_SCAN_RSP)
5532 return LE_ADV_SCAN_RSP;
5534 if (evt_type & LE_EXT_ADV_SCAN_IND)
5535 return LE_ADV_SCAN_IND;
5537 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
5538 evt_type & LE_EXT_ADV_DIRECT_IND)
5539 return LE_ADV_NONCONN_IND;
5541 BT_ERR_RATELIMITED("Unknown advertising packet type: 0x%02x",
5544 return LE_ADV_INVALID;
/* LE Extended Advertising Report event handler: translate each entry's
 * extended event type to a legacy PDU type and hand valid reports to
 * process_adv_report().
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock and local declarations); code kept byte-identical.
 */
5547 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5549 u8 num_reports = skb->data[0];
5550 void *ptr = &skb->data[1];
5554 while (num_reports--) {
5555 struct hci_ev_le_ext_adv_report *ev = ptr;
5559 evt_type = __le16_to_cpu(ev->evt_type);
5560 legacy_evt_type = ext_evt_type_to_legacy(evt_type);
5561 if (legacy_evt_type != LE_ADV_INVALID) {
5562 process_adv_report(hdev, legacy_evt_type, &ev->bdaddr,
5563 ev->bdaddr_type, NULL, 0, ev->rssi,
5564 ev->data, ev->length);
5567 ptr += sizeof(*ev) + ev->length;
5570 hci_dev_unlock(hdev);
/* LE Read Remote Features Complete event handler: cache the remote LE
 * feature page and finish connection setup, tolerating the specific
 * "unsupported remote feature" (0x1a) error in slave role.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/NULL-check/status-local lines); code kept
 * byte-identical.
 */
5573 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
5574 struct sk_buff *skb)
5576 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
5577 struct hci_conn *conn;
5579 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
5583 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5586 memcpy(conn->features[0], ev->features, 8);
5588 if (conn->state == BT_CONFIG) {
5591 /* If the local controller supports slave-initiated
5592 * features exchange, but the remote controller does
5593 * not, then it is possible that the error code 0x1a
5594 * for unsupported remote feature gets returned.
5596 * In this specific case, allow the connection to
5597 * transition into connected state and mark it as
5600 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
5601 !conn->out && ev->status == 0x1a)
5604 status = ev->status;
5606 conn->state = BT_CONNECTED;
5607 hci_connect_cfm(conn, status);
5608 hci_conn_drop(conn);
5612 hci_dev_unlock(hdev);
/* LE Long Term Key Request event handler: find the LTK for the peer,
 * verify EDiv/Rand (zero for SC keys, matching values for legacy keys)
 * and send an LTK Reply; send a negative reply when no valid key is
 * found. Single-use STKs are deleted after use.
 * NOTE(review): extraction-damaged fragment (fused line numbers,
 * missing lock/NULL-check/goto lines); code kept byte-identical.
 */
5615 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5617 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5618 struct hci_cp_le_ltk_reply cp;
5619 struct hci_cp_le_ltk_neg_reply neg;
5620 struct hci_conn *conn;
5621 struct smp_ltk *ltk;
5623 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5627 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5631 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5635 if (smp_ltk_is_sc(ltk)) {
5636 /* With SC both EDiv and Rand are set to zero */
5637 if (ev->ediv || ev->rand)
5640 /* For non-SC keys check that EDiv and Rand match */
5641 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key, zero-padding beyond the negotiated key size. */
5645 memcpy(cp.ltk, ltk->val, ltk->enc_size);
5646 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
5647 cp.handle = cpu_to_le16(conn->handle);
5649 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5651 conn->enc_key_size = ltk->enc_size;
5653 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5655 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5656 * temporary key used to encrypt a connection following
5657 * pairing. It is used during the Encrypted Session Setup to
5658 * distribute the keys. Later, security can be re-established
5659 * using a distributed LTK.
5661 if (ltk->type == SMP_STK) {
5662 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
/* NOTE(review): "<k" below is mojibake for "&ltk" — repair when
 * restoring the file from its pristine source.
 */
5663 list_del_rcu(<k->list);
5664 kfree_rcu(ltk, rcu);
5666 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5669 hci_dev_unlock(hdev);
/* Failure path: no matching key, answer with a negative reply. */
5674 neg.handle = ev->handle;
5675 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5676 hci_dev_unlock(hdev);
/* Helper: send an LE Remote Connection Parameter Request Negative
 * Reply for the given handle (the reason assignment line is missing
 * from this extraction-damaged view; code kept byte-identical).
 */
5679 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5682 struct hci_cp_le_conn_param_req_neg_reply cp;
5684 cp.handle = cpu_to_le16(handle);
5687 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
5691 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5692 struct sk_buff *skb)
5694 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5695 struct hci_cp_le_conn_param_req_reply cp;
5696 struct hci_conn *hcon;
5697 u16 handle, min, max, latency, timeout;
5699 handle = le16_to_cpu(ev->handle);
5700 min = le16_to_cpu(ev->interval_min);
5701 max = le16_to_cpu(ev->interval_max);
5702 latency = le16_to_cpu(ev->latency);
5703 timeout = le16_to_cpu(ev->timeout);
5705 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5706 if (!hcon || hcon->state != BT_CONNECTED)
5707 return send_conn_param_neg_reply(hdev, handle,
5708 HCI_ERROR_UNKNOWN_CONN_ID);
5710 if (hci_check_conn_params(min, max, latency, timeout))
5711 return send_conn_param_neg_reply(hdev, handle,
5712 HCI_ERROR_INVALID_LL_PARAMS);
5714 if (hcon->role == HCI_ROLE_MASTER) {
5715 struct hci_conn_params *params;
5720 params = hci_conn_params_lookup(hdev, &hcon->dst,
5723 params->conn_min_interval = min;
5724 params->conn_max_interval = max;
5725 params->conn_latency = latency;
5726 params->supervision_timeout = timeout;
5732 hci_dev_unlock(hdev);
5734 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5735 store_hint, min, max, latency, timeout);
5738 cp.handle = ev->handle;
5739 cp.interval_min = ev->interval_min;
5740 cp.interval_max = ev->interval_max;
5741 cp.latency = ev->latency;
5742 cp.timeout = ev->timeout;
5746 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5749 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5750 struct sk_buff *skb)
5752 u8 num_reports = skb->data[0];
5753 void *ptr = &skb->data[1];
5757 while (num_reports--) {
5758 struct hci_ev_le_direct_adv_info *ev = ptr;
5760 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5761 ev->bdaddr_type, &ev->direct_addr,
5762 ev->direct_addr_type, ev->rssi, NULL, 0);
5767 hci_dev_unlock(hdev);
5770 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5772 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5774 skb_pull(skb, sizeof(*le_ev));
5776 switch (le_ev->subevent) {
5777 case HCI_EV_LE_CONN_COMPLETE:
5778 hci_le_conn_complete_evt(hdev, skb);
5781 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5782 hci_le_conn_update_complete_evt(hdev, skb);
5785 case HCI_EV_LE_ADVERTISING_REPORT:
5786 hci_le_adv_report_evt(hdev, skb);
5789 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5790 hci_le_remote_feat_complete_evt(hdev, skb);
5793 case HCI_EV_LE_LTK_REQ:
5794 hci_le_ltk_request_evt(hdev, skb);
5797 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5798 hci_le_remote_conn_param_req_evt(hdev, skb);
5801 case HCI_EV_LE_DIRECT_ADV_REPORT:
5802 hci_le_direct_adv_report_evt(hdev, skb);
5805 case HCI_EV_LE_EXT_ADV_REPORT:
5806 hci_le_ext_adv_report_evt(hdev, skb);
5809 case HCI_EV_LE_ENHANCED_CONN_COMPLETE:
5810 hci_le_enh_conn_complete_evt(hdev, skb);
5813 case HCI_EV_LE_EXT_ADV_SET_TERM:
5814 hci_le_ext_adv_term_evt(hdev, skb);
5822 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5823 u8 event, struct sk_buff *skb)
5825 struct hci_ev_cmd_complete *ev;
5826 struct hci_event_hdr *hdr;
5831 if (skb->len < sizeof(*hdr)) {
5832 bt_dev_err(hdev, "too short HCI event");
5836 hdr = (void *) skb->data;
5837 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5840 if (hdr->evt != event)
5845 /* Check if request ended in Command Status - no way to retreive
5846 * any extra parameters in this case.
5848 if (hdr->evt == HCI_EV_CMD_STATUS)
5851 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5852 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
5857 if (skb->len < sizeof(*ev)) {
5858 bt_dev_err(hdev, "too short cmd_complete event");
5862 ev = (void *) skb->data;
5863 skb_pull(skb, sizeof(*ev));
5865 if (opcode != __le16_to_cpu(ev->opcode)) {
5866 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5867 __le16_to_cpu(ev->opcode));
5874 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5876 struct hci_event_hdr *hdr = (void *) skb->data;
5877 hci_req_complete_t req_complete = NULL;
5878 hci_req_complete_skb_t req_complete_skb = NULL;
5879 struct sk_buff *orig_skb = NULL;
5880 u8 status = 0, event = hdr->evt, req_evt = 0;
5881 u16 opcode = HCI_OP_NOP;
5883 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
5884 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5885 opcode = __le16_to_cpu(cmd_hdr->opcode);
5886 hci_req_cmd_complete(hdev, opcode, status, &req_complete,
5891 /* If it looks like we might end up having to call
5892 * req_complete_skb, store a pristine copy of the skb since the
5893 * various handlers may modify the original one through
5894 * skb_pull() calls, etc.
5896 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
5897 event == HCI_EV_CMD_COMPLETE)
5898 orig_skb = skb_clone(skb, GFP_KERNEL);
5900 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5903 case HCI_EV_INQUIRY_COMPLETE:
5904 hci_inquiry_complete_evt(hdev, skb);
5907 case HCI_EV_INQUIRY_RESULT:
5908 hci_inquiry_result_evt(hdev, skb);
5911 case HCI_EV_CONN_COMPLETE:
5912 hci_conn_complete_evt(hdev, skb);
5915 case HCI_EV_CONN_REQUEST:
5916 hci_conn_request_evt(hdev, skb);
5919 case HCI_EV_DISCONN_COMPLETE:
5920 hci_disconn_complete_evt(hdev, skb);
5923 case HCI_EV_AUTH_COMPLETE:
5924 hci_auth_complete_evt(hdev, skb);
5927 case HCI_EV_REMOTE_NAME:
5928 hci_remote_name_evt(hdev, skb);
5931 case HCI_EV_ENCRYPT_CHANGE:
5932 hci_encrypt_change_evt(hdev, skb);
5935 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5936 hci_change_link_key_complete_evt(hdev, skb);
5939 case HCI_EV_REMOTE_FEATURES:
5940 hci_remote_features_evt(hdev, skb);
5943 case HCI_EV_CMD_COMPLETE:
5944 hci_cmd_complete_evt(hdev, skb, &opcode, &status,
5945 &req_complete, &req_complete_skb);
5948 case HCI_EV_CMD_STATUS:
5949 hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
5953 case HCI_EV_HARDWARE_ERROR:
5954 hci_hardware_error_evt(hdev, skb);
5957 case HCI_EV_ROLE_CHANGE:
5958 hci_role_change_evt(hdev, skb);
5961 case HCI_EV_NUM_COMP_PKTS:
5962 hci_num_comp_pkts_evt(hdev, skb);
5965 case HCI_EV_MODE_CHANGE:
5966 hci_mode_change_evt(hdev, skb);
5969 case HCI_EV_PIN_CODE_REQ:
5970 hci_pin_code_request_evt(hdev, skb);
5973 case HCI_EV_LINK_KEY_REQ:
5974 hci_link_key_request_evt(hdev, skb);
5977 case HCI_EV_LINK_KEY_NOTIFY:
5978 hci_link_key_notify_evt(hdev, skb);
5981 case HCI_EV_CLOCK_OFFSET:
5982 hci_clock_offset_evt(hdev, skb);
5985 case HCI_EV_PKT_TYPE_CHANGE:
5986 hci_pkt_type_change_evt(hdev, skb);
5989 case HCI_EV_PSCAN_REP_MODE:
5990 hci_pscan_rep_mode_evt(hdev, skb);
5993 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5994 hci_inquiry_result_with_rssi_evt(hdev, skb);
5997 case HCI_EV_REMOTE_EXT_FEATURES:
5998 hci_remote_ext_features_evt(hdev, skb);
6001 case HCI_EV_SYNC_CONN_COMPLETE:
6002 hci_sync_conn_complete_evt(hdev, skb);
6005 case HCI_EV_EXTENDED_INQUIRY_RESULT:
6006 hci_extended_inquiry_result_evt(hdev, skb);
6009 case HCI_EV_KEY_REFRESH_COMPLETE:
6010 hci_key_refresh_complete_evt(hdev, skb);
6013 case HCI_EV_IO_CAPA_REQUEST:
6014 hci_io_capa_request_evt(hdev, skb);
6017 case HCI_EV_IO_CAPA_REPLY:
6018 hci_io_capa_reply_evt(hdev, skb);
6021 case HCI_EV_USER_CONFIRM_REQUEST:
6022 hci_user_confirm_request_evt(hdev, skb);
6025 case HCI_EV_USER_PASSKEY_REQUEST:
6026 hci_user_passkey_request_evt(hdev, skb);
6029 case HCI_EV_USER_PASSKEY_NOTIFY:
6030 hci_user_passkey_notify_evt(hdev, skb);
6033 case HCI_EV_KEYPRESS_NOTIFY:
6034 hci_keypress_notify_evt(hdev, skb);
6037 case HCI_EV_SIMPLE_PAIR_COMPLETE:
6038 hci_simple_pair_complete_evt(hdev, skb);
6041 case HCI_EV_REMOTE_HOST_FEATURES:
6042 hci_remote_host_features_evt(hdev, skb);
6045 case HCI_EV_LE_META:
6046 hci_le_meta_evt(hdev, skb);
6049 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
6050 hci_remote_oob_data_request_evt(hdev, skb);
6053 #if IS_ENABLED(CONFIG_BT_HS)
6054 case HCI_EV_CHANNEL_SELECTED:
6055 hci_chan_selected_evt(hdev, skb);
6058 case HCI_EV_PHY_LINK_COMPLETE:
6059 hci_phy_link_complete_evt(hdev, skb);
6062 case HCI_EV_LOGICAL_LINK_COMPLETE:
6063 hci_loglink_complete_evt(hdev, skb);
6066 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
6067 hci_disconn_loglink_complete_evt(hdev, skb);
6070 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
6071 hci_disconn_phylink_complete_evt(hdev, skb);
6075 case HCI_EV_NUM_COMP_BLOCKS:
6076 hci_num_comp_blocks_evt(hdev, skb);
6080 BT_DBG("%s event 0x%2.2x", hdev->name, event);
6085 req_complete(hdev, status, opcode);
6086 } else if (req_complete_skb) {
6087 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
6088 kfree_skb(orig_skb);
6091 req_complete_skb(hdev, status, opcode, orig_skb);
6094 kfree_skb(orig_skb);
6096 hdev->stat.evt_rx++;