2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42 "\x00\x00\x00\x00\x00\x00\x00\x00"
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
46 /* Handle HCI Event packets */
/* Pull 'len' bytes of an HCI event payload off the skb; a NULL return
 * from skb_pull_data() means the event was shorter than expected and a
 * malformed-event error is logged.
 * NOTE(review): this extract is missing interior lines (braces, the
 * length-check 'if', and return statements were dropped by extraction);
 * code lines below are kept byte-identical, comments only added.
 */
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
53 data = skb_pull_data(skb, len);
55 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
/* Same pull helper for Command Complete payloads; logs the 16-bit opcode */
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
65 data = skb_pull_data(skb, len);
67 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
/* Same pull helper for LE meta events; logs the LE sub-event code */
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
77 data = skb_pull_data(skb, len);
79 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
/* Command Complete for HCI_Inquiry_Cancel.
 * NOTE(review): extraction dropped interior lines (braces, lock calls,
 * returns); code lines are byte-identical, comments only added.
 */
84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
87 struct hci_ev_status *rp = data;
89 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
91 /* It is possible that we receive Inquiry Complete event right
92 * before we receive Inquiry Cancel Command Complete event, in
93 * which case the latter event should have status of Command
94 * Disallowed (0x0c). This should not be treated as error, since
95 * we actually achieve what Inquiry Cancel wants to achieve,
96 * which is to end the last Inquiry session.
/* 0x0c == HCI "Command Disallowed"; tolerated when inquiry already ended */
98 if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
99 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
106 clear_bit(HCI_INQUIRY, &hdev->flags);
107 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
108 wake_up_bit(&hdev->flags, HCI_INQUIRY);
111 /* Set discovery state to stopped if we're not doing LE active
114 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
115 hdev->le_scan_type != LE_SCAN_ACTIVE)
116 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
117 hci_dev_unlock(hdev);
119 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Periodic_Inquiry_Mode: sets the periodic-inquiry
 * device flag (status check line missing from this extract).
 */
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
127 struct hci_ev_status *rp = data;
129 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
134 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete for HCI_Exit_Periodic_Inquiry_Mode: clears the flag and
 * re-checks pending connection attempts.
 */
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
142 struct hci_ev_status *rp = data;
144 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
149 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
151 hci_conn_check_pending(hdev);
/* Command Complete for HCI_Remote_Name_Request_Cancel: only logs status
 * here (any further handling is outside this extract).
 */
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
159 struct hci_ev_status *rp = data;
161 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Command Complete for HCI_Role_Discovery: record the reported role on the
 * connection matching the returned handle.
 * NOTE(review): braces/locking/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
169 struct hci_rp_role_discovery *rp = data;
170 struct hci_conn *conn;
172 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
179 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
181 conn->role = rp->role;
183 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Link_Policy_Settings: store the returned
 * policy on the matching connection.
 */
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
191 struct hci_rp_read_link_policy *rp = data;
192 struct hci_conn *conn;
194 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
201 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
203 conn->link_policy = __le16_to_cpu(rp->policy);
205 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Link_Policy_Settings: the new policy is
 * not echoed in the response, so it is recovered from the sent command
 * parameters (policy field at offset 2, after the 16-bit handle).
 */
210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
213 struct hci_rp_write_link_policy *rp = data;
214 struct hci_conn *conn;
217 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
228 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
230 conn->link_policy = get_unaligned_le16(sent + 2);
232 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Default_Link_Policy_Settings */
237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
240 struct hci_rp_read_def_link_policy *rp = data;
242 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
247 hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete for HCI_Write_Default_Link_Policy_Settings: value taken
 * from the sent command since the event carries only a status.
 */
252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
255 struct hci_ev_status *rp = data;
258 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
263 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
267 hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete for HCI_Reset: return the device to a known-clean state.
 * Clears volatile flags, discovery state, cached TX power, advertising and
 * scan-response data, and the LE accept/resolving lists.
 * NOTE(review): some lines (braces, status check, return) are missing from
 * this extract; code lines are byte-identical, comments only added.
 */
272 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
274 struct hci_ev_status *rp = data;
276 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
278 clear_bit(HCI_RESET, &hdev->flags);
283 /* Reset all non-persistent flags */
284 hci_dev_clear_volatile_flags(hdev);
286 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* TX power caches are invalidated; re-read lazily after reset */
288 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
289 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
291 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
292 hdev->adv_data_len = 0;
294 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
295 hdev->scan_rsp_data_len = 0;
297 hdev->le_scan_type = LE_SCAN_PASSIVE;
299 hdev->ssp_debug_mode = 0;
301 hci_bdaddr_list_clear(&hdev->le_accept_list);
302 hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete for HCI_Read_Stored_Link_Key: cache max/num key counts,
 * but only when the original command asked for all keys (read_all == 0x01).
 * NOTE(review): interior lines are missing from this extract; code lines
 * are byte-identical, comments only added.
 */
307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
310 struct hci_rp_read_stored_link_key *rp = data;
311 struct hci_cp_read_stored_link_key *sent;
313 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
315 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
319 if (!rp->status && sent->read_all == 0x01) {
320 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
321 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
/* Command Complete for HCI_Delete_Stored_Link_Key: decrement the cached
 * key count, clamping at zero so it never underflows.
 */
327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
330 struct hci_rp_delete_stored_link_key *rp = data;
333 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
338 num_keys = le16_to_cpu(rp->num_keys);
340 if (num_keys <= hdev->stored_num_keys)
341 hdev->stored_num_keys -= num_keys;
343 hdev->stored_num_keys = 0;
/* Command Complete for HCI_Write_Local_Name: notify mgmt when it is in
 * control, otherwise cache the name that was sent (on success).
 * NOTE(review): lock/brace/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
348 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
351 struct hci_ev_status *rp = data;
354 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
362 if (hci_dev_test_flag(hdev, HCI_MGMT))
363 mgmt_set_local_name_complete(hdev, sent, rp->status)
364 else if (!rp->status)
365 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
367 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Name: only cached during initial
 * setup/config, when the controller is the source of truth.
 */
372 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
375 struct hci_rp_read_local_name *rp = data;
377 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
382 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
383 hci_dev_test_flag(hdev, HCI_CONFIG))
384 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete for HCI_Write_Authentication_Enable: mirror the sent
 * parameter into the HCI_AUTH device flag and notify mgmt.
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
389 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
392 struct hci_ev_status *rp = data;
395 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
397 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
404 __u8 param = *((__u8 *) sent);
406 if (param == AUTH_ENABLED)
407 set_bit(HCI_AUTH, &hdev->flags);
409 clear_bit(HCI_AUTH, &hdev->flags);
412 if (hci_dev_test_flag(hdev, HCI_MGMT))
413 mgmt_auth_enable_complete(hdev, rp->status);
415 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Encryption_Mode: mirror the sent parameter
 * into the HCI_ENCRYPT device flag.
 */
420 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
423 struct hci_ev_status *rp = data;
427 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
432 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
436 param = *((__u8 *) sent);
439 set_bit(HCI_ENCRYPT, &hdev->flags);
441 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete for HCI_Write_Scan_Enable: mirror the SCAN_INQUIRY and
 * SCAN_PAGE bits of the sent parameter into HCI_ISCAN/HCI_PSCAN, and reset
 * the discoverable timeout.
 */
446 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
449 struct hci_ev_status *rp = data;
453 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
455 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
459 param = *((__u8 *) sent);
464 hdev->discov_timeout = 0;
468 if (param & SCAN_INQUIRY)
469 set_bit(HCI_ISCAN, &hdev->flags);
471 clear_bit(HCI_ISCAN, &hdev->flags);
473 if (param & SCAN_PAGE)
474 set_bit(HCI_PSCAN, &hdev->flags);
476 clear_bit(HCI_PSCAN, &hdev->flags);
479 hci_dev_unlock(hdev);
/* Command Complete for HCI_Set_Event_Filter: track whether a non-trivial
 * event filter is configured (cleared on HCI_FLT_CLEAR_ALL).
 * NOTE(review): brace/status-check/return lines are missing from this
 * extract; code lines are byte-identical, comments only added.
 */
484 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
487 struct hci_ev_status *rp = data;
488 struct hci_cp_set_event_filter *cp;
491 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
496 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
500 cp = (struct hci_cp_set_event_filter *)sent;
502 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
503 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
505 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete for HCI_Read_Class_of_Device: cache the 3-byte CoD */
510 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
513 struct hci_rp_read_class_of_dev *rp = data;
515 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
520 memcpy(hdev->dev_class, rp->dev_class, 3);
522 bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
523 hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete for HCI_Write_Class_of_Device: cache the CoD that was
 * sent (on success) and notify mgmt when it is in control.
 */
528 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
531 struct hci_ev_status *rp = data;
534 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
536 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
543 memcpy(hdev->dev_class, sent, 3);
545 if (hci_dev_test_flag(hdev, HCI_MGMT))
546 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
548 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Voice_Setting: cache the value and, when it
 * changed, notify the driver via hdev->notify (if one is registered —
 * presumably guarded by a NULL check on a line missing from this extract).
 */
553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
556 struct hci_rp_read_voice_setting *rp = data;
559 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
564 setting = __le16_to_cpu(rp->voice_setting);
/* No change — nothing to propagate */
566 if (hdev->voice_setting == setting)
569 hdev->voice_setting = setting;
571 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
574 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Write_Voice_Setting: the value is not echoed in
 * the response, so it is recovered from the sent command parameters.
 */
579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
582 struct hci_ev_status *rp = data;
586 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
591 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
595 setting = get_unaligned_le16(sent);
597 if (hdev->voice_setting == setting)
600 hdev->voice_setting = setting;
602 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
605 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete for HCI_Read_Number_Of_Supported_IAC */
610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
613 struct hci_rp_read_num_supported_iac *rp = data;
615 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
620 hdev->num_iac = rp->num_iac;
622 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
/* Command Complete for HCI_Write_Simple_Pairing_Mode: mirror the sent mode
 * into the LMP host-feature bit and the HCI_SSP_ENABLED flag.
 * NOTE(review): the conditions selecting set vs. clear are on lines missing
 * from this extract; code lines are byte-identical, comments only added.
 */
627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
630 struct hci_ev_status *rp = data;
631 struct hci_cp_write_ssp_mode *sent;
633 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
635 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
643 hdev->features[1][0] |= LMP_HOST_SSP;
645 hdev->features[1][0] &= ~LMP_HOST_SSP;
650 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
652 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
655 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Secure_Connections_Host_Support: same
 * mirroring pattern for the LMP_HOST_SC bit and HCI_SC_ENABLED flag; the
 * flag is only touched when mgmt is not in control.
 */
660 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
663 struct hci_ev_status *rp = data;
664 struct hci_cp_write_sc_support *sent;
666 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
668 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
676 hdev->features[1][0] |= LMP_HOST_SC;
678 hdev->features[1][0] &= ~LMP_HOST_SC;
681 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
683 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
685 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
688 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Version_Information: cached only
 * during setup/config so a runtime re-read cannot clobber trusted values.
 * NOTE(review): brace/status-check/return lines are missing from this
 * extract; code lines are byte-identical, comments only added.
 */
693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
696 struct hci_rp_read_local_version *rp = data;
698 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
703 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
704 hci_dev_test_flag(hdev, HCI_CONFIG)) {
705 hdev->hci_ver = rp->hci_ver;
706 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
707 hdev->lmp_ver = rp->lmp_ver;
708 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
709 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete for HCI_Read_Encryption_Key_Size: on failure the key
 * size is forced to 0 (most secure assumption); either way the encryption
 * change is confirmed to upper layers via hci_encrypt_cfm().
 */
715 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
718 struct hci_rp_read_enc_key_size *rp = data;
719 struct hci_conn *conn;
721 u8 status = rp->status;
723 bt_dev_dbg(hdev, "status 0x%2.2x", status);
725 handle = le16_to_cpu(rp->handle);
729 conn = hci_conn_hash_lookup_handle(hdev, handle);
735 /* While unexpected, the read_enc_key_size command may fail. The most
736 * secure approach is to then assume the key size is 0 to force a
740 bt_dev_err(hdev, "failed to read key size for handle %u",
742 conn->enc_key_size = 0;
744 conn->enc_key_size = rp->key_size;
748 hci_encrypt_cfm(conn, 0);
751 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Supported_Commands: cached only
 * during setup/config, same rationale as the version read above.
 */
756 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
759 struct hci_rp_read_local_commands *rp = data;
761 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
766 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
767 hci_dev_test_flag(hdev, HCI_CONFIG))
768 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete for HCI_Read_Authenticated_Payload_Timeout: store the
 * timeout on the connection matching the returned handle.
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
773 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
776 struct hci_rp_read_auth_payload_to *rp = data;
777 struct hci_conn *conn;
779 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
786 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
788 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
790 hci_dev_unlock(hdev);
/* Command Complete for HCI_Write_Authenticated_Payload_Timeout: the value
 * is recovered from the sent command (timeout at offset 2, after the
 * 16-bit handle) since the event carries only status + handle.
 */
795 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
798 struct hci_rp_write_auth_payload_to *rp = data;
799 struct hci_conn *conn;
802 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
807 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
813 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
815 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
817 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_Supported_Features: cache the 8-byte
 * feature mask and derive the supported ACL packet types and (e)SCO link
 * types from the advertised LMP feature bits.
 * NOTE(review): brace/status-check/return lines are missing from this
 * extract; code lines are byte-identical, comments only added.
 */
822 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
825 struct hci_rp_read_local_features *rp = data;
827 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
832 memcpy(hdev->features, rp->features, 8);
834 /* Adjust default settings according to features
835 * supported by device. */
837 if (hdev->features[0][0] & LMP_3SLOT)
838 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
840 if (hdev->features[0][0] & LMP_5SLOT)
841 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
843 if (hdev->features[0][1] & LMP_HV2) {
844 hdev->pkt_type |= (HCI_HV2);
845 hdev->esco_type |= (ESCO_HV2);
848 if (hdev->features[0][1] & LMP_HV3) {
849 hdev->pkt_type |= (HCI_HV3);
850 hdev->esco_type |= (ESCO_HV3);
853 if (lmp_esco_capable(hdev))
854 hdev->esco_type |= (ESCO_EV3);
856 if (hdev->features[0][4] & LMP_EV4)
857 hdev->esco_type |= (ESCO_EV4);
859 if (hdev->features[0][4] & LMP_EV5)
860 hdev->esco_type |= (ESCO_EV5);
862 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
863 hdev->esco_type |= (ESCO_2EV3);
865 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
866 hdev->esco_type |= (ESCO_3EV3);
868 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
869 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete for HCI_Read_Local_Extended_Features: cache the page's
 * feature bits; a controller reporting more pages than we track is either
 * quirked (broken page 2) or raises our max_page.
 */
874 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
877 struct hci_rp_read_local_ext_features *rp = data;
879 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
884 if (hdev->max_page < rp->max_page) {
885 if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2,
887 bt_dev_warn(hdev, "broken local ext features page 2");
889 hdev->max_page = rp->max_page;
/* Guard against an out-of-range page index from the controller */
892 if (rp->page < HCI_MAX_PAGES)
893 memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete for HCI_Read_Flow_Control_Mode */
898 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
901 struct hci_rp_read_flow_control_mode *rp = data;
903 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
908 hdev->flow_ctl_mode = rp->mode;
/* Command Complete for HCI_Read_Buffer_Size: cache ACL/SCO MTUs and packet
 * counts and initialize the available-packet counters from them. A quirk
 * path exists for controllers reporting broken sizes (its body is on lines
 * missing from this extract).
 */
913 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
916 struct hci_rp_read_buffer_size *rp = data;
918 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
923 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
924 hdev->sco_mtu = rp->sco_mtu;
925 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
926 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
928 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
933 hdev->acl_cnt = hdev->acl_pkts;
934 hdev->sco_cnt = hdev->sco_pkts;
936 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
937 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete for HCI_Read_BD_ADDR: only accepted while HCI_INIT is
 * in progress; also snapshot the address read at setup time.
 */
942 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
945 struct hci_rp_read_bd_addr *rp = data;
947 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
952 if (test_bit(HCI_INIT, &hdev->flags))
953 bacpy(&hdev->bdaddr, &rp->bdaddr);
955 if (hci_dev_test_flag(hdev, HCI_SETUP))
956 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete for HCI_Read_Local_Pairing_Options: cached only during
 * setup/config.
 */
961 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
964 struct hci_rp_read_local_pairing_opts *rp = data;
966 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
971 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
972 hci_dev_test_flag(hdev, HCI_CONFIG)) {
973 hdev->pairing_opts = rp->pairing_opts;
974 hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete for HCI_Read_Page_Scan_Activity: only accepted while
 * HCI_INIT is in progress.
 * NOTE(review): brace/status-check/return lines are missing from this
 * extract; code lines are byte-identical, comments only added.
 */
980 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
983 struct hci_rp_read_page_scan_activity *rp = data;
985 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
990 if (test_bit(HCI_INIT, &hdev->flags)) {
991 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
992 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete for HCI_Write_Page_Scan_Activity: values recovered from
 * the sent command parameters.
 */
998 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1001 struct hci_ev_status *rp = data;
1002 struct hci_cp_write_page_scan_activity *sent;
1004 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1009 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1013 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1014 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete for HCI_Read_Page_Scan_Type: init-time only */
1019 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1020 struct sk_buff *skb)
1022 struct hci_rp_read_page_scan_type *rp = data;
1024 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1029 if (test_bit(HCI_INIT, &hdev->flags))
1030 hdev->page_scan_type = rp->type;
/* Command Complete for HCI_Write_Page_Scan_Type: the single-byte type is
 * recovered from the sent command.
 */
1035 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1036 struct sk_buff *skb)
1038 struct hci_ev_status *rp = data;
1041 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1046 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1048 hdev->page_scan_type = *type;
/* Command Complete for HCI_Read_Data_Block_Size (block-based flow control):
 * cache block MTU/length/count and initialize the free-block counter.
 */
1053 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1054 struct sk_buff *skb)
1056 struct hci_rp_read_data_block_size *rp = data;
1058 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1063 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1064 hdev->block_len = __le16_to_cpu(rp->block_len);
1065 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1067 hdev->block_cnt = hdev->num_blocks;
1069 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1070 hdev->block_cnt, hdev->block_len);
/* Command Complete for HCI_Read_Clock: 'which' == 0x00 in the sent command
 * means the local clock (stored on hdev); otherwise the piconet clock of
 * the connection matching the returned handle is updated.
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
1075 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1076 struct sk_buff *skb)
1078 struct hci_rp_read_clock *rp = data;
1079 struct hci_cp_read_clock *cp;
1080 struct hci_conn *conn;
1082 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1089 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1093 if (cp->which == 0x00) {
1094 hdev->clock = le32_to_cpu(rp->clock);
1098 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1100 conn->clock = le32_to_cpu(rp->clock);
1101 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1105 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_AMP_Info: cache all AMP controller
 * capability fields.
 */
1109 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1110 struct sk_buff *skb)
1112 struct hci_rp_read_local_amp_info *rp = data;
1114 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1119 hdev->amp_status = rp->amp_status;
1120 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1121 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1122 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1123 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1124 hdev->amp_type = rp->amp_type;
1125 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1126 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1127 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1128 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete for HCI_Read_Inquiry_Response_TX_Power_Level.
 * NOTE(review): brace/status-check/return lines are missing from this
 * extract; code lines are byte-identical, comments only added.
 */
1133 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1134 struct sk_buff *skb)
1136 struct hci_rp_read_inq_rsp_tx_power *rp = data;
1138 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1143 hdev->inq_tx_power = rp->tx_power;
/* Command Complete for HCI_Read_Default_Erroneous_Data_Reporting */
1148 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1149 struct sk_buff *skb)
1151 struct hci_rp_read_def_err_data_reporting *rp = data;
1153 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1158 hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete for HCI_Write_Default_Erroneous_Data_Reporting: value
 * recovered from the sent command parameters.
 */
1163 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1164 struct sk_buff *skb)
1166 struct hci_ev_status *rp = data;
1167 struct hci_cp_write_def_err_data_reporting *cp;
1169 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1174 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1178 hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete for HCI_PIN_Code_Request_Reply: notify mgmt, then on
 * success record the PIN length on the ACL connection to the peer.
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
1183 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1184 struct sk_buff *skb)
1186 struct hci_rp_pin_code_reply *rp = data;
1187 struct hci_cp_pin_code_reply *cp;
1188 struct hci_conn *conn;
1190 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1194 if (hci_dev_test_flag(hdev, HCI_MGMT))
1195 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1200 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1204 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1206 conn->pin_length = cp->pin_len;
1209 hci_dev_unlock(hdev);
/* Command Complete for HCI_PIN_Code_Request_Negative_Reply: mgmt
 * notification only.
 */
1213 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1214 struct sk_buff *skb)
1216 struct hci_rp_pin_code_neg_reply *rp = data;
1218 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1222 if (hci_dev_test_flag(hdev, HCI_MGMT))
1223 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1226 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Buffer_Size: cache LE ACL MTU and packet
 * count and initialize the available-packet counter.
 * NOTE(review): brace/status-check/return lines are missing from this
 * extract; code lines are byte-identical, comments only added.
 */
1231 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1232 struct sk_buff *skb)
1234 struct hci_rp_le_read_buffer_size *rp = data;
1236 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1241 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1242 hdev->le_pkts = rp->le_max_pkt;
1244 hdev->le_cnt = hdev->le_pkts;
1246 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete for HCI_LE_Read_Local_Supported_Features */
1251 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1252 struct sk_buff *skb)
1254 struct hci_rp_le_read_local_features *rp = data;
1256 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1261 memcpy(hdev->le_features, rp->features, 8);
/* Command Complete for HCI_LE_Read_Advertising_Channel_TX_Power */
1266 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1267 struct sk_buff *skb)
1269 struct hci_rp_le_read_adv_tx_power *rp = data;
1271 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1276 hdev->adv_tx_power = rp->tx_power;
/* Command Complete handlers for the four user confirm/passkey (negative)
 * replies: each just forwards the result to mgmt when it is in control.
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
1281 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1282 struct sk_buff *skb)
1284 struct hci_rp_user_confirm_reply *rp = data;
1286 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1290 if (hci_dev_test_flag(hdev, HCI_MGMT))
1291 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1294 hci_dev_unlock(hdev);
1299 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1300 struct sk_buff *skb)
1302 struct hci_rp_user_confirm_reply *rp = data;
1304 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1308 if (hci_dev_test_flag(hdev, HCI_MGMT))
1309 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1310 ACL_LINK, 0, rp->status);
1312 hci_dev_unlock(hdev);
1317 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1318 struct sk_buff *skb)
1320 struct hci_rp_user_confirm_reply *rp = data;
1322 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1326 if (hci_dev_test_flag(hdev, HCI_MGMT))
1327 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1330 hci_dev_unlock(hdev);
1335 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1336 struct sk_buff *skb)
1338 struct hci_rp_user_confirm_reply *rp = data;
1340 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1344 if (hci_dev_test_flag(hdev, HCI_MGMT))
1345 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1346 ACL_LINK, 0, rp->status);
1348 hci_dev_unlock(hdev);
/* Command Complete for HCI_Read_Local_OOB_Data: only the debug line is
 * visible here; any further handling is on lines missing from this extract.
 */
1353 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1354 struct sk_buff *skb)
1356 struct hci_rp_read_local_oob_data *rp = data;
1358 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Command Complete for HCI_Read_Local_OOB_Extended_Data: same shape */
1363 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1364 struct sk_buff *skb)
1366 struct hci_rp_read_local_oob_ext_data *rp = data;
1368 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Command Complete for HCI_LE_Set_Random_Address: record the new random
 * address; if it equals the current RPA, clear HCI_RPA_EXPIRED and re-arm
 * the RPA rotation timer.
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
1373 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1374 struct sk_buff *skb)
1376 struct hci_ev_status *rp = data;
1379 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1384 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1390 bacpy(&hdev->random_addr, sent);
1392 if (!bacmp(&hdev->rpa, sent)) {
1393 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1394 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1395 secs_to_jiffies(hdev->rpa_timeout));
1398 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Default_PHY: mirror the sent TX/RX PHY
 * preferences into the device state.
 */
1403 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1404 struct sk_buff *skb)
1406 struct hci_ev_status *rp = data;
1407 struct hci_cp_le_set_default_phy *cp;
1409 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1414 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1420 hdev->le_tx_def_phys = cp->tx_phys;
1421 hdev->le_rx_def_phys = cp->rx_phys;
1423 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertising_Set_Random_Address: update
 * the per-instance random address; instance 0x00 is deliberately skipped
 * (it goes through HCI_OP_LE_SET_RANDOM_ADDR instead, see comment below).
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
1428 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1429 struct sk_buff *skb)
1431 struct hci_ev_status *rp = data;
1432 struct hci_cp_le_set_adv_set_rand_addr *cp;
1433 struct adv_info *adv;
1435 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1440 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1441 /* Update only in case the adv instance since handle 0x00 shall be using
1442 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1443 * non-extended adverting.
1445 if (!cp || !cp->handle)
1450 adv = hci_find_adv_instance(hdev, cp->handle);
1452 bacpy(&adv->random_addr, &cp->bdaddr);
/* Address equals the RPA: restart this instance's RPA expiry timer */
1453 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1454 adv->rpa_expired = false;
1455 queue_delayed_work(hdev->workqueue,
1456 &adv->rpa_expired_cb,
1457 secs_to_jiffies(hdev->rpa_timeout));
1461 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Remove_Advertising_Set: drop the instance
 * from our state and tell mgmt it is gone.
 */
1466 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1467 struct sk_buff *skb)
1469 struct hci_ev_status *rp = data;
1473 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1478 instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1484 err = hci_remove_adv_instance(hdev, *instance);
1486 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1489 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Clear_Advertising_Sets: remove every known
 * advertising instance and notify mgmt for each (safe iteration since the
 * list is mutated while walking).
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
1494 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1495 struct sk_buff *skb)
1497 struct hci_ev_status *rp = data;
1498 struct adv_info *adv, *n;
1501 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Only act if this Command Complete matches a command we actually sent */
1506 if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1511 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1512 u8 instance = adv->instance;
1514 err = hci_remove_adv_instance(hdev, instance);
1516 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1520 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Read_Transmit_Power: cache min/max LE TX
 * power levels.
 */
1525 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1526 struct sk_buff *skb)
1528 struct hci_rp_le_read_transmit_power *rp = data;
1530 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1535 hdev->min_le_tx_power = rp->min_le_tx_power;
1536 hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete for HCI_LE_Set_Privacy_Mode: record the mode on the
 * connection-parameters entry for the addressed peer.
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
1541 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1542 struct sk_buff *skb)
1544 struct hci_ev_status *rp = data;
1545 struct hci_cp_le_set_privacy_mode *cp;
1546 struct hci_conn_params *params;
1548 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1553 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1559 params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1561 params->privacy_mode = cp->mode;
1563 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Advertising_Enable (legacy advertising):
 * set/clear HCI_LE_ADV and, when enabling as part of peripheral connection
 * initiation, arm the connection timeout (the enable/disable branch
 * condition is on lines missing from this extract).
 */
1568 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1569 struct sk_buff *skb)
1571 struct hci_ev_status *rp = data;
1574 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1579 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1585 /* If we're doing connection initiation as peripheral. Set a
1586 * timeout in case something goes wrong.
1589 struct hci_conn *conn;
1591 hci_dev_set_flag(hdev, HCI_LE_ADV);
1593 conn = hci_lookup_le_connect(hdev);
1595 queue_delayed_work(hdev->workqueue,
1596 &conn->le_conn_timeout,
1597 conn->conn_timeout);
1599 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1602 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Advertising_Enable: update the
 * enabled state of the targeted adv instance(s), maintain HCI_LE_ADV, and
 * arm the peripheral connection timeout when enabling for connection
 * initiation. When disabling a single set, HCI_LE_ADV is only cleared if
 * no other instance remains enabled; disabling all sets marks every
 * instance disabled.
 * NOTE(review): several structural lines (braces, the enable/disable
 * branch condition, loop bodies) are missing from this extract; code lines
 * are byte-identical, comments only added.
 */
1607 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1608 struct sk_buff *skb)
1610 struct hci_cp_le_set_ext_adv_enable *cp;
1611 struct hci_cp_ext_adv_set *set;
1612 struct adv_info *adv = NULL, *n;
1613 struct hci_ev_status *rp = data;
1615 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1620 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
/* The per-set parameters follow the fixed header in the sent command */
1624 set = (void *)cp->data;
1628 if (cp->num_of_sets)
1629 adv = hci_find_adv_instance(hdev, set->handle);
1632 struct hci_conn *conn;
1634 hci_dev_set_flag(hdev, HCI_LE_ADV);
1637 adv->enabled = true;
1639 conn = hci_lookup_le_connect(hdev);
1641 queue_delayed_work(hdev->workqueue,
1642 &conn->le_conn_timeout,
1643 conn->conn_timeout);
1645 if (cp->num_of_sets) {
1647 adv->enabled = false;
1649 /* If just one instance was disabled check if there are
1650 * any other instance enabled before clearing HCI_LE_ADV
1652 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1658 /* All instances shall be considered disabled */
1659 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1661 adv->enabled = false;
1664 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1668 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Scan_Parameters: remember the requested
 * scan type (active/passive) from the sent command.
 * NOTE(review): brace/lock/return lines are missing from this extract;
 * code lines are byte-identical, comments only added.
 */
1672 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1673 struct sk_buff *skb)
1675 struct hci_cp_le_set_scan_param *cp;
1676 struct hci_ev_status *rp = data;
1678 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1683 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1689 hdev->le_scan_type = cp->type;
1691 hci_dev_unlock(hdev);
/* Command Complete for HCI_LE_Set_Extended_Scan_Parameters: the scan type
 * is taken from the first per-PHY parameter block after the fixed header.
 */
1696 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1697 struct sk_buff *skb)
1699 struct hci_cp_le_set_ext_scan_params *cp;
1700 struct hci_ev_status *rp = data;
1701 struct hci_cp_le_scan_phy_params *phy_param;
1703 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1708 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1712 phy_param = (void *)cp->data;
1716 hdev->le_scan_type = phy_param->type;
1718 hci_dev_unlock(hdev);
1723 static bool has_pending_adv_report(struct hci_dev *hdev)
1725 struct discovery_state *d = &hdev->discovery;
1727 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1730 static void clear_pending_adv_report(struct hci_dev *hdev)
1732 struct discovery_state *d = &hdev->discovery;
1734 bacpy(&d->last_adv_addr, BDADDR_ANY);
1735 d->last_adv_data_len = 0;
/* Buffer one advertising report in hdev->discovery so it can be paired with
 * a later scan-response (or flushed when scanning stops). Reports larger
 * than HCI_MAX_AD_LENGTH are rejected to keep memcpy() in bounds.
 */
1738 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1739 u8 bdaddr_type, s8 rssi, u32 flags,
1742 struct discovery_state *d = &hdev->discovery;
1744 if (len > HCI_MAX_AD_LENGTH)
1747 bacpy(&d->last_adv_addr, bdaddr);
1748 d->last_adv_addr_type = bdaddr_type;
1749 d->last_adv_rssi = rssi;
1750 d->last_adv_flags = flags;
1751 memcpy(d->last_adv_data, data, len);
1752 d->last_adv_data_len = len;
/* Common completion logic for LE scan enable/disable (legacy and extended).
 * Updates HCI_LE_SCAN, flushes any buffered advertising report on disable,
 * and transitions the mgmt discovery state machine.
 * NOTE(review): elided excerpt — switch(), braces and default-case lines are
 * not all visible here.
 */
1755 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1760 case LE_SCAN_ENABLE:
1761 hci_dev_set_flag(hdev, HCI_LE_SCAN);
/* Active scanning starts fresh: no stale buffered report may survive. */
1762 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1763 clear_pending_adv_report(hdev);
1764 if (hci_dev_test_flag(hdev, HCI_MESH))
1765 hci_discovery_set_state(hdev, DISCOVERY_FINDING)
1768 case LE_SCAN_DISABLE:
1769 /* We do this here instead of when setting DISCOVERY_STOPPED
1770 * since the latter would potentially require waiting for
1771 * inquiry to stop too.
/* Flush the last buffered report to mgmt before scanning goes quiet. */
1773 if (has_pending_adv_report(hdev)) {
1774 struct discovery_state *d = &hdev->discovery;
1776 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1777 d->last_adv_addr_type, NULL,
1778 d->last_adv_rssi, d->last_adv_flags,
1780 d->last_adv_data_len, NULL, 0, 0);
1783 /* Cancel this timer so that we don't try to disable scanning
1784 * when it's already disabled.
1786 cancel_delayed_work(&hdev->le_scan_disable);
1788 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1790 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1791 * interrupted scanning due to a connect request. Mark
1792 * therefore discovery as stopped.
1794 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1795 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1796 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1797 hdev->discovery.state == DISCOVERY_FINDING)
1798 queue_work(hdev->workqueue, &hdev->reenable_adv_work);
/* Any other value is reserved by the spec — log and ignore. */
1803 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1808 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE: recover the sent
 * enable value and delegate to the common completion helper.
 * NOTE(review): elided excerpt — guards/return lines not visible.
 */
1811 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1812 struct sk_buff *skb)
1814 struct hci_cp_le_set_scan_enable *cp;
1815 struct hci_ev_status *rp = data;
1817 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1822 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1826 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for HCI_OP_LE_SET_EXT_SCAN_ENABLE: same flow as
 * the legacy variant, shared via le_set_scan_enable_complete().
 */
1831 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1832 struct sk_buff *skb)
1834 struct hci_cp_le_set_ext_scan_enable *cp;
1835 struct hci_ev_status *rp = data;
1837 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1842 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1846 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete handler for LE Read Number of Supported Advertising Sets:
 * cache the controller's advertising-set capacity.
 */
1851 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1852 struct sk_buff *skb)
1854 struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1856 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1862 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete handler for LE Read Accept List Size: cache the size of
 * the controller's filter accept list.
 */
1867 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1868 struct sk_buff *skb)
1870 struct hci_rp_le_read_accept_list_size *rp = data;
1872 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1877 hdev->le_accept_list_size = rp->size;
/* Command Complete handler for LE Clear Accept List: mirror the controller
 * state by emptying the host-side copy of the accept list.
 */
1882 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1883 struct sk_buff *skb)
1885 struct hci_ev_status *rp = data;
1887 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1893 hci_bdaddr_list_clear(&hdev->le_accept_list);
1894 hci_dev_unlock(hdev);
/* Command Complete handler for LE Add To Accept List: on success, add the
 * address we sent to the host-side mirror of the accept list.
 * NOTE(review): elided excerpt — guards/locking not fully visible.
 */
1899 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1900 struct sk_buff *skb)
1902 struct hci_cp_le_add_to_accept_list *sent;
1903 struct hci_ev_status *rp = data;
1905 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1910 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1915 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1917 hci_dev_unlock(hdev);
/* Command Complete handler for LE Remove From Accept List: on success, drop
 * the address we sent from the host-side mirror of the accept list.
 */
1922 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1923 struct sk_buff *skb)
1925 struct hci_cp_le_del_from_accept_list *sent;
1926 struct hci_ev_status *rp = data;
1928 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1933 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1938 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1940 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Supported States: cache the 8-byte
 * LE state combination bitmap reported by the controller.
 */
1945 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1946 struct sk_buff *skb)
1948 struct hci_rp_le_read_supported_states *rp = data;
1950 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1955 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete handler for LE Read Default Data Length: cache the
 * suggested default TX octets/time (little-endian on the wire).
 */
1960 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1961 struct sk_buff *skb)
1963 struct hci_rp_le_read_def_data_len *rp = data;
1965 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1970 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1971 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Command Complete handler for LE Write Suggested Default Data Length:
 * on success, record the values we sent as the new defaults.
 */
1976 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1977 struct sk_buff *skb)
1979 struct hci_cp_le_write_def_data_len *sent;
1980 struct hci_ev_status *rp = data;
1982 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1987 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1991 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1992 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Command Complete handler for LE Add Device To Resolving List: on success,
 * mirror the entry (address, type and IRKs) into the host-side list.
 */
1997 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1998 struct sk_buff *skb)
2000 struct hci_cp_le_add_to_resolv_list *sent;
2001 struct hci_ev_status *rp = data;
2003 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2008 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2013 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2014 sent->bdaddr_type, sent->peer_irk,
2016 hci_dev_unlock(hdev);
/* Command Complete handler for LE Remove Device From Resolving List: on
 * success, drop the matching entry from the host-side mirror.
 */
2021 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2022 struct sk_buff *skb)
2024 struct hci_cp_le_del_from_resolv_list *sent;
2025 struct hci_ev_status *rp = data;
2027 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2032 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2037 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2039 hci_dev_unlock(hdev);
/* Command Complete handler for LE Clear Resolving List: empty the host-side
 * mirror to match the controller.
 */
2044 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2045 struct sk_buff *skb)
2047 struct hci_ev_status *rp = data;
2049 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2055 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2056 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Resolving List Size: cache the
 * controller's resolving-list capacity.
 */
2061 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2062 struct sk_buff *skb)
2064 struct hci_rp_le_read_resolv_list_size *rp = data;
2066 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2071 hdev->le_resolv_list_size = rp->size;
/* Command Complete handler for LE Set Address Resolution Enable: track the
 * controller-based RPA resolution state via HCI_LL_RPA_RESOLUTION.
 * NOTE(review): elided excerpt — the enable-value test between set/clear is
 * not visible here.
 */
2076 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2077 struct sk_buff *skb)
2079 struct hci_ev_status *rp = data;
2082 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2087 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2094 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2096 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2098 hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Maximum Data Length: cache the
 * controller's maximum TX/RX octets and times.
 */
2103 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2104 struct sk_buff *skb)
2106 struct hci_rp_le_read_max_data_len *rp = data;
2108 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2113 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2114 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2115 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2116 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Command Complete handler for Write LE Host Supported: keep the host
 * feature bits (LMP_HOST_LE / LMP_HOST_LE_BREDR) and the HCI_LE_ENABLED /
 * HCI_ADVERTISING flags in sync with what was written.
 * NOTE(review): elided excerpt — the sent->le / sent->simul conditionals are
 * only partially visible.
 */
2121 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2122 struct sk_buff *skb)
2124 struct hci_cp_write_le_host_supported *sent;
2125 struct hci_ev_status *rp = data;
2127 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2132 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2139 hdev->features[1][0] |= LMP_HOST_LE;
2140 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* LE disabled: advertising cannot stay on either. */
2142 hdev->features[1][0] &= ~LMP_HOST_LE;
2143 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2144 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2148 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2150 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2152 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Parameters: on success,
 * remember which own-address type advertising uses.
 */
2157 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2158 struct sk_buff *skb)
2160 struct hci_cp_le_set_adv_param *cp;
2161 struct hci_ev_status *rp = data;
2163 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2168 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2173 hdev->adv_addr_type = cp->own_address_type;
2174 hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Advertising Parameters:
 * record the own-address type, store the selected TX power (the response
 * reports it) both in hdev and in the matching adv instance, and refresh
 * the advertising data now that TX power is known.
 */
2179 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2180 struct sk_buff *skb)
2182 struct hci_rp_le_set_ext_adv_params *rp = data;
2183 struct hci_cp_le_set_ext_adv_params *cp;
2184 struct adv_info *adv_instance;
2186 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2191 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2196 hdev->adv_addr_type = cp->own_addr_type;
2198 /* Store in hdev for instance 0 */
2199 hdev->adv_tx_power = rp->tx_power;
2201 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2203 adv_instance->tx_power = rp->tx_power;
2205 /* Update adv data as tx power is known now */
2206 hci_update_adv_data(hdev, cp->handle);
2208 hci_dev_unlock(hdev);
/* Command Complete handler for a vendor-specific enable-RSSI command:
 * forward the response to the mgmt layer.
 * NOTE(review): uses legacy BT_DBG instead of bt_dev_dbg like its peers —
 * presumably vendor code; confirm intended style.
 */
2214 static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
2215 struct sk_buff *skb)
2217 struct hci_cc_rsp_enable_rssi *rp = data;
2219 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
2220 hdev->name, rp->status, rp->le_ext_opcode);
2222 mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Command Complete handler for a vendor-specific get-raw-RSSI command:
 * forward connection handle and RSSI reading to the mgmt layer.
 */
2227 static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
2228 struct sk_buff *skb)
2230 struct hci_cc_rp_get_raw_rssi *rp = data;
2232 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
2233 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
2235 mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Command Complete handler for HCI Read RSSI: store the reported RSSI on the
 * connection identified by the returned handle.
 */
2241 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2242 struct sk_buff *skb)
2244 struct hci_rp_read_rssi *rp = data;
2245 struct hci_conn *conn;
2247 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2254 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2256 conn->rssi = rp->rssi;
2258 hci_dev_unlock(hdev);
/* Command Complete handler for HCI Read Transmit Power Level: route the
 * reported power to conn->tx_power (current) or conn->max_tx_power (max)
 * depending on which type the original command requested.
 * NOTE(review): elided excerpt — the switch case labels are not visible.
 */
2263 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2264 struct sk_buff *skb)
2266 struct hci_cp_read_tx_power *sent;
2267 struct hci_rp_read_tx_power *rp = data;
2268 struct hci_conn *conn;
2270 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2275 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2281 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2285 switch (sent->type) {
2287 conn->tx_power = rp->tx_power;
2290 conn->max_tx_power = rp->tx_power;
2295 hci_dev_unlock(hdev);
/* Command Complete handler for Write SSP Debug Mode: record the debug-mode
 * byte we sent so the rest of the stack can see it.
 */
2299 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2300 struct sk_buff *skb)
2302 struct hci_ev_status *rp = data;
2305 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2310 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2312 hdev->ssp_debug_mode = *mode;
/* Command Status handler for HCI Inquiry: on success mark inquiry active;
 * on failure re-check pending connection attempts.
 * NOTE(review): elided excerpt — the status branch is not visible.
 */
2317 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2319 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2322 hci_conn_check_pending(hdev);
2326 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status handler for HCI Create Connection. On failure, close the
 * pending conn (or retry via BT_CONNECT2 for a page-timeout-class error
 * 0x0c with attempts remaining); on success, create the conn object if it
 * doesn't exist yet.
 */
2329 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2331 struct hci_cp_create_conn *cp;
2332 struct hci_conn *conn;
2334 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2336 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2342 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2344 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2347 if (conn && conn->state == BT_CONNECT) {
/* 0x0c = Command Disallowed; retry up to two attempts before giving up. */
2348 if (status != 0x0c || conn->attempt > 2) {
2349 conn->state = BT_CLOSED;
2350 hci_connect_cfm(conn, status);
2353 conn->state = BT_CONNECT2;
2357 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2360 bt_dev_err(hdev, "no memory for new connection");
2364 hci_dev_unlock(hdev);
/* Command Status handler for HCI Add SCO Connection: on failure, find the
 * ACL by handle, then close and notify its attached SCO link.
 * NOTE(review): elided excerpt — the acl->link traversal lines are missing.
 */
2367 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2369 struct hci_cp_add_sco *cp;
2370 struct hci_conn *acl, *sco;
2373 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2378 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2382 handle = __le16_to_cpu(cp->handle);
2384 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2388 acl = hci_conn_hash_lookup_handle(hdev, handle);
2392 sco->state = BT_CLOSED;
2394 hci_connect_cfm(sco, status);
2399 hci_dev_unlock(hdev);
/* Command Status handler for HCI Authentication Requested: on failure,
 * complete the connect attempt with the error and drop our reference if the
 * connection was still in BT_CONFIG.
 */
2402 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2404 struct hci_cp_auth_requested *cp;
2405 struct hci_conn *conn;
2407 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2412 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2418 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2420 if (conn->state == BT_CONFIG) {
2421 hci_connect_cfm(conn, status);
2422 hci_conn_drop(conn);
2426 hci_dev_unlock(hdev);
/* Command Status handler for HCI Set Connection Encryption: same failure
 * handling as auth-requested — notify and drop if still in BT_CONFIG.
 */
2429 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2431 struct hci_cp_set_conn_encrypt *cp;
2432 struct hci_conn *conn;
2434 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2439 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2445 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2447 if (conn->state == BT_CONFIG) {
2448 hci_connect_cfm(conn, status);
2449 hci_conn_drop(conn);
2453 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection in BT_CONFIG still needs an explicit
 * authentication request. Only SSP connections, connections needing MITM
 * (auth_type bit 0), or non-SSP links at MEDIUM/HIGH/FIPS security require
 * it; SDP-level security never does.
 * NOTE(review): elided excerpt — the return statements are not visible.
 */
2456 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2457 struct hci_conn *conn)
2459 if (conn->state != BT_CONFIG || !conn->out)
2462 if (conn->pending_sec_level == BT_SECURITY_SDP)
2465 /* Only request authentication for SSP connections or non-SSP
2466 * devices with sec_level MEDIUM or HIGH or if MITM protection
2469 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2470 conn->pending_sec_level != BT_SECURITY_FIPS &&
2471 conn->pending_sec_level != BT_SECURITY_HIGH &&
2472 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the page
 * scan parameters and clock offset learned during inquiry. Returns the
 * result of hci_send_cmd().
 */
2478 static int hci_resolve_name(struct hci_dev *hdev,
2479 struct inquiry_entry *e)
2481 struct hci_cp_remote_name_req cp;
2483 memset(&cp, 0, sizeof(cp));
2485 bacpy(&cp.bdaddr, &e->data.bdaddr);
2486 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2487 cp.pscan_mode = e->data.pscan_mode;
2488 cp.clock_offset = e->data.clock_offset;
2490 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next cache entry that still needs it.
 * Returns true when a request was started; bails out when the resolve list
 * is empty or the name-resolution time budget has been exhausted.
 */
2493 static bool hci_resolve_next_name(struct hci_dev *hdev)
2495 struct discovery_state *discov = &hdev->discovery;
2496 struct inquiry_entry *e;
2498 if (list_empty(&discov->resolve))
2501 /* We should stop if we already spent too much time resolving names. */
2502 if (time_after(jiffies, discov->name_resolve_timeout)) {
2503 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2507 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2511 if (hci_resolve_name(hdev, e) == 0) {
2512 e->name_state = NAME_PENDING;
/* Handle a resolved (or failed) remote name during discovery: push the name
 * to mgmt, update the inquiry-cache entry's name state, and either continue
 * resolving the next entry or mark discovery stopped.
 * NOTE(review): elided excerpt — some guards/labels (e.g. discov_complete)
 * are not visible; two mgmt_device_connected() call shapes appear, the
 * second presumably under a different conditional path.
 */
2519 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2520 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2522 struct discovery_state *discov = &hdev->discovery;
2523 struct inquiry_entry *e;
2526 /* Update the mgmt connected state if necessary. Be careful with
2527 * conn objects that exist but are not (yet) connected however.
2528 * Only those in BT_CONFIG or BT_CONNECTED states can be
2529 * considered connected.
2532 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2533 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2534 mgmt_device_connected(hdev, conn, 0, name, name_len);
2536 mgmt_device_name_update(hdev, bdaddr, name, name_len);
2540 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2541 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2542 mgmt_device_connected(hdev, conn, name, name_len);
2545 if (discov->state == DISCOVERY_STOPPED)
2548 if (discov->state == DISCOVERY_STOPPING)
2549 goto discov_complete;
2551 if (discov->state != DISCOVERY_RESOLVING)
2554 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2555 /* If the device was not found in a list of found devices names of which
2556 * are pending. there is no need to continue resolving a next name as it
2557 * will be done upon receiving another Remote Name Request Complete
/* name == NULL means the request failed: record NOT_KNOWN instead. */
2564 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2565 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2568 if (hci_resolve_next_name(hdev))
2572 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status handler for HCI Remote Name Request. On failure, report the
 * pending name (as unknown) to mgmt; then, if an outgoing connection needs
 * authentication, initiate it here since the name request won't complete.
 */
2575 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2577 struct hci_cp_remote_name_req *cp;
2578 struct hci_conn *conn;
2580 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2582 /* If successful wait for the name req complete event before
2583 * checking for the need to do authentication */
2587 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2593 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2595 if (hci_dev_test_flag(hdev, HCI_MGMT))
2596 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2601 if (!hci_outgoing_auth_needed(hdev, conn))
/* We initiated the auth request, remember that for MITM bookkeeping. */
2604 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2605 struct hci_cp_auth_requested auth_cp;
2607 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2609 auth_cp.handle = __cpu_to_le16(conn->handle);
2610 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2611 sizeof(auth_cp), &auth_cp);
2615 hci_dev_unlock(hdev);
/* Command Status handler for HCI Read Remote Features: on failure, complete
 * the connect attempt with the error and drop the reference if the
 * connection was still in BT_CONFIG.
 */
2618 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2620 struct hci_cp_read_remote_features *cp;
2621 struct hci_conn *conn;
2623 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2628 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2634 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2636 if (conn->state == BT_CONFIG) {
2637 hci_connect_cfm(conn, status);
2638 hci_conn_drop(conn);
2642 hci_dev_unlock(hdev);
/* Command Status handler for HCI Read Remote Extended Features: identical
 * failure handling to the non-extended variant.
 */
2645 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2647 struct hci_cp_read_remote_ext_features *cp;
2648 struct hci_conn *conn;
2650 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2655 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2661 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2663 if (conn->state == BT_CONFIG) {
2664 hci_connect_cfm(conn, status);
2665 hci_conn_drop(conn);
2669 hci_dev_unlock(hdev);
/* Command Status handler for HCI Setup Synchronous Connection: on failure,
 * look up the ACL by handle and close/notify the attached SCO link.
 * NOTE(review): elided excerpt — the acl->link traversal lines are missing.
 */
2672 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2674 struct hci_cp_setup_sync_conn *cp;
2675 struct hci_conn *acl, *sco;
2678 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2683 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2687 handle = __le16_to_cpu(cp->handle);
2689 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2693 acl = hci_conn_hash_lookup_handle(hdev, handle);
2697 sco->state = BT_CLOSED;
2699 hci_connect_cfm(sco, status);
2704 hci_dev_unlock(hdev);
/* Command Status handler for HCI Enhanced Setup Synchronous Connection:
 * same failure handling as the non-enhanced variant.
 */
2707 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2709 struct hci_cp_enhanced_setup_sync_conn *cp;
2710 struct hci_conn *acl, *sco;
2713 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2718 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2722 handle = __le16_to_cpu(cp->handle);
2724 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2728 acl = hci_conn_hash_lookup_handle(hdev, handle);
2732 sco->state = BT_CLOSED;
2734 hci_connect_cfm(sco, status);
2739 hci_dev_unlock(hdev);
/* Command Status handler for HCI Sniff Mode: on failure, clear the pending
 * mode-change flag and finish any SCO setup that was waiting on it.
 */
2742 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2744 struct hci_cp_sniff_mode *cp;
2745 struct hci_conn *conn;
2747 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2752 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2758 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2760 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2762 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2763 hci_sco_setup(conn, status);
2766 hci_dev_unlock(hdev);
/* Command Status handler for HCI Exit Sniff Mode: mirrors the sniff-mode
 * failure handling.
 */
2769 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2771 struct hci_cp_exit_sniff_mode *cp;
2772 struct hci_conn *conn;
2774 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2779 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2785 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2787 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2789 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2790 hci_sco_setup(conn, status);
2793 hci_dev_unlock(hdev);
/* Command Status handler for HCI Disconnect. On success (and not suspended)
 * the real cleanup waits for HCI_EV_DISCONN_COMPLETE; otherwise tear the
 * connection down here: notify mgmt, re-enable advertising for peripheral
 * LE links, purge link keys, requeue auto-connect params, and confirm the
 * disconnect to upper layers.
 * NOTE(review): elided excerpt — several braces/guards are not visible.
 */
2796 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2798 struct hci_cp_disconnect *cp;
2799 struct hci_conn_params *params;
2800 struct hci_conn *conn;
2803 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2805 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
2806 * otherwise cleanup the connection immediately.
2808 if (!status && !hdev->suspended)
2811 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2817 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2822 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2823 conn->dst_type, status);
/* A peripheral LE link going away frees an adv slot — resume advertising. */
2825 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2826 hdev->cur_adv_instance = conn->adv_instance;
2827 hci_enable_advertising(hdev);
2833 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2835 if (conn->type == ACL_LINK) {
2836 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2837 hci_remove_link_key(hdev, &conn->dst);
2840 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2842 switch (params->auto_connect) {
2843 case HCI_AUTO_CONN_LINK_LOSS:
/* Deliberate disconnect: don't auto-reconnect a link-loss-only entry. */
2844 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2848 case HCI_AUTO_CONN_DIRECT:
2849 case HCI_AUTO_CONN_ALWAYS:
2850 list_del_init(&params->action);
2851 list_add(&params->action, &hdev->pend_le_conns);
2859 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2860 cp->reason, mgmt_conn);
2862 hci_disconn_cfm(conn, cp->reason);
2865 /* If the disconnection failed for any reason, the upper layer
2866 * does not retry to disconnect in current implementation.
2867 * Hence, we need to do some basic cleanup here and re-enable
2868 * advertising if necessary.
2872 hci_dev_unlock(hdev);
/* Map controller-resolved LE address types (0x02/0x03) back to the plain
 * public/random types; *resolved (when non-NULL, presumably) reports
 * whether resolution happened.
 * NOTE(review): elided excerpt — switch header and *resolved assignments
 * are not visible.
 */
2875 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2877 /* When using controller based address resolution, then the new
2878 * address types 0x02 and 0x03 are used. These types need to be
2879 * converted back into either public address or random address type
2882 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2885 return ADDR_LE_DEV_PUBLIC;
2886 case ADDR_LE_DEV_RANDOM_RESOLVED:
2889 return ADDR_LE_DEV_RANDOM;
/* Common Command Status bookkeeping for LE Create Connection (legacy and
 * extended): record initiator/responder address info on the pending conn;
 * SMP needs these exact values for the whole lifetime of the link.
 */
2897 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2898 u8 peer_addr_type, u8 own_address_type,
2901 struct hci_conn *conn;
2903 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
/* Normalize controller-resolved own-address types back to public/random. */
2908 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2910 /* Store the initiator and responder address information which
2911 * is needed for SMP. These values will not change during the
2912 * lifetime of the connection.
2914 conn->init_addr_type = own_address_type;
2915 if (own_address_type == ADDR_LE_DEV_RANDOM)
2916 bacpy(&conn->init_addr, &hdev->random_addr);
2918 bacpy(&conn->init_addr, &hdev->bdaddr);
2920 conn->resp_addr_type = peer_addr_type;
2921 bacpy(&conn->resp_addr, peer_addr);
/* Command Status handler for HCI LE Create Connection: failures are handled
 * by hci_conn_failed via the request completion path; here we only record
 * the address info on success.
 */
2924 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2926 struct hci_cp_le_create_conn *cp;
2928 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2930 /* All connection failure handling is taken care of by the
2931 * hci_conn_failed function which is triggered by the HCI
2932 * request completion callbacks used for connecting.
2937 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2943 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2944 cp->own_address_type, cp->filter_policy);
2946 hci_dev_unlock(hdev);
/* Command Status handler for HCI LE Extended Create Connection: same flow
 * as the legacy variant (note the field is own_addr_type here).
 */
2949 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2951 struct hci_cp_le_ext_create_conn *cp;
2953 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2955 /* All connection failure handling is taken care of by the
2956 * hci_conn_failed function which is triggered by the HCI
2957 * request completion callbacks used for connecting.
2962 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2968 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2969 cp->own_addr_type, cp->filter_policy);
2971 hci_dev_unlock(hdev);
/* Command Status handler for HCI LE Read Remote Features: on failure,
 * complete the connect attempt and drop the reference if the connection
 * was still in BT_CONFIG.
 */
2974 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2976 struct hci_cp_le_read_remote_features *cp;
2977 struct hci_conn *conn;
2979 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2984 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2990 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2992 if (conn->state == BT_CONFIG) {
2993 hci_connect_cfm(conn, status);
2994 hci_conn_drop(conn);
2998 hci_dev_unlock(hdev);
/* Command Status handler for HCI LE Start Encryption: if the command failed
 * on a still-connected link, encryption can't proceed, so tear the link
 * down with an authentication-failure reason.
 */
3001 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3003 struct hci_cp_le_start_enc *cp;
3004 struct hci_conn *conn;
3006 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3013 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3017 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3021 if (conn->state != BT_CONNECTED)
3024 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3025 hci_conn_drop(conn);
3028 hci_dev_unlock(hdev);
/* Command Status handler for HCI Switch Role: on failure, clear the pending
 * role-switch flag so a future switch can be attempted.
 */
3031 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3033 struct hci_cp_switch_role *cp;
3034 struct hci_conn *conn;
3036 BT_DBG("%s status 0x%2.2x", hdev->name, status);
3041 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3047 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3049 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3051 hci_dev_unlock(hdev);
/* Handler for HCI_EV_INQUIRY_COMPLETE: clear HCI_INQUIRY (waking any
 * waiters), then drive the mgmt discovery state machine — either start
 * resolving cached names or mark discovery stopped, taking simultaneous
 * LE scanning (HCI_QUIRK_SIMULTANEOUS_DISCOVERY) into account.
 */
3054 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3055 struct sk_buff *skb)
3057 struct hci_ev_status *ev = data;
3058 struct discovery_state *discov = &hdev->discovery;
3059 struct inquiry_entry *e;
3061 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3063 hci_conn_check_pending(hdev);
3065 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3068 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3069 wake_up_bit(&hdev->flags, HCI_INQUIRY);
3071 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3076 if (discov->state != DISCOVERY_FINDING)
3079 if (list_empty(&discov->resolve)) {
3080 /* When BR/EDR inquiry is active and no LE scanning is in
3081 * progress, then change discovery state to indicate completion.
3083 * When running LE scanning and BR/EDR inquiry simultaneously
3084 * and the LE scan already finished, then change the discovery
3085 * state to indicate completion.
3087 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3088 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3089 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Names still to resolve: start with the first NAME_NEEDED entry. */
3093 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3094 if (e && hci_resolve_name(hdev, e) == 0) {
3095 e->name_state = NAME_PENDING;
3096 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3097 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3099 /* When BR/EDR inquiry is active and no LE scanning is in
3100 * progress, then change discovery state to indicate completion.
3102 * When running LE scanning and BR/EDR inquiry simultaneously
3103 * and the LE scan already finished, then change the discovery
3104 * state to indicate completion.
3106 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3107 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3108 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3112 hci_dev_unlock(hdev);
/* Handler for HCI_EV_INQUIRY_RESULT: validate the event length against the
 * advertised entry count, then feed each inquiry_info into the inquiry
 * cache and report it to mgmt (RSSI unavailable in this event form).
 * Periodic-inquiry mode results are presumably skipped — the branch body is
 * not visible in this excerpt.
 */
3115 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3116 struct sk_buff *skb)
3118 struct hci_ev_inquiry_result *ev = edata;
3119 struct inquiry_data data;
/* Guard against a truncated event: num entries must fit in the skb. */
3122 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3123 flex_array_size(ev, info, ev->num)))
3126 bt_dev_dbg(hdev, "num %d", ev->num);
3131 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3136 for (i = 0; i < ev->num; i++) {
3137 struct inquiry_info *info = &ev->info[i];
3140 bacpy(&data.bdaddr, &info->bdaddr);
3141 data.pscan_rep_mode = info->pscan_rep_mode;
3142 data.pscan_period_mode = info->pscan_period_mode;
3143 data.pscan_mode = info->pscan_mode;
3144 memcpy(data.dev_class, info->dev_class, 3);
3145 data.clock_offset = info->clock_offset;
3146 data.rssi = HCI_RSSI_INVALID;
3147 data.ssp_mode = 0x00;
3149 flags = hci_inquiry_cache_update(hdev, &data, false);
3151 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3152 info->dev_class, HCI_RSSI_INVALID,
3153 flags, NULL, 0, NULL, 0, 0);
3156 hci_dev_unlock(hdev);
/* Handler for HCI_EV_CONN_COMPLETE: match (or create) the hci_conn for the
 * reported address, validate the handle, and on success bring the link up —
 * set state, create sysfs/debugfs entries, propagate auth/encrypt flags,
 * request remote features for ACL, and fix the packet type for pre-2.0
 * controllers. Duplicate events for an already-set-up handle are rejected,
 * since processing them twice can corrupt kernel memory.
 * NOTE(review): elided excerpt — several else/error-path lines are missing.
 */
3159 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3160 struct sk_buff *skb)
3162 struct hci_ev_conn_complete *ev = data;
3163 struct hci_conn *conn;
3164 u8 status = ev->status;
3166 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3170 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3172 /* In case of error status and there is no connection pending
3173 * just unlock as there is nothing to cleanup.
3178 /* Connection may not exist if auto-connected. Check the bredr
3179 * allowlist to see if this device is allowed to auto connect.
3180 * If link is an ACL type, create a connection class
3183 * Auto-connect will only occur if the event filter is
3184 * programmed with a given address. Right now, event filter is
3185 * only used during suspend.
3187 if (ev->link_type == ACL_LINK &&
3188 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3191 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3194 bt_dev_err(hdev, "no memory for new conn")
/* SCO completes may arrive for a conn tracked as ESCO — retarget it. */
3198 if (ev->link_type != SCO_LINK)
3201 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3206 conn->type = SCO_LINK;
3210 /* The HCI_Connection_Complete event is only sent once per connection.
3211 * Processing it more than once per connection can corrupt kernel memory.
3213 * As the connection handle is set here for the first time, it indicates
3214 * whether the connection is already set up.
3216 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
3217 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3222 conn->handle = __le16_to_cpu(ev->handle);
3223 if (conn->handle > HCI_CONN_HANDLE_MAX) {
3224 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
3225 conn->handle, HCI_CONN_HANDLE_MAX);
3226 status = HCI_ERROR_INVALID_PARAMETERS;
3230 if (conn->type == ACL_LINK) {
3231 conn->state = BT_CONFIG;
3232 hci_conn_hold(conn);
/* No SSP and no stored key: give pairing more time before disconnect. */
3234 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3235 !hci_find_link_key(hdev, &ev->bdaddr))
3236 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3238 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3240 conn->state = BT_CONNECTED;
3242 hci_debugfs_create_conn(conn);
3243 hci_conn_add_sysfs(conn);
3245 if (test_bit(HCI_AUTH, &hdev->flags))
3246 set_bit(HCI_CONN_AUTH, &conn->flags);
3248 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3249 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3251 /* Get remote features */
3252 if (conn->type == ACL_LINK) {
3253 struct hci_cp_read_remote_features cp;
3254 cp.handle = ev->handle;
3255 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3258 hci_update_scan(hdev);
3261 /* Set packet type for incoming connection */
3262 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3263 struct hci_cp_change_conn_ptype cp;
3264 cp.handle = ev->handle;
3265 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3266 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3271 if (conn->type == ACL_LINK)
3272 hci_sco_setup(conn, ev->status);
3276 hci_conn_failed(conn, status);
3277 } else if (ev->link_type == SCO_LINK) {
3278 switch (conn->setting & SCO_AIRMODE_MASK) {
3279 case SCO_AIRMODE_CVSD:
3281 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3285 hci_connect_cfm(conn, status);
3289 hci_dev_unlock(hdev);
3291 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from the given address with reason
 * HCI_ERROR_REJ_BAD_ADDR.
 */
3294 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3296 struct hci_cp_reject_conn_req cp;
3298 bacpy(&cp.bdaddr, bdaddr);
3299 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3300 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Handle HCI_Connection_Request (incoming BR/EDR ACL or SCO/eSCO).
 *
 * Decides whether to accept, defer, or reject the request:
 *  - reject if no protocol accepted it, the peer is on the reject
 *    list, or (under mgmt) the adapter is neither connectable nor has
 *    an accept-list entry for the peer;
 *  - otherwise create/reuse a hci_conn and either send the accept
 *    command immediately or (HCI_PROTO_DEFER) leave the decision to
 *    the upper layer via hci_connect_cfm().
 */
static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
				 struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);

	/* Ask L2CAP/SCO whether anyone wants this connection; they may
	 * also request deferred acceptance via @flags.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	hci_dev_lock(hdev);

	if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Require HCI_CONNECTABLE or an accept list entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
					       BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		goto unlock;
	}

	/* Connection accepted */

	/* Refresh the cached device class from the event */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			bt_dev_err(hdev, "no memory for new connection");
			goto unlock;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		/* Try a role switch to central if both sides allow it */
		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become central */
		else
			cp.role = 0x01; /* Remain peripheral */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* 0x1f40 = 8000 bytes/s (64 kbit/s voice channel) */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Defer the decision to the upper layer */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}

	return;

unlock:
	hci_dev_unlock(hdev);
}
3401 static u8 hci_to_mgmt_reason(u8 err)
3404 case HCI_ERROR_CONNECTION_TIMEOUT:
3405 return MGMT_DEV_DISCONN_TIMEOUT;
3406 case HCI_ERROR_REMOTE_USER_TERM:
3407 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3408 case HCI_ERROR_REMOTE_POWER_OFF:
3409 return MGMT_DEV_DISCONN_REMOTE;
3410 case HCI_ERROR_LOCAL_HOST_TERM:
3411 return MGMT_DEV_DISCONN_LOCAL_HOST;
3413 return MGMT_DEV_DISCONN_UNKNOWN;
/* Handle HCI_Disconnection_Complete.
 *
 * On success: report the disconnect to mgmt, drop flushable link
 * keys, re-arm passive scanning for auto-connect peers, notify the
 * protocol layers, re-enable advertising for LE peripheral links and
 * finally delete the connection object. On failure only
 * mgmt_disconnect_failed() is reported.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
				     struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = data;
	u8 reason;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);

	/* A pending auth failure takes precedence over the HCI reason */
	if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
		reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
	else
		reason = hci_to_mgmt_reason(ev->reason);

	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_scan(hdev);
	}

	/* Re-queue auto-connect peers for passive scanning so the
	 * device reconnects when it becomes available again.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect on actual link loss */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			fallthrough;

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_passive_scan(hdev);
			break;

		default:
			break;
		}
	}

	hci_disconn_cfm(conn, ev->reason);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
		hdev->cur_adv_instance = conn->adv_instance;
		hci_enable_advertising(hdev);
	}

	hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle HCI_Authentication_Complete.
 *
 * Updates the connection's auth state, then either continues link
 * setup (BT_CONFIG: request encryption for SSP links or complete the
 * connection) or confirms the auth result to waiting upper layers.
 * A pending encryption request is also issued/failed here.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Legacy (pre-SSP) pairing cannot be re-triggered once
		 * the link is up; only note it.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			bt_dev_info(hdev, "re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires an encrypted link before the
			 * connection is reported as established.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Re-arm the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle HCI_Remote_Name_Request_Complete.
 *
 * Forwards the (NUL-bounded) name to the mgmt pending-name machinery
 * when HCI_MGMT is set, and starts authentication on the connection
 * if outgoing auth is still needed for the requested security level.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Handle HCI_Encryption_Change.
 *
 * Tracks the encryption/auth/AES-CCM state of the connection,
 * enforces the link security mode (downgrading the status to
 * HCI_ERROR_AUTH_FAILURE if requirements are not met), reads the
 * encryption key size for encrypted ACL links, and programs the
 * authenticated payload timeout where LMP/LE ping is supported.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE
			 * links always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK) {
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Check link security requirements are met */
	if (!hci_conn_check_link_mode(conn))
		ev->status = HCI_ERROR_AUTH_FAILURE;

	if (ev->status && conn->state == BT_CONNECTED) {
		if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
			set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);

		/* Notify upper layers so they can cleanup before
		 * disconnecting.
		 */
		hci_encrypt_cfm(conn, ev->status);
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		cp.handle = cpu_to_le16(conn->handle);
		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
				 sizeof(cp), &cp)) {
			bt_dev_err(hdev, "sending read key size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred until the key size reply */
		goto unlock;
	}

	/* Set the default Authenticated Payload Timeout after
	 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
	 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
	 * sent when the link is active and Encryption is enabled, the conn
	 * type can be either LE or ACL and controller must support LMP Ping.
	 * Ensure for AES-CCM encryption as well.
	 */
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
	    test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
	    ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
	     (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
		struct hci_cp_write_auth_payload_to cp;

		cp.handle = cpu_to_le16(conn->handle);
		cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
		hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
			     sizeof(cp), &cp);
	}

notify:
	hci_encrypt_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
/* Handle HCI_Change_Connection_Link_Key_Complete.
 *
 * Marks the connection secure on success, clears the pending-auth
 * flag and confirms the key change to upper layers.
 */
static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			set_bit(HCI_CONN_SECURE, &conn->flags);

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
/* Handle HCI_Read_Remote_Supported_Features_Complete.
 *
 * Stores page 0 of the remote feature set. While still configuring
 * (BT_CONFIG): chain into a remote extended-features read if both
 * sides support it, otherwise request the remote name (or report the
 * device as connected to mgmt) and complete the connection when no
 * outgoing authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
/* Update the command credit count and (re)arm the ncmd watchdog.
 *
 * @ncmd: Num_HCI_Command_Packets from a Command Complete/Status
 * event. A non-zero value grants a credit and cancels the watchdog;
 * zero means the controller is busy, so the ncmd timer is queued to
 * catch controllers that never release a credit (unless the command
 * workqueue is being drained). Skipped entirely while HCI_RESET is
 * in progress.
 */
static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
{
	cancel_delayed_work(&hdev->cmd_timer);

	rcu_read_lock();
	if (!test_bit(HCI_RESET, &hdev->flags)) {
		if (ncmd) {
			cancel_delayed_work(&hdev->ncmd_timer);
			atomic_set(&hdev->cmd_cnt, 1);
		} else {
			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
						   HCI_NCMD_TIMEOUT);
		}
	}
	rcu_read_unlock();
}
3817 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3818 struct sk_buff *skb)
3820 struct hci_rp_le_read_buffer_size_v2 *rp = data;
3822 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3827 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
3828 hdev->le_pkts = rp->acl_max_pkt;
3829 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
3830 hdev->iso_pkts = rp->iso_max_pkt;
3832 hdev->le_cnt = hdev->le_pkts;
3833 hdev->iso_cnt = hdev->iso_pkts;
3835 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3836 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
/* Command Complete handler for HCI_LE_Set_CIG_Parameters.
 *
 * On failure, every connection belonging to the CIG is torn down. On
 * success, the controller-assigned CIS handles from the reply are
 * distributed (in order) to the not-yet-connected ISO links of that
 * CIG, and CIS creation is kicked off for links whose underlying LE
 * connection is already up.
 */
static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_set_cig_params *rp = data;
	struct hci_conn *conn;
	int i = 0;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	hci_dev_lock(hdev);

	if (rp->status) {
		while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, rp->status);
			hci_conn_del(conn);
		}
		goto unlock;
	}

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
		if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
		    conn->state == BT_CONNECTED)
			continue;

		/* Assign the next handle from the reply to this CIS */
		conn->handle = __le16_to_cpu(rp->handle[i++]);

		bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
			   conn->handle, conn->link);

		/* Create CIS if LE is already connected */
		if (conn->link && conn->link->state == BT_CONNECTED) {
			/* Drop the RCU read lock around the command
			 * submission, then reacquire to continue.
			 */
			rcu_read_unlock();
			hci_le_create_cis(conn->link);
			rcu_read_lock();
		}

		if (i == rp->num_handles)
			break;
	}

	rcu_read_unlock();

unlock:
	hci_dev_unlock(hdev);

	return rp->status;
}
/* Command Complete handler for HCI_LE_Setup_ISO_Data_Path.
 *
 * Looks up the connection from the sent command's handle. On error
 * the connection is confirmed with the failure status and deleted.
 * On success the connection is confirmed once the last required data
 * path direction has been set up (output direction, or input for an
 * output-only configuration).
 */
static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_rp_le_setup_iso_path *rp = data;
	struct hci_cp_le_setup_iso_path *cp;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
	if (!cp)
		return rp->status;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (rp->status) {
		hci_connect_cfm(conn, rp->status);
		hci_conn_del(conn);
		goto unlock;
	}

	switch (cp->direction) {
	/* Input (Host to Controller) */
	case 0x00:
		/* Only confirm connection if output only */
		if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
			hci_connect_cfm(conn, rp->status);
		break;
	/* Output (Controller to Host) */
	case 0x01:
		/* Confirm connection since conn->iso_qos is always configured
		 * last.
		 */
		hci_connect_cfm(conn, rp->status);
		break;
	}

unlock:
	hci_dev_unlock(hdev);
	return rp->status;
}
/* Command Status handler for HCI_LE_Create_BIG.
 *
 * Currently only logs the status; no connection state is updated
 * here.
 */
static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
{
	bt_dev_dbg(hdev, "status 0x%2.2x", status);
}
/* Command Complete handler for HCI_LE_Set_Periodic_Advertising_Parameters.
 *
 * Only validates the status and that the command parameters are
 * still available; no state is tracked yet (see TODO below).
 * Returns the HCI status byte.
 */
static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	struct hci_cp_le_set_per_adv_params *cp;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
	if (!cp)
		return rp->status;

	/* TODO: set the conn state */

	return rp->status;
}
/* Command Complete handler for HCI_LE_Set_Periodic_Advertising_Enable.
 *
 * Mirrors the enable flag that was sent with the command into the
 * HCI_LE_PER_ADV device flag. Returns the HCI status byte.
 */
static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
				       struct sk_buff *skb)
{
	struct hci_ev_status *rp = data;
	__u8 *sent;

	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);

	if (rp->status)
		return rp->status;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
	if (!sent)
		return rp->status;

	hci_dev_lock(hdev);

	if (*sent)
		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
	else
		hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);

	hci_dev_unlock(hdev);

	return rp->status;
}
/* Helpers for building hci_cc_table entries:
 *
 * HCI_CC_VL()    - reply with a variable length (min/max bounds)
 * HCI_CC()       - reply with a fixed length (min == max)
 * HCI_CC_STATUS() - reply consisting of just the status byte
 */
#define HCI_CC_VL(_op, _func, _min, _max) \
{ \
	.op = _op, \
	.func = _func, \
	.min_len = _min, \
	.max_len = _max, \
}

#define HCI_CC(_op, _func, _len) \
	HCI_CC_VL(_op, _func, _len, _len)

#define HCI_CC_STATUS(_op, _func) \
	HCI_CC(_op, _func, sizeof(struct hci_ev_status))
/* Dispatch table for Command Complete events.
 *
 * Each entry maps an HCI opcode to its handler plus the expected
 * minimum/maximum reply length; hci_cc_func() enforces the bounds
 * before invoking the callback. Entries are added via the HCI_CC*()
 * helper macros above.
 */
static const struct hci_cc {
	u16  op;
	u8  (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
	u16  min_len;
	u16  max_len;
} hci_cc_table[] = {
	HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
	HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
	HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
	HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
		      hci_cc_remote_name_req_cancel),
	HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
	       sizeof(struct hci_rp_role_discovery)),
	HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
	       sizeof(struct hci_rp_read_link_policy)),
	HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
	       sizeof(struct hci_rp_write_link_policy)),
	HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
	       sizeof(struct hci_rp_read_def_link_policy)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
		      hci_cc_write_def_link_policy),
	HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
	HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
	       sizeof(struct hci_rp_read_stored_link_key)),
	HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
	       sizeof(struct hci_rp_delete_stored_link_key)),
	HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
	HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
	       sizeof(struct hci_rp_read_local_name)),
	HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
	HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
	HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
	HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
	       sizeof(struct hci_rp_read_class_of_dev)),
	HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
	HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
	       sizeof(struct hci_rp_read_voice_setting)),
	HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
	HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
	       sizeof(struct hci_rp_read_num_supported_iac)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
	HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
	HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
	       sizeof(struct hci_rp_read_auth_payload_to)),
	HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
	       sizeof(struct hci_rp_write_auth_payload_to)),
	HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
	       sizeof(struct hci_rp_read_local_version)),
	HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
	       sizeof(struct hci_rp_read_local_commands)),
	HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
	       sizeof(struct hci_rp_read_local_features)),
	HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
	       sizeof(struct hci_rp_read_local_ext_features)),
	HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
	       sizeof(struct hci_rp_read_buffer_size)),
	HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
	       sizeof(struct hci_rp_read_bd_addr)),
	HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
	       sizeof(struct hci_rp_read_local_pairing_opts)),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
	       sizeof(struct hci_rp_read_page_scan_activity)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
		      hci_cc_write_page_scan_activity),
	HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
	       sizeof(struct hci_rp_read_page_scan_type)),
	HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
	HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
	       sizeof(struct hci_rp_read_data_block_size)),
	HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
	       sizeof(struct hci_rp_read_flow_control_mode)),
	HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
	       sizeof(struct hci_rp_read_local_amp_info)),
	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
	       sizeof(struct hci_rp_read_clock)),
	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
	       sizeof(struct hci_rp_read_enc_key_size)),
	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),
	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
	       hci_cc_read_def_err_data_reporting,
	       sizeof(struct hci_rp_read_def_err_data_reporting)),
	HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
		      hci_cc_write_def_err_data_reporting),
	HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
	       sizeof(struct hci_rp_pin_code_reply)),
	HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
	       sizeof(struct hci_rp_pin_code_neg_reply)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
	       sizeof(struct hci_rp_read_local_oob_data)),
	HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
	       sizeof(struct hci_rp_read_local_oob_ext_data)),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
	       sizeof(struct hci_rp_le_read_buffer_size)),
	HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
	       sizeof(struct hci_rp_le_read_local_features)),
	HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
	       sizeof(struct hci_rp_le_read_adv_tx_power)),
	HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
	       sizeof(struct hci_rp_user_confirm_reply)),
	HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
	HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
	       hci_cc_le_read_accept_list_size,
	       sizeof(struct hci_rp_le_read_accept_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
		      hci_cc_le_add_to_accept_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
		      hci_cc_le_del_from_accept_list),
	HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
	       sizeof(struct hci_rp_le_read_supported_states)),
	HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
	       sizeof(struct hci_rp_le_read_def_data_len)),
	HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
		      hci_cc_le_write_def_data_len),
	HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
		      hci_cc_le_add_to_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
		      hci_cc_le_del_from_resolv_list),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
		      hci_cc_le_clear_resolv_list),
	HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
	       sizeof(struct hci_rp_le_read_resolv_list_size)),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
		      hci_cc_le_set_addr_resolution_enable),
	HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
	       sizeof(struct hci_rp_le_read_max_data_len)),
	HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
		      hci_cc_write_le_host_supported),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
	HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
	       sizeof(struct hci_rp_read_rssi)),
	HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
	       sizeof(struct hci_rp_read_tx_power)),
	HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
		      hci_cc_le_set_ext_scan_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
		      hci_cc_le_set_ext_scan_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
	HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
	       hci_cc_le_read_num_adv_sets,
	       sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
	HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
	       sizeof(struct hci_rp_le_set_ext_adv_params)),
	HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
		      hci_cc_le_set_ext_adv_enable),
	HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
		      hci_cc_le_set_adv_set_random_addr),
	HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
	HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
	HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
		      hci_cc_le_set_per_adv_enable),
	HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
	       sizeof(struct hci_rp_le_read_transmit_power)),
	/* NOTE(review): the two RSSI-monitoring opcodes below are not
	 * mainline HCI commands — presumably vendor/downstream
	 * additions; confirm against this tree's hci.h definitions.
	 */
	HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
	       sizeof(struct hci_cc_rsp_enable_rssi)),
	HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
	       sizeof(struct hci_cc_rp_get_raw_rssi)),
	HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
	HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
	       sizeof(struct hci_rp_le_read_buffer_size_v2)),
	HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
		  sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
	HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
	       sizeof(struct hci_rp_le_setup_iso_path)),
};
/* Validate and dispatch one Command Complete reply.
 *
 * Rejects replies shorter than the table's min_len (returns
 * HCI_ERROR_UNSPECIFIED), warns on over-long replies, pulls the
 * min_len header off the skb and hands the remainder to the entry's
 * callback. Returns the callback's status byte.
 */
static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
		      struct sk_buff *skb)
{
	void *data;

	if (skb->len < cc->min_len) {
		bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
			   cc->op, skb->len, cc->min_len);
		return HCI_ERROR_UNSPECIFIED;
	}

	/* Just warn if the length is over max_len size it still be possible to
	 * partially parse the cc so leave to callback to decide if that is
	 * acceptable.
	 */
	if (skb->len > cc->max_len)
		bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
			    cc->op, skb->len, cc->max_len);

	data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
	if (!data)
		return HCI_ERROR_UNSPECIFIED;

	return cc->func(hdev, data, skb);
}
4210 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4211 struct sk_buff *skb, u16 *opcode, u8 *status,
4212 hci_req_complete_t *req_complete,
4213 hci_req_complete_skb_t *req_complete_skb)
4215 struct hci_ev_cmd_complete *ev = data;
4218 *opcode = __le16_to_cpu(ev->opcode);
4220 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4222 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4223 if (hci_cc_table[i].op == *opcode) {
4224 *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4229 if (i == ARRAY_SIZE(hci_cc_table)) {
4230 /* Unknown opcode, assume byte 0 contains the status, so
4231 * that e.g. __hci_cmd_sync() properly returns errors
4232 * for vendor specific commands send by HCI drivers.
4233 * If a vendor doesn't actually follow this convention we may
4234 * need to introduce a vendor CC table in order to properly set
4237 *status = skb->data[0];
4240 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4242 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4245 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4247 "unexpected event for opcode 0x%4.4x", *opcode);
4251 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4252 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Command Status handler for HCI_LE_Create_CIS.
 *
 * Nothing to do on success; on failure every CIS handle listed in
 * the sent command is looked up and its connection is closed and
 * deleted.
 */
static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_cis *cp;
	int i;

	bt_dev_dbg(hdev, "status 0x%2.2x", status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	/* Remove connection if command failed */
	/* Note: cp->num_cis is decremented as the loop counter while
	 * @i indexes into the cis[] array.
	 */
	for (i = 0; cp->num_cis; cp->num_cis--, i++) {
		struct hci_conn *conn;
		u16 handle;

		handle = __le16_to_cpu(cp->cis[i].cis_handle);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (conn) {
			conn->state = BT_CLOSED;
			hci_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	}

	hci_dev_unlock(hdev);
}
/* Helper for building hci_cs_table entries (opcode -> handler). */
#define HCI_CS(_op, _func) \
{ \
	.op = _op, \
	.func = _func, \
}

/* Dispatch table for Command Status events. Unlike Command Complete
 * handlers, these callbacks only receive the status byte.
 */
static const struct hci_cs {
	u16  op;
	void (*func)(struct hci_dev *hdev, __u8 status);
} hci_cs_table[] = {
	HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
	HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
	HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
	HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
	HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
	HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
	HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
	HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
	HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
	       hci_cs_read_remote_ext_features),
	HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
	HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
	       hci_cs_enhanced_setup_sync_conn),
	HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
	HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
	HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
	HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
	HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
	HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
	HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
	HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
	HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
};
/* Handle HCI_Command_Status.
 *
 * Dispatches through hci_cs_table, updates command credits, and
 * completes the pending request when the command failed or when no
 * follow-up event is expected (commands that only produce a Command
 * Status). @opcode and @status are out-parameters for the caller.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
			       struct sk_buff *skb, u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = data;
	int i;

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);

	for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
		if (hci_cs_table[i].op == *opcode) {
			hci_cs_table[i].func(hdev, ev->status);
			break;
		}
	}

	handle_cmd_cnt_and_timer(hdev, ev->ncmd);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);
		if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
			bt_dev_err(hdev,
				   "unexpected event for opcode 0x%4.4x", *opcode);
			return;
		}
	}

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
/* Handle HCI_Hardware_Error: record the controller's error code and
 * schedule the error-reset work to recover the device.
 */
static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
				   struct sk_buff *skb)
{
	struct hci_ev_hardware_error *ev = data;

	bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);

	hdev->hw_error_code = ev->code;

	queue_work(hdev->req_workqueue, &hdev->error_reset);
}
/* Handle HCI_Role_Change.
 *
 * On success, records the new central/peripheral role; in all cases
 * clears the pending role-switch flag and confirms the result to the
 * upper layers.
 */
static void hci_role_change_evt(struct hci_dev *hdev, void *data,
				struct sk_buff *skb)
{
	struct hci_ev_role_change *ev = data;
	struct hci_conn *conn;

	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		if (!ev->status)
			conn->role = ev->role;

		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

		hci_role_switch_cfm(conn, ev->status, ev->role);
	}

	hci_dev_unlock(hdev);
}
/* Handle HCI_Number_Of_Completed_Packets (packet-based flow control).
 *
 * For each (handle, count) pair: decrement the connection's in-flight
 * counter and return credits to the matching per-type pool, clamped
 * to the pool size. LE falls back to the ACL pool when the controller
 * reports no dedicated LE buffers; ISO falls back to LE, then ACL.
 * Finally the TX work is kicked so queued data can use the credits.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
				  struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = data;
	int i;

	if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
			     flex_array_size(ev, handles, ev->num)))
		return;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	bt_dev_dbg(hdev, "num %d", ev->num);

	for (i = 0; i < ev->num; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				/* No dedicated LE buffers: LE shares
				 * the ACL pool.
				 */
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		case ISO_LINK:
			if (hdev->iso_pkts) {
				hdev->iso_cnt += count;
				if (hdev->iso_cnt > hdev->iso_pkts)
					hdev->iso_cnt = hdev->iso_pkts;
			} else if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		default:
			bt_dev_err(hdev, "unknown type %d conn %p",
				   conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
4482 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4485 struct hci_chan *chan;
4487 switch (hdev->dev_type) {
4489 return hci_conn_hash_lookup_handle(hdev, handle);
4491 chan = hci_chan_lookup_handle(hdev, handle);
4496 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4503 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4504 struct sk_buff *skb)
4506 struct hci_ev_num_comp_blocks *ev = data;
4509 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4510 flex_array_size(ev, handles, ev->num_hndl)))
4513 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4514 bt_dev_err(hdev, "wrong event for mode %d",
4515 hdev->flow_ctl_mode);
4519 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4522 for (i = 0; i < ev->num_hndl; i++) {
4523 struct hci_comp_blocks_info *info = &ev->handles[i];
4524 struct hci_conn *conn = NULL;
4525 __u16 handle, block_count;
4527 handle = __le16_to_cpu(info->handle);
4528 block_count = __le16_to_cpu(info->blocks);
4530 conn = __hci_conn_lookup_handle(hdev, handle);
4534 conn->sent -= block_count;
4536 switch (conn->type) {
4539 hdev->block_cnt += block_count;
4540 if (hdev->block_cnt > hdev->num_blocks)
4541 hdev->block_cnt = hdev->num_blocks;
4545 bt_dev_err(hdev, "unknown type %d conn %p",
4551 queue_work(hdev->workqueue, &hdev->tx_work);
4554 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4555 struct sk_buff *skb)
4557 struct hci_ev_mode_change *ev = data;
4558 struct hci_conn *conn;
4560 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4564 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4566 conn->mode = ev->mode;
4568 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4570 if (conn->mode == HCI_CM_ACTIVE)
4571 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4573 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4576 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4577 hci_sco_setup(conn, ev->status);
4580 hci_dev_unlock(hdev);
4583 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4584 struct sk_buff *skb)
4586 struct hci_ev_pin_code_req *ev = data;
4587 struct hci_conn *conn;
4589 bt_dev_dbg(hdev, "");
4593 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4597 if (conn->state == BT_CONNECTED) {
4598 hci_conn_hold(conn);
4599 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4600 hci_conn_drop(conn);
4603 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4604 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4605 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4606 sizeof(ev->bdaddr), &ev->bdaddr);
4607 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4610 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4615 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4619 hci_dev_unlock(hdev);
4622 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4624 if (key_type == HCI_LK_CHANGED_COMBINATION)
4627 conn->pin_length = pin_len;
4628 conn->key_type = key_type;
4631 case HCI_LK_LOCAL_UNIT:
4632 case HCI_LK_REMOTE_UNIT:
4633 case HCI_LK_DEBUG_COMBINATION:
4635 case HCI_LK_COMBINATION:
4637 conn->pending_sec_level = BT_SECURITY_HIGH;
4639 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4641 case HCI_LK_UNAUTH_COMBINATION_P192:
4642 case HCI_LK_UNAUTH_COMBINATION_P256:
4643 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4645 case HCI_LK_AUTH_COMBINATION_P192:
4646 conn->pending_sec_level = BT_SECURITY_HIGH;
4648 case HCI_LK_AUTH_COMBINATION_P256:
4649 conn->pending_sec_level = BT_SECURITY_FIPS;
4654 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4655 struct sk_buff *skb)
4657 struct hci_ev_link_key_req *ev = data;
4658 struct hci_cp_link_key_reply cp;
4659 struct hci_conn *conn;
4660 struct link_key *key;
4662 bt_dev_dbg(hdev, "");
4664 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4669 key = hci_find_link_key(hdev, &ev->bdaddr);
4671 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4675 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4677 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4679 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4681 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4682 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4683 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4684 bt_dev_dbg(hdev, "ignoring unauthenticated key");
4688 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4689 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4690 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4691 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4695 conn_set_key(conn, key->type, key->pin_len);
4698 bacpy(&cp.bdaddr, &ev->bdaddr);
4699 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4701 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4703 hci_dev_unlock(hdev);
4708 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4709 hci_dev_unlock(hdev);
4712 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4713 struct sk_buff *skb)
4715 struct hci_ev_link_key_notify *ev = data;
4716 struct hci_conn *conn;
4717 struct link_key *key;
4721 bt_dev_dbg(hdev, "");
4725 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4729 hci_conn_hold(conn);
4730 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4731 hci_conn_drop(conn);
4733 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4734 conn_set_key(conn, ev->key_type, conn->pin_length);
4736 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4739 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4740 ev->key_type, pin_len, &persistent);
4744 /* Update connection information since adding the key will have
4745 * fixed up the type in the case of changed combination keys.
4747 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4748 conn_set_key(conn, key->type, key->pin_len);
4750 mgmt_new_link_key(hdev, key, persistent);
4752 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4753 * is set. If it's not set simply remove the key from the kernel
4754 * list (we've still notified user space about it but with
4755 * store_hint being 0).
4757 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4758 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4759 list_del_rcu(&key->list);
4760 kfree_rcu(key, rcu);
4765 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4767 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4770 hci_dev_unlock(hdev);
4773 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4774 struct sk_buff *skb)
4776 struct hci_ev_clock_offset *ev = data;
4777 struct hci_conn *conn;
4779 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4783 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4784 if (conn && !ev->status) {
4785 struct inquiry_entry *ie;
4787 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4789 ie->data.clock_offset = ev->clock_offset;
4790 ie->timestamp = jiffies;
4794 hci_dev_unlock(hdev);
4797 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4798 struct sk_buff *skb)
4800 struct hci_ev_pkt_type_change *ev = data;
4801 struct hci_conn *conn;
4803 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4807 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4808 if (conn && !ev->status)
4809 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4811 hci_dev_unlock(hdev);
4814 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4815 struct sk_buff *skb)
4817 struct hci_ev_pscan_rep_mode *ev = data;
4818 struct inquiry_entry *ie;
4820 bt_dev_dbg(hdev, "");
4824 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4826 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4827 ie->timestamp = jiffies;
4830 hci_dev_unlock(hdev);
4833 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4834 struct sk_buff *skb)
4836 struct hci_ev_inquiry_result_rssi *ev = edata;
4837 struct inquiry_data data;
4840 bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4845 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4850 if (skb->len == array_size(ev->num,
4851 sizeof(struct inquiry_info_rssi_pscan))) {
4852 struct inquiry_info_rssi_pscan *info;
4854 for (i = 0; i < ev->num; i++) {
4857 info = hci_ev_skb_pull(hdev, skb,
4858 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4861 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4862 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4866 bacpy(&data.bdaddr, &info->bdaddr);
4867 data.pscan_rep_mode = info->pscan_rep_mode;
4868 data.pscan_period_mode = info->pscan_period_mode;
4869 data.pscan_mode = info->pscan_mode;
4870 memcpy(data.dev_class, info->dev_class, 3);
4871 data.clock_offset = info->clock_offset;
4872 data.rssi = info->rssi;
4873 data.ssp_mode = 0x00;
4875 flags = hci_inquiry_cache_update(hdev, &data, false);
4877 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4878 info->dev_class, info->rssi,
4879 flags, NULL, 0, NULL, 0, 0);
4881 } else if (skb->len == array_size(ev->num,
4882 sizeof(struct inquiry_info_rssi))) {
4883 struct inquiry_info_rssi *info;
4885 for (i = 0; i < ev->num; i++) {
4888 info = hci_ev_skb_pull(hdev, skb,
4889 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4892 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4893 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4897 bacpy(&data.bdaddr, &info->bdaddr);
4898 data.pscan_rep_mode = info->pscan_rep_mode;
4899 data.pscan_period_mode = info->pscan_period_mode;
4900 data.pscan_mode = 0x00;
4901 memcpy(data.dev_class, info->dev_class, 3);
4902 data.clock_offset = info->clock_offset;
4903 data.rssi = info->rssi;
4904 data.ssp_mode = 0x00;
4906 flags = hci_inquiry_cache_update(hdev, &data, false);
4908 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4909 info->dev_class, info->rssi,
4910 flags, NULL, 0, NULL, 0, 0);
4913 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4914 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4917 hci_dev_unlock(hdev);
4920 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4921 struct sk_buff *skb)
4923 struct hci_ev_remote_ext_features *ev = data;
4924 struct hci_conn *conn;
4926 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4930 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4934 if (ev->page < HCI_MAX_PAGES)
4935 memcpy(conn->features[ev->page], ev->features, 8);
4937 if (!ev->status && ev->page == 0x01) {
4938 struct inquiry_entry *ie;
4940 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4942 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4944 if (ev->features[0] & LMP_HOST_SSP) {
4945 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4947 /* It is mandatory by the Bluetooth specification that
4948 * Extended Inquiry Results are only used when Secure
4949 * Simple Pairing is enabled, but some devices violate
4952 * To make these devices work, the internal SSP
4953 * enabled flag needs to be cleared if the remote host
4954 * features do not indicate SSP support */
4955 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4958 if (ev->features[0] & LMP_HOST_SC)
4959 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4962 if (conn->state != BT_CONFIG)
4965 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4966 struct hci_cp_remote_name_req cp;
4967 memset(&cp, 0, sizeof(cp));
4968 bacpy(&cp.bdaddr, &conn->dst);
4969 cp.pscan_rep_mode = 0x02;
4970 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4971 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4972 mgmt_device_connected(hdev, conn, NULL, 0);
4974 if (!hci_outgoing_auth_needed(hdev, conn)) {
4975 conn->state = BT_CONNECTED;
4976 hci_connect_cfm(conn, ev->status);
4977 hci_conn_drop(conn);
4981 hci_dev_unlock(hdev);
4984 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4985 struct sk_buff *skb)
4987 struct hci_ev_sync_conn_complete *ev = data;
4988 struct hci_conn *conn;
4989 u8 status = ev->status;
4991 switch (ev->link_type) {
4996 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4997 * for HCI_Synchronous_Connection_Complete is limited to
4998 * either SCO or eSCO
5000 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5004 bt_dev_dbg(hdev, "status 0x%2.2x", status);
5008 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5010 if (ev->link_type == ESCO_LINK)
5013 /* When the link type in the event indicates SCO connection
5014 * and lookup of the connection object fails, then check
5015 * if an eSCO connection object exists.
5017 * The core limits the synchronous connections to either
5018 * SCO or eSCO. The eSCO connection is preferred and tried
5019 * to be setup first and until successfully established,
5020 * the link type will be hinted as eSCO.
5022 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5027 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5028 * Processing it more than once per connection can corrupt kernel memory.
5030 * As the connection handle is set here for the first time, it indicates
5031 * whether the connection is already set up.
5033 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5034 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5040 conn->handle = __le16_to_cpu(ev->handle);
5041 if (conn->handle > HCI_CONN_HANDLE_MAX) {
5042 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
5043 conn->handle, HCI_CONN_HANDLE_MAX);
5044 status = HCI_ERROR_INVALID_PARAMETERS;
5045 conn->state = BT_CLOSED;
5049 conn->state = BT_CONNECTED;
5050 conn->type = ev->link_type;
5052 hci_debugfs_create_conn(conn);
5053 hci_conn_add_sysfs(conn);
5056 case 0x10: /* Connection Accept Timeout */
5057 case 0x0d: /* Connection Rejected due to Limited Resources */
5058 case 0x11: /* Unsupported Feature or Parameter Value */
5059 case 0x1c: /* SCO interval rejected */
5060 case 0x1a: /* Unsupported Remote Feature */
5061 case 0x1e: /* Invalid LMP Parameters */
5062 case 0x1f: /* Unspecified error */
5063 case 0x20: /* Unsupported LMP Parameter value */
5065 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5066 (hdev->esco_type & EDR_ESCO_MASK);
5067 if (hci_setup_sync(conn, conn->link->handle))
5073 conn->state = BT_CLOSED;
5077 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5078 /* Notify only in case of SCO over HCI transport data path which
5079 * is zero and non-zero value shall be non-HCI transport data path
5081 if (conn->codec.data_path == 0 && hdev->notify) {
5082 switch (ev->air_mode) {
5084 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5087 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5092 hci_connect_cfm(conn, status);
5097 hci_dev_unlock(hdev);
5100 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5104 while (parsed < eir_len) {
5105 u8 field_len = eir[0];
5110 parsed += field_len + 1;
5111 eir += field_len + 1;
5117 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5118 struct sk_buff *skb)
5120 struct hci_ev_ext_inquiry_result *ev = edata;
5121 struct inquiry_data data;
5125 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5126 flex_array_size(ev, info, ev->num)))
5129 bt_dev_dbg(hdev, "num %d", ev->num);
5134 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5139 for (i = 0; i < ev->num; i++) {
5140 struct extended_inquiry_info *info = &ev->info[i];
5144 bacpy(&data.bdaddr, &info->bdaddr);
5145 data.pscan_rep_mode = info->pscan_rep_mode;
5146 data.pscan_period_mode = info->pscan_period_mode;
5147 data.pscan_mode = 0x00;
5148 memcpy(data.dev_class, info->dev_class, 3);
5149 data.clock_offset = info->clock_offset;
5150 data.rssi = info->rssi;
5151 data.ssp_mode = 0x01;
5153 if (hci_dev_test_flag(hdev, HCI_MGMT))
5154 name_known = eir_get_data(info->data,
5156 EIR_NAME_COMPLETE, NULL);
5160 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5162 eir_len = eir_get_length(info->data, sizeof(info->data));
5164 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5165 info->dev_class, info->rssi,
5166 flags, info->data, eir_len, NULL, 0, 0);
5169 hci_dev_unlock(hdev);
5172 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5173 struct sk_buff *skb)
5175 struct hci_ev_key_refresh_complete *ev = data;
5176 struct hci_conn *conn;
5178 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5179 __le16_to_cpu(ev->handle));
5183 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5187 /* For BR/EDR the necessary steps are taken through the
5188 * auth_complete event.
5190 if (conn->type != LE_LINK)
5194 conn->sec_level = conn->pending_sec_level;
5196 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5198 if (ev->status && conn->state == BT_CONNECTED) {
5199 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5200 hci_conn_drop(conn);
5204 if (conn->state == BT_CONFIG) {
5206 conn->state = BT_CONNECTED;
5208 hci_connect_cfm(conn, ev->status);
5209 hci_conn_drop(conn);
5211 hci_auth_cfm(conn, ev->status);
5213 hci_conn_hold(conn);
5214 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5215 hci_conn_drop(conn);
5219 hci_dev_unlock(hdev);
5222 static u8 hci_get_auth_req(struct hci_conn *conn)
5224 /* If remote requests no-bonding follow that lead */
5225 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5226 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5227 return conn->remote_auth | (conn->auth_type & 0x01);
5229 /* If both remote and local have enough IO capabilities, require
5232 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5233 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5234 return conn->remote_auth | 0x01;
5236 /* No MITM protection possible so ignore remote requirement */
5237 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5240 static u8 bredr_oob_data_present(struct hci_conn *conn)
5242 struct hci_dev *hdev = conn->hdev;
5243 struct oob_data *data;
5245 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5249 if (bredr_sc_enabled(hdev)) {
5250 /* When Secure Connections is enabled, then just
5251 * return the present value stored with the OOB
5252 * data. The stored value contains the right present
5253 * information. However it can only be trusted when
5254 * not in Secure Connection Only mode.
5256 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5257 return data->present;
5259 /* When Secure Connections Only mode is enabled, then
5260 * the P-256 values are required. If they are not
5261 * available, then do not declare that OOB data is
5264 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5265 !memcmp(data->hash256, ZERO_KEY, 16))
5271 /* When Secure Connections is not enabled or actually
5272 * not supported by the hardware, then check that if
5273 * P-192 data values are present.
5275 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5276 !memcmp(data->hash192, ZERO_KEY, 16))
5282 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5283 struct sk_buff *skb)
5285 struct hci_ev_io_capa_request *ev = data;
5286 struct hci_conn *conn;
5288 bt_dev_dbg(hdev, "");
5292 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5296 hci_conn_hold(conn);
5298 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5301 /* Allow pairing if we're pairable, the initiators of the
5302 * pairing or if the remote is not requesting bonding.
5304 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5305 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5306 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5307 struct hci_cp_io_capability_reply cp;
5309 bacpy(&cp.bdaddr, &ev->bdaddr);
5310 /* Change the IO capability from KeyboardDisplay
5311 * to DisplayYesNo as it is not supported by BT spec. */
5312 cp.capability = (conn->io_capability == 0x04) ?
5313 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5315 /* If we are initiators, there is no remote information yet */
5316 if (conn->remote_auth == 0xff) {
5317 /* Request MITM protection if our IO caps allow it
5318 * except for the no-bonding case.
5320 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5321 conn->auth_type != HCI_AT_NO_BONDING)
5322 conn->auth_type |= 0x01;
5324 conn->auth_type = hci_get_auth_req(conn);
5327 /* If we're not bondable, force one of the non-bondable
5328 * authentication requirement values.
5330 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5331 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5333 cp.authentication = conn->auth_type;
5334 cp.oob_data = bredr_oob_data_present(conn);
5336 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5339 struct hci_cp_io_capability_neg_reply cp;
5341 bacpy(&cp.bdaddr, &ev->bdaddr);
5342 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5344 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5349 hci_dev_unlock(hdev);
5352 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5353 struct sk_buff *skb)
5355 struct hci_ev_io_capa_reply *ev = data;
5356 struct hci_conn *conn;
5358 bt_dev_dbg(hdev, "");
5362 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5366 conn->remote_cap = ev->capability;
5367 conn->remote_auth = ev->authentication;
5370 hci_dev_unlock(hdev);
5373 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5374 struct sk_buff *skb)
5376 struct hci_ev_user_confirm_req *ev = data;
5377 int loc_mitm, rem_mitm, confirm_hint = 0;
5378 struct hci_conn *conn;
5380 bt_dev_dbg(hdev, "");
5384 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5387 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5391 loc_mitm = (conn->auth_type & 0x01);
5392 rem_mitm = (conn->remote_auth & 0x01);
5394 /* If we require MITM but the remote device can't provide that
5395 * (it has NoInputNoOutput) then reject the confirmation
5396 * request. We check the security level here since it doesn't
5397 * necessarily match conn->auth_type.
5399 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5400 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5401 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5402 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5403 sizeof(ev->bdaddr), &ev->bdaddr);
5407 /* If no side requires MITM protection; auto-accept */
5408 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5409 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5411 /* If we're not the initiators request authorization to
5412 * proceed from user space (mgmt_user_confirm with
5413 * confirm_hint set to 1). The exception is if neither
5414 * side had MITM or if the local IO capability is
5415 * NoInputNoOutput, in which case we do auto-accept
5417 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5418 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5419 (loc_mitm || rem_mitm)) {
5420 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5425 /* If there already exists link key in local host, leave the
5426 * decision to user space since the remote device could be
5427 * legitimate or malicious.
5429 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5430 bt_dev_dbg(hdev, "Local host already has link key");
5435 BT_DBG("Auto-accept of user confirmation with %ums delay",
5436 hdev->auto_accept_delay);
5438 if (hdev->auto_accept_delay > 0) {
5439 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5440 queue_delayed_work(conn->hdev->workqueue,
5441 &conn->auto_accept_work, delay);
5445 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5446 sizeof(ev->bdaddr), &ev->bdaddr);
5451 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5452 le32_to_cpu(ev->passkey), confirm_hint);
5455 hci_dev_unlock(hdev);
5458 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5459 struct sk_buff *skb)
5461 struct hci_ev_user_passkey_req *ev = data;
5463 bt_dev_dbg(hdev, "");
5465 if (hci_dev_test_flag(hdev, HCI_MGMT))
5466 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5469 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5470 struct sk_buff *skb)
5472 struct hci_ev_user_passkey_notify *ev = data;
5473 struct hci_conn *conn;
5475 bt_dev_dbg(hdev, "");
5477 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5481 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5482 conn->passkey_entered = 0;
5484 if (hci_dev_test_flag(hdev, HCI_MGMT))
5485 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5486 conn->dst_type, conn->passkey_notify,
5487 conn->passkey_entered);
5490 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5491 struct sk_buff *skb)
5493 struct hci_ev_keypress_notify *ev = data;
5494 struct hci_conn *conn;
5496 bt_dev_dbg(hdev, "");
5498 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5503 case HCI_KEYPRESS_STARTED:
5504 conn->passkey_entered = 0;
5507 case HCI_KEYPRESS_ENTERED:
5508 conn->passkey_entered++;
5511 case HCI_KEYPRESS_ERASED:
5512 conn->passkey_entered--;
5515 case HCI_KEYPRESS_CLEARED:
5516 conn->passkey_entered = 0;
5519 case HCI_KEYPRESS_COMPLETED:
5523 if (hci_dev_test_flag(hdev, HCI_MGMT))
5524 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5525 conn->dst_type, conn->passkey_notify,
5526 conn->passkey_entered);
5529 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5530 struct sk_buff *skb)
5532 struct hci_ev_simple_pair_complete *ev = data;
5533 struct hci_conn *conn;
5535 bt_dev_dbg(hdev, "");
5539 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5543 /* Reset the authentication requirement to unknown */
5544 conn->remote_auth = 0xff;
5546 /* To avoid duplicate auth_failed events to user space we check
5547 * the HCI_CONN_AUTH_PEND flag which will be set if we
5548 * initiated the authentication. A traditional auth_complete
5549 * event gets always produced as initiator and is also mapped to
5550 * the mgmt_auth_failed event */
5551 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5552 mgmt_auth_failed(conn, ev->status);
5554 hci_conn_drop(conn);
5557 hci_dev_unlock(hdev);
5560 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5561 struct sk_buff *skb)
5563 struct hci_ev_remote_host_features *ev = data;
5564 struct inquiry_entry *ie;
5565 struct hci_conn *conn;
5567 bt_dev_dbg(hdev, "");
5571 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5573 memcpy(conn->features[1], ev->features, 8);
5575 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5577 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5579 hci_dev_unlock(hdev);
5582 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5583 struct sk_buff *skb)
5585 struct hci_ev_remote_oob_data_request *ev = edata;
5586 struct oob_data *data;
5588 bt_dev_dbg(hdev, "");
5592 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5595 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5597 struct hci_cp_remote_oob_data_neg_reply cp;
5599 bacpy(&cp.bdaddr, &ev->bdaddr);
5600 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5605 if (bredr_sc_enabled(hdev)) {
5606 struct hci_cp_remote_oob_ext_data_reply cp;
5608 bacpy(&cp.bdaddr, &ev->bdaddr);
5609 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5610 memset(cp.hash192, 0, sizeof(cp.hash192));
5611 memset(cp.rand192, 0, sizeof(cp.rand192));
5613 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5614 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5616 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5617 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5619 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5622 struct hci_cp_remote_oob_data_reply cp;
5624 bacpy(&cp.bdaddr, &ev->bdaddr);
5625 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5626 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5628 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5633 hci_dev_unlock(hdev);
5636 #if IS_ENABLED(CONFIG_BT_HS)
5637 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5638 struct sk_buff *skb)
5640 struct hci_ev_channel_selected *ev = data;
5641 struct hci_conn *hcon;
5643 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5645 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5649 amp_read_loc_assoc_final_data(hdev, hcon);
5652 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5653 struct sk_buff *skb)
5655 struct hci_ev_phy_link_complete *ev = data;
5656 struct hci_conn *hcon, *bredr_hcon;
5658 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5663 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5675 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5677 hcon->state = BT_CONNECTED;
5678 bacpy(&hcon->dst, &bredr_hcon->dst);
5680 hci_conn_hold(hcon);
5681 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5682 hci_conn_drop(hcon);
5684 hci_debugfs_create_conn(hcon);
5685 hci_conn_add_sysfs(hcon);
5687 amp_physical_cfm(bredr_hcon, hcon);
5690 hci_dev_unlock(hdev);
5693 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5694 struct sk_buff *skb)
5696 struct hci_ev_logical_link_complete *ev = data;
5697 struct hci_conn *hcon;
5698 struct hci_chan *hchan;
5699 struct amp_mgr *mgr;
5701 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5702 le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5704 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5708 /* Create AMP hchan */
5709 hchan = hci_chan_create(hcon);
5713 hchan->handle = le16_to_cpu(ev->handle);
5716 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5718 mgr = hcon->amp_mgr;
5719 if (mgr && mgr->bredr_chan) {
5720 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5722 l2cap_chan_lock(bredr_chan);
5724 bredr_chan->conn->mtu = hdev->block_mtu;
5725 l2cap_logical_cfm(bredr_chan, hchan, 0);
5726 hci_conn_hold(hcon);
5728 l2cap_chan_unlock(bredr_chan);
5732 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5733 struct sk_buff *skb)
5735 struct hci_ev_disconn_logical_link_complete *ev = data;
5736 struct hci_chan *hchan;
5738 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5739 le16_to_cpu(ev->handle), ev->status);
5746 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5747 if (!hchan || !hchan->amp)
5750 amp_destroy_logical_link(hchan, ev->reason);
5753 hci_dev_unlock(hdev);
5756 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5757 struct sk_buff *skb)
5759 struct hci_ev_disconn_phy_link_complete *ev = data;
5760 struct hci_conn *hcon;
5762 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5769 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5770 if (hcon && hcon->type == AMP_LINK) {
5771 hcon->state = BT_CLOSED;
5772 hci_disconn_cfm(hcon, ev->reason);
5776 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address pair of an LE connection.
 * The init/resp roles depend on whether we initiated the connection;
 * when the controller reported a Local RPA it takes precedence over
 * hdev->rpa for the local side of the pair.
 */
5780 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5781 u8 bdaddr_type, bdaddr_t *local_rpa)
5784 conn->dst_type = bdaddr_type;
5785 conn->resp_addr_type = bdaddr_type;
5786 bacpy(&conn->resp_addr, bdaddr);
5788 /* Check if the controller has set a Local RPA then it must be
5789 * used instead of hdev->rpa.
5791 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5792 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5793 bacpy(&conn->init_addr, local_rpa);
5794 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5795 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5796 bacpy(&conn->init_addr, &conn->hdev->rpa);
/* No privacy: fall back to the controller's identity address. */
5798 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5799 &conn->init_addr_type);
5802 conn->resp_addr_type = conn->hdev->adv_addr_type;
5803 /* Check if the controller has set a Local RPA then it must be
5804 * used instead of hdev->rpa.
5806 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5807 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5808 bacpy(&conn->resp_addr, local_rpa);
5809 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5810 /* In case of ext adv, resp_addr will be updated in
5811 * Adv Terminated event.
5813 if (!ext_adv_capable(conn->hdev))
5814 bacpy(&conn->resp_addr,
5815 &conn->hdev->random_addr);
5817 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5820 conn->init_addr_type = bdaddr_type;
5821 bacpy(&conn->init_addr, bdaddr);
5823 /* For incoming connections, set the default minimum
5824 * and maximum connection interval. They will be used
5825 * to check if the parameters are in range and if not
5826 * trigger the connection update procedure.
5828 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5829 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common handler for LE Connection Complete and LE Enhanced Connection
 * Complete (local_rpa is NULL for the legacy event). Creates or finds
 * the hci_conn, resolves the peer identity via IRK, validates the
 * handle, and transitions the connection into BT_CONFIG/BT_CONNECTED.
 * Guarded against duplicate events, which could otherwise corrupt
 * kernel memory.
 */
5833 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5834 bdaddr_t *bdaddr, u8 bdaddr_type,
5835 bdaddr_t *local_rpa, u8 role, u16 handle,
5836 u16 interval, u16 latency,
5837 u16 supervision_timeout)
5839 struct hci_conn_params *params;
5840 struct hci_conn *conn;
5841 struct smp_irk *irk;
5846 /* All controllers implicitly stop advertising in the event of a
5847 * connection, so ensure that the state bit is cleared.
5849 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5851 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5853 /* In case of error status and there is no connection pending
5854 * just unlock as there is nothing to cleanup.
5859 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5861 bt_dev_err(hdev, "no memory for new connection");
5865 conn->dst_type = bdaddr_type;
5867 /* If we didn't have a hci_conn object previously
5868 * but we're in central role this must be something
5869 * initiated using an accept list. Since accept list based
5870 * connections are not "first class citizens" we don't
5871 * have full tracking of them. Therefore, we go ahead
5872 * with a "best effort" approach of determining the
5873 * initiator address based on the HCI_PRIVACY flag.
5876 conn->resp_addr_type = bdaddr_type;
5877 bacpy(&conn->resp_addr, bdaddr);
5878 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5879 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5880 bacpy(&conn->init_addr, &hdev->rpa);
5882 hci_copy_identity_address(hdev,
5884 &conn->init_addr_type)
5888 cancel_delayed_work(&conn->le_conn_timeout);
5891 /* The HCI_LE_Connection_Complete event is only sent once per connection.
5892 * Processing it more than once per connection can corrupt kernel memory.
5894 * As the connection handle is set here for the first time, it indicates
5895 * whether the connection is already set up.
5897 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5898 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5902 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5904 /* Lookup the identity address from the stored connection
5905 * address and address type.
5907 * When establishing connections to an identity address, the
5908 * connection procedure will store the resolvable random
5909 * address first. Now if it can be converted back into the
5910 * identity address, start using the identity address from
5913 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5915 bacpy(&conn->dst, &irk->bdaddr);
5916 conn->dst_type = irk->addr_type;
5919 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
/* Reject out-of-spec connection handles (> HCI_CONN_HANDLE_MAX). */
5921 if (handle > HCI_CONN_HANDLE_MAX) {
5922 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
5923 HCI_CONN_HANDLE_MAX);
5924 status = HCI_ERROR_INVALID_PARAMETERS;
5927 /* All connection failure handling is taken care of by the
5928 * hci_conn_failed function which is triggered by the HCI
5929 * request completion callbacks used for connecting.
5934 /* Drop the connection if it has been aborted */
5935 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5936 hci_conn_drop(conn);
5940 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5941 addr_type = BDADDR_LE_PUBLIC;
5943 addr_type = BDADDR_LE_RANDOM;
5945 /* Drop the connection if the device is blocked */
5946 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5947 hci_conn_drop(conn);
5951 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5952 mgmt_device_connected(hdev, conn, NULL, 0);
5954 conn->sec_level = BT_SECURITY_LOW;
5955 conn->handle = handle;
5956 conn->state = BT_CONFIG;
5958 /* Store current advertising instance as connection advertising instance
5959 * when software rotation is in use so it can be re-enabled when
5962 if (!ext_adv_capable(hdev))
5963 conn->adv_instance = hdev->cur_adv_instance;
5965 conn->le_conn_interval = interval;
5966 conn->le_conn_latency = latency;
5967 conn->le_supv_timeout = supervision_timeout;
5969 hci_debugfs_create_conn(conn);
5970 hci_conn_add_sysfs(conn);
5972 /* The remote features procedure is defined for central
5973 * role only. So only in case of an initiated connection
5974 * request the remote features.
5976 * If the local controller supports peripheral-initiated features
5977 * exchange, then requesting the remote features in peripheral
5978 * role is possible. Otherwise just transition into the
5979 * connected state without requesting the remote features.
5982 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5983 struct hci_cp_le_read_remote_features cp;
5985 cp.handle = __cpu_to_le16(conn->handle);
5987 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5990 hci_conn_hold(conn);
5992 conn->state = BT_CONNECTED;
5993 hci_connect_cfm(conn, status);
/* Connection established: release any pending auto-connect params. */
5996 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5999 list_del_init(¶ms->action);
6001 hci_conn_drop(params->conn);
6002 hci_conn_put(params->conn);
6003 params->conn = NULL;
6008 hci_update_passive_scan(hdev);
6009 hci_dev_unlock(hdev);
/* Handle HCI LE Connection Complete event: thin wrapper that forwards
 * the decoded event fields to le_conn_complete_evt(). The legacy event
 * carries no Local RPA, hence the NULL argument.
 */
6012 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6013 struct sk_buff *skb)
6015 struct hci_ev_le_conn_complete *ev = data;
6017 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6019 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6020 NULL, ev->role, le16_to_cpu(ev->handle),
6021 le16_to_cpu(ev->interval),
6022 le16_to_cpu(ev->latency),
6023 le16_to_cpu(ev->supervision_timeout));
/* Handle HCI LE Enhanced Connection Complete event: same as the legacy
 * variant but also forwards the controller-provided Local RPA.
 */
6026 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6027 struct sk_buff *skb)
6029 struct hci_ev_le_enh_conn_complete *ev = data;
6031 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6033 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6034 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6035 le16_to_cpu(ev->interval),
6036 le16_to_cpu(ev->latency),
6037 le16_to_cpu(ev->supervision_timeout));
/* Handle LE Advertising Set Terminated event: removes the terminated
 * advertising instance, clears HCI_LE_ADV when no instances remain
 * enabled, and records the instance/own-address on the connection that
 * caused the termination so advertising can resume after disconnect.
 */
6040 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6041 struct sk_buff *skb)
6043 struct hci_evt_le_ext_adv_set_term *ev = data;
6044 struct hci_conn *conn;
6045 struct adv_info *adv, *n;
6047 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6049 /* The Bluetooth Core 5.3 specification clearly states that this event
6050 * shall not be sent when the Host disables the advertising set. So in
6051 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6053 * When the Host disables an advertising set, all cleanup is done via
6054 * its command callback and not needed to be duplicated here.
6056 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6057 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6063 adv = hci_find_adv_instance(hdev, ev->handle);
6069 /* Remove advertising as it has been terminated */
6070 hci_remove_adv_instance(hdev, ev->handle);
6071 mgmt_advertising_removed(NULL, hdev, ev->handle);
6073 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6078 /* We are no longer advertising, clear HCI_LE_ADV */
6079 hci_dev_clear_flag(hdev, HCI_LE_ADV);
6084 adv->enabled = false;
6086 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6088 /* Store handle in the connection so the correct advertising
6089 * instance can be re-enabled when disconnected.
6091 conn->adv_instance = ev->handle;
6093 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6094 bacmp(&conn->resp_addr, BDADDR_ANY))
/* Deferred from le_conn_update_addr(): fill resp_addr now. */
6098 bacpy(&conn->resp_addr, &hdev->random_addr);
6103 bacpy(&conn->resp_addr, &adv->random_addr);
6107 hci_dev_unlock(hdev);
/* Handle LE Connection Update Complete event: record the newly
 * negotiated interval, latency and supervision timeout on the hci_conn.
 */
6110 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6111 struct sk_buff *skb)
6113 struct hci_ev_le_conn_update_complete *ev = data;
6114 struct hci_conn *conn;
6116 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6123 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6125 conn->le_conn_interval = le16_to_cpu(ev->interval);
6126 conn->le_conn_latency = le16_to_cpu(ev->latency);
6127 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6130 hci_dev_unlock(hdev);
/* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertisement should trigger an LE
 * connection attempt. Checks connectability of the PDU, the reject
 * list, peripheral-role limits, and the pend_le_conns auto-connect
 * policy; on a match, initiates hci_connect_le() and returns the
 * resulting hci_conn (or NULL / no attempt).
 */
6134 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6136 u8 addr_type, bool addr_resolved,
6139 struct hci_conn *conn;
6140 struct hci_conn_params *params;
6142 /* If the event is not connectable don't proceed further */
6143 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6146 /* Ignore if the device is blocked or hdev is suspended */
6147 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6151 /* Most controller will fail if we try to create new connections
6152 * while we have an existing one in peripheral role.
6154 if (hdev->conn_hash.le_num_peripheral > 0 &&
6155 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6156 !(hdev->le_states[3] & 0x10)))
6159 /* If we're not connectable only connect devices that we have in
6160 * our pend_le_conns list.
6162 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6167 if (!params->explicit_connect) {
6168 switch (params->auto_connect) {
6169 case HCI_AUTO_CONN_DIRECT:
6170 /* Only devices advertising with ADV_DIRECT_IND are
6171 * triggering a connection attempt. This is allowing
6172 * incoming connections from peripheral devices.
6174 if (adv_type != LE_ADV_DIRECT_IND)
6177 case HCI_AUTO_CONN_ALWAYS:
6178 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
6179 * are triggering a connection attempt. This means
6180 * that incoming connections from peripheral device are
6181 * accepted and also outgoing connections to peripheral
6182 * devices are established when found.
6190 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6191 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6193 if (!IS_ERR(conn)) {
6194 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6195 * by higher layer that tried to connect, if no then
6196 * store the pointer since we don't really have any
6197 * other owner of the object besides the params that
6198 * triggered it. This way we can abort the connection if
6199 * the parameters get removed and keep the reference
6200 * count consistent once the connection is established.
6203 if (!params->explicit_connect)
6204 params->conn = hci_conn_get(conn);
6209 switch (PTR_ERR(conn)) {
6211 /* If hci_connect() returns -EBUSY it means there is already
6212 * an LE connection attempt going on. Since controllers don't
6213 * support more than one connection attempt at the time, we
6214 * don't consider this an error case.
6218 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising-report processing shared by the legacy, extended
 * and direct advertising report handlers. Validates the PDU type and
 * length, resolves RPAs via stored IRKs, kicks off pending connection
 * attempts, and either forwards, stores or merges the report into a
 * mgmt Device Found event (SCAN_RSP merging with the previous report).
 */
6225 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6226 u8 bdaddr_type, bdaddr_t *direct_addr,
6227 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6228 bool ext_adv, bool ctl_time, u64 instant)
6230 struct discovery_state *d = &hdev->discovery;
6231 struct smp_irk *irk;
6232 struct hci_conn *conn;
6233 bool match, bdaddr_resolved;
/* Only known legacy PDU types are accepted; anything else is dropped. */
6239 case LE_ADV_DIRECT_IND:
6240 case LE_ADV_SCAN_IND:
6241 case LE_ADV_NONCONN_IND:
6242 case LE_ADV_SCAN_RSP:
6245 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6246 "type: 0x%02x", type);
6250 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
6251 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
6255 /* Find the end of the data in case the report contains padded zero
6256 * bytes at the end causing an invalid length value.
6258 * When data is NULL, len is 0 so there is no need for extra ptr
6259 * check as 'ptr < data + 0' is already false in such case.
6261 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6262 if (ptr + 1 + *ptr > data + len)
6266 /* Adjust for actual length. This handles the case when remote
6267 * device is advertising with incorrect data length.
6271 /* If the direct address is present, then this report is from
6272 * a LE Direct Advertising Report event. In that case it is
6273 * important to see if the address is matching the local
6274 * controller address.
6276 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6277 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6280 /* Only resolvable random addresses are valid for these
6281 * kind of reports and others can be ignored.
6283 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6286 /* If the controller is not using resolvable random
6287 * addresses, then this report can be ignored.
6289 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6292 /* If the local IRK of the controller does not match
6293 * with the resolvable random address provided, then
6294 * this report can be ignored.
6296 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6300 /* Check if we need to convert to identity address */
6301 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6303 bdaddr = &irk->bdaddr;
6304 bdaddr_type = irk->addr_type;
6307 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6309 /* Check if we have been requested to connect to this device.
6311 * direct_addr is set only for directed advertising reports (it is NULL
6312 * for advertising reports) and is already verified to be RPA above.
6314 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6316 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
6317 /* Store report for later inclusion by
6318 * mgmt_device_connected
6320 memcpy(conn->le_adv_data, data, len);
6321 conn->le_adv_data_len = len;
6324 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6325 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6329 /* All scan results should be sent up for Mesh systems */
6330 if (hci_dev_test_flag(hdev, HCI_MESH)) {
6331 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6332 rssi, flags, data, len, NULL, 0, instant);
6336 /* Passive scanning shouldn't trigger any device found events,
6337 * except for devices marked as CONN_REPORT for which we do send
6338 * device found events, or advertisement monitoring requested.
6340 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6341 if (type == LE_ADV_DIRECT_IND)
6344 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6345 bdaddr, bdaddr_type) &&
6346 idr_is_empty(&hdev->adv_monitors_idr))
6349 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6350 rssi, flags, data, len, NULL, 0, 0);
6354 /* When receiving a scan response, then there is no way to
6355 * know if the remote device is connectable or not. However
6356 * since scan responses are merged with a previously seen
6357 * advertising report, the flags field from that report
6360 * In the unlikely case that a controller just sends a scan
6361 * response event that doesn't match the pending report, then
6362 * it is marked as a standalone SCAN_RSP.
6364 if (type == LE_ADV_SCAN_RSP)
6365 flags = MGMT_DEV_FOUND_SCAN_RSP;
6367 /* If there's nothing pending either store the data from this
6368 * event or send an immediate device found event if the data
6369 * should not be stored for later.
6371 if (!ext_adv && !has_pending_adv_report(hdev)) {
6372 /* If the report will trigger a SCAN_REQ store it for
6375 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6376 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6377 rssi, flags, data, len);
6381 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6382 rssi, flags, data, len, NULL, 0, 0);
6386 /* Check if the pending report is for the same device as the new one */
6387 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6388 bdaddr_type == d->last_adv_addr_type);
6390 /* If the pending data doesn't match this report or this isn't a
6391 * scan response (e.g. we got a duplicate ADV_IND) then force
6392 * sending of the pending data.
6394 if (type != LE_ADV_SCAN_RSP || !match) {
6395 /* Send out whatever is in the cache, but skip duplicates */
6397 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6398 d->last_adv_addr_type, NULL,
6399 d->last_adv_rssi, d->last_adv_flags,
6401 d->last_adv_data_len, NULL, 0, 0);
6403 /* If the new report will trigger a SCAN_REQ store it for
6406 if (!ext_adv && (type == LE_ADV_IND ||
6407 type == LE_ADV_SCAN_IND)) {
6408 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6409 rssi, flags, data, len);
6413 /* The advertising reports cannot be merged, so clear
6414 * the pending report and send out a device found event.
6416 clear_pending_adv_report(hdev);
6417 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6418 rssi, flags, data, len, NULL, 0, 0);
6422 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6423 * the new event is a SCAN_RSP. We can therefore proceed with
6424 * sending a merged device found event.
6426 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6427 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6428 d->last_adv_data, d->last_adv_data_len, data, len, 0);
6429 clear_pending_adv_report(hdev);
/* Handle LE Advertising Report event: iterates the variable-length
 * report array, pulling each entry (plus the trailing RSSI byte) off
 * the skb and feeding it to process_adv_report(). Reports longer than
 * HCI_MAX_AD_LENGTH are rejected as malformed.
 */
6432 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6433 struct sk_buff *skb)
6435 struct hci_ev_le_advertising_report *ev = data;
6436 u64 instant = jiffies;
6444 struct hci_ev_le_advertising_info *info;
6447 info = hci_le_ev_skb_pull(hdev, skb,
6448 HCI_EV_LE_ADVERTISING_REPORT,
/* Pull the per-report RSSI byte that follows the AD data. */
6453 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6457 if (info->length <= HCI_MAX_AD_LENGTH) {
6458 rssi = info->data[info->length];
6459 process_adv_report(hdev, info->type, &info->bdaddr,
6460 info->bdaddr_type, NULL, 0, rssi,
6461 info->data, info->length, false,
6464 bt_dev_err(hdev, "Dropping invalid advertising data");
6468 hci_dev_unlock(hdev);
/* Map an extended advertising event type bitfield to the equivalent
 * legacy PDU type. Legacy PDUs are mapped exactly; for extended PDUs
 * the connectable/scannable/directed bits are inspected in priority
 * order. Returns LE_ADV_INVALID for unknown combinations.
 */
6471 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6473 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6475 case LE_LEGACY_ADV_IND:
6477 case LE_LEGACY_ADV_DIRECT_IND:
6478 return LE_ADV_DIRECT_IND;
6479 case LE_LEGACY_ADV_SCAN_IND:
6480 return LE_ADV_SCAN_IND;
6481 case LE_LEGACY_NONCONN_IND:
6482 return LE_ADV_NONCONN_IND;
6483 case LE_LEGACY_SCAN_RSP_ADV:
6484 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6485 return LE_ADV_SCAN_RSP;
6491 if (evt_type & LE_EXT_ADV_CONN_IND) {
6492 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6493 return LE_ADV_DIRECT_IND;
6498 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6499 return LE_ADV_SCAN_RSP;
6501 if (evt_type & LE_EXT_ADV_SCAN_IND)
6502 return LE_ADV_SCAN_IND;
6504 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6505 evt_type & LE_EXT_ADV_DIRECT_IND)
6506 return LE_ADV_NONCONN_IND;
6509 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6512 return LE_ADV_INVALID;
/* Handle LE Extended Advertising Report event: pulls each report entry
 * and its data, translates the extended event type to a legacy PDU
 * type, and hands valid reports to process_adv_report().
 */
6515 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6516 struct sk_buff *skb)
6518 struct hci_ev_le_ext_adv_report *ev = data;
6519 u64 instant = jiffies;
6527 struct hci_ev_le_ext_adv_info *info;
6531 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6536 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6540 evt_type = __le16_to_cpu(info->type);
6541 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6542 if (legacy_evt_type != LE_ADV_INVALID) {
6543 process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6544 info->bdaddr_type, NULL, 0,
6545 info->rssi, info->data, info->length,
6546 !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6551 hci_dev_unlock(hdev);
/* Send HCI LE Periodic Advertising Terminate Sync for the given sync
 * handle. Returns the result of hci_send_cmd().
 */
6554 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6556 struct hci_cp_le_pa_term_sync cp;
6558 memset(&cp, 0, sizeof(cp));
6561 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
/* Handle LE Periodic Advertising Sync Established event: clears the
 * HCI_PA_SYNC flag and, unless a protocol accepts the sync (ISO link),
 * terminates it again via hci_le_pa_term_sync().
 */
6564 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6565 struct sk_buff *skb)
6567 struct hci_ev_le_pa_sync_established *ev = data;
6568 int mask = hdev->link_mode;
6571 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6578 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6580 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6581 if (!(mask & HCI_LM_ACCEPT))
6582 hci_le_pa_term_sync(hdev, ev->handle);
6584 hci_dev_unlock(hdev);
/* Handle LE Read Remote Features Complete event: stores the remote
 * feature bits and finishes connection setup for connections still in
 * BT_CONFIG, tolerating error 0x1a from peers that do not support
 * peripheral-initiated feature exchange.
 */
6587 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6588 struct sk_buff *skb)
6590 struct hci_ev_le_remote_feat_complete *ev = data;
6591 struct hci_conn *conn;
6593 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6597 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6600 memcpy(conn->features[0], ev->features, 8);
6602 if (conn->state == BT_CONFIG) {
6605 /* If the local controller supports peripheral-initiated
6606 * features exchange, but the remote controller does
6607 * not, then it is possible that the error code 0x1a
6608 * for unsupported remote feature gets returned.
6610 * In this specific case, allow the connection to
6611 * transition into connected state and mark it as
6614 if (!conn->out && ev->status == 0x1a &&
6615 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6618 status = ev->status;
6620 conn->state = BT_CONNECTED;
6621 hci_connect_cfm(conn, status);
6622 hci_conn_drop(conn);
6626 hci_dev_unlock(hdev);
/* Handle LE Long Term Key Request event: looks up the LTK for the
 * peer, validates EDiv/Rand (both must be zero for SC keys), replies
 * with the key on success, and sends a negative reply otherwise.
 * One-time STKs are deleted after use.
 */
6629 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6630 struct sk_buff *skb)
6632 struct hci_ev_le_ltk_req *ev = data;
6633 struct hci_cp_le_ltk_reply cp;
6634 struct hci_cp_le_ltk_neg_reply neg;
6635 struct hci_conn *conn;
6636 struct smp_ltk *ltk;
6638 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6642 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6646 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6650 if (smp_ltk_is_sc(ltk)) {
6651 /* With SC both EDiv and Rand are set to zero */
6652 if (ev->ediv || ev->rand)
6655 /* For non-SC keys check that EDiv and Rand match */
6656 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Zero-pad the key beyond the negotiated encryption key size. */
6660 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6661 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6662 cp.handle = cpu_to_le16(conn->handle);
6664 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6666 conn->enc_key_size = ltk->enc_size;
6668 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6670 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6671 * temporary key used to encrypt a connection following
6672 * pairing. It is used during the Encrypted Session Setup to
6673 * distribute the keys. Later, security can be re-established
6674 * using a distributed LTK.
6676 if (ltk->type == SMP_STK) {
6677 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6678 list_del_rcu(&ltk->list);
6679 kfree_rcu(ltk, rcu);
6681 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6684 hci_dev_unlock(hdev);
/* Negative reply path: no usable key for this request. */
6689 neg.handle = ev->handle;
6690 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6691 hci_dev_unlock(hdev);
/* Send a negative reply to an LE Remote Connection Parameter Request
 * with the given HCI error reason.
 */
6694 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6697 struct hci_cp_le_conn_param_req_neg_reply cp;
6699 cp.handle = cpu_to_le16(handle);
6702 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle LE Remote Connection Parameter Request event: rejects the
 * request for unknown/unconnected handles or out-of-range parameters,
 * otherwise stores the parameters (central role), notifies mgmt, and
 * accepts the request by echoing the parameters back.
 */
6706 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6707 struct sk_buff *skb)
6709 struct hci_ev_le_remote_conn_param_req *ev = data;
6710 struct hci_cp_le_conn_param_req_reply cp;
6711 struct hci_conn *hcon;
6712 u16 handle, min, max, latency, timeout;
6714 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6716 handle = le16_to_cpu(ev->handle);
6717 min = le16_to_cpu(ev->interval_min);
6718 max = le16_to_cpu(ev->interval_max);
6719 latency = le16_to_cpu(ev->latency);
6720 timeout = le16_to_cpu(ev->timeout);
6722 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6723 if (!hcon || hcon->state != BT_CONNECTED)
6724 return send_conn_param_neg_reply(hdev, handle,
6725 HCI_ERROR_UNKNOWN_CONN_ID);
6727 if (hci_check_conn_params(min, max, latency, timeout))
6728 return send_conn_param_neg_reply(hdev, handle,
6729 HCI_ERROR_INVALID_LL_PARAMS);
6731 if (hcon->role == HCI_ROLE_MASTER) {
6732 struct hci_conn_params *params;
/* Persist the peer-requested parameters for future reconnects. */
6737 params = hci_conn_params_lookup(hdev, &hcon->dst,
6740 params->conn_min_interval = min;
6741 params->conn_max_interval = max;
6742 params->conn_latency = latency;
6743 params->supervision_timeout = timeout;
6749 hci_dev_unlock(hdev);
6751 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6752 store_hint, min, max, latency, timeout);
/* Accept: reply with the same parameters the peer requested. */
6755 cp.handle = ev->handle;
6756 cp.interval_min = ev->interval_min;
6757 cp.interval_max = ev->interval_max;
6758 cp.latency = ev->latency;
6759 cp.timeout = ev->timeout;
6763 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle LE Direct Advertising Report event: validates the flexible
 * report array length, then forwards each entry (with its direct
 * address) to process_adv_report(); these reports carry no AD data.
 */
6766 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6767 struct sk_buff *skb)
6769 struct hci_ev_le_direct_adv_report *ev = data;
6770 u64 instant = jiffies;
6773 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6774 flex_array_size(ev, info, ev->num)))
6782 for (i = 0; i < ev->num; i++) {
6783 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6785 process_adv_report(hdev, info->type, &info->bdaddr,
6786 info->bdaddr_type, &info->direct_addr,
6787 info->direct_addr_type, info->rssi, NULL, 0,
6788 false, false, instant);
6791 hci_dev_unlock(hdev);
/* Handle LE PHY Update Complete event: record the negotiated TX/RX
 * PHYs on the connection.
 */
6794 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6795 struct sk_buff *skb)
6797 struct hci_ev_le_phy_update_complete *ev = data;
6798 struct hci_conn *conn;
6800 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6807 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6811 conn->le_tx_phy = ev->tx_phy;
6812 conn->le_rx_phy = ev->rx_phy;
6815 hci_dev_unlock(hdev);
/* Handle LE CIS Established event: validates the connection and its
 * link type, copies the negotiated ISO QoS parameters (peripheral role
 * learns them from the event), sets up the ISO data path on success,
 * and confirms the connection to upper layers.
 */
6818 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6819 struct sk_buff *skb)
6821 struct hci_evt_le_cis_established *ev = data;
6822 struct hci_conn *conn;
6823 u16 handle = __le16_to_cpu(ev->handle);
6825 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6829 conn = hci_conn_hash_lookup_handle(hdev, handle);
6832 "Unable to find connection with handle 0x%4.4x",
6837 if (conn->type != ISO_LINK) {
6839 "Invalid connection link type handle 0x%4.4x",
6844 if (conn->role == HCI_ROLE_SLAVE) {
/* The 24-bit transport latency fields are widened via a zeroed
 * scratch variable before byte-order conversion.
 */
6847 memset(&interval, 0, sizeof(interval));
6849 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
6850 conn->iso_qos.in.interval = le32_to_cpu(interval);
6851 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
6852 conn->iso_qos.out.interval = le32_to_cpu(interval);
6853 conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
6854 conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
6855 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
6856 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
6857 conn->iso_qos.in.phy = ev->c_phy;
6858 conn->iso_qos.out.phy = ev->p_phy;
6862 conn->state = BT_CONNECTED;
6863 hci_debugfs_create_conn(conn);
6864 hci_conn_add_sysfs(conn);
6865 hci_iso_setup_path(conn);
6869 hci_connect_cfm(conn, ev->status);
6873 hci_dev_unlock(hdev);
/* Send HCI LE Reject CIS Request for the given CIS handle, using
 * HCI_ERROR_REJ_BAD_ADDR as the rejection reason.
 */
6876 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6878 struct hci_cp_le_reject_cis cp;
6880 memset(&cp, 0, sizeof(cp));
6882 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6883 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
/* Send HCI LE Accept CIS Request for the given CIS handle. */
6886 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6888 struct hci_cp_le_accept_cis cp;
6890 memset(&cp, 0, sizeof(cp));
6892 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
/* Handle LE CIS Request event: locates the parent ACL connection,
 * asks registered protocols whether to accept the CIS, creates the
 * peripheral-role ISO hci_conn, and either accepts immediately or
 * defers the decision (BT_CONNECT2) to the socket layer.
 */
6895 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6896 struct sk_buff *skb)
6898 struct hci_evt_le_cis_req *ev = data;
6899 u16 acl_handle, cis_handle;
6900 struct hci_conn *acl, *cis;
6904 acl_handle = __le16_to_cpu(ev->acl_handle);
6905 cis_handle = __le16_to_cpu(ev->cis_handle);
6907 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6908 acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6912 acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6916 mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6917 if (!(mask & HCI_LM_ACCEPT)) {
6918 hci_le_reject_cis(hdev, ev->cis_handle);
6922 cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6924 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
6926 hci_le_reject_cis(hdev, ev->cis_handle);
6929 cis->handle = cis_handle;
6932 cis->iso_qos.cig = ev->cig_id;
6933 cis->iso_qos.cis = ev->cis_id;
6935 if (!(flags & HCI_PROTO_DEFER)) {
6936 hci_le_accept_cis(hdev, ev->cis_handle);
6938 cis->state = BT_CONNECT2;
6939 hci_connect_cfm(cis, 0);
6943 hci_dev_unlock(hdev);
/* Handle LE Create BIG Complete event: validates the flexible BIS
 * handle array, finds the BIG's hci_conn, assigns the first BIS handle
 * to it, and completes connection setup (debugfs/sysfs/ISO path).
 */
6946 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6947 struct sk_buff *skb)
6949 struct hci_evt_le_create_big_complete *ev = data;
6950 struct hci_conn *conn;
6952 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6954 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6955 flex_array_size(ev, bis_handle, ev->num_bis)))
6960 conn = hci_conn_hash_lookup_big(hdev, ev->handle);
6964 if (conn->type != ISO_LINK) {
6966 "Invalid connection link type handle 0x%2.2x",
6972 conn->handle = __le16_to_cpu(ev->bis_handle[0]);
6975 conn->state = BT_CONNECTED;
6976 hci_debugfs_create_conn(conn);
6977 hci_conn_add_sysfs(conn);
6978 hci_iso_setup_path(conn);
6982 hci_connect_cfm(conn, ev->status);
6986 hci_dev_unlock(hdev);
/* Handle LE BIG Sync Established event: for each synchronized BIS,
 * creates (or reuses) an ISO hci_conn, fills in the broadcast QoS
 * parameters derived from the event, and sets up the ISO data path.
 */
6989 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6990 struct sk_buff *skb)
6992 struct hci_evt_le_big_sync_estabilished *ev = data;
6993 struct hci_conn *bis;
6996 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6998 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
6999 flex_array_size(ev, bis, ev->num_bis)))
7007 for (i = 0; i < ev->num_bis; i++) {
7008 u16 handle = le16_to_cpu(ev->bis[i]);
7011 bis = hci_conn_hash_lookup_handle(hdev, handle);
7013 bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7017 bis->handle = handle;
7020 bis->iso_qos.big = ev->handle;
/* 24-bit latency field widened through a zeroed scratch u32. */
7021 memset(&interval, 0, sizeof(interval));
7022 memcpy(&interval, ev->latency, sizeof(ev->latency));
7023 bis->iso_qos.in.interval = le32_to_cpu(interval);
7024 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7025 bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7026 bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
7028 hci_iso_setup_path(bis);
7031 hci_dev_unlock(hdev);
/* Handle LE BIGInfo Advertising Report event: if no protocol accepts
 * the broadcast (ISO link), terminate the periodic advertising sync.
 */
7034 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7035 struct sk_buff *skb)
7037 struct hci_evt_le_big_info_adv_report *ev = data;
7038 int mask = hdev->link_mode;
7041 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7045 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7046 if (!(mask & HCI_LM_ACCEPT))
7047 hci_le_pa_term_sync(hdev, ev->sync_handle);
7049 hci_dev_unlock(hdev);
/* Helpers for populating hci_le_ev_table entries: HCI_LE_EV_VL sets a
 * handler with separate min/max event lengths, HCI_LE_EV is the
 * fixed-length shorthand, and HCI_LE_EV_STATUS covers status-only
 * events (struct hci_ev_status).
 */
7052 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7055 .min_len = _min_len, \
7056 .max_len = _max_len, \
7059 #define HCI_LE_EV(_op, _func, _len) \
7060 HCI_LE_EV_VL(_op, _func, _len, _len)
7062 #define HCI_LE_EV_STATUS(_op, _func) \
7063 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7065 /* Entries in this table shall have their position according to the subevent
7066 * opcode they handle, so the use of the macros above is recommended since they
7067 * initialize each entry at its proper index using Designated Initializers;
7068 * that way events without a callback function can be omitted.
7070 static const struct hci_le_ev {
7071 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7074 } hci_le_ev_table[U8_MAX + 1] = {
7075 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7076 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7077 sizeof(struct hci_ev_le_conn_complete)),
7078 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7079 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7080 sizeof(struct hci_ev_le_advertising_report),
7081 HCI_MAX_EVENT_SIZE),
7082 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7083 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7084 hci_le_conn_update_complete_evt,
7085 sizeof(struct hci_ev_le_conn_update_complete)),
7086 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7087 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7088 hci_le_remote_feat_complete_evt,
7089 sizeof(struct hci_ev_le_remote_feat_complete)),
7090 /* [0x05 = HCI_EV_LE_LTK_REQ] */
7091 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7092 sizeof(struct hci_ev_le_ltk_req)),
7093 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7094 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7095 hci_le_remote_conn_param_req_evt,
7096 sizeof(struct hci_ev_le_remote_conn_param_req)),
7097 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7098 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7099 hci_le_enh_conn_complete_evt,
7100 sizeof(struct hci_ev_le_enh_conn_complete)),
7101 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7102 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7103 sizeof(struct hci_ev_le_direct_adv_report),
7104 HCI_MAX_EVENT_SIZE),
7105 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7106 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7107 sizeof(struct hci_ev_le_phy_update_complete)),
7108 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7109 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7110 sizeof(struct hci_ev_le_ext_adv_report),
7111 HCI_MAX_EVENT_SIZE),
7112 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7113 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7114 hci_le_pa_sync_estabilished_evt,
7115 sizeof(struct hci_ev_le_pa_sync_established)),
7116 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7117 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7118 sizeof(struct hci_evt_le_ext_adv_set_term)),
7119 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7120 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7121 sizeof(struct hci_evt_le_cis_established)),
7122 /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7123 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7124 sizeof(struct hci_evt_le_cis_req)),
7125 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7126 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7127 hci_le_create_big_complete_evt,
7128 sizeof(struct hci_evt_le_create_big_complete),
7129 HCI_MAX_EVENT_SIZE),
7130 /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
7131 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7132 hci_le_big_sync_established_evt,
7133 sizeof(struct hci_evt_le_big_sync_estabilished),
7134 HCI_MAX_EVENT_SIZE),
7135 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7136 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7137 hci_le_big_info_adv_report_evt,
7138 sizeof(struct hci_evt_le_big_info_adv_report),
7139 HCI_MAX_EVENT_SIZE),
/* LE Meta event (0x3e) dispatcher.
 *
 * Looks the subevent up in hci_le_ev_table, checks the payload length
 * against the entry's bounds and invokes the registered handler.  If a
 * pending command (LE OGF) was waiting for this subevent, it is also
 * completed via hci_req_cmd_complete().
 *
 * NOTE(review): early-return statements and some closing braces are
 * elided in this excerpt.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
			    struct sk_buff *skb, u16 *opcode, u8 *status,
			    hci_req_complete_t *req_complete,
			    hci_req_complete_skb_t *req_complete_skb)
	struct hci_ev_le_meta *ev = data;
	const struct hci_le_ev *subev;

	bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);

	/* Only match event if command OGF is for LE (OGF 0x08) */
	if (hdev->sent_cmd &&
	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
	    hci_skb_event(hdev->sent_cmd) == ev->subevent) {
		*opcode = hci_skb_opcode(hdev->sent_cmd);
		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,

	subev = &hci_le_ev_table[ev->subevent];

	/* Reject payloads shorter than the handler's minimum length */
	if (skb->len < subev->min_len) {
		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
			   ev->subevent, skb->len, subev->min_len);

	/* Just warn if the length is over max_len size; it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide if that is acceptable.
	 */
	if (skb->len > subev->max_len)
		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
			    ev->subevent, skb->len, subev->max_len);

	/* Pull the fixed-size header so the handler sees only the payload */
	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);

	subev->func(hdev, data, skb);
/* Check whether @skb is the Command Complete (or Command Status) event
 * that finishes the request for @opcode, expecting event type @event.
 *
 * Pulls the event header and Command Complete parameters off @skb as a
 * side effect so that the caller is left with just the command's return
 * parameters.
 *
 * NOTE(review): the return statements and some closing braces are
 * elided in this excerpt.
 */
static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
				 u8 event, struct sk_buff *skb)
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;

	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));

	/* The stored event must be the one the request was waiting for */
	if (hdr->evt != event)

	/* Check if request ended in Command Status - no way to retrieve
	 * any extra parameters in this case.
	 */
	if (hdr->evt == HCI_EV_CMD_STATUS)

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",

	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));

	/* Make sure the Command Complete belongs to the command we sent */
	if (opcode != __le16_to_cpu(ev->opcode)) {
		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
		       __le16_to_cpu(ev->opcode));
7229 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7230 struct sk_buff *skb)
7232 struct hci_ev_le_advertising_info *adv;
7233 struct hci_ev_le_direct_adv_info *direct_adv;
7234 struct hci_ev_le_ext_adv_info *ext_adv;
7235 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7236 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7240 /* If we are currently suspended and this is the first BT event seen,
7241 * save the wake reason associated with the event.
7243 if (!hdev->suspended || hdev->wake_reason)
7246 /* Default to remote wake. Values for wake_reason are documented in the
7247 * Bluez mgmt api docs.
7249 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7251 /* Once configured for remote wakeup, we should only wake up for
7252 * reconnections. It's useful to see which device is waking us up so
7253 * keep track of the bdaddr of the connection event that woke us up.
7255 if (event == HCI_EV_CONN_REQUEST) {
7256 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7257 hdev->wake_addr_type = BDADDR_BREDR;
7258 } else if (event == HCI_EV_CONN_COMPLETE) {
7259 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7260 hdev->wake_addr_type = BDADDR_BREDR;
7261 } else if (event == HCI_EV_LE_META) {
7262 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7263 u8 subevent = le_ev->subevent;
7264 u8 *ptr = &skb->data[sizeof(*le_ev)];
7265 u8 num_reports = *ptr;
7267 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7268 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7269 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7271 adv = (void *)(ptr + 1);
7272 direct_adv = (void *)(ptr + 1);
7273 ext_adv = (void *)(ptr + 1);
7276 case HCI_EV_LE_ADVERTISING_REPORT:
7277 bacpy(&hdev->wake_addr, &adv->bdaddr);
7278 hdev->wake_addr_type = adv->bdaddr_type;
7280 case HCI_EV_LE_DIRECT_ADV_REPORT:
7281 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7282 hdev->wake_addr_type = direct_adv->bdaddr_type;
7284 case HCI_EV_LE_EXT_ADV_REPORT:
7285 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7286 hdev->wake_addr_type = ext_adv->bdaddr_type;
7291 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7295 hci_dev_unlock(hdev);
/* Build one hci_ev_table entry for a variable-length event: _func
 * handles event _op whose payload must be at least _min_len bytes and
 * is expected to be at most _max_len.
 * NOTE(review): the "[_op] = {" and ".func = _func," lines of this
 * macro are elided in this excerpt.
 */
#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
.min_len = _min_len, \
.max_len = _max_len, \

/* Fixed-length event: min and max payload length coincide */
#define HCI_EV(_op, _func, _len) \
	HCI_EV_VL(_op, _func, _len, _len)

/* Event whose entire payload is a single status byte */
#define HCI_EV_STATUS(_op, _func) \
	HCI_EV(_op, _func, sizeof(struct hci_ev_status))

/* Request-aware entry: _func additionally receives the opcode/status
 * outputs used to complete a pending command request.
 */
#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
.func_req = _func, \
.min_len = _min_len, \
.max_len = _max_len, \

/* Fixed-length request-aware event */
#define HCI_EV_REQ(_op, _func, _len) \
	HCI_EV_REQ_VL(_op, _func, _len, _len)
/* Entries in this table shall have their position according to the event
 * opcode they handle, so the use of the macros above is recommended since
 * each one initializes at its proper index using Designated Initializers;
 * that way events without a callback function need no entry at all.
 */
static const struct hci_ev {
	/* Plain handler: gets the length-checked event payload */
	void (*func)(struct hci_dev *hdev, void *data,
		     struct sk_buff *skb);
	/* Request-aware handler: may also complete a pending command */
	void (*func_req)(struct hci_dev *hdev, void *data,
			 struct sk_buff *skb, u16 *opcode, u8 *status,
			 hci_req_complete_t *req_complete,
			 hci_req_complete_skb_t *req_complete_skb);
} hci_ev_table[U8_MAX + 1] = {
	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x03 = HCI_EV_CONN_COMPLETE] */
	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
	       sizeof(struct hci_ev_conn_complete)),
	/* [0x04 = HCI_EV_CONN_REQUEST] */
	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
	       sizeof(struct hci_ev_conn_request)),
	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
	       sizeof(struct hci_ev_disconn_complete)),
	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
	       sizeof(struct hci_ev_auth_complete)),
	/* [0x07 = HCI_EV_REMOTE_NAME] */
	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
	       sizeof(struct hci_ev_remote_name)),
	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
	       sizeof(struct hci_ev_encrypt_change)),
	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
	       hci_change_link_key_complete_evt,
	       sizeof(struct hci_ev_change_link_key_complete)),
	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
	       sizeof(struct hci_ev_remote_features)),
	/* [0x0e = HCI_EV_CMD_COMPLETE] */
	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
	/* [0x0f = HCI_EV_CMD_STATUS] */
	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
		   sizeof(struct hci_ev_cmd_status)),
	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
	       sizeof(struct hci_ev_hardware_error)),
	/* [0x12 = HCI_EV_ROLE_CHANGE] */
	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
	       sizeof(struct hci_ev_role_change)),
	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
	/* [0x14 = HCI_EV_MODE_CHANGE] */
	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
	       sizeof(struct hci_ev_mode_change)),
	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
	       sizeof(struct hci_ev_pin_code_req)),
	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
	       sizeof(struct hci_ev_link_key_req)),
	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
	       sizeof(struct hci_ev_link_key_notify)),
	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
	       sizeof(struct hci_ev_clock_offset)),
	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
	       sizeof(struct hci_ev_pkt_type_change)),
	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
	       sizeof(struct hci_ev_pscan_rep_mode)),
	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
		  hci_inquiry_result_with_rssi_evt,
		  sizeof(struct hci_ev_inquiry_result_rssi),
		  HCI_MAX_EVENT_SIZE),
	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
	       sizeof(struct hci_ev_remote_ext_features)),
	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
	       sizeof(struct hci_ev_sync_conn_complete)),
	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
		  hci_extended_inquiry_result_evt,
		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
	       sizeof(struct hci_ev_key_refresh_complete)),
	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
	       sizeof(struct hci_ev_io_capa_request)),
	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
	       sizeof(struct hci_ev_io_capa_reply)),
	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
	       sizeof(struct hci_ev_user_confirm_req)),
	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
	       sizeof(struct hci_ev_user_passkey_req)),
	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
	       sizeof(struct hci_ev_remote_oob_data_request)),
	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
	       sizeof(struct hci_ev_simple_pair_complete)),
	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
	       sizeof(struct hci_ev_user_passkey_notify)),
	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
	       sizeof(struct hci_ev_keypress_notify)),
	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
	       sizeof(struct hci_ev_remote_host_features)),
	/* [0x3e = HCI_EV_LE_META] */
	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
#if IS_ENABLED(CONFIG_BT_HS)
	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
	       sizeof(struct hci_ev_phy_link_complete)),
	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
	       sizeof(struct hci_ev_channel_selected)),
	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
	       hci_disconn_loglink_complete_evt,
	       sizeof(struct hci_ev_disconn_logical_link_complete)),
	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
	       sizeof(struct hci_ev_logical_link_complete)),
	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
	       hci_disconn_phylink_complete_evt,
	       sizeof(struct hci_ev_disconn_phy_link_complete)),
	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
	       sizeof(struct hci_ev_num_comp_blocks)),
	/* [0xff = HCI_EV_VENDOR] */
	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
/* Dispatch one HCI event to its hci_ev_table handler.
 *
 * Checks the payload length against the table entry's min/max bounds,
 * pulls the fixed-size header and invokes either the request-aware
 * handler (func_req, which may complete a pending command) or the plain
 * handler (func).
 *
 * NOTE(review): the early-return statements, the data declaration and
 * the if/else around func_req/func are elided in this excerpt.
 */
static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
			   u16 *opcode, u8 *status,
			   hci_req_complete_t *req_complete,
			   hci_req_complete_skb_t *req_complete_skb)
	const struct hci_ev *ev = &hci_ev_table[event];

	/* Reject payloads shorter than the handler's minimum length */
	if (skb->len < ev->min_len) {
		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
			   event, skb->len, ev->min_len);

	/* Just warn if the length is over max_len size; it may still be
	 * possible to partially parse the event, so leave it to the callback
	 * to decide if that is acceptable.
	 */
	if (skb->len > ev->max_len)
		bt_dev_warn_ratelimited(hdev,
					"unexpected event 0x%2.2x length: %u > %u",
					event, skb->len, ev->max_len);

	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);

		/* Request-aware path: also gets the completion outputs */
		ev->func_req(hdev, data, skb, opcode, status, req_complete,

		ev->func(hdev, data, skb);
7518 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7520 struct hci_event_hdr *hdr = (void *) skb->data;
7521 hci_req_complete_t req_complete = NULL;
7522 hci_req_complete_skb_t req_complete_skb = NULL;
7523 struct sk_buff *orig_skb = NULL;
7524 u8 status = 0, event, req_evt = 0;
7525 u16 opcode = HCI_OP_NOP;
7527 if (skb->len < sizeof(*hdr)) {
7528 bt_dev_err(hdev, "Malformed HCI Event");
7532 kfree_skb(hdev->recv_event);
7533 hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7537 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7542 /* Only match event if command OGF is not for LE */
7543 if (hdev->sent_cmd &&
7544 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
7545 hci_skb_event(hdev->sent_cmd) == event) {
7546 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
7547 status, &req_complete, &req_complete_skb);
7551 /* If it looks like we might end up having to call
7552 * req_complete_skb, store a pristine copy of the skb since the
7553 * various handlers may modify the original one through
7554 * skb_pull() calls, etc.
7556 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7557 event == HCI_EV_CMD_COMPLETE)
7558 orig_skb = skb_clone(skb, GFP_KERNEL);
7560 skb_pull(skb, HCI_EVENT_HDR_SIZE);
7562 /* Store wake reason if we're suspended */
7563 hci_store_wake_reason(hdev, event, skb);
7565 bt_dev_dbg(hdev, "event 0x%2.2x", event);
7567 hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7571 req_complete(hdev, status, opcode);
7572 } else if (req_complete_skb) {
7573 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7574 kfree_skb(orig_skb);
7577 req_complete_skb(hdev, status, opcode, orig_skb);
7581 kfree_skb(orig_skb);
7583 hdev->stat.evt_rx++;