2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42 "\x00\x00\x00\x00\x00\x00\x00\x00"
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
46 /* Handle HCI Event packets */
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
53 data = skb_pull_data(skb, len);
55 bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
65 data = skb_pull_data(skb, len);
67 bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
77 data = skb_pull_data(skb, len);
79 bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
87 struct hci_ev_status *rp = data;
89 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
91 /* It is possible that we receive Inquiry Complete event right
92 * before we receive Inquiry Cancel Command Complete event, in
93 * which case the latter event should have status of Command
94 * Disallowed (0x0c). This should not be treated as error, since
95 * we actually achieve what Inquiry Cancel wants to achieve,
96 * which is to end the last Inquiry session.
98 if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
99 bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
106 clear_bit(HCI_INQUIRY, &hdev->flags);
107 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
108 wake_up_bit(&hdev->flags, HCI_INQUIRY);
111 /* Set discovery state to stopped if we're not doing LE active
114 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
115 hdev->le_scan_type != LE_SCAN_ACTIVE)
116 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
117 hci_dev_unlock(hdev);
119 hci_conn_check_pending(hdev);
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
127 struct hci_ev_status *rp = data;
129 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
134 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
142 struct hci_ev_status *rp = data;
144 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
149 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
151 hci_conn_check_pending(hdev);
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
159 struct hci_ev_status *rp = data;
161 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
169 struct hci_rp_role_discovery *rp = data;
170 struct hci_conn *conn;
172 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
179 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
181 conn->role = rp->role;
183 hci_dev_unlock(hdev);
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
191 struct hci_rp_read_link_policy *rp = data;
192 struct hci_conn *conn;
194 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
201 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
203 conn->link_policy = __le16_to_cpu(rp->policy);
205 hci_dev_unlock(hdev);
210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
213 struct hci_rp_write_link_policy *rp = data;
214 struct hci_conn *conn;
217 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
228 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
230 conn->link_policy = get_unaligned_le16(sent + 2);
232 hci_dev_unlock(hdev);
237 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
240 struct hci_rp_read_def_link_policy *rp = data;
242 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
247 hdev->link_policy = __le16_to_cpu(rp->policy);
252 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
255 struct hci_ev_status *rp = data;
258 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
263 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
267 hdev->link_policy = get_unaligned_le16(sent);
272 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
274 struct hci_ev_status *rp = data;
276 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
278 clear_bit(HCI_RESET, &hdev->flags);
283 /* Reset all non-persistent flags */
284 hci_dev_clear_volatile_flags(hdev);
286 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
288 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
289 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
291 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
292 hdev->adv_data_len = 0;
294 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
295 hdev->scan_rsp_data_len = 0;
297 hdev->le_scan_type = LE_SCAN_PASSIVE;
299 hdev->ssp_debug_mode = 0;
301 hci_bdaddr_list_clear(&hdev->le_accept_list);
302 hci_bdaddr_list_clear(&hdev->le_resolv_list);
307 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
310 struct hci_rp_read_stored_link_key *rp = data;
311 struct hci_cp_read_stored_link_key *sent;
313 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
315 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
319 if (!rp->status && sent->read_all == 0x01) {
320 hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
321 hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
327 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
330 struct hci_rp_delete_stored_link_key *rp = data;
333 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
338 num_keys = le16_to_cpu(rp->num_keys);
340 if (num_keys <= hdev->stored_num_keys)
341 hdev->stored_num_keys -= num_keys;
343 hdev->stored_num_keys = 0;
348 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
351 struct hci_ev_status *rp = data;
354 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
356 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
362 if (hci_dev_test_flag(hdev, HCI_MGMT))
363 mgmt_set_local_name_complete(hdev, sent, rp->status);
364 else if (!rp->status)
365 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
367 hci_dev_unlock(hdev);
372 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
375 struct hci_rp_read_local_name *rp = data;
377 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
382 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
383 hci_dev_test_flag(hdev, HCI_CONFIG))
384 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
389 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
392 struct hci_ev_status *rp = data;
395 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
397 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
404 __u8 param = *((__u8 *) sent);
406 if (param == AUTH_ENABLED)
407 set_bit(HCI_AUTH, &hdev->flags);
409 clear_bit(HCI_AUTH, &hdev->flags);
412 if (hci_dev_test_flag(hdev, HCI_MGMT))
413 mgmt_auth_enable_complete(hdev, rp->status);
415 hci_dev_unlock(hdev);
420 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
423 struct hci_ev_status *rp = data;
427 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
432 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
436 param = *((__u8 *) sent);
439 set_bit(HCI_ENCRYPT, &hdev->flags);
441 clear_bit(HCI_ENCRYPT, &hdev->flags);
446 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
449 struct hci_ev_status *rp = data;
453 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
455 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
459 param = *((__u8 *) sent);
464 hdev->discov_timeout = 0;
468 if (param & SCAN_INQUIRY)
469 set_bit(HCI_ISCAN, &hdev->flags);
471 clear_bit(HCI_ISCAN, &hdev->flags);
473 if (param & SCAN_PAGE)
474 set_bit(HCI_PSCAN, &hdev->flags);
476 clear_bit(HCI_PSCAN, &hdev->flags);
479 hci_dev_unlock(hdev);
484 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
487 struct hci_ev_status *rp = data;
488 struct hci_cp_set_event_filter *cp;
491 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
496 sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
500 cp = (struct hci_cp_set_event_filter *)sent;
502 if (cp->flt_type == HCI_FLT_CLEAR_ALL)
503 hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
505 hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
510 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
513 struct hci_rp_read_class_of_dev *rp = data;
515 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
520 memcpy(hdev->dev_class, rp->dev_class, 3);
522 bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
523 hdev->dev_class[1], hdev->dev_class[0]);
528 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
531 struct hci_ev_status *rp = data;
534 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
536 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
543 memcpy(hdev->dev_class, sent, 3);
545 if (hci_dev_test_flag(hdev, HCI_MGMT))
546 mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
548 hci_dev_unlock(hdev);
553 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
556 struct hci_rp_read_voice_setting *rp = data;
559 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
564 setting = __le16_to_cpu(rp->voice_setting);
566 if (hdev->voice_setting == setting)
569 hdev->voice_setting = setting;
571 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
574 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
579 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
582 struct hci_ev_status *rp = data;
586 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
591 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
595 setting = get_unaligned_le16(sent);
597 if (hdev->voice_setting == setting)
600 hdev->voice_setting = setting;
602 bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
605 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
610 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
613 struct hci_rp_read_num_supported_iac *rp = data;
615 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
620 hdev->num_iac = rp->num_iac;
622 bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
627 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
630 struct hci_ev_status *rp = data;
631 struct hci_cp_write_ssp_mode *sent;
633 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
635 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
643 hdev->features[1][0] |= LMP_HOST_SSP;
645 hdev->features[1][0] &= ~LMP_HOST_SSP;
650 hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
652 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
655 hci_dev_unlock(hdev);
660 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
663 struct hci_ev_status *rp = data;
664 struct hci_cp_write_sc_support *sent;
666 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
668 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
676 hdev->features[1][0] |= LMP_HOST_SC;
678 hdev->features[1][0] &= ~LMP_HOST_SC;
681 if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
683 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
685 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
688 hci_dev_unlock(hdev);
693 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
696 struct hci_rp_read_local_version *rp = data;
698 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
703 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
704 hci_dev_test_flag(hdev, HCI_CONFIG)) {
705 hdev->hci_ver = rp->hci_ver;
706 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
707 hdev->lmp_ver = rp->lmp_ver;
708 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
709 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
715 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
718 struct hci_rp_read_enc_key_size *rp = data;
719 struct hci_conn *conn;
721 u8 status = rp->status;
723 bt_dev_dbg(hdev, "status 0x%2.2x", status);
725 handle = le16_to_cpu(rp->handle);
729 conn = hci_conn_hash_lookup_handle(hdev, handle);
735 /* While unexpected, the read_enc_key_size command may fail. The most
736 * secure approach is to then assume the key size is 0 to force a
740 bt_dev_err(hdev, "failed to read key size for handle %u",
742 conn->enc_key_size = 0;
744 conn->enc_key_size = rp->key_size;
748 hci_encrypt_cfm(conn, 0);
751 hci_dev_unlock(hdev);
756 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
759 struct hci_rp_read_local_commands *rp = data;
761 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
766 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
767 hci_dev_test_flag(hdev, HCI_CONFIG))
768 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
773 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
776 struct hci_rp_read_auth_payload_to *rp = data;
777 struct hci_conn *conn;
779 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
786 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
788 conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
790 hci_dev_unlock(hdev);
795 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
798 struct hci_rp_write_auth_payload_to *rp = data;
799 struct hci_conn *conn;
802 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
807 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
813 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
815 conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
817 hci_dev_unlock(hdev);
822 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
825 struct hci_rp_read_local_features *rp = data;
827 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
832 memcpy(hdev->features, rp->features, 8);
834 /* Adjust default settings according to features
835 * supported by device. */
837 if (hdev->features[0][0] & LMP_3SLOT)
838 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
840 if (hdev->features[0][0] & LMP_5SLOT)
841 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
843 if (hdev->features[0][1] & LMP_HV2) {
844 hdev->pkt_type |= (HCI_HV2);
845 hdev->esco_type |= (ESCO_HV2);
848 if (hdev->features[0][1] & LMP_HV3) {
849 hdev->pkt_type |= (HCI_HV3);
850 hdev->esco_type |= (ESCO_HV3);
853 if (lmp_esco_capable(hdev))
854 hdev->esco_type |= (ESCO_EV3);
856 if (hdev->features[0][4] & LMP_EV4)
857 hdev->esco_type |= (ESCO_EV4);
859 if (hdev->features[0][4] & LMP_EV5)
860 hdev->esco_type |= (ESCO_EV5);
862 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
863 hdev->esco_type |= (ESCO_2EV3);
865 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
866 hdev->esco_type |= (ESCO_3EV3);
868 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
869 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
874 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
877 struct hci_rp_read_local_ext_features *rp = data;
879 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
884 if (hdev->max_page < rp->max_page)
885 hdev->max_page = rp->max_page;
887 if (rp->page < HCI_MAX_PAGES)
888 memcpy(hdev->features[rp->page], rp->features, 8);
893 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
896 struct hci_rp_read_flow_control_mode *rp = data;
898 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
903 hdev->flow_ctl_mode = rp->mode;
908 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
911 struct hci_rp_read_buffer_size *rp = data;
913 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
918 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
919 hdev->sco_mtu = rp->sco_mtu;
920 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
921 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
923 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
928 hdev->acl_cnt = hdev->acl_pkts;
929 hdev->sco_cnt = hdev->sco_pkts;
931 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
932 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
937 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
940 struct hci_rp_read_bd_addr *rp = data;
942 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
947 if (test_bit(HCI_INIT, &hdev->flags))
948 bacpy(&hdev->bdaddr, &rp->bdaddr);
950 if (hci_dev_test_flag(hdev, HCI_SETUP))
951 bacpy(&hdev->setup_addr, &rp->bdaddr);
956 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
959 struct hci_rp_read_local_pairing_opts *rp = data;
961 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
966 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
967 hci_dev_test_flag(hdev, HCI_CONFIG)) {
968 hdev->pairing_opts = rp->pairing_opts;
969 hdev->max_enc_key_size = rp->max_key_size;
975 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
978 struct hci_rp_read_page_scan_activity *rp = data;
980 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
985 if (test_bit(HCI_INIT, &hdev->flags)) {
986 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
987 hdev->page_scan_window = __le16_to_cpu(rp->window);
993 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
996 struct hci_ev_status *rp = data;
997 struct hci_cp_write_page_scan_activity *sent;
999 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1004 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1008 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1009 hdev->page_scan_window = __le16_to_cpu(sent->window);
1014 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1015 struct sk_buff *skb)
1017 struct hci_rp_read_page_scan_type *rp = data;
1019 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1024 if (test_bit(HCI_INIT, &hdev->flags))
1025 hdev->page_scan_type = rp->type;
1030 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1031 struct sk_buff *skb)
1033 struct hci_ev_status *rp = data;
1036 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1041 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1043 hdev->page_scan_type = *type;
1048 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1049 struct sk_buff *skb)
1051 struct hci_rp_read_data_block_size *rp = data;
1053 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1058 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1059 hdev->block_len = __le16_to_cpu(rp->block_len);
1060 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1062 hdev->block_cnt = hdev->num_blocks;
1064 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1065 hdev->block_cnt, hdev->block_len);
1070 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1071 struct sk_buff *skb)
1073 struct hci_rp_read_clock *rp = data;
1074 struct hci_cp_read_clock *cp;
1075 struct hci_conn *conn;
1077 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1084 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1088 if (cp->which == 0x00) {
1089 hdev->clock = le32_to_cpu(rp->clock);
1093 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1095 conn->clock = le32_to_cpu(rp->clock);
1096 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1100 hci_dev_unlock(hdev);
1104 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1105 struct sk_buff *skb)
1107 struct hci_rp_read_local_amp_info *rp = data;
1109 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1114 hdev->amp_status = rp->amp_status;
1115 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1116 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1117 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1118 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1119 hdev->amp_type = rp->amp_type;
1120 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1121 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1122 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1123 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
1128 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1129 struct sk_buff *skb)
1131 struct hci_rp_read_inq_rsp_tx_power *rp = data;
1133 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1138 hdev->inq_tx_power = rp->tx_power;
1143 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1144 struct sk_buff *skb)
1146 struct hci_rp_read_def_err_data_reporting *rp = data;
1148 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1153 hdev->err_data_reporting = rp->err_data_reporting;
1158 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1159 struct sk_buff *skb)
1161 struct hci_ev_status *rp = data;
1162 struct hci_cp_write_def_err_data_reporting *cp;
1164 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1169 cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1173 hdev->err_data_reporting = cp->err_data_reporting;
1178 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1179 struct sk_buff *skb)
1181 struct hci_rp_pin_code_reply *rp = data;
1182 struct hci_cp_pin_code_reply *cp;
1183 struct hci_conn *conn;
1185 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1189 if (hci_dev_test_flag(hdev, HCI_MGMT))
1190 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1195 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1199 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1201 conn->pin_length = cp->pin_len;
1204 hci_dev_unlock(hdev);
1208 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1209 struct sk_buff *skb)
1211 struct hci_rp_pin_code_neg_reply *rp = data;
1213 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1217 if (hci_dev_test_flag(hdev, HCI_MGMT))
1218 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1221 hci_dev_unlock(hdev);
1226 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1227 struct sk_buff *skb)
1229 struct hci_rp_le_read_buffer_size *rp = data;
1231 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1236 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1237 hdev->le_pkts = rp->le_max_pkt;
1239 hdev->le_cnt = hdev->le_pkts;
1241 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1246 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1247 struct sk_buff *skb)
1249 struct hci_rp_le_read_local_features *rp = data;
1251 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1256 memcpy(hdev->le_features, rp->features, 8);
1261 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1262 struct sk_buff *skb)
1264 struct hci_rp_le_read_adv_tx_power *rp = data;
1266 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1271 hdev->adv_tx_power = rp->tx_power;
1276 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1277 struct sk_buff *skb)
1279 struct hci_rp_user_confirm_reply *rp = data;
1281 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1285 if (hci_dev_test_flag(hdev, HCI_MGMT))
1286 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1289 hci_dev_unlock(hdev);
1294 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1295 struct sk_buff *skb)
1297 struct hci_rp_user_confirm_reply *rp = data;
1299 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1303 if (hci_dev_test_flag(hdev, HCI_MGMT))
1304 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1305 ACL_LINK, 0, rp->status);
1307 hci_dev_unlock(hdev);
1312 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1313 struct sk_buff *skb)
1315 struct hci_rp_user_confirm_reply *rp = data;
1317 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1321 if (hci_dev_test_flag(hdev, HCI_MGMT))
1322 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1325 hci_dev_unlock(hdev);
1330 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1331 struct sk_buff *skb)
1333 struct hci_rp_user_confirm_reply *rp = data;
1335 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1339 if (hci_dev_test_flag(hdev, HCI_MGMT))
1340 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1341 ACL_LINK, 0, rp->status);
1343 hci_dev_unlock(hdev);
1348 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1349 struct sk_buff *skb)
1351 struct hci_rp_read_local_oob_data *rp = data;
1353 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1358 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1359 struct sk_buff *skb)
1361 struct hci_rp_read_local_oob_ext_data *rp = data;
1363 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1368 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1369 struct sk_buff *skb)
1371 struct hci_ev_status *rp = data;
1374 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1379 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1385 bacpy(&hdev->random_addr, sent);
1387 if (!bacmp(&hdev->rpa, sent)) {
1388 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1389 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1390 secs_to_jiffies(hdev->rpa_timeout));
1393 hci_dev_unlock(hdev);
1398 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1399 struct sk_buff *skb)
1401 struct hci_ev_status *rp = data;
1402 struct hci_cp_le_set_default_phy *cp;
1404 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1409 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1415 hdev->le_tx_def_phys = cp->tx_phys;
1416 hdev->le_rx_def_phys = cp->rx_phys;
1418 hci_dev_unlock(hdev);
1423 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1424 struct sk_buff *skb)
1426 struct hci_ev_status *rp = data;
1427 struct hci_cp_le_set_adv_set_rand_addr *cp;
1428 struct adv_info *adv;
1430 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1435 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1436 /* Update only in case the adv instance since handle 0x00 shall be using
1437 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1438 * non-extended adverting.
1440 if (!cp || !cp->handle)
1445 adv = hci_find_adv_instance(hdev, cp->handle);
1447 bacpy(&adv->random_addr, &cp->bdaddr);
1448 if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1449 adv->rpa_expired = false;
1450 queue_delayed_work(hdev->workqueue,
1451 &adv->rpa_expired_cb,
1452 secs_to_jiffies(hdev->rpa_timeout));
1456 hci_dev_unlock(hdev);
1461 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1462 struct sk_buff *skb)
1464 struct hci_ev_status *rp = data;
1468 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1473 instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1479 err = hci_remove_adv_instance(hdev, *instance);
1481 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1484 hci_dev_unlock(hdev);
1489 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1490 struct sk_buff *skb)
1492 struct hci_ev_status *rp = data;
1493 struct adv_info *adv, *n;
1496 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1501 if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1506 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1507 u8 instance = adv->instance;
1509 err = hci_remove_adv_instance(hdev, instance);
1511 mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1515 hci_dev_unlock(hdev);
1520 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1521 struct sk_buff *skb)
1523 struct hci_rp_le_read_transmit_power *rp = data;
1525 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1530 hdev->min_le_tx_power = rp->min_le_tx_power;
1531 hdev->max_le_tx_power = rp->max_le_tx_power;
1536 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1537 struct sk_buff *skb)
1539 struct hci_ev_status *rp = data;
1540 struct hci_cp_le_set_privacy_mode *cp;
1541 struct hci_conn_params *params;
1543 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1548 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1554 params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1556 params->privacy_mode = cp->mode;
1558 hci_dev_unlock(hdev);
1563 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1564 struct sk_buff *skb)
1566 struct hci_ev_status *rp = data;
1569 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1574 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1580 /* If we're doing connection initiation as peripheral. Set a
1581 * timeout in case something goes wrong.
1584 struct hci_conn *conn;
1586 hci_dev_set_flag(hdev, HCI_LE_ADV);
1588 conn = hci_lookup_le_connect(hdev);
1590 queue_delayed_work(hdev->workqueue,
1591 &conn->le_conn_timeout,
1592 conn->conn_timeout);
1594 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1597 hci_dev_unlock(hdev);
1602 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1603 struct sk_buff *skb)
1605 struct hci_cp_le_set_ext_adv_enable *cp;
1606 struct hci_cp_ext_adv_set *set;
1607 struct adv_info *adv = NULL, *n;
1608 struct hci_ev_status *rp = data;
1610 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1615 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
1619 set = (void *)cp->data;
1623 if (cp->num_of_sets)
1624 adv = hci_find_adv_instance(hdev, set->handle);
1627 struct hci_conn *conn;
1629 hci_dev_set_flag(hdev, HCI_LE_ADV);
1632 adv->enabled = true;
1634 conn = hci_lookup_le_connect(hdev);
1636 queue_delayed_work(hdev->workqueue,
1637 &conn->le_conn_timeout,
1638 conn->conn_timeout);
1640 if (cp->num_of_sets) {
1642 adv->enabled = false;
1644 /* If just one instance was disabled check if there are
1645 * any other instance enabled before clearing HCI_LE_ADV
1647 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1653 /* All instances shall be considered disabled */
1654 list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1656 adv->enabled = false;
1659 hci_dev_clear_flag(hdev, HCI_LE_ADV);
1663 hci_dev_unlock(hdev);
1667 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1668 struct sk_buff *skb)
1670 struct hci_cp_le_set_scan_param *cp;
1671 struct hci_ev_status *rp = data;
1673 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1678 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1684 hdev->le_scan_type = cp->type;
1686 hci_dev_unlock(hdev);
1691 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1692 struct sk_buff *skb)
1694 struct hci_cp_le_set_ext_scan_params *cp;
1695 struct hci_ev_status *rp = data;
1696 struct hci_cp_le_scan_phy_params *phy_param;
1698 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1703 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1707 phy_param = (void *)cp->data;
1711 hdev->le_scan_type = phy_param->type;
1713 hci_dev_unlock(hdev);
1718 static bool has_pending_adv_report(struct hci_dev *hdev)
1720 struct discovery_state *d = &hdev->discovery;
1722 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1725 static void clear_pending_adv_report(struct hci_dev *hdev)
1727 struct discovery_state *d = &hdev->discovery;
1729 bacpy(&d->last_adv_addr, BDADDR_ANY);
1730 d->last_adv_data_len = 0;
1733 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1734 u8 bdaddr_type, s8 rssi, u32 flags,
1737 struct discovery_state *d = &hdev->discovery;
1739 if (len > HCI_MAX_AD_LENGTH)
1742 bacpy(&d->last_adv_addr, bdaddr);
1743 d->last_adv_addr_type = bdaddr_type;
1744 d->last_adv_rssi = rssi;
1745 d->last_adv_flags = flags;
1746 memcpy(d->last_adv_data, data, len);
1747 d->last_adv_data_len = len;
1750 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1755 case LE_SCAN_ENABLE:
1756 hci_dev_set_flag(hdev, HCI_LE_SCAN);
1757 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1758 clear_pending_adv_report(hdev);
1759 if (hci_dev_test_flag(hdev, HCI_MESH))
1760 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1763 case LE_SCAN_DISABLE:
1764 /* We do this here instead of when setting DISCOVERY_STOPPED
1765 * since the latter would potentially require waiting for
1766 * inquiry to stop too.
1768 if (has_pending_adv_report(hdev)) {
1769 struct discovery_state *d = &hdev->discovery;
1771 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1772 d->last_adv_addr_type, NULL,
1773 d->last_adv_rssi, d->last_adv_flags,
1775 d->last_adv_data_len, NULL, 0, 0);
1778 /* Cancel this timer so that we don't try to disable scanning
1779 * when it's already disabled.
1781 cancel_delayed_work(&hdev->le_scan_disable);
1783 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1785 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1786 * interrupted scanning due to a connect request. Mark
1787 * therefore discovery as stopped.
1789 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
1790 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1791 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1792 hdev->discovery.state == DISCOVERY_FINDING)
1793 queue_work(hdev->workqueue, &hdev->reenable_adv_work);
1798 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1803 hci_dev_unlock(hdev);
1806 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1807 struct sk_buff *skb)
1809 struct hci_cp_le_set_scan_enable *cp;
1810 struct hci_ev_status *rp = data;
1812 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1817 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1821 le_set_scan_enable_complete(hdev, cp->enable);
1826 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1827 struct sk_buff *skb)
1829 struct hci_cp_le_set_ext_scan_enable *cp;
1830 struct hci_ev_status *rp = data;
1832 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1837 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1841 le_set_scan_enable_complete(hdev, cp->enable);
1846 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1847 struct sk_buff *skb)
1849 struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1851 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1857 hdev->le_num_of_adv_sets = rp->num_of_sets;
1862 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1863 struct sk_buff *skb)
1865 struct hci_rp_le_read_accept_list_size *rp = data;
1867 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1872 hdev->le_accept_list_size = rp->size;
1877 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1878 struct sk_buff *skb)
1880 struct hci_ev_status *rp = data;
1882 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1888 hci_bdaddr_list_clear(&hdev->le_accept_list);
1889 hci_dev_unlock(hdev);
1894 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1895 struct sk_buff *skb)
1897 struct hci_cp_le_add_to_accept_list *sent;
1898 struct hci_ev_status *rp = data;
1900 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1905 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1910 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1912 hci_dev_unlock(hdev);
1917 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1918 struct sk_buff *skb)
1920 struct hci_cp_le_del_from_accept_list *sent;
1921 struct hci_ev_status *rp = data;
1923 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1928 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1933 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1935 hci_dev_unlock(hdev);
1940 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1941 struct sk_buff *skb)
1943 struct hci_rp_le_read_supported_states *rp = data;
1945 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1950 memcpy(hdev->le_states, rp->le_states, 8);
1955 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1956 struct sk_buff *skb)
1958 struct hci_rp_le_read_def_data_len *rp = data;
1960 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1965 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1966 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1971 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
1972 struct sk_buff *skb)
1974 struct hci_cp_le_write_def_data_len *sent;
1975 struct hci_ev_status *rp = data;
1977 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1982 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1986 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1987 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1992 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
1993 struct sk_buff *skb)
1995 struct hci_cp_le_add_to_resolv_list *sent;
1996 struct hci_ev_status *rp = data;
1998 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2003 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2008 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2009 sent->bdaddr_type, sent->peer_irk,
2011 hci_dev_unlock(hdev);
2016 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2017 struct sk_buff *skb)
2019 struct hci_cp_le_del_from_resolv_list *sent;
2020 struct hci_ev_status *rp = data;
2022 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2027 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2032 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2034 hci_dev_unlock(hdev);
2039 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2040 struct sk_buff *skb)
2042 struct hci_ev_status *rp = data;
2044 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2050 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2051 hci_dev_unlock(hdev);
2056 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2057 struct sk_buff *skb)
2059 struct hci_rp_le_read_resolv_list_size *rp = data;
2061 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2066 hdev->le_resolv_list_size = rp->size;
2071 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2072 struct sk_buff *skb)
2074 struct hci_ev_status *rp = data;
2077 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2082 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
2089 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2091 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2093 hci_dev_unlock(hdev);
2098 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2099 struct sk_buff *skb)
2101 struct hci_rp_le_read_max_data_len *rp = data;
2103 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2108 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2109 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2110 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2111 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
2116 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2117 struct sk_buff *skb)
2119 struct hci_cp_write_le_host_supported *sent;
2120 struct hci_ev_status *rp = data;
2122 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2127 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
2134 hdev->features[1][0] |= LMP_HOST_LE;
2135 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2137 hdev->features[1][0] &= ~LMP_HOST_LE;
2138 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2139 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2143 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2145 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2147 hci_dev_unlock(hdev);
2152 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2153 struct sk_buff *skb)
2155 struct hci_cp_le_set_adv_param *cp;
2156 struct hci_ev_status *rp = data;
2158 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2163 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2168 hdev->adv_addr_type = cp->own_address_type;
2169 hci_dev_unlock(hdev);
2174 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2175 struct sk_buff *skb)
2177 struct hci_rp_le_set_ext_adv_params *rp = data;
2178 struct hci_cp_le_set_ext_adv_params *cp;
2179 struct adv_info *adv_instance;
2181 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2186 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2191 hdev->adv_addr_type = cp->own_addr_type;
2193 /* Store in hdev for instance 0 */
2194 hdev->adv_tx_power = rp->tx_power;
2196 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2198 adv_instance->tx_power = rp->tx_power;
2200 /* Update adv data as tx power is known now */
2201 hci_update_adv_data(hdev, cp->handle);
2203 hci_dev_unlock(hdev);
2208 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2209 struct sk_buff *skb)
2211 struct hci_rp_read_rssi *rp = data;
2212 struct hci_conn *conn;
2214 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2221 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2223 conn->rssi = rp->rssi;
2225 hci_dev_unlock(hdev);
2230 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2231 struct sk_buff *skb)
2233 struct hci_cp_read_tx_power *sent;
2234 struct hci_rp_read_tx_power *rp = data;
2235 struct hci_conn *conn;
2237 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2242 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2248 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2252 switch (sent->type) {
2254 conn->tx_power = rp->tx_power;
2257 conn->max_tx_power = rp->tx_power;
2262 hci_dev_unlock(hdev);
2266 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2267 struct sk_buff *skb)
2269 struct hci_ev_status *rp = data;
2272 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2277 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2279 hdev->ssp_debug_mode = *mode;
2284 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2286 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2289 hci_conn_check_pending(hdev);
2293 set_bit(HCI_INQUIRY, &hdev->flags);
2296 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2298 struct hci_cp_create_conn *cp;
2299 struct hci_conn *conn;
2301 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2303 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2309 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2311 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2314 if (conn && conn->state == BT_CONNECT) {
2315 if (status != 0x0c || conn->attempt > 2) {
2316 conn->state = BT_CLOSED;
2317 hci_connect_cfm(conn, status);
2320 conn->state = BT_CONNECT2;
2324 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2327 bt_dev_err(hdev, "no memory for new connection");
2331 hci_dev_unlock(hdev);
2334 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2336 struct hci_cp_add_sco *cp;
2337 struct hci_conn *acl, *sco;
2340 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2345 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2349 handle = __le16_to_cpu(cp->handle);
2351 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2355 acl = hci_conn_hash_lookup_handle(hdev, handle);
2359 sco->state = BT_CLOSED;
2361 hci_connect_cfm(sco, status);
2366 hci_dev_unlock(hdev);
2369 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2371 struct hci_cp_auth_requested *cp;
2372 struct hci_conn *conn;
2374 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2379 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2385 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2387 if (conn->state == BT_CONFIG) {
2388 hci_connect_cfm(conn, status);
2389 hci_conn_drop(conn);
2393 hci_dev_unlock(hdev);
2396 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2398 struct hci_cp_set_conn_encrypt *cp;
2399 struct hci_conn *conn;
2401 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2406 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2412 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2414 if (conn->state == BT_CONFIG) {
2415 hci_connect_cfm(conn, status);
2416 hci_conn_drop(conn);
2420 hci_dev_unlock(hdev);
2423 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2424 struct hci_conn *conn)
2426 if (conn->state != BT_CONFIG || !conn->out)
2429 if (conn->pending_sec_level == BT_SECURITY_SDP)
2432 /* Only request authentication for SSP connections or non-SSP
2433 * devices with sec_level MEDIUM or HIGH or if MITM protection
2436 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2437 conn->pending_sec_level != BT_SECURITY_FIPS &&
2438 conn->pending_sec_level != BT_SECURITY_HIGH &&
2439 conn->pending_sec_level != BT_SECURITY_MEDIUM)
2445 static int hci_resolve_name(struct hci_dev *hdev,
2446 struct inquiry_entry *e)
2448 struct hci_cp_remote_name_req cp;
2450 memset(&cp, 0, sizeof(cp));
2452 bacpy(&cp.bdaddr, &e->data.bdaddr);
2453 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2454 cp.pscan_mode = e->data.pscan_mode;
2455 cp.clock_offset = e->data.clock_offset;
2457 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2460 static bool hci_resolve_next_name(struct hci_dev *hdev)
2462 struct discovery_state *discov = &hdev->discovery;
2463 struct inquiry_entry *e;
2465 if (list_empty(&discov->resolve))
2468 /* We should stop if we already spent too much time resolving names. */
2469 if (time_after(jiffies, discov->name_resolve_timeout)) {
2470 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2474 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2478 if (hci_resolve_name(hdev, e) == 0) {
2479 e->name_state = NAME_PENDING;
2486 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2487 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2489 struct discovery_state *discov = &hdev->discovery;
2490 struct inquiry_entry *e;
2492 /* Update the mgmt connected state if necessary. Be careful with
2493 * conn objects that exist but are not (yet) connected however.
2494 * Only those in BT_CONFIG or BT_CONNECTED states can be
2495 * considered connected.
2498 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2499 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2500 mgmt_device_connected(hdev, conn, name, name_len);
2502 if (discov->state == DISCOVERY_STOPPED)
2505 if (discov->state == DISCOVERY_STOPPING)
2506 goto discov_complete;
2508 if (discov->state != DISCOVERY_RESOLVING)
2511 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2512 /* If the device was not found in a list of found devices names of which
2513 * are pending. there is no need to continue resolving a next name as it
2514 * will be done upon receiving another Remote Name Request Complete
2521 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2522 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2525 if (hci_resolve_next_name(hdev))
2529 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2532 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2534 struct hci_cp_remote_name_req *cp;
2535 struct hci_conn *conn;
2537 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2539 /* If successful wait for the name req complete event before
2540 * checking for the need to do authentication */
2544 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2550 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2552 if (hci_dev_test_flag(hdev, HCI_MGMT))
2553 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2558 if (!hci_outgoing_auth_needed(hdev, conn))
2561 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2562 struct hci_cp_auth_requested auth_cp;
2564 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2566 auth_cp.handle = __cpu_to_le16(conn->handle);
2567 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2568 sizeof(auth_cp), &auth_cp);
2572 hci_dev_unlock(hdev);
2575 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2577 struct hci_cp_read_remote_features *cp;
2578 struct hci_conn *conn;
2580 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2585 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2591 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2593 if (conn->state == BT_CONFIG) {
2594 hci_connect_cfm(conn, status);
2595 hci_conn_drop(conn);
2599 hci_dev_unlock(hdev);
2602 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2604 struct hci_cp_read_remote_ext_features *cp;
2605 struct hci_conn *conn;
2607 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2612 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2618 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2620 if (conn->state == BT_CONFIG) {
2621 hci_connect_cfm(conn, status);
2622 hci_conn_drop(conn);
2626 hci_dev_unlock(hdev);
2629 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2631 struct hci_cp_setup_sync_conn *cp;
2632 struct hci_conn *acl, *sco;
2635 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2640 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2644 handle = __le16_to_cpu(cp->handle);
2646 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2650 acl = hci_conn_hash_lookup_handle(hdev, handle);
2654 sco->state = BT_CLOSED;
2656 hci_connect_cfm(sco, status);
2661 hci_dev_unlock(hdev);
2664 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2666 struct hci_cp_enhanced_setup_sync_conn *cp;
2667 struct hci_conn *acl, *sco;
2670 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2675 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2679 handle = __le16_to_cpu(cp->handle);
2681 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2685 acl = hci_conn_hash_lookup_handle(hdev, handle);
2689 sco->state = BT_CLOSED;
2691 hci_connect_cfm(sco, status);
2696 hci_dev_unlock(hdev);
2699 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2701 struct hci_cp_sniff_mode *cp;
2702 struct hci_conn *conn;
2704 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2709 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2715 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2717 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2719 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2720 hci_sco_setup(conn, status);
2723 hci_dev_unlock(hdev);
2726 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2728 struct hci_cp_exit_sniff_mode *cp;
2729 struct hci_conn *conn;
2731 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2736 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2742 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2744 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2746 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2747 hci_sco_setup(conn, status);
2750 hci_dev_unlock(hdev);
2753 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2755 struct hci_cp_disconnect *cp;
2756 struct hci_conn_params *params;
2757 struct hci_conn *conn;
2760 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2762 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
2763 * otherwise cleanup the connection immediately.
2765 if (!status && !hdev->suspended)
2768 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2774 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2779 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2780 conn->dst_type, status);
2782 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2783 hdev->cur_adv_instance = conn->adv_instance;
2784 hci_enable_advertising(hdev);
2790 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2792 if (conn->type == ACL_LINK) {
2793 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2794 hci_remove_link_key(hdev, &conn->dst);
2797 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2799 switch (params->auto_connect) {
2800 case HCI_AUTO_CONN_LINK_LOSS:
2801 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2805 case HCI_AUTO_CONN_DIRECT:
2806 case HCI_AUTO_CONN_ALWAYS:
2807 list_del_init(¶ms->action);
2808 list_add(¶ms->action, &hdev->pend_le_conns);
2816 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2817 cp->reason, mgmt_conn);
2819 hci_disconn_cfm(conn, cp->reason);
2822 /* If the disconnection failed for any reason, the upper layer
2823 * does not retry to disconnect in current implementation.
2824 * Hence, we need to do some basic cleanup here and re-enable
2825 * advertising if necessary.
2829 hci_dev_unlock(hdev);
2832 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
2834 /* When using controller based address resolution, then the new
2835 * address types 0x02 and 0x03 are used. These types need to be
2836 * converted back into either public address or random address type
2839 case ADDR_LE_DEV_PUBLIC_RESOLVED:
2842 return ADDR_LE_DEV_PUBLIC;
2843 case ADDR_LE_DEV_RANDOM_RESOLVED:
2846 return ADDR_LE_DEV_RANDOM;
2854 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
2855 u8 peer_addr_type, u8 own_address_type,
2858 struct hci_conn *conn;
2860 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
2865 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
2867 /* Store the initiator and responder address information which
2868 * is needed for SMP. These values will not change during the
2869 * lifetime of the connection.
2871 conn->init_addr_type = own_address_type;
2872 if (own_address_type == ADDR_LE_DEV_RANDOM)
2873 bacpy(&conn->init_addr, &hdev->random_addr);
2875 bacpy(&conn->init_addr, &hdev->bdaddr);
2877 conn->resp_addr_type = peer_addr_type;
2878 bacpy(&conn->resp_addr, peer_addr);
2881 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2883 struct hci_cp_le_create_conn *cp;
2885 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2887 /* All connection failure handling is taken care of by the
2888 * hci_conn_failed function which is triggered by the HCI
2889 * request completion callbacks used for connecting.
2894 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2900 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2901 cp->own_address_type, cp->filter_policy);
2903 hci_dev_unlock(hdev);
2906 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
2908 struct hci_cp_le_ext_create_conn *cp;
2910 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2912 /* All connection failure handling is taken care of by the
2913 * hci_conn_failed function which is triggered by the HCI
2914 * request completion callbacks used for connecting.
2919 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
2925 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
2926 cp->own_addr_type, cp->filter_policy);
2928 hci_dev_unlock(hdev);
2931 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
2933 struct hci_cp_le_read_remote_features *cp;
2934 struct hci_conn *conn;
2936 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2941 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
2947 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2949 if (conn->state == BT_CONFIG) {
2950 hci_connect_cfm(conn, status);
2951 hci_conn_drop(conn);
2955 hci_dev_unlock(hdev);
2958 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2960 struct hci_cp_le_start_enc *cp;
2961 struct hci_conn *conn;
2963 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2970 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2974 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2978 if (conn->state != BT_CONNECTED)
2981 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2982 hci_conn_drop(conn);
2985 hci_dev_unlock(hdev);
2988 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2990 struct hci_cp_switch_role *cp;
2991 struct hci_conn *conn;
2993 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2998 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3004 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3006 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3008 hci_dev_unlock(hdev);
3011 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3012 struct sk_buff *skb)
3014 struct hci_ev_status *ev = data;
3015 struct discovery_state *discov = &hdev->discovery;
3016 struct inquiry_entry *e;
3018 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3020 hci_conn_check_pending(hdev);
3022 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3025 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3026 wake_up_bit(&hdev->flags, HCI_INQUIRY);
3028 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3033 if (discov->state != DISCOVERY_FINDING)
3036 if (list_empty(&discov->resolve)) {
3037 /* When BR/EDR inquiry is active and no LE scanning is in
3038 * progress, then change discovery state to indicate completion.
3040 * When running LE scanning and BR/EDR inquiry simultaneously
3041 * and the LE scan already finished, then change the discovery
3042 * state to indicate completion.
3044 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3045 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3046 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3050 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3051 if (e && hci_resolve_name(hdev, e) == 0) {
3052 e->name_state = NAME_PENDING;
3053 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3054 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3056 /* When BR/EDR inquiry is active and no LE scanning is in
3057 * progress, then change discovery state to indicate completion.
3059 * When running LE scanning and BR/EDR inquiry simultaneously
3060 * and the LE scan already finished, then change the discovery
3061 * state to indicate completion.
3063 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3064 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3065 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3069 hci_dev_unlock(hdev);
3072 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3073 struct sk_buff *skb)
3075 struct hci_ev_inquiry_result *ev = edata;
3076 struct inquiry_data data;
3079 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3080 flex_array_size(ev, info, ev->num)))
3083 bt_dev_dbg(hdev, "num %d", ev->num);
3088 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3093 for (i = 0; i < ev->num; i++) {
3094 struct inquiry_info *info = &ev->info[i];
3097 bacpy(&data.bdaddr, &info->bdaddr);
3098 data.pscan_rep_mode = info->pscan_rep_mode;
3099 data.pscan_period_mode = info->pscan_period_mode;
3100 data.pscan_mode = info->pscan_mode;
3101 memcpy(data.dev_class, info->dev_class, 3);
3102 data.clock_offset = info->clock_offset;
3103 data.rssi = HCI_RSSI_INVALID;
3104 data.ssp_mode = 0x00;
3106 flags = hci_inquiry_cache_update(hdev, &data, false);
3108 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3109 info->dev_class, HCI_RSSI_INVALID,
3110 flags, NULL, 0, NULL, 0, 0);
3113 hci_dev_unlock(hdev);
3116 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3117 struct sk_buff *skb)
3119 struct hci_ev_conn_complete *ev = data;
3120 struct hci_conn *conn;
3121 u8 status = ev->status;
3123 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3127 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3129 /* In case of error status and there is no connection pending
3130 * just unlock as there is nothing to cleanup.
3135 /* Connection may not exist if auto-connected. Check the bredr
3136 * allowlist to see if this device is allowed to auto connect.
3137 * If link is an ACL type, create a connection class
3140 * Auto-connect will only occur if the event filter is
3141 * programmed with a given address. Right now, event filter is
3142 * only used during suspend.
3144 if (ev->link_type == ACL_LINK &&
3145 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3148 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3151 bt_dev_err(hdev, "no memory for new conn");
3155 if (ev->link_type != SCO_LINK)
3158 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3163 conn->type = SCO_LINK;
3167 /* The HCI_Connection_Complete event is only sent once per connection.
3168 * Processing it more than once per connection can corrupt kernel memory.
3170 * As the connection handle is set here for the first time, it indicates
3171 * whether the connection is already set up.
3173 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
3174 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3179 conn->handle = __le16_to_cpu(ev->handle);
3180 if (conn->handle > HCI_CONN_HANDLE_MAX) {
3181 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
3182 conn->handle, HCI_CONN_HANDLE_MAX);
3183 status = HCI_ERROR_INVALID_PARAMETERS;
3187 if (conn->type == ACL_LINK) {
3188 conn->state = BT_CONFIG;
3189 hci_conn_hold(conn);
3191 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3192 !hci_find_link_key(hdev, &ev->bdaddr))
3193 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3195 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3197 conn->state = BT_CONNECTED;
3199 hci_debugfs_create_conn(conn);
3200 hci_conn_add_sysfs(conn);
3202 if (test_bit(HCI_AUTH, &hdev->flags))
3203 set_bit(HCI_CONN_AUTH, &conn->flags);
3205 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3206 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3208 /* Get remote features */
3209 if (conn->type == ACL_LINK) {
3210 struct hci_cp_read_remote_features cp;
3211 cp.handle = ev->handle;
3212 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3215 hci_update_scan(hdev);
3218 /* Set packet type for incoming connection */
3219 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3220 struct hci_cp_change_conn_ptype cp;
3221 cp.handle = ev->handle;
3222 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3223 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3228 if (conn->type == ACL_LINK)
3229 hci_sco_setup(conn, ev->status);
3233 hci_conn_failed(conn, status);
3234 } else if (ev->link_type == SCO_LINK) {
3235 switch (conn->setting & SCO_AIRMODE_MASK) {
3236 case SCO_AIRMODE_CVSD:
3238 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3242 hci_connect_cfm(conn, status);
3246 hci_dev_unlock(hdev);
3248 hci_conn_check_pending(hdev);
3251 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3253 struct hci_cp_reject_conn_req cp;
3255 bacpy(&cp.bdaddr, bdaddr);
3256 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3257 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
3260 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3261 struct sk_buff *skb)
3263 struct hci_ev_conn_request *ev = data;
3264 int mask = hdev->link_mode;
3265 struct inquiry_entry *ie;
3266 struct hci_conn *conn;
3269 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
3271 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3274 if (!(mask & HCI_LM_ACCEPT)) {
3275 hci_reject_conn(hdev, &ev->bdaddr);
3281 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3283 hci_reject_conn(hdev, &ev->bdaddr);
3287 /* Require HCI_CONNECTABLE or an accept list entry to accept the
3288 * connection. These features are only touched through mgmt so
3289 * only do the checks if HCI_MGMT is set.
3291 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3292 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3293 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3295 hci_reject_conn(hdev, &ev->bdaddr);
3299 /* Connection accepted */
3301 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3303 memcpy(ie->data.dev_class, ev->dev_class, 3);
3305 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3308 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3311 bt_dev_err(hdev, "no memory for new connection");
3316 memcpy(conn->dev_class, ev->dev_class, 3);
3318 hci_dev_unlock(hdev);
3320 if (ev->link_type == ACL_LINK ||
3321 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3322 struct hci_cp_accept_conn_req cp;
3323 conn->state = BT_CONNECT;
3325 bacpy(&cp.bdaddr, &ev->bdaddr);
3327 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3328 cp.role = 0x00; /* Become central */
3330 cp.role = 0x01; /* Remain peripheral */
3332 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3333 } else if (!(flags & HCI_PROTO_DEFER)) {
3334 struct hci_cp_accept_sync_conn_req cp;
3335 conn->state = BT_CONNECT;
3337 bacpy(&cp.bdaddr, &ev->bdaddr);
3338 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3340 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
3341 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
3342 cp.max_latency = cpu_to_le16(0xffff);
3343 cp.content_format = cpu_to_le16(hdev->voice_setting);
3344 cp.retrans_effort = 0xff;
3346 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
3349 conn->state = BT_CONNECT2;
3350 hci_connect_cfm(conn, 0);
3355 hci_dev_unlock(hdev);
3358 static u8 hci_to_mgmt_reason(u8 err)
3361 case HCI_ERROR_CONNECTION_TIMEOUT:
3362 return MGMT_DEV_DISCONN_TIMEOUT;
3363 case HCI_ERROR_REMOTE_USER_TERM:
3364 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3365 case HCI_ERROR_REMOTE_POWER_OFF:
3366 return MGMT_DEV_DISCONN_REMOTE;
3367 case HCI_ERROR_LOCAL_HOST_TERM:
3368 return MGMT_DEV_DISCONN_LOCAL_HOST;
3370 return MGMT_DEV_DISCONN_UNKNOWN;
3374 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3375 struct sk_buff *skb)
3377 struct hci_ev_disconn_complete *ev = data;
3379 struct hci_conn_params *params;
3380 struct hci_conn *conn;
3381 bool mgmt_connected;
3383 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3387 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3392 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3393 conn->dst_type, ev->status);
3397 conn->state = BT_CLOSED;
3399 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3401 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3402 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3404 reason = hci_to_mgmt_reason(ev->reason);
3406 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3407 reason, mgmt_connected);
3409 if (conn->type == ACL_LINK) {
3410 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3411 hci_remove_link_key(hdev, &conn->dst);
3413 hci_update_scan(hdev);
3416 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3418 switch (params->auto_connect) {
3419 case HCI_AUTO_CONN_LINK_LOSS:
3420 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3424 case HCI_AUTO_CONN_DIRECT:
3425 case HCI_AUTO_CONN_ALWAYS:
3426 list_del_init(¶ms->action);
3427 list_add(¶ms->action, &hdev->pend_le_conns);
3428 hci_update_passive_scan(hdev);
3436 hci_disconn_cfm(conn, ev->reason);
3438 /* Re-enable advertising if necessary, since it might
3439 * have been disabled by the connection. From the
3440 * HCI_LE_Set_Advertise_Enable command description in
3441 * the core specification (v4.0):
3442 * "The Controller shall continue advertising until the Host
3443 * issues an LE_Set_Advertise_Enable command with
3444 * Advertising_Enable set to 0x00 (Advertising is disabled)
3445 * or until a connection is created or until the Advertising
3446 * is timed out due to Directed Advertising."
3448 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3449 hdev->cur_adv_instance = conn->adv_instance;
3450 hci_enable_advertising(hdev);
3456 hci_dev_unlock(hdev);
3459 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3460 struct sk_buff *skb)
3462 struct hci_ev_auth_complete *ev = data;
3463 struct hci_conn *conn;
3465 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3469 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3474 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3476 if (!hci_conn_ssp_enabled(conn) &&
3477 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3478 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3480 set_bit(HCI_CONN_AUTH, &conn->flags);
3481 conn->sec_level = conn->pending_sec_level;
3484 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3485 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3487 mgmt_auth_failed(conn, ev->status);
3490 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3491 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3493 if (conn->state == BT_CONFIG) {
3494 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3495 struct hci_cp_set_conn_encrypt cp;
3496 cp.handle = ev->handle;
3498 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3501 conn->state = BT_CONNECTED;
3502 hci_connect_cfm(conn, ev->status);
3503 hci_conn_drop(conn);
3506 hci_auth_cfm(conn, ev->status);
3508 hci_conn_hold(conn);
3509 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3510 hci_conn_drop(conn);
3513 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3515 struct hci_cp_set_conn_encrypt cp;
3516 cp.handle = ev->handle;
3518 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3521 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3522 hci_encrypt_cfm(conn, ev->status);
3527 hci_dev_unlock(hdev);
3530 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3531 struct sk_buff *skb)
3533 struct hci_ev_remote_name *ev = data;
3534 struct hci_conn *conn;
3536 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3538 hci_conn_check_pending(hdev);
3542 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3544 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3547 if (ev->status == 0)
3548 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3549 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3551 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3557 if (!hci_outgoing_auth_needed(hdev, conn))
3560 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3561 struct hci_cp_auth_requested cp;
3563 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3565 cp.handle = __cpu_to_le16(conn->handle);
3566 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3570 hci_dev_unlock(hdev);
3573 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3574 struct sk_buff *skb)
3576 struct hci_ev_encrypt_change *ev = data;
3577 struct hci_conn *conn;
3579 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3583 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3589 /* Encryption implies authentication */
3590 set_bit(HCI_CONN_AUTH, &conn->flags);
3591 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3592 conn->sec_level = conn->pending_sec_level;
3594 /* P-256 authentication key implies FIPS */
3595 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3596 set_bit(HCI_CONN_FIPS, &conn->flags);
3598 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3599 conn->type == LE_LINK)
3600 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3602 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3603 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3607 /* We should disregard the current RPA and generate a new one
3608 * whenever the encryption procedure fails.
3610 if (ev->status && conn->type == LE_LINK) {
3611 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3612 hci_adv_instances_set_rpa_expired(hdev, true);
3615 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3617 /* Check link security requirements are met */
3618 if (!hci_conn_check_link_mode(conn))
3619 ev->status = HCI_ERROR_AUTH_FAILURE;
3621 if (ev->status && conn->state == BT_CONNECTED) {
3622 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3623 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3625 /* Notify upper layers so they can cleanup before
3628 hci_encrypt_cfm(conn, ev->status);
3629 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3630 hci_conn_drop(conn);
3634 /* Try reading the encryption key size for encrypted ACL links */
3635 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3636 struct hci_cp_read_enc_key_size cp;
3638 /* Only send HCI_Read_Encryption_Key_Size if the
3639 * controller really supports it. If it doesn't, assume
3640 * the default size (16).
3642 if (!(hdev->commands[20] & 0x10)) {
3643 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3647 cp.handle = cpu_to_le16(conn->handle);
3648 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3650 bt_dev_err(hdev, "sending read key size failed");
3651 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3658 /* Set the default Authenticated Payload Timeout after
3659 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3660 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3661 * sent when the link is active and Encryption is enabled, the conn
3662 * type can be either LE or ACL and controller must support LMP Ping.
3663 * Ensure for AES-CCM encryption as well.
3665 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3666 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3667 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3668 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3669 struct hci_cp_write_auth_payload_to cp;
3671 cp.handle = cpu_to_le16(conn->handle);
3672 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3673 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3678 hci_encrypt_cfm(conn, ev->status);
3681 hci_dev_unlock(hdev);
3684 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3685 struct sk_buff *skb)
3687 struct hci_ev_change_link_key_complete *ev = data;
3688 struct hci_conn *conn;
3690 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3694 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3697 set_bit(HCI_CONN_SECURE, &conn->flags);
3699 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3701 hci_key_change_cfm(conn, ev->status);
3704 hci_dev_unlock(hdev);
3707 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3708 struct sk_buff *skb)
3710 struct hci_ev_remote_features *ev = data;
3711 struct hci_conn *conn;
3713 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3717 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3722 memcpy(conn->features[0], ev->features, 8);
3724 if (conn->state != BT_CONFIG)
3727 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3728 lmp_ext_feat_capable(conn)) {
3729 struct hci_cp_read_remote_ext_features cp;
3730 cp.handle = ev->handle;
3732 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
3737 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3738 struct hci_cp_remote_name_req cp;
3739 memset(&cp, 0, sizeof(cp));
3740 bacpy(&cp.bdaddr, &conn->dst);
3741 cp.pscan_rep_mode = 0x02;
3742 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3743 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3744 mgmt_device_connected(hdev, conn, NULL, 0);
3746 if (!hci_outgoing_auth_needed(hdev, conn)) {
3747 conn->state = BT_CONNECTED;
3748 hci_connect_cfm(conn, ev->status);
3749 hci_conn_drop(conn);
3753 hci_dev_unlock(hdev);
3756 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
3758 cancel_delayed_work(&hdev->cmd_timer);
3761 if (!test_bit(HCI_RESET, &hdev->flags)) {
3763 cancel_delayed_work(&hdev->ncmd_timer);
3764 atomic_set(&hdev->cmd_cnt, 1);
3766 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
3767 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
3774 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
3775 struct sk_buff *skb)
3777 struct hci_rp_le_read_buffer_size_v2 *rp = data;
3779 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3784 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
3785 hdev->le_pkts = rp->acl_max_pkt;
3786 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
3787 hdev->iso_pkts = rp->iso_max_pkt;
3789 hdev->le_cnt = hdev->le_pkts;
3790 hdev->iso_cnt = hdev->iso_pkts;
3792 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
3793 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
3798 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
3799 struct sk_buff *skb)
3801 struct hci_rp_le_set_cig_params *rp = data;
3802 struct hci_conn *conn;
3805 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3810 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
3811 conn->state = BT_CLOSED;
3812 hci_connect_cfm(conn, rp->status);
3820 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
3821 if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
3822 conn->state == BT_CONNECTED)
3825 conn->handle = __le16_to_cpu(rp->handle[i++]);
3827 bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
3828 conn->handle, conn->link);
3830 /* Create CIS if LE is already connected */
3831 if (conn->link && conn->link->state == BT_CONNECTED) {
3833 hci_le_create_cis(conn->link);
3837 if (i == rp->num_handles)
3844 hci_dev_unlock(hdev);
3849 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
3850 struct sk_buff *skb)
3852 struct hci_rp_le_setup_iso_path *rp = data;
3853 struct hci_cp_le_setup_iso_path *cp;
3854 struct hci_conn *conn;
3856 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3858 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
3864 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3869 hci_connect_cfm(conn, rp->status);
3874 switch (cp->direction) {
3875 /* Input (Host to Controller) */
3877 /* Only confirm connection if output only */
3878 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
3879 hci_connect_cfm(conn, rp->status);
3881 /* Output (Controller to Host) */
3883 /* Confirm connection since conn->iso_qos is always configured
3886 hci_connect_cfm(conn, rp->status);
3891 hci_dev_unlock(hdev);
3895 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
3897 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3900 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
3901 struct sk_buff *skb)
3903 struct hci_ev_status *rp = data;
3904 struct hci_cp_le_set_per_adv_params *cp;
3906 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3911 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
3915 /* TODO: set the conn state */
3919 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
3920 struct sk_buff *skb)
3922 struct hci_ev_status *rp = data;
3925 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
3930 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
3937 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
3939 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
3941 hci_dev_unlock(hdev);
3946 #define HCI_CC_VL(_op, _func, _min, _max) \
3954 #define HCI_CC(_op, _func, _len) \
3955 HCI_CC_VL(_op, _func, _len, _len)
3957 #define HCI_CC_STATUS(_op, _func) \
3958 HCI_CC(_op, _func, sizeof(struct hci_ev_status))
3960 static const struct hci_cc {
3962 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
3965 } hci_cc_table[] = {
3966 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
3967 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
3968 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
3969 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
3970 hci_cc_remote_name_req_cancel),
3971 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
3972 sizeof(struct hci_rp_role_discovery)),
3973 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
3974 sizeof(struct hci_rp_read_link_policy)),
3975 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
3976 sizeof(struct hci_rp_write_link_policy)),
3977 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
3978 sizeof(struct hci_rp_read_def_link_policy)),
3979 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
3980 hci_cc_write_def_link_policy),
3981 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
3982 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
3983 sizeof(struct hci_rp_read_stored_link_key)),
3984 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
3985 sizeof(struct hci_rp_delete_stored_link_key)),
3986 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
3987 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
3988 sizeof(struct hci_rp_read_local_name)),
3989 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
3990 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
3991 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
3992 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
3993 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
3994 sizeof(struct hci_rp_read_class_of_dev)),
3995 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
3996 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
3997 sizeof(struct hci_rp_read_voice_setting)),
3998 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
3999 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4000 sizeof(struct hci_rp_read_num_supported_iac)),
4001 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4002 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4003 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4004 sizeof(struct hci_rp_read_auth_payload_to)),
4005 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4006 sizeof(struct hci_rp_write_auth_payload_to)),
4007 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4008 sizeof(struct hci_rp_read_local_version)),
4009 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4010 sizeof(struct hci_rp_read_local_commands)),
4011 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4012 sizeof(struct hci_rp_read_local_features)),
4013 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4014 sizeof(struct hci_rp_read_local_ext_features)),
4015 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4016 sizeof(struct hci_rp_read_buffer_size)),
4017 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4018 sizeof(struct hci_rp_read_bd_addr)),
4019 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4020 sizeof(struct hci_rp_read_local_pairing_opts)),
4021 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4022 sizeof(struct hci_rp_read_page_scan_activity)),
4023 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4024 hci_cc_write_page_scan_activity),
4025 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4026 sizeof(struct hci_rp_read_page_scan_type)),
4027 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4028 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4029 sizeof(struct hci_rp_read_data_block_size)),
4030 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4031 sizeof(struct hci_rp_read_flow_control_mode)),
4032 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4033 sizeof(struct hci_rp_read_local_amp_info)),
4034 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4035 sizeof(struct hci_rp_read_clock)),
4036 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4037 sizeof(struct hci_rp_read_enc_key_size)),
4038 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4039 sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4040 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4041 hci_cc_read_def_err_data_reporting,
4042 sizeof(struct hci_rp_read_def_err_data_reporting)),
4043 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4044 hci_cc_write_def_err_data_reporting),
4045 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4046 sizeof(struct hci_rp_pin_code_reply)),
4047 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4048 sizeof(struct hci_rp_pin_code_neg_reply)),
4049 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4050 sizeof(struct hci_rp_read_local_oob_data)),
4051 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4052 sizeof(struct hci_rp_read_local_oob_ext_data)),
4053 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4054 sizeof(struct hci_rp_le_read_buffer_size)),
4055 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4056 sizeof(struct hci_rp_le_read_local_features)),
4057 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4058 sizeof(struct hci_rp_le_read_adv_tx_power)),
4059 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4060 sizeof(struct hci_rp_user_confirm_reply)),
4061 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4062 sizeof(struct hci_rp_user_confirm_reply)),
4063 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4064 sizeof(struct hci_rp_user_confirm_reply)),
4065 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4066 sizeof(struct hci_rp_user_confirm_reply)),
4067 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4068 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4069 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4070 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4071 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4072 hci_cc_le_read_accept_list_size,
4073 sizeof(struct hci_rp_le_read_accept_list_size)),
4074 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4075 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4076 hci_cc_le_add_to_accept_list),
4077 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4078 hci_cc_le_del_from_accept_list),
4079 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4080 sizeof(struct hci_rp_le_read_supported_states)),
4081 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4082 sizeof(struct hci_rp_le_read_def_data_len)),
4083 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4084 hci_cc_le_write_def_data_len),
4085 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4086 hci_cc_le_add_to_resolv_list),
4087 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4088 hci_cc_le_del_from_resolv_list),
4089 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4090 hci_cc_le_clear_resolv_list),
4091 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4092 sizeof(struct hci_rp_le_read_resolv_list_size)),
4093 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4094 hci_cc_le_set_addr_resolution_enable),
4095 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4096 sizeof(struct hci_rp_le_read_max_data_len)),
4097 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4098 hci_cc_write_le_host_supported),
4099 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4100 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4101 sizeof(struct hci_rp_read_rssi)),
4102 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4103 sizeof(struct hci_rp_read_tx_power)),
4104 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4105 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4106 hci_cc_le_set_ext_scan_param),
4107 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4108 hci_cc_le_set_ext_scan_enable),
4109 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4110 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4111 hci_cc_le_read_num_adv_sets,
4112 sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4113 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4114 sizeof(struct hci_rp_le_set_ext_adv_params)),
4115 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4116 hci_cc_le_set_ext_adv_enable),
4117 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4118 hci_cc_le_set_adv_set_random_addr),
4119 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4120 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4121 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4122 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4123 hci_cc_le_set_per_adv_enable),
4124 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4125 sizeof(struct hci_rp_le_read_transmit_power)),
4126 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4127 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4128 sizeof(struct hci_rp_le_read_buffer_size_v2)),
4129 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4130 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4131 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4132 sizeof(struct hci_rp_le_setup_iso_path)),
4135 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4136 struct sk_buff *skb)
4140 if (skb->len < cc->min_len) {
4141 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4142 cc->op, skb->len, cc->min_len);
4143 return HCI_ERROR_UNSPECIFIED;
4146 /* Just warn if the length is over max_len size it still be possible to
4147 * partially parse the cc so leave to callback to decide if that is
4150 if (skb->len > cc->max_len)
4151 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4152 cc->op, skb->len, cc->max_len);
4154 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4156 return HCI_ERROR_UNSPECIFIED;
4158 return cc->func(hdev, data, skb);
4161 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4162 struct sk_buff *skb, u16 *opcode, u8 *status,
4163 hci_req_complete_t *req_complete,
4164 hci_req_complete_skb_t *req_complete_skb)
4166 struct hci_ev_cmd_complete *ev = data;
4169 *opcode = __le16_to_cpu(ev->opcode);
4171 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4173 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4174 if (hci_cc_table[i].op == *opcode) {
4175 *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4180 if (i == ARRAY_SIZE(hci_cc_table)) {
4181 /* Unknown opcode, assume byte 0 contains the status, so
4182 * that e.g. __hci_cmd_sync() properly returns errors
4183 * for vendor specific commands send by HCI drivers.
4184 * If a vendor doesn't actually follow this convention we may
4185 * need to introduce a vendor CC table in order to properly set
4188 *status = skb->data[0];
4191 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4193 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4196 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4198 "unexpected event for opcode 0x%4.4x", *opcode);
4202 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4203 queue_work(hdev->workqueue, &hdev->cmd_work);
4206 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4208 struct hci_cp_le_create_cis *cp;
4211 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4216 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4222 /* Remove connection if command failed */
4223 for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4224 struct hci_conn *conn;
4227 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4229 conn = hci_conn_hash_lookup_handle(hdev, handle);
4231 conn->state = BT_CLOSED;
4232 hci_connect_cfm(conn, status);
4237 hci_dev_unlock(hdev);
4240 #define HCI_CS(_op, _func) \
4246 static const struct hci_cs {
4248 void (*func)(struct hci_dev *hdev, __u8 status);
4249 } hci_cs_table[] = {
4250 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4251 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4252 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4253 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4254 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4255 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4256 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4257 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4258 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4259 hci_cs_read_remote_ext_features),
4260 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4261 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4262 hci_cs_enhanced_setup_sync_conn),
4263 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4264 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4265 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4266 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4267 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4268 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4269 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4270 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4271 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
4274 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4275 struct sk_buff *skb, u16 *opcode, u8 *status,
4276 hci_req_complete_t *req_complete,
4277 hci_req_complete_skb_t *req_complete_skb)
4279 struct hci_ev_cmd_status *ev = data;
4282 *opcode = __le16_to_cpu(ev->opcode);
4283 *status = ev->status;
4285 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
4287 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4288 if (hci_cs_table[i].op == *opcode) {
4289 hci_cs_table[i].func(hdev, ev->status);
4294 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4296 /* Indicate request completion if the command failed. Also, if
4297 * we're not waiting for a special event and we get a success
4298 * command status we should try to flag the request as completed
4299 * (since for this kind of commands there will not be a command
4302 if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
4303 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4305 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4306 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
4312 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4313 queue_work(hdev->workqueue, &hdev->cmd_work);
4316 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4317 struct sk_buff *skb)
4319 struct hci_ev_hardware_error *ev = data;
4321 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4323 hdev->hw_error_code = ev->code;
4325 queue_work(hdev->req_workqueue, &hdev->error_reset);
4328 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4329 struct sk_buff *skb)
4331 struct hci_ev_role_change *ev = data;
4332 struct hci_conn *conn;
4334 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4338 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4341 conn->role = ev->role;
4343 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4345 hci_role_switch_cfm(conn, ev->status, ev->role);
4348 hci_dev_unlock(hdev);
4351 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4352 struct sk_buff *skb)
4354 struct hci_ev_num_comp_pkts *ev = data;
4357 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4358 flex_array_size(ev, handles, ev->num)))
4361 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4362 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4366 bt_dev_dbg(hdev, "num %d", ev->num);
4368 for (i = 0; i < ev->num; i++) {
4369 struct hci_comp_pkts_info *info = &ev->handles[i];
4370 struct hci_conn *conn;
4371 __u16 handle, count;
4373 handle = __le16_to_cpu(info->handle);
4374 count = __le16_to_cpu(info->count);
4376 conn = hci_conn_hash_lookup_handle(hdev, handle);
4380 conn->sent -= count;
4382 switch (conn->type) {
4384 hdev->acl_cnt += count;
4385 if (hdev->acl_cnt > hdev->acl_pkts)
4386 hdev->acl_cnt = hdev->acl_pkts;
4390 if (hdev->le_pkts) {
4391 hdev->le_cnt += count;
4392 if (hdev->le_cnt > hdev->le_pkts)
4393 hdev->le_cnt = hdev->le_pkts;
4395 hdev->acl_cnt += count;
4396 if (hdev->acl_cnt > hdev->acl_pkts)
4397 hdev->acl_cnt = hdev->acl_pkts;
4402 hdev->sco_cnt += count;
4403 if (hdev->sco_cnt > hdev->sco_pkts)
4404 hdev->sco_cnt = hdev->sco_pkts;
4408 if (hdev->iso_pkts) {
4409 hdev->iso_cnt += count;
4410 if (hdev->iso_cnt > hdev->iso_pkts)
4411 hdev->iso_cnt = hdev->iso_pkts;
4412 } else if (hdev->le_pkts) {
4413 hdev->le_cnt += count;
4414 if (hdev->le_cnt > hdev->le_pkts)
4415 hdev->le_cnt = hdev->le_pkts;
4417 hdev->acl_cnt += count;
4418 if (hdev->acl_cnt > hdev->acl_pkts)
4419 hdev->acl_cnt = hdev->acl_pkts;
4424 bt_dev_err(hdev, "unknown type %d conn %p",
4430 queue_work(hdev->workqueue, &hdev->tx_work);
4433 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4436 struct hci_chan *chan;
4438 switch (hdev->dev_type) {
4440 return hci_conn_hash_lookup_handle(hdev, handle);
4442 chan = hci_chan_lookup_handle(hdev, handle);
4447 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
4454 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4455 struct sk_buff *skb)
4457 struct hci_ev_num_comp_blocks *ev = data;
4460 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4461 flex_array_size(ev, handles, ev->num_hndl)))
4464 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4465 bt_dev_err(hdev, "wrong event for mode %d",
4466 hdev->flow_ctl_mode);
4470 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4473 for (i = 0; i < ev->num_hndl; i++) {
4474 struct hci_comp_blocks_info *info = &ev->handles[i];
4475 struct hci_conn *conn = NULL;
4476 __u16 handle, block_count;
4478 handle = __le16_to_cpu(info->handle);
4479 block_count = __le16_to_cpu(info->blocks);
4481 conn = __hci_conn_lookup_handle(hdev, handle);
4485 conn->sent -= block_count;
4487 switch (conn->type) {
4490 hdev->block_cnt += block_count;
4491 if (hdev->block_cnt > hdev->num_blocks)
4492 hdev->block_cnt = hdev->num_blocks;
4496 bt_dev_err(hdev, "unknown type %d conn %p",
4502 queue_work(hdev->workqueue, &hdev->tx_work);
4505 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4506 struct sk_buff *skb)
4508 struct hci_ev_mode_change *ev = data;
4509 struct hci_conn *conn;
4511 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4515 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4517 conn->mode = ev->mode;
4519 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4521 if (conn->mode == HCI_CM_ACTIVE)
4522 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4524 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4527 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4528 hci_sco_setup(conn, ev->status);
4531 hci_dev_unlock(hdev);
4534 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4535 struct sk_buff *skb)
4537 struct hci_ev_pin_code_req *ev = data;
4538 struct hci_conn *conn;
4540 bt_dev_dbg(hdev, "");
4544 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4548 if (conn->state == BT_CONNECTED) {
4549 hci_conn_hold(conn);
4550 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4551 hci_conn_drop(conn);
4554 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4555 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4556 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4557 sizeof(ev->bdaddr), &ev->bdaddr);
4558 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4561 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4566 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4570 hci_dev_unlock(hdev);
4573 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4575 if (key_type == HCI_LK_CHANGED_COMBINATION)
4578 conn->pin_length = pin_len;
4579 conn->key_type = key_type;
4582 case HCI_LK_LOCAL_UNIT:
4583 case HCI_LK_REMOTE_UNIT:
4584 case HCI_LK_DEBUG_COMBINATION:
4586 case HCI_LK_COMBINATION:
4588 conn->pending_sec_level = BT_SECURITY_HIGH;
4590 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4592 case HCI_LK_UNAUTH_COMBINATION_P192:
4593 case HCI_LK_UNAUTH_COMBINATION_P256:
4594 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4596 case HCI_LK_AUTH_COMBINATION_P192:
4597 conn->pending_sec_level = BT_SECURITY_HIGH;
4599 case HCI_LK_AUTH_COMBINATION_P256:
4600 conn->pending_sec_level = BT_SECURITY_FIPS;
4605 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4606 struct sk_buff *skb)
4608 struct hci_ev_link_key_req *ev = data;
4609 struct hci_cp_link_key_reply cp;
4610 struct hci_conn *conn;
4611 struct link_key *key;
4613 bt_dev_dbg(hdev, "");
4615 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4620 key = hci_find_link_key(hdev, &ev->bdaddr);
4622 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4626 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4628 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4630 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4632 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4633 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4634 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4635 bt_dev_dbg(hdev, "ignoring unauthenticated key");
4639 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4640 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4641 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4642 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4646 conn_set_key(conn, key->type, key->pin_len);
4649 bacpy(&cp.bdaddr, &ev->bdaddr);
4650 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4652 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4654 hci_dev_unlock(hdev);
4659 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4660 hci_dev_unlock(hdev);
4663 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4664 struct sk_buff *skb)
4666 struct hci_ev_link_key_notify *ev = data;
4667 struct hci_conn *conn;
4668 struct link_key *key;
4672 bt_dev_dbg(hdev, "");
4676 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4680 hci_conn_hold(conn);
4681 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4682 hci_conn_drop(conn);
4684 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4685 conn_set_key(conn, ev->key_type, conn->pin_length);
4687 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4690 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4691 ev->key_type, pin_len, &persistent);
4695 /* Update connection information since adding the key will have
4696 * fixed up the type in the case of changed combination keys.
4698 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4699 conn_set_key(conn, key->type, key->pin_len);
4701 mgmt_new_link_key(hdev, key, persistent);
4703 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4704 * is set. If it's not set simply remove the key from the kernel
4705 * list (we've still notified user space about it but with
4706 * store_hint being 0).
4708 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4709 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4710 list_del_rcu(&key->list);
4711 kfree_rcu(key, rcu);
4716 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4718 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4721 hci_dev_unlock(hdev);
4724 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4725 struct sk_buff *skb)
4727 struct hci_ev_clock_offset *ev = data;
4728 struct hci_conn *conn;
4730 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4734 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4735 if (conn && !ev->status) {
4736 struct inquiry_entry *ie;
4738 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4740 ie->data.clock_offset = ev->clock_offset;
4741 ie->timestamp = jiffies;
4745 hci_dev_unlock(hdev);
4748 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
4749 struct sk_buff *skb)
4751 struct hci_ev_pkt_type_change *ev = data;
4752 struct hci_conn *conn;
4754 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4758 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4759 if (conn && !ev->status)
4760 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
4762 hci_dev_unlock(hdev);
4765 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
4766 struct sk_buff *skb)
4768 struct hci_ev_pscan_rep_mode *ev = data;
4769 struct inquiry_entry *ie;
4771 bt_dev_dbg(hdev, "");
4775 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4777 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
4778 ie->timestamp = jiffies;
4781 hci_dev_unlock(hdev);
4784 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
4785 struct sk_buff *skb)
4787 struct hci_ev_inquiry_result_rssi *ev = edata;
4788 struct inquiry_data data;
4791 bt_dev_dbg(hdev, "num_rsp %d", ev->num);
4796 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
4801 if (skb->len == array_size(ev->num,
4802 sizeof(struct inquiry_info_rssi_pscan))) {
4803 struct inquiry_info_rssi_pscan *info;
4805 for (i = 0; i < ev->num; i++) {
4808 info = hci_ev_skb_pull(hdev, skb,
4809 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4812 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4813 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4817 bacpy(&data.bdaddr, &info->bdaddr);
4818 data.pscan_rep_mode = info->pscan_rep_mode;
4819 data.pscan_period_mode = info->pscan_period_mode;
4820 data.pscan_mode = info->pscan_mode;
4821 memcpy(data.dev_class, info->dev_class, 3);
4822 data.clock_offset = info->clock_offset;
4823 data.rssi = info->rssi;
4824 data.ssp_mode = 0x00;
4826 flags = hci_inquiry_cache_update(hdev, &data, false);
4828 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4829 info->dev_class, info->rssi,
4830 flags, NULL, 0, NULL, 0, 0);
4832 } else if (skb->len == array_size(ev->num,
4833 sizeof(struct inquiry_info_rssi))) {
4834 struct inquiry_info_rssi *info;
4836 for (i = 0; i < ev->num; i++) {
4839 info = hci_ev_skb_pull(hdev, skb,
4840 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
4843 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4844 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4848 bacpy(&data.bdaddr, &info->bdaddr);
4849 data.pscan_rep_mode = info->pscan_rep_mode;
4850 data.pscan_period_mode = info->pscan_period_mode;
4851 data.pscan_mode = 0x00;
4852 memcpy(data.dev_class, info->dev_class, 3);
4853 data.clock_offset = info->clock_offset;
4854 data.rssi = info->rssi;
4855 data.ssp_mode = 0x00;
4857 flags = hci_inquiry_cache_update(hdev, &data, false);
4859 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4860 info->dev_class, info->rssi,
4861 flags, NULL, 0, NULL, 0, 0);
4864 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
4865 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
4868 hci_dev_unlock(hdev);
4871 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
4872 struct sk_buff *skb)
4874 struct hci_ev_remote_ext_features *ev = data;
4875 struct hci_conn *conn;
4877 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4881 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4885 if (ev->page < HCI_MAX_PAGES)
4886 memcpy(conn->features[ev->page], ev->features, 8);
4888 if (!ev->status && ev->page == 0x01) {
4889 struct inquiry_entry *ie;
4891 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
4893 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4895 if (ev->features[0] & LMP_HOST_SSP) {
4896 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4898 /* It is mandatory by the Bluetooth specification that
4899 * Extended Inquiry Results are only used when Secure
4900 * Simple Pairing is enabled, but some devices violate
4903 * To make these devices work, the internal SSP
4904 * enabled flag needs to be cleared if the remote host
4905 * features do not indicate SSP support */
4906 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
4909 if (ev->features[0] & LMP_HOST_SC)
4910 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
4913 if (conn->state != BT_CONFIG)
4916 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
4917 struct hci_cp_remote_name_req cp;
4918 memset(&cp, 0, sizeof(cp));
4919 bacpy(&cp.bdaddr, &conn->dst);
4920 cp.pscan_rep_mode = 0x02;
4921 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
4922 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4923 mgmt_device_connected(hdev, conn, NULL, 0);
4925 if (!hci_outgoing_auth_needed(hdev, conn)) {
4926 conn->state = BT_CONNECTED;
4927 hci_connect_cfm(conn, ev->status);
4928 hci_conn_drop(conn);
4932 hci_dev_unlock(hdev);
4935 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
4936 struct sk_buff *skb)
4938 struct hci_ev_sync_conn_complete *ev = data;
4939 struct hci_conn *conn;
4940 u8 status = ev->status;
4942 switch (ev->link_type) {
4947 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
4948 * for HCI_Synchronous_Connection_Complete is limited to
4949 * either SCO or eSCO
4951 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
4955 bt_dev_dbg(hdev, "status 0x%2.2x", status);
4959 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
4961 if (ev->link_type == ESCO_LINK)
4964 /* When the link type in the event indicates SCO connection
4965 * and lookup of the connection object fails, then check
4966 * if an eSCO connection object exists.
4968 * The core limits the synchronous connections to either
4969 * SCO or eSCO. The eSCO connection is preferred and tried
4970 * to be setup first and until successfully established,
4971 * the link type will be hinted as eSCO.
4973 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
4978 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
4979 * Processing it more than once per connection can corrupt kernel memory.
4981 * As the connection handle is set here for the first time, it indicates
4982 * whether the connection is already set up.
4984 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
4985 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
4991 conn->handle = __le16_to_cpu(ev->handle);
4992 if (conn->handle > HCI_CONN_HANDLE_MAX) {
4993 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
4994 conn->handle, HCI_CONN_HANDLE_MAX);
4995 status = HCI_ERROR_INVALID_PARAMETERS;
4996 conn->state = BT_CLOSED;
5000 conn->state = BT_CONNECTED;
5001 conn->type = ev->link_type;
5003 hci_debugfs_create_conn(conn);
5004 hci_conn_add_sysfs(conn);
5007 case 0x10: /* Connection Accept Timeout */
5008 case 0x0d: /* Connection Rejected due to Limited Resources */
5009 case 0x11: /* Unsupported Feature or Parameter Value */
5010 case 0x1c: /* SCO interval rejected */
5011 case 0x1a: /* Unsupported Remote Feature */
5012 case 0x1e: /* Invalid LMP Parameters */
5013 case 0x1f: /* Unspecified error */
5014 case 0x20: /* Unsupported LMP Parameter value */
5016 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5017 (hdev->esco_type & EDR_ESCO_MASK);
5018 if (hci_setup_sync(conn, conn->link->handle))
5024 conn->state = BT_CLOSED;
5028 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5029 /* Notify only in case of SCO over HCI transport data path which
5030 * is zero and non-zero value shall be non-HCI transport data path
5032 if (conn->codec.data_path == 0 && hdev->notify) {
5033 switch (ev->air_mode) {
5035 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5038 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5043 hci_connect_cfm(conn, status);
5048 hci_dev_unlock(hdev);
5051 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5055 while (parsed < eir_len) {
5056 u8 field_len = eir[0];
5061 parsed += field_len + 1;
5062 eir += field_len + 1;
5068 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5069 struct sk_buff *skb)
5071 struct hci_ev_ext_inquiry_result *ev = edata;
5072 struct inquiry_data data;
5076 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5077 flex_array_size(ev, info, ev->num)))
5080 bt_dev_dbg(hdev, "num %d", ev->num);
5085 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5090 for (i = 0; i < ev->num; i++) {
5091 struct extended_inquiry_info *info = &ev->info[i];
5095 bacpy(&data.bdaddr, &info->bdaddr);
5096 data.pscan_rep_mode = info->pscan_rep_mode;
5097 data.pscan_period_mode = info->pscan_period_mode;
5098 data.pscan_mode = 0x00;
5099 memcpy(data.dev_class, info->dev_class, 3);
5100 data.clock_offset = info->clock_offset;
5101 data.rssi = info->rssi;
5102 data.ssp_mode = 0x01;
5104 if (hci_dev_test_flag(hdev, HCI_MGMT))
5105 name_known = eir_get_data(info->data,
5107 EIR_NAME_COMPLETE, NULL);
5111 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5113 eir_len = eir_get_length(info->data, sizeof(info->data));
5115 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5116 info->dev_class, info->rssi,
5117 flags, info->data, eir_len, NULL, 0, 0);
5120 hci_dev_unlock(hdev);
5123 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5124 struct sk_buff *skb)
5126 struct hci_ev_key_refresh_complete *ev = data;
5127 struct hci_conn *conn;
5129 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5130 __le16_to_cpu(ev->handle));
5134 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5138 /* For BR/EDR the necessary steps are taken through the
5139 * auth_complete event.
5141 if (conn->type != LE_LINK)
5145 conn->sec_level = conn->pending_sec_level;
5147 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
5149 if (ev->status && conn->state == BT_CONNECTED) {
5150 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5151 hci_conn_drop(conn);
5155 if (conn->state == BT_CONFIG) {
5157 conn->state = BT_CONNECTED;
5159 hci_connect_cfm(conn, ev->status);
5160 hci_conn_drop(conn);
5162 hci_auth_cfm(conn, ev->status);
5164 hci_conn_hold(conn);
5165 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5166 hci_conn_drop(conn);
5170 hci_dev_unlock(hdev);
5173 static u8 hci_get_auth_req(struct hci_conn *conn)
5175 /* If remote requests no-bonding follow that lead */
5176 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5177 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5178 return conn->remote_auth | (conn->auth_type & 0x01);
5180 /* If both remote and local have enough IO capabilities, require
5183 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5184 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5185 return conn->remote_auth | 0x01;
5187 /* No MITM protection possible so ignore remote requirement */
5188 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
5191 static u8 bredr_oob_data_present(struct hci_conn *conn)
5193 struct hci_dev *hdev = conn->hdev;
5194 struct oob_data *data;
5196 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5200 if (bredr_sc_enabled(hdev)) {
5201 /* When Secure Connections is enabled, then just
5202 * return the present value stored with the OOB
5203 * data. The stored value contains the right present
5204 * information. However it can only be trusted when
5205 * not in Secure Connection Only mode.
5207 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5208 return data->present;
5210 /* When Secure Connections Only mode is enabled, then
5211 * the P-256 values are required. If they are not
5212 * available, then do not declare that OOB data is
5215 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5216 !memcmp(data->hash256, ZERO_KEY, 16))
5222 /* When Secure Connections is not enabled or actually
5223 * not supported by the hardware, then check that if
5224 * P-192 data values are present.
5226 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5227 !memcmp(data->hash192, ZERO_KEY, 16))
5233 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5234 struct sk_buff *skb)
5236 struct hci_ev_io_capa_request *ev = data;
5237 struct hci_conn *conn;
5239 bt_dev_dbg(hdev, "");
5243 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5247 hci_conn_hold(conn);
5249 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5252 /* Allow pairing if we're pairable, the initiators of the
5253 * pairing or if the remote is not requesting bonding.
5255 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5256 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5257 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5258 struct hci_cp_io_capability_reply cp;
5260 bacpy(&cp.bdaddr, &ev->bdaddr);
5261 /* Change the IO capability from KeyboardDisplay
5262 * to DisplayYesNo as it is not supported by BT spec. */
5263 cp.capability = (conn->io_capability == 0x04) ?
5264 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5266 /* If we are initiators, there is no remote information yet */
5267 if (conn->remote_auth == 0xff) {
5268 /* Request MITM protection if our IO caps allow it
5269 * except for the no-bonding case.
5271 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5272 conn->auth_type != HCI_AT_NO_BONDING)
5273 conn->auth_type |= 0x01;
5275 conn->auth_type = hci_get_auth_req(conn);
5278 /* If we're not bondable, force one of the non-bondable
5279 * authentication requirement values.
5281 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5282 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5284 cp.authentication = conn->auth_type;
5285 cp.oob_data = bredr_oob_data_present(conn);
5287 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5290 struct hci_cp_io_capability_neg_reply cp;
5292 bacpy(&cp.bdaddr, &ev->bdaddr);
5293 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5295 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5300 hci_dev_unlock(hdev);
5303 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5304 struct sk_buff *skb)
5306 struct hci_ev_io_capa_reply *ev = data;
5307 struct hci_conn *conn;
5309 bt_dev_dbg(hdev, "");
5313 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5317 conn->remote_cap = ev->capability;
5318 conn->remote_auth = ev->authentication;
5321 hci_dev_unlock(hdev);
5324 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5325 struct sk_buff *skb)
5327 struct hci_ev_user_confirm_req *ev = data;
5328 int loc_mitm, rem_mitm, confirm_hint = 0;
5329 struct hci_conn *conn;
5331 bt_dev_dbg(hdev, "");
5335 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5338 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5342 loc_mitm = (conn->auth_type & 0x01);
5343 rem_mitm = (conn->remote_auth & 0x01);
5345 /* If we require MITM but the remote device can't provide that
5346 * (it has NoInputNoOutput) then reject the confirmation
5347 * request. We check the security level here since it doesn't
5348 * necessarily match conn->auth_type.
5350 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5351 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5352 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5353 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5354 sizeof(ev->bdaddr), &ev->bdaddr);
5358 /* If no side requires MITM protection; auto-accept */
5359 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5360 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5362 /* If we're not the initiators request authorization to
5363 * proceed from user space (mgmt_user_confirm with
5364 * confirm_hint set to 1). The exception is if neither
5365 * side had MITM or if the local IO capability is
5366 * NoInputNoOutput, in which case we do auto-accept
5368 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5369 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5370 (loc_mitm || rem_mitm)) {
5371 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5376 /* If there already exists link key in local host, leave the
5377 * decision to user space since the remote device could be
5378 * legitimate or malicious.
5380 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5381 bt_dev_dbg(hdev, "Local host already has link key");
5386 BT_DBG("Auto-accept of user confirmation with %ums delay",
5387 hdev->auto_accept_delay);
5389 if (hdev->auto_accept_delay > 0) {
5390 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5391 queue_delayed_work(conn->hdev->workqueue,
5392 &conn->auto_accept_work, delay);
5396 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5397 sizeof(ev->bdaddr), &ev->bdaddr);
5402 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5403 le32_to_cpu(ev->passkey), confirm_hint);
5406 hci_dev_unlock(hdev);
5409 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5410 struct sk_buff *skb)
5412 struct hci_ev_user_passkey_req *ev = data;
5414 bt_dev_dbg(hdev, "");
5416 if (hci_dev_test_flag(hdev, HCI_MGMT))
5417 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
5420 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5421 struct sk_buff *skb)
5423 struct hci_ev_user_passkey_notify *ev = data;
5424 struct hci_conn *conn;
5426 bt_dev_dbg(hdev, "");
5428 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5432 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5433 conn->passkey_entered = 0;
5435 if (hci_dev_test_flag(hdev, HCI_MGMT))
5436 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5437 conn->dst_type, conn->passkey_notify,
5438 conn->passkey_entered);
5441 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5442 struct sk_buff *skb)
5444 struct hci_ev_keypress_notify *ev = data;
5445 struct hci_conn *conn;
5447 bt_dev_dbg(hdev, "");
5449 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5454 case HCI_KEYPRESS_STARTED:
5455 conn->passkey_entered = 0;
5458 case HCI_KEYPRESS_ENTERED:
5459 conn->passkey_entered++;
5462 case HCI_KEYPRESS_ERASED:
5463 conn->passkey_entered--;
5466 case HCI_KEYPRESS_CLEARED:
5467 conn->passkey_entered = 0;
5470 case HCI_KEYPRESS_COMPLETED:
5474 if (hci_dev_test_flag(hdev, HCI_MGMT))
5475 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5476 conn->dst_type, conn->passkey_notify,
5477 conn->passkey_entered);
5480 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5481 struct sk_buff *skb)
5483 struct hci_ev_simple_pair_complete *ev = data;
5484 struct hci_conn *conn;
5486 bt_dev_dbg(hdev, "");
5490 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5494 /* Reset the authentication requirement to unknown */
5495 conn->remote_auth = 0xff;
5497 /* To avoid duplicate auth_failed events to user space we check
5498 * the HCI_CONN_AUTH_PEND flag which will be set if we
5499 * initiated the authentication. A traditional auth_complete
5500 * event gets always produced as initiator and is also mapped to
5501 * the mgmt_auth_failed event */
5502 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5503 mgmt_auth_failed(conn, ev->status);
5505 hci_conn_drop(conn);
5508 hci_dev_unlock(hdev);
5511 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5512 struct sk_buff *skb)
5514 struct hci_ev_remote_host_features *ev = data;
5515 struct inquiry_entry *ie;
5516 struct hci_conn *conn;
5518 bt_dev_dbg(hdev, "");
5522 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5524 memcpy(conn->features[1], ev->features, 8);
5526 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5528 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5530 hci_dev_unlock(hdev);
5533 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5534 struct sk_buff *skb)
5536 struct hci_ev_remote_oob_data_request *ev = edata;
5537 struct oob_data *data;
5539 bt_dev_dbg(hdev, "");
5543 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5546 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
5548 struct hci_cp_remote_oob_data_neg_reply cp;
5550 bacpy(&cp.bdaddr, &ev->bdaddr);
5551 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
5556 if (bredr_sc_enabled(hdev)) {
5557 struct hci_cp_remote_oob_ext_data_reply cp;
5559 bacpy(&cp.bdaddr, &ev->bdaddr);
5560 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5561 memset(cp.hash192, 0, sizeof(cp.hash192));
5562 memset(cp.rand192, 0, sizeof(cp.rand192));
5564 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5565 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5567 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5568 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5570 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
5573 struct hci_cp_remote_oob_data_reply cp;
5575 bacpy(&cp.bdaddr, &ev->bdaddr);
5576 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5577 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5579 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5584 hci_dev_unlock(hdev);
5587 #if IS_ENABLED(CONFIG_BT_HS)
5588 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5589 struct sk_buff *skb)
5591 struct hci_ev_channel_selected *ev = data;
5592 struct hci_conn *hcon;
5594 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5596 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5600 amp_read_loc_assoc_final_data(hdev, hcon);
5603 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5604 struct sk_buff *skb)
5606 struct hci_ev_phy_link_complete *ev = data;
5607 struct hci_conn *hcon, *bredr_hcon;
5609 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5614 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5626 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5628 hcon->state = BT_CONNECTED;
5629 bacpy(&hcon->dst, &bredr_hcon->dst);
5631 hci_conn_hold(hcon);
5632 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5633 hci_conn_drop(hcon);
5635 hci_debugfs_create_conn(hcon);
5636 hci_conn_add_sysfs(hcon);
5638 amp_physical_cfm(bredr_hcon, hcon);
5641 hci_dev_unlock(hdev);
5644 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5645 struct sk_buff *skb)
5647 struct hci_ev_logical_link_complete *ev = data;
5648 struct hci_conn *hcon;
5649 struct hci_chan *hchan;
5650 struct amp_mgr *mgr;
5652 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5653 le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5655 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5659 /* Create AMP hchan */
5660 hchan = hci_chan_create(hcon);
5664 hchan->handle = le16_to_cpu(ev->handle);
5667 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5669 mgr = hcon->amp_mgr;
5670 if (mgr && mgr->bredr_chan) {
5671 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5673 l2cap_chan_lock(bredr_chan);
5675 bredr_chan->conn->mtu = hdev->block_mtu;
5676 l2cap_logical_cfm(bredr_chan, hchan, 0);
5677 hci_conn_hold(hcon);
5679 l2cap_chan_unlock(bredr_chan);
5683 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5684 struct sk_buff *skb)
5686 struct hci_ev_disconn_logical_link_complete *ev = data;
5687 struct hci_chan *hchan;
5689 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5690 le16_to_cpu(ev->handle), ev->status);
5697 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
5698 if (!hchan || !hchan->amp)
5701 amp_destroy_logical_link(hchan, ev->reason);
5704 hci_dev_unlock(hdev);
5707 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5708 struct sk_buff *skb)
5710 struct hci_ev_disconn_phy_link_complete *ev = data;
5711 struct hci_conn *hcon;
5713 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5720 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5721 if (hcon && hcon->type == AMP_LINK) {
5722 hcon->state = BT_CLOSED;
5723 hci_disconn_cfm(hcon, ev->reason);
5727 hci_dev_unlock(hdev);
5731 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
5732 u8 bdaddr_type, bdaddr_t *local_rpa)
5735 conn->dst_type = bdaddr_type;
5736 conn->resp_addr_type = bdaddr_type;
5737 bacpy(&conn->resp_addr, bdaddr);
5739 /* Check if the controller has set a Local RPA then it must be
5740 * used instead or hdev->rpa.
5742 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5743 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5744 bacpy(&conn->init_addr, local_rpa);
5745 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
5746 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5747 bacpy(&conn->init_addr, &conn->hdev->rpa);
5749 hci_copy_identity_address(conn->hdev, &conn->init_addr,
5750 &conn->init_addr_type);
5753 conn->resp_addr_type = conn->hdev->adv_addr_type;
5754 /* Check if the controller has set a Local RPA then it must be
5755 * used instead or hdev->rpa.
5757 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
5758 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
5759 bacpy(&conn->resp_addr, local_rpa);
5760 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
5761 /* In case of ext adv, resp_addr will be updated in
5762 * Adv Terminated event.
5764 if (!ext_adv_capable(conn->hdev))
5765 bacpy(&conn->resp_addr,
5766 &conn->hdev->random_addr);
5768 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
5771 conn->init_addr_type = bdaddr_type;
5772 bacpy(&conn->init_addr, bdaddr);
5774 /* For incoming connections, set the default minimum
5775 * and maximum connection interval. They will be used
5776 * to check if the parameters are in range and if not
5777 * trigger the connection update procedure.
5779 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
5780 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
5784 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
5785 bdaddr_t *bdaddr, u8 bdaddr_type,
5786 bdaddr_t *local_rpa, u8 role, u16 handle,
5787 u16 interval, u16 latency,
5788 u16 supervision_timeout)
5790 struct hci_conn_params *params;
5791 struct hci_conn *conn;
5792 struct smp_irk *irk;
5797 /* All controllers implicitly stop advertising in the event of a
5798 * connection, so ensure that the state bit is cleared.
5800 hci_dev_clear_flag(hdev, HCI_LE_ADV);
5802 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
5804 /* In case of error status and there is no connection pending
5805 * just unlock as there is nothing to cleanup.
5810 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
5812 bt_dev_err(hdev, "no memory for new connection");
5816 conn->dst_type = bdaddr_type;
5818 /* If we didn't have a hci_conn object previously
5819 * but we're in central role this must be something
5820 * initiated using an accept list. Since accept list based
5821 * connections are not "first class citizens" we don't
5822 * have full tracking of them. Therefore, we go ahead
5823 * with a "best effort" approach of determining the
5824 * initiator address based on the HCI_PRIVACY flag.
5827 conn->resp_addr_type = bdaddr_type;
5828 bacpy(&conn->resp_addr, bdaddr);
5829 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
5830 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
5831 bacpy(&conn->init_addr, &hdev->rpa);
5833 hci_copy_identity_address(hdev,
5835 &conn->init_addr_type);
5839 cancel_delayed_work(&conn->le_conn_timeout);
5842 /* The HCI_LE_Connection_Complete event is only sent once per connection.
5843 * Processing it more than once per connection can corrupt kernel memory.
5845 * As the connection handle is set here for the first time, it indicates
5846 * whether the connection is already set up.
5848 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5849 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
5853 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
5855 /* Lookup the identity address from the stored connection
5856 * address and address type.
5858 * When establishing connections to an identity address, the
5859 * connection procedure will store the resolvable random
5860 * address first. Now if it can be converted back into the
5861 * identity address, start using the identity address from
5864 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
5866 bacpy(&conn->dst, &irk->bdaddr);
5867 conn->dst_type = irk->addr_type;
5870 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
5872 if (handle > HCI_CONN_HANDLE_MAX) {
5873 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
5874 HCI_CONN_HANDLE_MAX);
5875 status = HCI_ERROR_INVALID_PARAMETERS;
5878 /* All connection failure handling is taken care of by the
5879 * hci_conn_failed function which is triggered by the HCI
5880 * request completion callbacks used for connecting.
5885 /* Drop the connection if it has been aborted */
5886 if (test_bit(HCI_CONN_CANCEL, &conn->flags)) {
5887 hci_conn_drop(conn);
5891 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
5892 addr_type = BDADDR_LE_PUBLIC;
5894 addr_type = BDADDR_LE_RANDOM;
5896 /* Drop the connection if the device is blocked */
5897 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
5898 hci_conn_drop(conn);
5902 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5903 mgmt_device_connected(hdev, conn, NULL, 0);
5905 conn->sec_level = BT_SECURITY_LOW;
5906 conn->handle = handle;
5907 conn->state = BT_CONFIG;
5909 /* Store current advertising instance as connection advertising instance
5910 * when sotfware rotation is in use so it can be re-enabled when
5913 if (!ext_adv_capable(hdev))
5914 conn->adv_instance = hdev->cur_adv_instance;
5916 conn->le_conn_interval = interval;
5917 conn->le_conn_latency = latency;
5918 conn->le_supv_timeout = supervision_timeout;
5920 hci_debugfs_create_conn(conn);
5921 hci_conn_add_sysfs(conn);
5923 /* The remote features procedure is defined for central
5924 * role only. So only in case of an initiated connection
5925 * request the remote features.
5927 * If the local controller supports peripheral-initiated features
5928 * exchange, then requesting the remote features in peripheral
5929 * role is possible. Otherwise just transition into the
5930 * connected state without requesting the remote features.
5933 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
5934 struct hci_cp_le_read_remote_features cp;
5936 cp.handle = __cpu_to_le16(conn->handle);
5938 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
5941 hci_conn_hold(conn);
5943 conn->state = BT_CONNECTED;
5944 hci_connect_cfm(conn, status);
5947 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
5950 list_del_init(¶ms->action);
5952 hci_conn_drop(params->conn);
5953 hci_conn_put(params->conn);
5954 params->conn = NULL;
5959 hci_update_passive_scan(hdev);
5960 hci_dev_unlock(hdev);
5963 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
5964 struct sk_buff *skb)
5966 struct hci_ev_le_conn_complete *ev = data;
5968 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5970 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5971 NULL, ev->role, le16_to_cpu(ev->handle),
5972 le16_to_cpu(ev->interval),
5973 le16_to_cpu(ev->latency),
5974 le16_to_cpu(ev->supervision_timeout));
5977 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
5978 struct sk_buff *skb)
5980 struct hci_ev_le_enh_conn_complete *ev = data;
5982 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5984 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
5985 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
5986 le16_to_cpu(ev->interval),
5987 le16_to_cpu(ev->latency),
5988 le16_to_cpu(ev->supervision_timeout));
5991 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
5992 struct sk_buff *skb)
5994 struct hci_evt_le_ext_adv_set_term *ev = data;
5995 struct hci_conn *conn;
5996 struct adv_info *adv, *n;
5998 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6000 /* The Bluetooth Core 5.3 specification clearly states that this event
6001 * shall not be sent when the Host disables the advertising set. So in
6002 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6004 * When the Host disables an advertising set, all cleanup is done via
6005 * its command callback and not needed to be duplicated here.
6007 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6008 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6014 adv = hci_find_adv_instance(hdev, ev->handle);
6020 /* Remove advertising as it has been terminated */
6021 hci_remove_adv_instance(hdev, ev->handle);
6022 mgmt_advertising_removed(NULL, hdev, ev->handle);
6024 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6029 /* We are no longer advertising, clear HCI_LE_ADV */
6030 hci_dev_clear_flag(hdev, HCI_LE_ADV);
6035 adv->enabled = false;
6037 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6039 /* Store handle in the connection so the correct advertising
6040 * instance can be re-enabled when disconnected.
6042 conn->adv_instance = ev->handle;
6044 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6045 bacmp(&conn->resp_addr, BDADDR_ANY))
6049 bacpy(&conn->resp_addr, &hdev->random_addr);
6054 bacpy(&conn->resp_addr, &adv->random_addr);
6058 hci_dev_unlock(hdev);
6061 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6062 struct sk_buff *skb)
6064 struct hci_ev_le_conn_update_complete *ev = data;
6065 struct hci_conn *conn;
6067 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6074 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6076 conn->le_conn_interval = le16_to_cpu(ev->interval);
6077 conn->le_conn_latency = le16_to_cpu(ev->latency);
6078 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6081 hci_dev_unlock(hdev);
6084 /* This function requires the caller holds hdev->lock */
6085 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6087 u8 addr_type, bool addr_resolved,
6090 struct hci_conn *conn;
6091 struct hci_conn_params *params;
6093 /* If the event is not connectable don't proceed further */
6094 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6097 /* Ignore if the device is blocked or hdev is suspended */
6098 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6102 /* Most controller will fail if we try to create new connections
6103 * while we have an existing one in peripheral role.
6105 if (hdev->conn_hash.le_num_peripheral > 0 &&
6106 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6107 !(hdev->le_states[3] & 0x10)))
6110 /* If we're not connectable only connect devices that we have in
6111 * our pend_le_conns list.
6113 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6118 if (!params->explicit_connect) {
6119 switch (params->auto_connect) {
6120 case HCI_AUTO_CONN_DIRECT:
6121 /* Only devices advertising with ADV_DIRECT_IND are
6122 * triggering a connection attempt. This is allowing
6123 * incoming connections from peripheral devices.
6125 if (adv_type != LE_ADV_DIRECT_IND)
6128 case HCI_AUTO_CONN_ALWAYS:
6129 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
6130 * are triggering a connection attempt. This means
6131 * that incoming connections from peripheral device are
6132 * accepted and also outgoing connections to peripheral
6133 * devices are established when found.
6141 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6142 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6144 if (!IS_ERR(conn)) {
6145 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6146 * by higher layer that tried to connect, if no then
6147 * store the pointer since we don't really have any
6148 * other owner of the object besides the params that
6149 * triggered it. This way we can abort the connection if
6150 * the parameters get removed and keep the reference
6151 * count consistent once the connection is established.
6154 if (!params->explicit_connect)
6155 params->conn = hci_conn_get(conn);
6160 switch (PTR_ERR(conn)) {
6162 /* If hci_connect() returns -EBUSY it means there is already
6163 * an LE connection attempt going on. Since controllers don't
6164 * support more than one connection attempt at the time, we
6165 * don't consider this an error case.
6169 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
6176 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6177 u8 bdaddr_type, bdaddr_t *direct_addr,
6178 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6179 bool ext_adv, bool ctl_time, u64 instant)
6181 struct discovery_state *d = &hdev->discovery;
6182 struct smp_irk *irk;
6183 struct hci_conn *conn;
6184 bool match, bdaddr_resolved;
6190 case LE_ADV_DIRECT_IND:
6191 case LE_ADV_SCAN_IND:
6192 case LE_ADV_NONCONN_IND:
6193 case LE_ADV_SCAN_RSP:
6196 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6197 "type: 0x%02x", type);
6201 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
6202 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
6206 /* Find the end of the data in case the report contains padded zero
6207 * bytes at the end causing an invalid length value.
6209 * When data is NULL, len is 0 so there is no need for extra ptr
6210 * check as 'ptr < data + 0' is already false in such case.
6212 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6213 if (ptr + 1 + *ptr > data + len)
6217 /* Adjust for actual length. This handles the case when remote
6218 * device is advertising with incorrect data length.
6222 /* If the direct address is present, then this report is from
6223 * a LE Direct Advertising Report event. In that case it is
6224 * important to see if the address is matching the local
6225 * controller address.
6227 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6228 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6231 /* Only resolvable random addresses are valid for these
6232 * kind of reports and others can be ignored.
6234 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6237 /* If the controller is not using resolvable random
6238 * addresses, then this report can be ignored.
6240 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6243 /* If the local IRK of the controller does not match
6244 * with the resolvable random address provided, then
6245 * this report can be ignored.
6247 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6251 /* Check if we need to convert to identity address */
6252 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6254 bdaddr = &irk->bdaddr;
6255 bdaddr_type = irk->addr_type;
6258 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6260 /* Check if we have been requested to connect to this device.
6262 * direct_addr is set only for directed advertising reports (it is NULL
6263 * for advertising reports) and is already verified to be RPA above.
6265 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6267 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
6268 /* Store report for later inclusion by
6269 * mgmt_device_connected
6271 memcpy(conn->le_adv_data, data, len);
6272 conn->le_adv_data_len = len;
6275 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6276 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6280 /* All scan results should be sent up for Mesh systems */
6281 if (hci_dev_test_flag(hdev, HCI_MESH)) {
6282 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6283 rssi, flags, data, len, NULL, 0, instant);
6287 /* Passive scanning shouldn't trigger any device found events,
6288 * except for devices marked as CONN_REPORT for which we do send
6289 * device found events, or advertisement monitoring requested.
6291 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6292 if (type == LE_ADV_DIRECT_IND)
6295 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6296 bdaddr, bdaddr_type) &&
6297 idr_is_empty(&hdev->adv_monitors_idr))
6300 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6301 rssi, flags, data, len, NULL, 0, 0);
6305 /* When receiving non-connectable or scannable undirected
6306 * advertising reports, this means that the remote device is
6307 * not connectable and then clearly indicate this in the
6308 * device found event.
6310 * When receiving a scan response, then there is no way to
6311 * know if the remote device is connectable or not. However
6312 * since scan responses are merged with a previously seen
6313 * advertising report, the flags field from that report
6316 * In the really unlikely case that a controller get confused
6317 * and just sends a scan response event, then it is marked as
6318 * not connectable as well.
6320 if (type == LE_ADV_SCAN_RSP)
6321 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6323 /* If there's nothing pending either store the data from this
6324 * event or send an immediate device found event if the data
6325 * should not be stored for later.
6327 if (!ext_adv && !has_pending_adv_report(hdev)) {
6328 /* If the report will trigger a SCAN_REQ store it for
6331 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6332 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6333 rssi, flags, data, len);
6337 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6338 rssi, flags, data, len, NULL, 0, 0);
6342 /* Check if the pending report is for the same device as the new one */
6343 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6344 bdaddr_type == d->last_adv_addr_type);
6346 /* If the pending data doesn't match this report or this isn't a
6347 * scan response (e.g. we got a duplicate ADV_IND) then force
6348 * sending of the pending data.
6350 if (type != LE_ADV_SCAN_RSP || !match) {
6351 /* Send out whatever is in the cache, but skip duplicates */
6353 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6354 d->last_adv_addr_type, NULL,
6355 d->last_adv_rssi, d->last_adv_flags,
6357 d->last_adv_data_len, NULL, 0, 0);
6359 /* If the new report will trigger a SCAN_REQ store it for
6362 if (!ext_adv && (type == LE_ADV_IND ||
6363 type == LE_ADV_SCAN_IND)) {
6364 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6365 rssi, flags, data, len);
6369 /* The advertising reports cannot be merged, so clear
6370 * the pending report and send out a device found event.
6372 clear_pending_adv_report(hdev);
6373 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6374 rssi, flags, data, len, NULL, 0, 0);
6378 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6379 * the new event is a SCAN_RSP. We can therefore proceed with
6380 * sending a merged device found event.
6382 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6383 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6384 d->last_adv_data, d->last_adv_data_len, data, len, 0);
6385 clear_pending_adv_report(hdev);
6388 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6389 struct sk_buff *skb)
6391 struct hci_ev_le_advertising_report *ev = data;
6392 u64 instant = jiffies;
6400 struct hci_ev_le_advertising_info *info;
6403 info = hci_le_ev_skb_pull(hdev, skb,
6404 HCI_EV_LE_ADVERTISING_REPORT,
6409 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6413 if (info->length <= HCI_MAX_AD_LENGTH) {
6414 rssi = info->data[info->length];
6415 process_adv_report(hdev, info->type, &info->bdaddr,
6416 info->bdaddr_type, NULL, 0, rssi,
6417 info->data, info->length, false,
6420 bt_dev_err(hdev, "Dropping invalid advertising data");
6424 hci_dev_unlock(hdev);
6427 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
6429 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6431 case LE_LEGACY_ADV_IND:
6433 case LE_LEGACY_ADV_DIRECT_IND:
6434 return LE_ADV_DIRECT_IND;
6435 case LE_LEGACY_ADV_SCAN_IND:
6436 return LE_ADV_SCAN_IND;
6437 case LE_LEGACY_NONCONN_IND:
6438 return LE_ADV_NONCONN_IND;
6439 case LE_LEGACY_SCAN_RSP_ADV:
6440 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6441 return LE_ADV_SCAN_RSP;
6447 if (evt_type & LE_EXT_ADV_CONN_IND) {
6448 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6449 return LE_ADV_DIRECT_IND;
6454 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6455 return LE_ADV_SCAN_RSP;
6457 if (evt_type & LE_EXT_ADV_SCAN_IND)
6458 return LE_ADV_SCAN_IND;
6460 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6461 evt_type & LE_EXT_ADV_DIRECT_IND)
6462 return LE_ADV_NONCONN_IND;
6465 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6468 return LE_ADV_INVALID;
6471 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6472 struct sk_buff *skb)
6474 struct hci_ev_le_ext_adv_report *ev = data;
6475 u64 instant = jiffies;
6483 struct hci_ev_le_ext_adv_info *info;
6487 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6492 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6496 evt_type = __le16_to_cpu(info->type);
6497 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6498 if (legacy_evt_type != LE_ADV_INVALID) {
6499 process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6500 info->bdaddr_type, NULL, 0,
6501 info->rssi, info->data, info->length,
6502 !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6507 hci_dev_unlock(hdev);
6510 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6512 struct hci_cp_le_pa_term_sync cp;
6514 memset(&cp, 0, sizeof(cp));
6517 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
6520 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6521 struct sk_buff *skb)
6523 struct hci_ev_le_pa_sync_established *ev = data;
6524 int mask = hdev->link_mode;
6527 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6534 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
6536 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6537 if (!(mask & HCI_LM_ACCEPT))
6538 hci_le_pa_term_sync(hdev, ev->handle);
6540 hci_dev_unlock(hdev);
6543 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6544 struct sk_buff *skb)
6546 struct hci_ev_le_remote_feat_complete *ev = data;
6547 struct hci_conn *conn;
6549 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6553 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6556 memcpy(conn->features[0], ev->features, 8);
6558 if (conn->state == BT_CONFIG) {
6561 /* If the local controller supports peripheral-initiated
6562 * features exchange, but the remote controller does
6563 * not, then it is possible that the error code 0x1a
6564 * for unsupported remote feature gets returned.
6566 * In this specific case, allow the connection to
6567 * transition into connected state and mark it as
6570 if (!conn->out && ev->status == 0x1a &&
6571 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6574 status = ev->status;
6576 conn->state = BT_CONNECTED;
6577 hci_connect_cfm(conn, status);
6578 hci_conn_drop(conn);
6582 hci_dev_unlock(hdev);
6585 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6586 struct sk_buff *skb)
6588 struct hci_ev_le_ltk_req *ev = data;
6589 struct hci_cp_le_ltk_reply cp;
6590 struct hci_cp_le_ltk_neg_reply neg;
6591 struct hci_conn *conn;
6592 struct smp_ltk *ltk;
6594 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6598 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6602 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6606 if (smp_ltk_is_sc(ltk)) {
6607 /* With SC both EDiv and Rand are set to zero */
6608 if (ev->ediv || ev->rand)
6611 /* For non-SC keys check that EDiv and Rand match */
6612 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
6616 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6617 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6618 cp.handle = cpu_to_le16(conn->handle);
6620 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6622 conn->enc_key_size = ltk->enc_size;
6624 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6626 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6627 * temporary key used to encrypt a connection following
6628 * pairing. It is used during the Encrypted Session Setup to
6629 * distribute the keys. Later, security can be re-established
6630 * using a distributed LTK.
6632 if (ltk->type == SMP_STK) {
6633 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6634 list_del_rcu(<k->list);
6635 kfree_rcu(ltk, rcu);
6637 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6640 hci_dev_unlock(hdev);
6645 neg.handle = ev->handle;
6646 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6647 hci_dev_unlock(hdev);
6650 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6653 struct hci_cp_le_conn_param_req_neg_reply cp;
6655 cp.handle = cpu_to_le16(handle);
6658 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
6662 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6663 struct sk_buff *skb)
6665 struct hci_ev_le_remote_conn_param_req *ev = data;
6666 struct hci_cp_le_conn_param_req_reply cp;
6667 struct hci_conn *hcon;
6668 u16 handle, min, max, latency, timeout;
6670 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6672 handle = le16_to_cpu(ev->handle);
6673 min = le16_to_cpu(ev->interval_min);
6674 max = le16_to_cpu(ev->interval_max);
6675 latency = le16_to_cpu(ev->latency);
6676 timeout = le16_to_cpu(ev->timeout);
6678 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6679 if (!hcon || hcon->state != BT_CONNECTED)
6680 return send_conn_param_neg_reply(hdev, handle,
6681 HCI_ERROR_UNKNOWN_CONN_ID);
6683 if (hci_check_conn_params(min, max, latency, timeout))
6684 return send_conn_param_neg_reply(hdev, handle,
6685 HCI_ERROR_INVALID_LL_PARAMS);
6687 if (hcon->role == HCI_ROLE_MASTER) {
6688 struct hci_conn_params *params;
6693 params = hci_conn_params_lookup(hdev, &hcon->dst,
6696 params->conn_min_interval = min;
6697 params->conn_max_interval = max;
6698 params->conn_latency = latency;
6699 params->supervision_timeout = timeout;
6705 hci_dev_unlock(hdev);
6707 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
6708 store_hint, min, max, latency, timeout);
6711 cp.handle = ev->handle;
6712 cp.interval_min = ev->interval_min;
6713 cp.interval_max = ev->interval_max;
6714 cp.latency = ev->latency;
6715 cp.timeout = ev->timeout;
6719 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
6722 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
6723 struct sk_buff *skb)
6725 struct hci_ev_le_direct_adv_report *ev = data;
6726 u64 instant = jiffies;
6729 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
6730 flex_array_size(ev, info, ev->num)))
6738 for (i = 0; i < ev->num; i++) {
6739 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
6741 process_adv_report(hdev, info->type, &info->bdaddr,
6742 info->bdaddr_type, &info->direct_addr,
6743 info->direct_addr_type, info->rssi, NULL, 0,
6744 false, false, instant);
6747 hci_dev_unlock(hdev);
6750 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
6751 struct sk_buff *skb)
6753 struct hci_ev_le_phy_update_complete *ev = data;
6754 struct hci_conn *conn;
6756 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6763 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6767 conn->le_tx_phy = ev->tx_phy;
6768 conn->le_rx_phy = ev->rx_phy;
6771 hci_dev_unlock(hdev);
6774 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
6775 struct sk_buff *skb)
6777 struct hci_evt_le_cis_established *ev = data;
6778 struct hci_conn *conn;
6779 u16 handle = __le16_to_cpu(ev->handle);
6781 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6785 conn = hci_conn_hash_lookup_handle(hdev, handle);
6788 "Unable to find connection with handle 0x%4.4x",
6793 if (conn->type != ISO_LINK) {
6795 "Invalid connection link type handle 0x%4.4x",
6800 if (conn->role == HCI_ROLE_SLAVE) {
6803 memset(&interval, 0, sizeof(interval));
6805 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
6806 conn->iso_qos.in.interval = le32_to_cpu(interval);
6807 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
6808 conn->iso_qos.out.interval = le32_to_cpu(interval);
6809 conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
6810 conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
6811 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
6812 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
6813 conn->iso_qos.in.phy = ev->c_phy;
6814 conn->iso_qos.out.phy = ev->p_phy;
6818 conn->state = BT_CONNECTED;
6819 hci_debugfs_create_conn(conn);
6820 hci_conn_add_sysfs(conn);
6821 hci_iso_setup_path(conn);
6825 hci_connect_cfm(conn, ev->status);
6829 hci_dev_unlock(hdev);
6832 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
6834 struct hci_cp_le_reject_cis cp;
6836 memset(&cp, 0, sizeof(cp));
6838 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
6839 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
6842 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
6844 struct hci_cp_le_accept_cis cp;
6846 memset(&cp, 0, sizeof(cp));
6848 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
6851 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
6852 struct sk_buff *skb)
6854 struct hci_evt_le_cis_req *ev = data;
6855 u16 acl_handle, cis_handle;
6856 struct hci_conn *acl, *cis;
6860 acl_handle = __le16_to_cpu(ev->acl_handle);
6861 cis_handle = __le16_to_cpu(ev->cis_handle);
6863 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
6864 acl_handle, cis_handle, ev->cig_id, ev->cis_id);
6868 acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
6872 mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
6873 if (!(mask & HCI_LM_ACCEPT)) {
6874 hci_le_reject_cis(hdev, ev->cis_handle);
6878 cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
6880 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
6882 hci_le_reject_cis(hdev, ev->cis_handle);
6885 cis->handle = cis_handle;
6888 cis->iso_qos.cig = ev->cig_id;
6889 cis->iso_qos.cis = ev->cis_id;
6891 if (!(flags & HCI_PROTO_DEFER)) {
6892 hci_le_accept_cis(hdev, ev->cis_handle);
6894 cis->state = BT_CONNECT2;
6895 hci_connect_cfm(cis, 0);
6899 hci_dev_unlock(hdev);
6902 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
6903 struct sk_buff *skb)
6905 struct hci_evt_le_create_big_complete *ev = data;
6906 struct hci_conn *conn;
6908 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
6910 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
6911 flex_array_size(ev, bis_handle, ev->num_bis)))
6916 conn = hci_conn_hash_lookup_big(hdev, ev->handle);
6920 if (conn->type != ISO_LINK) {
6922 "Invalid connection link type handle 0x%2.2x",
6928 conn->handle = __le16_to_cpu(ev->bis_handle[0]);
6931 conn->state = BT_CONNECTED;
6932 hci_debugfs_create_conn(conn);
6933 hci_conn_add_sysfs(conn);
6934 hci_iso_setup_path(conn);
6938 hci_connect_cfm(conn, ev->status);
6942 hci_dev_unlock(hdev);
6945 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
6946 struct sk_buff *skb)
6948 struct hci_evt_le_big_sync_estabilished *ev = data;
6949 struct hci_conn *bis;
6952 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6954 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
6955 flex_array_size(ev, bis, ev->num_bis)))
6963 for (i = 0; i < ev->num_bis; i++) {
6964 u16 handle = le16_to_cpu(ev->bis[i]);
6967 bis = hci_conn_hash_lookup_handle(hdev, handle);
6969 bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
6973 bis->handle = handle;
6976 bis->iso_qos.big = ev->handle;
6977 memset(&interval, 0, sizeof(interval));
6978 memcpy(&interval, ev->latency, sizeof(ev->latency));
6979 bis->iso_qos.in.interval = le32_to_cpu(interval);
6980 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
6981 bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
6982 bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
6984 hci_connect_cfm(bis, ev->status);
6987 hci_dev_unlock(hdev);
6990 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
6991 struct sk_buff *skb)
6993 struct hci_evt_le_big_info_adv_report *ev = data;
6994 int mask = hdev->link_mode;
6997 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7001 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7002 if (!(mask & HCI_LM_ACCEPT))
7003 hci_le_pa_term_sync(hdev, ev->sync_handle);
7005 hci_dev_unlock(hdev);
7008 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7011 .min_len = _min_len, \
7012 .max_len = _max_len, \
7015 #define HCI_LE_EV(_op, _func, _len) \
7016 HCI_LE_EV_VL(_op, _func, _len, _len)
7018 #define HCI_LE_EV_STATUS(_op, _func) \
7019 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7021 /* Entries in this table shall have their position according to the subevent
7022 * opcode they handle so the use of the macros above is recommend since it does
7023 * attempt to initialize at its proper index using Designated Initializers that
7024 * way events without a callback function can be ommited.
7026 static const struct hci_le_ev {
7027 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7030 } hci_le_ev_table[U8_MAX + 1] = {
7031 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7032 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7033 sizeof(struct hci_ev_le_conn_complete)),
7034 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7035 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7036 sizeof(struct hci_ev_le_advertising_report),
7037 HCI_MAX_EVENT_SIZE),
7038 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7039 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7040 hci_le_conn_update_complete_evt,
7041 sizeof(struct hci_ev_le_conn_update_complete)),
7042 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7043 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7044 hci_le_remote_feat_complete_evt,
7045 sizeof(struct hci_ev_le_remote_feat_complete)),
7046 /* [0x05 = HCI_EV_LE_LTK_REQ] */
7047 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7048 sizeof(struct hci_ev_le_ltk_req)),
7049 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7050 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7051 hci_le_remote_conn_param_req_evt,
7052 sizeof(struct hci_ev_le_remote_conn_param_req)),
7053 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7054 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7055 hci_le_enh_conn_complete_evt,
7056 sizeof(struct hci_ev_le_enh_conn_complete)),
7057 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7058 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7059 sizeof(struct hci_ev_le_direct_adv_report),
7060 HCI_MAX_EVENT_SIZE),
7061 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7062 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7063 sizeof(struct hci_ev_le_phy_update_complete)),
7064 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7065 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7066 sizeof(struct hci_ev_le_ext_adv_report),
7067 HCI_MAX_EVENT_SIZE),
7068 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7069 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7070 hci_le_pa_sync_estabilished_evt,
7071 sizeof(struct hci_ev_le_pa_sync_established)),
7072 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7073 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7074 sizeof(struct hci_evt_le_ext_adv_set_term)),
7075 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7076 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7077 sizeof(struct hci_evt_le_cis_established)),
7078 /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7079 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7080 sizeof(struct hci_evt_le_cis_req)),
7081 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7082 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7083 hci_le_create_big_complete_evt,
7084 sizeof(struct hci_evt_le_create_big_complete),
7085 HCI_MAX_EVENT_SIZE),
7086 /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */
7087 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7088 hci_le_big_sync_established_evt,
7089 sizeof(struct hci_evt_le_big_sync_estabilished),
7090 HCI_MAX_EVENT_SIZE),
7091 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7092 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7093 hci_le_big_info_adv_report_evt,
7094 sizeof(struct hci_evt_le_big_info_adv_report),
7095 HCI_MAX_EVENT_SIZE),
7098 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7099 struct sk_buff *skb, u16 *opcode, u8 *status,
7100 hci_req_complete_t *req_complete,
7101 hci_req_complete_skb_t *req_complete_skb)
7103 struct hci_ev_le_meta *ev = data;
7104 const struct hci_le_ev *subev;
7106 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7108 /* Only match event if command OGF is for LE */
7109 if (hdev->sent_cmd &&
7110 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
7111 hci_skb_event(hdev->sent_cmd) == ev->subevent) {
7112 *opcode = hci_skb_opcode(hdev->sent_cmd);
7113 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
7117 subev = &hci_le_ev_table[ev->subevent];
7121 if (skb->len < subev->min_len) {
7122 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7123 ev->subevent, skb->len, subev->min_len);
7127 /* Just warn if the length is over max_len size it still be
7128 * possible to partially parse the event so leave to callback to
7129 * decide if that is acceptable.
7131 if (skb->len > subev->max_len)
7132 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7133 ev->subevent, skb->len, subev->max_len);
7134 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7138 subev->func(hdev, data, skb);
7141 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7142 u8 event, struct sk_buff *skb)
7144 struct hci_ev_cmd_complete *ev;
7145 struct hci_event_hdr *hdr;
7150 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7155 if (hdr->evt != event)
7160 /* Check if request ended in Command Status - no way to retrieve
7161 * any extra parameters in this case.
7163 if (hdr->evt == HCI_EV_CMD_STATUS)
7166 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7167 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7172 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
7176 if (opcode != __le16_to_cpu(ev->opcode)) {
7177 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7178 __le16_to_cpu(ev->opcode));
7185 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7186 struct sk_buff *skb)
7188 struct hci_ev_le_advertising_info *adv;
7189 struct hci_ev_le_direct_adv_info *direct_adv;
7190 struct hci_ev_le_ext_adv_info *ext_adv;
7191 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7192 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7196 /* If we are currently suspended and this is the first BT event seen,
7197 * save the wake reason associated with the event.
7199 if (!hdev->suspended || hdev->wake_reason)
7202 /* Default to remote wake. Values for wake_reason are documented in the
7203 * Bluez mgmt api docs.
7205 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7207 /* Once configured for remote wakeup, we should only wake up for
7208 * reconnections. It's useful to see which device is waking us up so
7209 * keep track of the bdaddr of the connection event that woke us up.
7211 if (event == HCI_EV_CONN_REQUEST) {
7212 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7213 hdev->wake_addr_type = BDADDR_BREDR;
7214 } else if (event == HCI_EV_CONN_COMPLETE) {
7215 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7216 hdev->wake_addr_type = BDADDR_BREDR;
7217 } else if (event == HCI_EV_LE_META) {
7218 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7219 u8 subevent = le_ev->subevent;
7220 u8 *ptr = &skb->data[sizeof(*le_ev)];
7221 u8 num_reports = *ptr;
7223 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7224 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7225 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7227 adv = (void *)(ptr + 1);
7228 direct_adv = (void *)(ptr + 1);
7229 ext_adv = (void *)(ptr + 1);
7232 case HCI_EV_LE_ADVERTISING_REPORT:
7233 bacpy(&hdev->wake_addr, &adv->bdaddr);
7234 hdev->wake_addr_type = adv->bdaddr_type;
7236 case HCI_EV_LE_DIRECT_ADV_REPORT:
7237 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7238 hdev->wake_addr_type = direct_adv->bdaddr_type;
7240 case HCI_EV_LE_EXT_ADV_REPORT:
7241 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7242 hdev->wake_addr_type = ext_adv->bdaddr_type;
7247 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7251 hci_dev_unlock(hdev);
7254 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7258 .min_len = _min_len, \
7259 .max_len = _max_len, \
7262 #define HCI_EV(_op, _func, _len) \
7263 HCI_EV_VL(_op, _func, _len, _len)
7265 #define HCI_EV_STATUS(_op, _func) \
7266 HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7268 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7271 .func_req = _func, \
7272 .min_len = _min_len, \
7273 .max_len = _max_len, \
7276 #define HCI_EV_REQ(_op, _func, _len) \
7277 HCI_EV_REQ_VL(_op, _func, _len, _len)
7279 /* Entries in this table shall have their position according to the event opcode
7280 * they handle so the use of the macros above is recommend since it does attempt
7281 * to initialize at its proper index using Designated Initializers that way
7282 * events without a callback function don't have entered.
7284 static const struct hci_ev {
7287 void (*func)(struct hci_dev *hdev, void *data,
7288 struct sk_buff *skb);
7289 void (*func_req)(struct hci_dev *hdev, void *data,
7290 struct sk_buff *skb, u16 *opcode, u8 *status,
7291 hci_req_complete_t *req_complete,
7292 hci_req_complete_skb_t *req_complete_skb);
7296 } hci_ev_table[U8_MAX + 1] = {
7297 /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7298 HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7299 /* [0x02 = HCI_EV_INQUIRY_RESULT] */
7300 HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7301 sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7302 /* [0x03 = HCI_EV_CONN_COMPLETE] */
7303 HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7304 sizeof(struct hci_ev_conn_complete)),
7305 /* [0x04 = HCI_EV_CONN_REQUEST] */
7306 HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7307 sizeof(struct hci_ev_conn_request)),
7308 /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7309 HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7310 sizeof(struct hci_ev_disconn_complete)),
7311 /* [0x06 = HCI_EV_AUTH_COMPLETE] */
7312 HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7313 sizeof(struct hci_ev_auth_complete)),
7314 /* [0x07 = HCI_EV_REMOTE_NAME] */
7315 HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7316 sizeof(struct hci_ev_remote_name)),
7317 /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7318 HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7319 sizeof(struct hci_ev_encrypt_change)),
7320 /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7321 HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7322 hci_change_link_key_complete_evt,
7323 sizeof(struct hci_ev_change_link_key_complete)),
7324 /* [0x0b = HCI_EV_REMOTE_FEATURES] */
7325 HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7326 sizeof(struct hci_ev_remote_features)),
7327 /* [0x0e = HCI_EV_CMD_COMPLETE] */
7328 HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7329 sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7330 /* [0x0f = HCI_EV_CMD_STATUS] */
7331 HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7332 sizeof(struct hci_ev_cmd_status)),
7333 /* [0x10 = HCI_EV_CMD_STATUS] */
7334 HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7335 sizeof(struct hci_ev_hardware_error)),
7336 /* [0x12 = HCI_EV_ROLE_CHANGE] */
7337 HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7338 sizeof(struct hci_ev_role_change)),
7339 /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7340 HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7341 sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7342 /* [0x14 = HCI_EV_MODE_CHANGE] */
7343 HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7344 sizeof(struct hci_ev_mode_change)),
7345 /* [0x16 = HCI_EV_PIN_CODE_REQ] */
7346 HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7347 sizeof(struct hci_ev_pin_code_req)),
7348 /* [0x17 = HCI_EV_LINK_KEY_REQ] */
7349 HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7350 sizeof(struct hci_ev_link_key_req)),
7351 /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7352 HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7353 sizeof(struct hci_ev_link_key_notify)),
7354 /* [0x1c = HCI_EV_CLOCK_OFFSET] */
7355 HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7356 sizeof(struct hci_ev_clock_offset)),
7357 /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7358 HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7359 sizeof(struct hci_ev_pkt_type_change)),
7360 /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7361 HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7362 sizeof(struct hci_ev_pscan_rep_mode)),
7363 /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7364 HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7365 hci_inquiry_result_with_rssi_evt,
7366 sizeof(struct hci_ev_inquiry_result_rssi),
7367 HCI_MAX_EVENT_SIZE),
7368 /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7369 HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7370 sizeof(struct hci_ev_remote_ext_features)),
7371 /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7372 HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7373 sizeof(struct hci_ev_sync_conn_complete)),
7374 /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7375 HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7376 hci_extended_inquiry_result_evt,
7377 sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7378 /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7379 HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7380 sizeof(struct hci_ev_key_refresh_complete)),
7381 /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7382 HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7383 sizeof(struct hci_ev_io_capa_request)),
7384 /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7385 HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7386 sizeof(struct hci_ev_io_capa_reply)),
7387 /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7388 HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7389 sizeof(struct hci_ev_user_confirm_req)),
7390 /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7391 HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7392 sizeof(struct hci_ev_user_passkey_req)),
7393 /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7394 HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7395 sizeof(struct hci_ev_remote_oob_data_request)),
7396 /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7397 HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7398 sizeof(struct hci_ev_simple_pair_complete)),
7399 /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7400 HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7401 sizeof(struct hci_ev_user_passkey_notify)),
7402 /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7403 HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7404 sizeof(struct hci_ev_keypress_notify)),
7405 /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7406 HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7407 sizeof(struct hci_ev_remote_host_features)),
7408 /* [0x3e = HCI_EV_LE_META] */
7409 HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7410 sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7411 #if IS_ENABLED(CONFIG_BT_HS)
7412 /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7413 HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7414 sizeof(struct hci_ev_phy_link_complete)),
7415 /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7416 HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7417 sizeof(struct hci_ev_channel_selected)),
7418 /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7419 HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7420 hci_disconn_loglink_complete_evt,
7421 sizeof(struct hci_ev_disconn_logical_link_complete)),
7422 /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7423 HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7424 sizeof(struct hci_ev_logical_link_complete)),
7425 /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7426 HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7427 hci_disconn_phylink_complete_evt,
7428 sizeof(struct hci_ev_disconn_phy_link_complete)),
7430 /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7431 HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7432 sizeof(struct hci_ev_num_comp_blocks)),
7433 /* [0xff = HCI_EV_VENDOR] */
7434 HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
7437 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7438 u16 *opcode, u8 *status,
7439 hci_req_complete_t *req_complete,
7440 hci_req_complete_skb_t *req_complete_skb)
7442 const struct hci_ev *ev = &hci_ev_table[event];
7448 if (skb->len < ev->min_len) {
7449 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7450 event, skb->len, ev->min_len);
7454 /* Just warn if the length is over max_len size it still be
7455 * possible to partially parse the event so leave to callback to
7456 * decide if that is acceptable.
7458 if (skb->len > ev->max_len)
7459 bt_dev_warn_ratelimited(hdev,
7460 "unexpected event 0x%2.2x length: %u > %u",
7461 event, skb->len, ev->max_len);
7463 data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7468 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7471 ev->func(hdev, data, skb);
7474 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7476 struct hci_event_hdr *hdr = (void *) skb->data;
7477 hci_req_complete_t req_complete = NULL;
7478 hci_req_complete_skb_t req_complete_skb = NULL;
7479 struct sk_buff *orig_skb = NULL;
7480 u8 status = 0, event, req_evt = 0;
7481 u16 opcode = HCI_OP_NOP;
7483 if (skb->len < sizeof(*hdr)) {
7484 bt_dev_err(hdev, "Malformed HCI Event");
7488 kfree_skb(hdev->recv_event);
7489 hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7493 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7498 /* Only match event if command OGF is not for LE */
7499 if (hdev->sent_cmd &&
7500 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
7501 hci_skb_event(hdev->sent_cmd) == event) {
7502 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
7503 status, &req_complete, &req_complete_skb);
7507 /* If it looks like we might end up having to call
7508 * req_complete_skb, store a pristine copy of the skb since the
7509 * various handlers may modify the original one through
7510 * skb_pull() calls, etc.
7512 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7513 event == HCI_EV_CMD_COMPLETE)
7514 orig_skb = skb_clone(skb, GFP_KERNEL);
7516 skb_pull(skb, HCI_EVENT_HDR_SIZE);
7518 /* Store wake reason if we're suspended */
7519 hci_store_wake_reason(hdev, event, skb);
7521 bt_dev_dbg(hdev, "event 0x%2.2x", event);
7523 hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7527 req_complete(hdev, status, opcode);
7528 } else if (req_complete_skb) {
7529 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7530 kfree_skb(orig_skb);
7533 req_complete_skb(hdev, status, opcode, orig_skb);
7537 kfree_skb(orig_skb);
7539 hdev->stat.evt_rx++;