2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
41 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
42 "\x00\x00\x00\x00\x00\x00\x00\x00"
44 #define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)
46 /* Handle HCI Event packets */
/* Pull 'len' bytes of HCI event payload off the skb via skb_pull_data();
 * on failure logs the event code as a malformed event.
 * NOTE(review): interior lines are elided in this view — return/NULL-check
 * not visible.
 */
48 static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
53 	data = skb_pull_data(skb, len);
55 		bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev);
/* Same as hci_ev_skb_pull() but for Command Complete payloads; logs the
 * 16-bit opcode on a short pull. */
60 static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
65 	data = skb_pull_data(skb, len);
67 		bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op);
/* Same as hci_ev_skb_pull() but for LE meta-event payloads; logs the LE
 * sub-event code on a short pull. */
72 static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,
77 	data = skb_pull_data(skb, len);
79 		bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev);
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 * Clears HCI_INQUIRY, wakes waiters on that bit, and moves discovery to
 * DISCOVERY_STOPPED unless an LE active scan is still running.
 */
84 static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data,
87 	struct hci_ev_status *rp = data;
89 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
91 	/* It is possible that we receive Inquiry Complete event right
92 	 * before we receive Inquiry Cancel Command Complete event, in
93 	 * which case the latter event should have status of Command
94 	 * Disallowed (0x0c). This should not be treated as error, since
95 	 * we actually achieve what Inquiry Cancel wants to achieve,
96 	 * which is to end the last Inquiry session.
/* Treat Command Disallowed (0x0c) as success when inquiry already ended. */
98 	if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
99 		bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
106 	clear_bit(HCI_INQUIRY, &hdev->flags);
107 	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
108 	wake_up_bit(&hdev->flags, HCI_INQUIRY);
111 	/* Set discovery state to stopped if we're not doing LE active
114 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
115 	    hdev->le_scan_type != LE_SCAN_ACTIVE)
116 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
117 	hci_dev_unlock(hdev);
119 	hci_conn_check_pending(hdev);
/* Command Complete handler for Periodic Inquiry Mode: sets the
 * HCI_PERIODIC_INQ device flag (status check presumably elided). */
124 static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data,
127 	struct hci_ev_status *rp = data;
129 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
134 	hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
/* Command Complete handler for Exit Periodic Inquiry Mode: clears
 * HCI_PERIODIC_INQ and kicks any pending connection attempts. */
139 static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data,
142 	struct hci_ev_status *rp = data;
144 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
149 	hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
151 	hci_conn_check_pending(hdev);
/* Command Complete handler for Remote Name Request Cancel: only logs the
 * status in this visible portion. */
156 static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data,
159 	struct hci_ev_status *rp = data;
161 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Command Complete handler for Role Discovery: looks up the connection by
 * handle and records the reported role (central/peripheral) on it. */
166 static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data,
169 	struct hci_rp_role_discovery *rp = data;
170 	struct hci_conn *conn;
172 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
179 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
181 		conn->role = rp->role;
183 	hci_dev_unlock(hdev);
/* Command Complete handler for Read Link Policy Settings: stores the
 * per-connection link policy on the matching hci_conn. */
188 static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data,
191 	struct hci_rp_read_link_policy *rp = data;
192 	struct hci_conn *conn;
194 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
201 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
203 		conn->link_policy = __le16_to_cpu(rp->policy);
205 	hci_dev_unlock(hdev);
/* Command Complete handler for Write Link Policy Settings.
 * Recovers the written policy from the sent command parameters, records it
 * on the connection, and — if an SCO link to the same peer exists — strips
 * SNIFF from the policy by re-issuing the command, since sniff mode must
 * not be used while an SCO connection is up.
 */
210 static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data,
213 	struct hci_rp_write_link_policy *rp = data;
214 	struct hci_conn *conn;
217 	struct hci_cp_write_link_policy cp;
218 	struct hci_conn *sco_conn;
221 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
226 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
232 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* Policy value lives at offset 2 of the sent command (after the handle). */
234 		conn->link_policy = get_unaligned_le16(sent + 2);
237 	sco_conn = hci_conn_hash_lookup_sco(hdev);
238 	if (sco_conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
239 	    conn->link_policy & HCI_LP_SNIFF) {
240 		BT_ERR("SNIFF is not allowed during sco connection");
241 		cp.handle = __cpu_to_le16(conn->handle);
242 		cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
243 		hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
247 	hci_dev_unlock(hdev);
/* Command Complete handler for Read Default Link Policy: caches the
 * controller-wide default link policy on hdev. */
252 static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data,
255 	struct hci_rp_read_def_link_policy *rp = data;
257 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
262 	hdev->link_policy = __le16_to_cpu(rp->policy);
/* Command Complete handler for Write Default Link Policy: mirrors the
 * value we sent into hdev->link_policy. */
267 static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data,
270 	struct hci_ev_status *rp = data;
273 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
278 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
282 	hdev->link_policy = get_unaligned_le16(sent);
/* Command Complete handler for HCI_Reset.
 * Clears HCI_RESET and all volatile device flags, stops discovery, and
 * re-initializes cached controller state (tx power, adv/scan-rsp data,
 * scan type, SSP debug mode, LE accept/resolving lists).
 */
287 static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb)
289 	struct hci_ev_status *rp = data;
291 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
293 	clear_bit(HCI_RESET, &hdev->flags);
298 	/* Reset all non-persistent flags */
299 	hci_dev_clear_volatile_flags(hdev);
301 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
303 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
304 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
306 	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
307 	hdev->adv_data_len = 0;
309 	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
310 	hdev->scan_rsp_data_len = 0;
312 	hdev->le_scan_type = LE_SCAN_PASSIVE;
314 	hdev->ssp_debug_mode = 0;
316 	hci_bdaddr_list_clear(&hdev->le_accept_list);
317 	hci_bdaddr_list_clear(&hdev->le_resolv_list);
/* Command Complete handler for Read Stored Link Key: when the original
 * request asked for all keys (read_all == 0x01) and it succeeded, caches
 * the controller's max/current stored key counts. */
322 static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data,
325 	struct hci_rp_read_stored_link_key *rp = data;
326 	struct hci_cp_read_stored_link_key *sent;
328 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
330 	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
334 	if (!rp->status && sent->read_all == 0x01) {
335 		hdev->stored_max_keys = le16_to_cpu(rp->max_keys);
336 		hdev->stored_num_keys = le16_to_cpu(rp->num_keys);
/* Command Complete handler for Delete Stored Link Key: decrements the
 * cached stored-key count by the number deleted, clamping at zero. */
342 static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data,
345 	struct hci_rp_delete_stored_link_key *rp = data;
348 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
353 	num_keys = le16_to_cpu(rp->num_keys);
355 	if (num_keys <= hdev->stored_num_keys)
356 		hdev->stored_num_keys -= num_keys;
/* else branch (elided condition): avoid underflow of the cached count */
358 		hdev->stored_num_keys = 0;
/* Command Complete handler for Write Local Name: notifies mgmt when the
 * management interface is active, otherwise (on success) copies the sent
 * name into hdev->dev_name. */
363 static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data,
366 	struct hci_ev_status *rp = data;
369 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
371 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
377 	if (hci_dev_test_flag(hdev, HCI_MGMT))
378 		mgmt_set_local_name_complete(hdev, sent, rp->status);
379 	else if (!rp->status)
380 		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
382 	hci_dev_unlock(hdev);
/* Command Complete handler for Read Local Name: caches the name only
 * during setup/config phases. */
387 static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data,
390 	struct hci_rp_read_local_name *rp = data;
392 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
397 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
398 	    hci_dev_test_flag(hdev, HCI_CONFIG))
399 		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* Command Complete handler for Write Authentication Enable: mirrors the
 * sent parameter into the HCI_AUTH flag and notifies mgmt. */
404 static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data,
407 	struct hci_ev_status *rp = data;
410 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
412 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
419 		__u8 param = *((__u8 *) sent);
421 		if (param == AUTH_ENABLED)
422 			set_bit(HCI_AUTH, &hdev->flags);
/* else (elided): any other value disables authentication */
424 			clear_bit(HCI_AUTH, &hdev->flags);
427 	if (hci_dev_test_flag(hdev, HCI_MGMT))
428 		mgmt_auth_enable_complete(hdev, rp->status);
430 	hci_dev_unlock(hdev);
/* Command Complete handler for Write Encryption Mode: mirrors the sent
 * parameter into the HCI_ENCRYPT flag. */
435 static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data,
438 	struct hci_ev_status *rp = data;
442 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
447 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
451 	param = *((__u8 *) sent);
454 		set_bit(HCI_ENCRYPT, &hdev->flags);
456 		clear_bit(HCI_ENCRYPT, &hdev->flags);
/* Command Complete handler for Write Scan Enable: translates the sent
 * SCAN_INQUIRY/SCAN_PAGE bits into the HCI_ISCAN/HCI_PSCAN flags and
 * resets the discoverable timeout (condition partially elided). */
461 static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data,
464 	struct hci_ev_status *rp = data;
468 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
470 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
474 	param = *((__u8 *) sent);
479 		hdev->discov_timeout = 0;
483 	if (param & SCAN_INQUIRY)
484 		set_bit(HCI_ISCAN, &hdev->flags);
486 		clear_bit(HCI_ISCAN, &hdev->flags);
488 	if (param & SCAN_PAGE)
489 		set_bit(HCI_PSCAN, &hdev->flags);
491 		clear_bit(HCI_PSCAN, &hdev->flags);
494 	hci_dev_unlock(hdev);
/* Command Complete handler for Set Event Filter: tracks whether a filter
 * is configured via the HCI_EVENT_FILTER_CONFIGURED flag, cleared when
 * the sent filter type was "clear all". */
499 static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data,
502 	struct hci_ev_status *rp = data;
503 	struct hci_cp_set_event_filter *cp;
506 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
511 	sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT);
515 	cp = (struct hci_cp_set_event_filter *)sent;
517 	if (cp->flt_type == HCI_FLT_CLEAR_ALL)
518 		hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
520 		hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED);
/* Command Complete handler for Read Class of Device: caches the 3-byte
 * class and logs it (bytes printed MSB first). */
525 static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data,
528 	struct hci_rp_read_class_of_dev *rp = data;
530 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
535 	memcpy(hdev->dev_class, rp->dev_class, 3);
537 	bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2],
538 		   hdev->dev_class[1], hdev->dev_class[0]);
/* Command Complete handler for Write Class of Device: caches the class we
 * sent and notifies mgmt of the result. */
543 static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data,
546 	struct hci_ev_status *rp = data;
549 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
551 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
558 		memcpy(hdev->dev_class, sent, 3);
560 	if (hci_dev_test_flag(hdev, HCI_MGMT))
561 		mgmt_set_class_of_dev_complete(hdev, sent, rp->status);
563 	hci_dev_unlock(hdev);
/* Command Complete handler for Read Voice Setting: updates the cached
 * voice setting and notifies the driver only when it actually changed. */
568 static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data,
571 	struct hci_rp_read_voice_setting *rp = data;
574 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
579 	setting = __le16_to_cpu(rp->voice_setting);
/* Early-out when unchanged (return elided in this view). */
581 	if (hdev->voice_setting == setting)
584 	hdev->voice_setting = setting;
586 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
589 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for Write Voice Setting: same change-detect +
 * driver notification as the read path, using the value we sent. */
594 static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data,
597 	struct hci_ev_status *rp = data;
601 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
606 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
610 	setting = get_unaligned_le16(sent);
612 	if (hdev->voice_setting == setting)
615 	hdev->voice_setting = setting;
617 	bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting);
620 		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* Command Complete handler for Read Number of Supported IAC: caches the
 * count of inquiry access codes the controller supports. */
625 static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data,
628 	struct hci_rp_read_num_supported_iac *rp = data;
630 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
635 	hdev->num_iac = rp->num_iac;
637 	bt_dev_dbg(hdev, "num iac %d", hdev->num_iac);
/* Command Complete handler for Write Simple Pairing Mode: mirrors the
 * sent mode into the LMP_HOST_SSP feature bit and (on the elided
 * non-mgmt path) the HCI_SSP_ENABLED flag. */
642 static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data,
645 	struct hci_ev_status *rp = data;
646 	struct hci_cp_write_ssp_mode *sent;
648 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
650 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
658 			hdev->features[1][0] |= LMP_HOST_SSP;
660 			hdev->features[1][0] &= ~LMP_HOST_SSP;
665 			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
667 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
670 	hci_dev_unlock(hdev);
/* Command Complete handler for Write Secure Connections Host Support:
 * mirrors the sent support flag into LMP_HOST_SC and, when mgmt is not
 * managing the device and the command succeeded, into HCI_SC_ENABLED. */
675 static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data,
678 	struct hci_ev_status *rp = data;
679 	struct hci_cp_write_sc_support *sent;
681 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
683 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
691 			hdev->features[1][0] |= LMP_HOST_SC;
693 			hdev->features[1][0] &= ~LMP_HOST_SC;
696 	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) {
698 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
700 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
703 	hci_dev_unlock(hdev);
/* Command Complete handler for Read Local Version Information: caches
 * HCI/LMP versions and manufacturer, but only during setup/config so a
 * runtime re-read cannot clobber them. */
708 static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,
711 	struct hci_rp_read_local_version *rp = data;
713 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
718 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
719 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
720 		hdev->hci_ver = rp->hci_ver;
721 		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
722 		hdev->lmp_ver = rp->lmp_ver;
723 		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
724 		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* Command Complete handler for Read Encryption Key Size.
 * On failure the key size is forced to 0 (the conservative choice — see
 * the in-line comment); on success it is taken from the response. Either
 * way encryption-change confirmation is delivered via hci_encrypt_cfm().
 */
730 static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data,
733 	struct hci_rp_read_enc_key_size *rp = data;
734 	struct hci_conn *conn;
736 	u8 status = rp->status;
738 	bt_dev_dbg(hdev, "status 0x%2.2x", status);
740 	handle = le16_to_cpu(rp->handle);
744 	conn = hci_conn_hash_lookup_handle(hdev, handle);
750 	/* While unexpected, the read_enc_key_size command may fail. The most
751 	 * secure approach is to then assume the key size is 0 to force a
755 		bt_dev_err(hdev, "failed to read key size for handle %u",
757 		conn->enc_key_size = 0;
759 		conn->enc_key_size = rp->key_size;
763 	hci_encrypt_cfm(conn, 0);
766 	hci_dev_unlock(hdev);
/* Command Complete handler for Read Local Supported Commands: caches the
 * supported-commands bitmap only during setup/config. */
771 static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,
774 	struct hci_rp_read_local_commands *rp = data;
776 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
781 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
782 	    hci_dev_test_flag(hdev, HCI_CONFIG))
783 		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* Command Complete handler for Read Authenticated Payload Timeout: stores
 * the timeout on the matching connection. */
788 static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data,
791 	struct hci_rp_read_auth_payload_to *rp = data;
792 	struct hci_conn *conn;
794 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
801 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
803 		conn->auth_payload_timeout = __le16_to_cpu(rp->timeout);
805 	hci_dev_unlock(hdev);
/* Command Complete handler for Write Authenticated Payload Timeout:
 * recovers the timeout from the sent command (offset 2, after the handle)
 * and stores it on the matching connection. */
810 static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data,
813 	struct hci_rp_write_auth_payload_to *rp = data;
814 	struct hci_conn *conn;
817 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
822 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO);
828 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
830 		conn->auth_payload_timeout = get_unaligned_le16(sent + 2);
832 	hci_dev_unlock(hdev);
/* Command Complete handler for Read Local Supported Features.
 * Caches feature page 0 and derives the supported ACL packet types and
 * (e)SCO link types from the individual LMP feature bits.
 */
837 static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data,
840 	struct hci_rp_read_local_features *rp = data;
842 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
847 	memcpy(hdev->features, rp->features, 8);
849 	/* Adjust default settings according to features
850 	 * supported by device. */
852 	if (hdev->features[0][0] & LMP_3SLOT)
853 		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
855 	if (hdev->features[0][0] & LMP_5SLOT)
856 		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
858 	if (hdev->features[0][1] & LMP_HV2) {
859 		hdev->pkt_type |= (HCI_HV2);
860 		hdev->esco_type |= (ESCO_HV2);
863 	if (hdev->features[0][1] & LMP_HV3) {
864 		hdev->pkt_type |= (HCI_HV3);
865 		hdev->esco_type |= (ESCO_HV3);
868 	if (lmp_esco_capable(hdev))
869 		hdev->esco_type |= (ESCO_EV3);
871 	if (hdev->features[0][4] & LMP_EV4)
872 		hdev->esco_type |= (ESCO_EV4);
874 	if (hdev->features[0][4] & LMP_EV5)
875 		hdev->esco_type |= (ESCO_EV5);
877 	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
878 		hdev->esco_type |= (ESCO_2EV3);
880 	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
881 		hdev->esco_type |= (ESCO_3EV3);
883 	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
884 		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* Command Complete handler for Read Local Extended Features: tracks the
 * highest feature page and caches the returned page (bounds-checked). */
889 static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data,
892 	struct hci_rp_read_local_ext_features *rp = data;
894 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
899 	if (hdev->max_page < rp->max_page)
900 		hdev->max_page = rp->max_page;
902 	if (rp->page < HCI_MAX_PAGES)
903 		memcpy(hdev->features[rp->page], rp->features, 8);
/* Command Complete handler for Read Flow Control Mode: caches the
 * controller's data-flow-control mode. */
908 static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data,
911 	struct hci_rp_read_flow_control_mode *rp = data;
913 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
918 	hdev->flow_ctl_mode = rp->mode;
/* Command Complete handler for Read Buffer Size: caches the controller's
 * ACL/SCO MTUs and packet counts, initializing the available-credit
 * counters from the maxima. A quirk branch (body elided) adjusts values
 * for controllers reporting broken buffer sizes. */
923 static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data,
926 	struct hci_rp_read_buffer_size *rp = data;
928 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
933 	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
934 	hdev->sco_mtu  = rp->sco_mtu;
935 	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
936 	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
938 	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
943 	hdev->acl_cnt = hdev->acl_pkts;
944 	hdev->sco_cnt = hdev->sco_pkts;
946 	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
947 	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* Command Complete handler for Read BD_ADDR: records the controller
 * address during init, and also as the setup address during setup. */
952 static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data,
955 	struct hci_rp_read_bd_addr *rp = data;
957 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
962 	if (test_bit(HCI_INIT, &hdev->flags))
963 		bacpy(&hdev->bdaddr, &rp->bdaddr);
965 	if (hci_dev_test_flag(hdev, HCI_SETUP))
966 		bacpy(&hdev->setup_addr, &rp->bdaddr);
/* Command Complete handler for Read Local Pairing Options: caches pairing
 * options and max encryption key size during setup/config. */
971 static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data,
974 	struct hci_rp_read_local_pairing_opts *rp = data;
976 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
981 	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
982 	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
983 		hdev->pairing_opts = rp->pairing_opts;
984 		hdev->max_enc_key_size = rp->max_key_size;
/* Command Complete handler for Read Page Scan Activity: caches interval
 * and window, but only while the device is initializing. */
990 static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data,
993 	struct hci_rp_read_page_scan_activity *rp = data;
995 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1000 	if (test_bit(HCI_INIT, &hdev->flags)) {
1001 		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
1002 		hdev->page_scan_window = __le16_to_cpu(rp->window);
/* Command Complete handler for Write Page Scan Activity: mirrors the sent
 * interval/window into the cached values. */
1008 static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data,
1009 					  struct sk_buff *skb)
1011 	struct hci_ev_status *rp = data;
1012 	struct hci_cp_write_page_scan_activity *sent;
1014 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1019 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
1023 	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
1024 	hdev->page_scan_window = __le16_to_cpu(sent->window);
/* Command Complete handler for Read Page Scan Type: caches the type only
 * during device initialization. */
1029 static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data,
1030 				     struct sk_buff *skb)
1032 	struct hci_rp_read_page_scan_type *rp = data;
1034 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1039 	if (test_bit(HCI_INIT, &hdev->flags))
1040 		hdev->page_scan_type = rp->type;
/* Command Complete handler for Write Page Scan Type: mirrors the single
 * byte we sent into the cached page scan type. */
1045 static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data,
1046 				      struct sk_buff *skb)
1048 	struct hci_ev_status *rp = data;
1051 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1056 	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
1058 		hdev->page_scan_type = *type;
/* Command Complete handler for Read Data Block Size (block-based flow
 * control): caches block MTU/length/count and seeds the available-block
 * counter from the maximum. */
1063 static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data,
1064 				      struct sk_buff *skb)
1066 	struct hci_rp_read_data_block_size *rp = data;
1068 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1073 	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
1074 	hdev->block_len = __le16_to_cpu(rp->block_len);
1075 	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
1077 	hdev->block_cnt = hdev->num_blocks;
1079 	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
1080 	       hdev->block_cnt, hdev->block_len);
/* Command Complete handler for Read Clock: when the request asked for the
 * local clock (which == 0x00) the value is cached on hdev; otherwise the
 * piconet clock and accuracy are stored on the matching connection. */
1085 static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data,
1086 			    struct sk_buff *skb)
1088 	struct hci_rp_read_clock *rp = data;
1089 	struct hci_cp_read_clock *cp;
1090 	struct hci_conn *conn;
1092 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1099 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
1103 	if (cp->which == 0x00) {
1104 		hdev->clock = le32_to_cpu(rp->clock);
1108 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1110 		conn->clock = le32_to_cpu(rp->clock);
1111 		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
1115 	hci_dev_unlock(hdev);
/* Command Complete handler for Read Local AMP Info: caches all AMP
 * controller parameters (status, bandwidths, latency, PDU size, type,
 * PAL capabilities, assoc size, flush timeouts) on hdev. */
1119 static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data,
1120 				     struct sk_buff *skb)
1122 	struct hci_rp_read_local_amp_info *rp = data;
1124 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1129 	hdev->amp_status = rp->amp_status;
1130 	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
1131 	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
1132 	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
1133 	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
1134 	hdev->amp_type = rp->amp_type;
1135 	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
1136 	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
1137 	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
1138 	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
/* Command Complete handler for Read Inquiry Response TX Power: caches the
 * inquiry TX power level. */
1143 static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data,
1144 				       struct sk_buff *skb)
1146 	struct hci_rp_read_inq_rsp_tx_power *rp = data;
1148 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1153 	hdev->inq_tx_power = rp->tx_power;
/* Command Complete handler for Read Default Erroneous Data Reporting:
 * caches the current setting. */
1158 static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data,
1159 					     struct sk_buff *skb)
1161 	struct hci_rp_read_def_err_data_reporting *rp = data;
1163 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1168 	hdev->err_data_reporting = rp->err_data_reporting;
/* Command Complete handler for Write Default Erroneous Data Reporting:
 * mirrors the value we sent into the cached setting. */
1173 static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data,
1174 					      struct sk_buff *skb)
1176 	struct hci_ev_status *rp = data;
1177 	struct hci_cp_write_def_err_data_reporting *cp;
1179 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1184 	cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
1188 	hdev->err_data_reporting = cp->err_data_reporting;
/* Command Complete handler for PIN Code Request Reply: notifies mgmt and,
 * on the success path, records the PIN length on the ACL connection so it
 * can later be stored with the link key. */
1193 static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data,
1194 				struct sk_buff *skb)
1196 	struct hci_rp_pin_code_reply *rp = data;
1197 	struct hci_cp_pin_code_reply *cp;
1198 	struct hci_conn *conn;
1200 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1204 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1205 		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1210 	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1214 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1216 		conn->pin_length = cp->pin_len;
1219 	hci_dev_unlock(hdev);
/* Command Complete handler for PIN Code Request Negative Reply: forwards
 * the result to mgmt when the management interface is active. */
1223 static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data,
1224 				    struct sk_buff *skb)
1226 	struct hci_rp_pin_code_neg_reply *rp = data;
1228 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1232 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1233 		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1236 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Buffer Size: caches the LE ACL MTU
 * and packet count and seeds the LE credit counter. */
1241 static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data,
1242 				     struct sk_buff *skb)
1244 	struct hci_rp_le_read_buffer_size *rp = data;
1246 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1251 	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1252 	hdev->le_pkts = rp->le_max_pkt;
1254 	hdev->le_cnt = hdev->le_pkts;
1256 	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* Command Complete handler for LE Read Local Supported Features: caches
 * the 8-byte LE feature mask. */
1261 static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data,
1262 					struct sk_buff *skb)
1264 	struct hci_rp_le_read_local_features *rp = data;
1266 	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1271 	memcpy(hdev->le_features, rp->features, 8);
/* Command Complete handler for LE Read Advertising Channel TX Power:
 * caches the advertising TX power level. */
1276 static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data,
1277 				      struct sk_buff *skb)
1279 	struct hci_rp_le_read_adv_tx_power *rp = data;
1281 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1286 	hdev->adv_tx_power = rp->tx_power;
/* Command Complete handler for User Confirmation Request Reply: forwards
 * the result to mgmt (BR/EDR link). */
1291 static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data,
1292 				    struct sk_buff *skb)
1294 	struct hci_rp_user_confirm_reply *rp = data;
1296 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1300 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1301 		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1304 	hci_dev_unlock(hdev);
/* Command Complete handler for User Confirmation Request Negative Reply:
 * forwards the result to mgmt (BR/EDR link). */
1309 static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data,
1310 					struct sk_buff *skb)
1312 	struct hci_rp_user_confirm_reply *rp = data;
1314 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1318 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1319 		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1320 						     ACL_LINK, 0, rp->status);
1322 	hci_dev_unlock(hdev);
/* Command Complete handler for User Passkey Request Reply: forwards the
 * result to mgmt (BR/EDR link). */
1327 static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data,
1328 				    struct sk_buff *skb)
1330 	struct hci_rp_user_confirm_reply *rp = data;
1332 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1336 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1337 		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1340 	hci_dev_unlock(hdev);
/* Command Complete handler for User Passkey Request Negative Reply:
 * forwards the result to mgmt (BR/EDR link). */
1345 static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data,
1346 					struct sk_buff *skb)
1348 	struct hci_rp_user_confirm_reply *rp = data;
1350 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1354 	if (hci_dev_test_flag(hdev, HCI_MGMT))
1355 		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1356 						     ACL_LINK, 0, rp->status);
1358 	hci_dev_unlock(hdev);
/* Command Complete handler for Read Local OOB Data: only logs the status
 * in this visible portion. */
1363 static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data,
1364 				     struct sk_buff *skb)
1366 	struct hci_rp_read_local_oob_data *rp = data;
1368 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Command Complete handler for Read Local OOB Extended Data: only logs
 * the status in this visible portion. */
1373 static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data,
1374 					 struct sk_buff *skb)
1376 	struct hci_rp_read_local_oob_ext_data *rp = data;
1378 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Command Complete handler for LE Set Random Address: records the new
 * random address and, when it equals the current RPA, clears the expiry
 * flag and re-arms the RPA expiration timer. */
1383 static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data,
1384 				    struct sk_buff *skb)
1386 	struct hci_ev_status *rp = data;
1389 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1394 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1400 	bacpy(&hdev->random_addr, sent);
1402 	if (!bacmp(&hdev->rpa, sent)) {
1403 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
1404 		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired,
1405 				   secs_to_jiffies(hdev->rpa_timeout));
1408 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Default PHY: mirrors the sent
 * TX/RX PHY preferences into the cached defaults. */
1413 static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data,
1414 				    struct sk_buff *skb)
1416 	struct hci_ev_status *rp = data;
1417 	struct hci_cp_le_set_default_phy *cp;
1419 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1424 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY);
1430 	hdev->le_tx_def_phys = cp->tx_phys;
1431 	hdev->le_rx_def_phys = cp->rx_phys;
1433 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Set Random Address.
 * Only non-zero advertising handles are updated here — handle 0x00 uses
 * HCI_OP_LE_SET_RANDOM_ADDR instead (see in-line comment). When the new
 * address matches the current RPA, the per-instance expiry worker is
 * re-armed.
 */
1438 static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data,
1439 					    struct sk_buff *skb)
1441 	struct hci_ev_status *rp = data;
1442 	struct hci_cp_le_set_adv_set_rand_addr *cp;
1443 	struct adv_info *adv;
1445 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1450 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR);
1451 	/* Update only in case the adv instance since handle 0x00 shall be using
1452 	 * HCI_OP_LE_SET_RANDOM_ADDR since that allows both extended and
1453 	 * non-extended adverting.
1455 	if (!cp || !cp->handle)
1460 	adv = hci_find_adv_instance(hdev, cp->handle)
1462 		bacpy(&adv->random_addr, &cp->bdaddr);
1463 		if (!bacmp(&hdev->rpa, &cp->bdaddr)) {
1464 			adv->rpa_expired = false;
1465 			queue_delayed_work(hdev->workqueue,
1466 					   &adv->rpa_expired_cb,
1467 					   secs_to_jiffies(hdev->rpa_timeout));
1471 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Remove Advertising Set: removes the
 * matching local advertising instance and tells mgmt (attributing the
 * removal to the socket that issued the command). */
1476 static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data,
1477 				   struct sk_buff *skb)
1479 	struct hci_ev_status *rp = data;
1483 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1488 	instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET);
1494 	err = hci_remove_adv_instance(hdev, *instance);
1496 		mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev,
1499 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Clear Advertising Sets: removes every
 * local advertising instance and notifies mgmt for each. The _safe list
 * walk is required because removal deletes entries while iterating. */
1504 static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data,
1505 				   struct sk_buff *skb)
1507 	struct hci_ev_status *rp = data;
1508 	struct adv_info *adv, *n;
1511 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1516 	if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS))
1521 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1522 		u8 instance = adv->instance;
1524 		err = hci_remove_adv_instance(hdev, instance);
1526 			mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd),
1530 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Read Transmit Power: caches the
 * controller's min/max LE TX power levels. */
1535 static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data,
1536 					struct sk_buff *skb)
1538 	struct hci_rp_le_read_transmit_power *rp = data;
1540 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1545 	hdev->min_le_tx_power = rp->min_le_tx_power;
1546 	hdev->max_le_tx_power = rp->max_le_tx_power;
/* Command Complete handler for LE Set Privacy Mode: records the privacy
 * mode on the matching connection-parameters entry. */
1551 static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data,
1552 				     struct sk_buff *skb)
1554 	struct hci_ev_status *rp = data;
1555 	struct hci_cp_le_set_privacy_mode *cp;
1556 	struct hci_conn_params *params;
1558 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1563 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE);
1569 	params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type);
1571 		params->privacy_mode = cp->mode;
1573 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Advertising Enable (legacy).
 * Sets/clears HCI_LE_ADV and — when advertising was enabled while a
 * peripheral-role connection attempt is in flight — arms the connection
 * timeout so the attempt cannot hang forever.
 */
1578 static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data,
1579 				   struct sk_buff *skb)
1581 	struct hci_ev_status *rp = data;
1584 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1589 	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1595 		/* If we're doing connection initiation as peripheral. Set a
1596 		 * timeout in case something goes wrong.
1599 		struct hci_conn *conn;
1601 		hci_dev_set_flag(hdev, HCI_LE_ADV);
1603 		conn = hci_lookup_le_connect(hdev);
1605 			queue_delayed_work(hdev->workqueue,
1606 					   &conn->le_conn_timeout,
1607 					   conn->conn_timeout);
1609 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1612 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Advertising Enable.
 * Enable path: sets HCI_LE_ADV, marks the targeted instance enabled and
 * arms the pending-connection timeout. Disable path: marks the targeted
 * instance (or, with num_of_sets == 0, all instances) disabled, and
 * clears HCI_LE_ADV once no instance remains enabled.
 */
1617 static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
1618 				       struct sk_buff *skb)
1620 	struct hci_cp_le_set_ext_adv_enable *cp;
1621 	struct hci_cp_ext_adv_set *set;
1622 	struct adv_info *adv = NULL, *n;
1623 	struct hci_ev_status *rp = data;
1625 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1630 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE);
/* The first hci_cp_ext_adv_set follows the fixed command header. */
1634 	set = (void *)cp->data;
1638 	if (cp->num_of_sets)
1639 		adv = hci_find_adv_instance(hdev, set->handle);
1642 		struct hci_conn *conn;
1644 		hci_dev_set_flag(hdev, HCI_LE_ADV);
1647 			adv->enabled = true;
1649 		conn = hci_lookup_le_connect(hdev);
1651 			queue_delayed_work(hdev->workqueue,
1652 					   &conn->le_conn_timeout,
1653 					   conn->conn_timeout);
1655 		if (cp->num_of_sets) {
1657 				adv->enabled = false;
1659 			/* If just one instance was disabled check if there are
1660 			 * any other instance enabled before clearing HCI_LE_ADV
1662 			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1668 			/* All instances shall be considered disabled */
1669 			list_for_each_entry_safe(adv, n, &hdev->adv_instances,
1671 				adv->enabled = false;
1674 		hci_dev_clear_flag(hdev, HCI_LE_ADV);
1678 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Scan Parameters (legacy): caches
 * the scan type (active/passive) from the sent command. */
1682 static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data,
1683 				   struct sk_buff *skb)
1685 	struct hci_cp_le_set_scan_param *cp;
1686 	struct hci_ev_status *rp = data;
1688 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1693 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1699 	hdev->le_scan_type = cp->type;
1701 	hci_dev_unlock(hdev);
/* Command Complete handler for LE Set Extended Scan Parameters: caches
 * the scan type from the first per-PHY parameter block that follows the
 * fixed command header. */
1706 static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data,
1707 				       struct sk_buff *skb)
1709 	struct hci_cp_le_set_ext_scan_params *cp;
1710 	struct hci_ev_status *rp = data;
1711 	struct hci_cp_le_scan_phy_params *phy_param;
1713 	bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1718 	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS);
1722 	phy_param = (void *)cp->data;
1726 	hdev->le_scan_type = phy_param->type;
1728 	hci_dev_unlock(hdev);
/* Return true if a deferred advertising report is buffered in the
 * discovery state (last_adv_addr differs from BDADDR_ANY).
 */
1733 static bool has_pending_adv_report(struct hci_dev *hdev)
1735 struct discovery_state *d = &hdev->discovery;
1737 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drop any buffered advertising report by resetting the stored address
 * to BDADDR_ANY and zeroing the cached data length.
 */
1740 static void clear_pending_adv_report(struct hci_dev *hdev)
1742 struct discovery_state *d = &hdev->discovery;
1744 bacpy(&d->last_adv_addr, BDADDR_ANY);
1745 d->last_adv_data_len = 0;
/* Buffer one advertising report (address, type, RSSI, flags, AD data) in
 * the discovery state so it can be merged with a following scan response.
 * Oversized AD payloads (> HCI_MAX_AD_LENGTH) are rejected.
 */
1749 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1750 u8 bdaddr_type, s8 rssi, u32 flags,
1753 struct discovery_state *d = &hdev->discovery;
/* Refuse to copy more than the cache can hold. */
1755 if (len > HCI_MAX_AD_LENGTH)
1758 bacpy(&d->last_adv_addr, bdaddr);
1759 d->last_adv_addr_type = bdaddr_type;
1760 d->last_adv_rssi = rssi;
1761 d->last_adv_flags = flags;
1762 memcpy(d->last_adv_data, data, len);
1763 d->last_adv_data_len = len;
/* Common completion logic for LE scan enable/disable (legacy and
 * extended). Updates HCI_LE_SCAN, flushes or reports any buffered
 * advertising report, and transitions mgmt discovery state.
 */
1767 static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)
1772 case LE_SCAN_ENABLE:
1773 hci_dev_set_flag(hdev, HCI_LE_SCAN);
/* Active scanning will merge ADV_IND with SCAN_RSP; start clean. */
1774 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1775 clear_pending_adv_report(hdev);
1776 if (hci_dev_test_flag(hdev, HCI_MESH))
1777 hci_discovery_set_state(hdev, DISCOVERY_FINDING)(hdev, DISCOVERY_FINDING);
1780 case LE_SCAN_DISABLE:
1781 /* We do this here instead of when setting DISCOVERY_STOPPED
1782 * since the latter would potentially require waiting for
1783 * inquiry to stop too.
/* A report was still buffered (no scan response arrived): deliver it
 * to userspace now before scanning stops.
 */
1785 if (has_pending_adv_report(hdev)) {
1786 struct discovery_state *d = &hdev->discovery;
1788 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1789 d->last_adv_addr_type, NULL,
1790 d->last_adv_rssi, d->last_adv_flags,
1792 d->last_adv_data_len, NULL, 0, 0);
1795 /* Cancel this timer so that we don't try to disable scanning
1796 * when it's already disabled.
1798 cancel_delayed_work(&hdev->le_scan_disable);
1800 hci_dev_clear_flag(hdev, HCI_LE_SCAN);
1802 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1803 * interrupted scanning due to a connect request. Mark
1804 * therefore discovery as stopped.
1806 if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
/* Tizen builds track LE discovery separately and use
 * hci_le_discovery_set_state() instead of the generic setter.
 */
1807 #ifndef TIZEN_BT /* The below line is kernel bug. */
1808 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1810 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1812 else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
1813 hdev->discovery.state == DISCOVERY_FINDING)
1814 queue_work(hdev->workqueue, &hdev->reenable_adv_work);
/* Any other enable value is reserved by the spec. */
1819 bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d",
1824 hci_dev_unlock(hdev);
/* Command Complete: HCI_OP_LE_SET_SCAN_ENABLE (legacy scanning).
 * Delegates to le_set_scan_enable_complete() with the sent enable value.
 */
1827 static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data,
1828 struct sk_buff *skb)
1830 struct hci_cp_le_set_scan_enable *cp;
1831 struct hci_ev_status *rp = data;
1833 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1838 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1842 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete: HCI_OP_LE_SET_EXT_SCAN_ENABLE (extended scanning).
 * Same bookkeeping as the legacy variant via le_set_scan_enable_complete().
 */
1847 static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data,
1848 struct sk_buff *skb)
1850 struct hci_cp_le_set_ext_scan_enable *cp;
1851 struct hci_ev_status *rp = data;
1853 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1858 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE);
1862 le_set_scan_enable_complete(hdev, cp->enable);
/* Command Complete: LE Read Number of Supported Advertising Sets.
 * Caches the controller's advertising-set capacity in the hci_dev.
 */
1867 static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data,
1868 struct sk_buff *skb)
1870 struct hci_rp_le_read_num_supported_adv_sets *rp = data;
1872 bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status,
1878 hdev->le_num_of_adv_sets = rp->num_of_sets;
/* Command Complete: LE Read Accept (Filter) List Size.
 * Caches the accept-list capacity reported by the controller.
 */
1883 static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data,
1884 struct sk_buff *skb)
1886 struct hci_rp_le_read_accept_list_size *rp = data;
1888 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
1893 hdev->le_accept_list_size = rp->size;
/* Command Complete: LE Clear Accept List.
 * Drops the host-side mirror of the controller's accept list.
 */
1898 static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data,
1899 struct sk_buff *skb)
1901 struct hci_ev_status *rp = data;
1903 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1909 hci_bdaddr_list_clear(&hdev->le_accept_list);
1910 hci_dev_unlock(hdev);
/* Command Complete: LE Add Device To Accept List.
 * Mirrors the added address into the host-side le_accept_list.
 */
1915 static u8 hci_cc_le_add_to_accept_list(struct hci_dev *hdev, void *data,
1916 struct sk_buff *skb)
1918 struct hci_cp_le_add_to_accept_list *sent;
1919 struct hci_ev_status *rp = data;
1921 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* The address comes from the sent command, not the event. */
1926 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
1931 hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr,
1933 hci_dev_unlock(hdev);
/* Command Complete: LE Remove Device From Accept List.
 * Removes the address from the host-side mirror of the accept list.
 */
1938 static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data,
1939 struct sk_buff *skb)
1941 struct hci_cp_le_del_from_accept_list *sent;
1942 struct hci_ev_status *rp = data;
1944 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1949 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST);
1954 hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr,
1956 hci_dev_unlock(hdev);
/* Command Complete: LE Read Supported States.
 * Caches the 8-byte LE state-combination bitmap from the controller.
 */
1961 static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data,
1962 struct sk_buff *skb)
1964 struct hci_rp_le_read_supported_states *rp = data;
1966 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1971 memcpy(hdev->le_states, rp->le_states, 8);
/* Command Complete: LE Read Suggested Default Data Length.
 * Caches the suggested TX octets/time and (Tizen extension) notifies
 * mgmt of the completion.
 */
1976 static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data,
1977 struct sk_buff *skb)
1979 struct hci_rp_le_read_def_data_len *rp = data;
1981 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
1990 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1991 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* Tizen-specific mgmt notification for the data-length read. */
1994 mgmt_le_read_host_suggested_data_length_complete(hdev, rp->status);
1996 hci_dev_unlock(hdev);
/* Command Complete: LE Write Suggested Default Data Length.
 * Mirrors the values we just wrote into hdev and (Tizen extension)
 * notifies mgmt of the completion.
 */
2002 static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data,
2003 struct sk_buff *skb)
2005 struct hci_cp_le_write_def_data_len *sent;
2006 struct hci_ev_status *rp = data;
2008 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2017 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
2025 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
2026 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
/* Tizen-specific mgmt notification for the data-length write. */
2031 mgmt_le_write_host_suggested_data_length_complete(hdev, rp->status);
/* Command Complete: LE Add Device To Resolving List.
 * Mirrors address, type and IRKs into the host-side le_resolv_list.
 */
2036 static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data,
2037 struct sk_buff *skb)
2039 struct hci_cp_le_add_to_resolv_list *sent;
2040 struct hci_ev_status *rp = data;
2042 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2047 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST);
2052 hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2053 sent->bdaddr_type, sent->peer_irk,
2055 hci_dev_unlock(hdev);
/* Command Complete: LE Remove Device From Resolving List.
 * Removes the entry from the host-side mirror of the resolving list.
 */
2060 static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data,
2061 struct sk_buff *skb)
2063 struct hci_cp_le_del_from_resolv_list *sent;
2064 struct hci_ev_status *rp = data;
2066 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2071 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST);
2076 hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr,
2078 hci_dev_unlock(hdev);
/* Command Complete: LE Clear Resolving List.
 * Drops the host-side mirror of the controller's resolving list.
 */
2083 static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data,
2084 struct sk_buff *skb)
2086 struct hci_ev_status *rp = data;
2088 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2094 hci_bdaddr_list_clear(&hdev->le_resolv_list);
2095 hci_dev_unlock(hdev);
/* Command Complete: LE Read Resolving List Size.
 * Caches the resolving-list capacity reported by the controller.
 */
2100 static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data,
2101 struct sk_buff *skb)
2103 struct hci_rp_le_read_resolv_list_size *rp = data;
2105 bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size);
2110 hdev->le_resolv_list_size = rp->size;
/* Command Complete: LE Set Address Resolution Enable.
 * Tracks whether controller-based RPA resolution is active via the
 * HCI_LL_RPA_RESOLUTION device flag.
 */
2115 static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data,
2116 struct sk_buff *skb)
2118 struct hci_ev_status *rp = data;
2121 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2126 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE);
/* Flag follows the enable value we sent (branch condition elided). */
2133 hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION);
2135 hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION);
2137 hci_dev_unlock(hdev);
/* Command Complete: LE Read Maximum Data Length.
 * Caches the controller's maximum TX/RX octets and times, then (Tizen
 * extension) notifies mgmt of the completion.
 */
2142 static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data,
2143 struct sk_buff *skb)
2145 struct hci_rp_le_read_max_data_len *rp = data;
2147 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2156 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
2157 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
2158 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
2159 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
/* Tizen-specific mgmt notification for the max-data-length read. */
2162 mgmt_le_read_maximum_data_length_complete(hdev, rp->status);
2163 hci_dev_unlock(hdev);
/* Command Complete: Write LE Host Supported.
 * Syncs the host feature bits (LMP_HOST_LE / LMP_HOST_LE_BREDR) and the
 * HCI_LE_ENABLED flag with the values we just wrote; disabling LE also
 * clears HCI_ADVERTISING.
 */
2169 static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data,
2170 struct sk_buff *skb)
2172 struct hci_cp_write_le_host_supported *sent;
2173 struct hci_ev_status *rp = data;
2175 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2180 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
/* LE enabled on the host side. */
2187 hdev->features[1][0] |= LMP_HOST_LE;
2188 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
/* LE disabled: advertising can no longer be active either. */
2190 hdev->features[1][0] &= ~LMP_HOST_LE;
2191 hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
2192 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2196 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
2198 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
2200 hci_dev_unlock(hdev);
/* Command Complete: HCI_OP_LE_SET_ADV_PARAM (legacy advertising).
 * Caches the own-address type the controller will advertise with.
 */
2205 static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data,
2206 struct sk_buff *skb)
2208 struct hci_cp_le_set_adv_param *cp;
2209 struct hci_ev_status *rp = data;
2211 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2216 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
2221 hdev->adv_addr_type = cp->own_address_type;
2222 hci_dev_unlock(hdev);
/* Command Complete: HCI_OP_LE_SET_EXT_ADV_PARAMS.
 * Caches the own-address type and the selected TX power (both in hdev
 * for instance 0 and in the matching adv_info instance), then refreshes
 * the advertising data now that TX power is known.
 */
2227 static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,
2228 struct sk_buff *skb)
2230 struct hci_rp_le_set_ext_adv_params *rp = data;
2231 struct hci_cp_le_set_ext_adv_params *cp;
2232 struct adv_info *adv_instance;
2234 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2239 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS);
2244 hdev->adv_addr_type = cp->own_addr_type;
2246 /* Store in hdev for instance 0 */
2247 hdev->adv_tx_power = rp->tx_power;
2249 adv_instance = hci_find_adv_instance(hdev, cp->handle);
2251 adv_instance->tx_power = rp->tx_power;
2253 /* Update adv data as tx power is known now */
2254 hci_update_adv_data(hdev, cp->handle);
2256 hci_dev_unlock(hdev);
/* Command Complete: vendor RSSI-monitoring enable (Tizen extension).
 * Forwards the whole response to mgmt via mgmt_enable_rssi_cc().
 */
2262 static u8 hci_cc_enable_rssi(struct hci_dev *hdev, void *data,
2263 struct sk_buff *skb)
2265 struct hci_cc_rsp_enable_rssi *rp = data;
2267 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
2268 hdev->name, rp->status, rp->le_ext_opcode)
2270 mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Command Complete: vendor Get Raw RSSI (Tizen extension).
 * Forwards connection handle and raw RSSI (dBm) to mgmt.
 */
2275 static u8 hci_cc_get_raw_rssi(struct hci_dev *hdev, void *data,
2276 struct sk_buff *skb)
2278 struct hci_cc_rp_get_raw_rssi *rp = data;
2280 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
2281 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
2283 mgmt_raw_rssi_response(hdev, rp, rp->status);
/* Vendor event: LE RSSI link alert (Tizen extension).
 * Relays connection handle and alert type to mgmt.
 */
2288 static void hci_vendor_ext_rssi_link_alert_evt(struct hci_dev *hdev,
2289 struct sk_buff *skb)
2291 struct hci_ev_vendor_specific_rssi_alert *ev = (void *)skb->data;
2293 BT_DBG("RSSI event LE_RSSI_LINK_ALERT %X", LE_RSSI_LINK_ALERT);
2295 mgmt_rssi_alert_evt(hdev, ev->conn_handle, ev->alert_type,
/* Vendor event group demultiplexer (Tizen extension).
 * Pops the group header and dispatches on the LE-ext sub-event code.
 */
2299 static void hci_vendor_specific_group_ext_evt(struct hci_dev *hdev,
2300 struct sk_buff *skb)
2302 struct hci_ev_ext_vendor_specific *ev = (void *)skb->data;
2303 __u8 event_le_ext_sub_code;
2305 BT_DBG("RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
2306 LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
/* Advance past the group header so the sub-handler sees its payload.
 * NOTE(review): ev was captured before the pull, so the sub-code read
 * below still references the pre-pull header.
 */
2308 skb_pull(skb, sizeof(*ev));
2309 event_le_ext_sub_code = ev->event_le_ext_sub_code;
2311 switch (event_le_ext_sub_code) {
2312 case LE_RSSI_LINK_ALERT:
2313 hci_vendor_ext_rssi_link_alert_evt(hdev, skb);
/* Vendor event: multi-advertising state change (Tizen extension).
 * Relays instance, reason and connection handle to mgmt.
 */
2321 static void hci_vendor_multi_adv_state_change_evt(struct hci_dev *hdev,
2322 struct sk_buff *skb)
2324 struct hci_ev_vendor_specific_multi_adv_state *ev = (void *)skb->data;
2326 BT_DBG("LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT");
2328 mgmt_multi_adv_state_change_evt(hdev, ev->adv_instance,
2329 ev->state_change_reason,
2330 ev->connection_handle);
/* Top-level vendor-specific HCI event dispatcher (Tizen extension).
 * Pops the vendor header and routes on the sub-event code.
 */
2333 static void hci_vendor_specific_evt(struct hci_dev *hdev, void *data,
2334 struct sk_buff *skb)
2336 struct hci_ev_vendor_specific *ev = (void *)skb->data;
2337 __u8 event_sub_code;
2339 BT_DBG("hci_vendor_specific_evt");
/* Header is consumed here; ev still points at the pre-pull data. */
2341 skb_pull(skb, sizeof(*ev));
2342 event_sub_code = ev->event_sub_code;
2344 switch (event_sub_code) {
2345 case LE_META_VENDOR_SPECIFIC_GROUP_EVENT:
2346 hci_vendor_specific_group_ext_evt(hdev, skb);
2349 case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
2350 hci_vendor_multi_adv_state_change_evt(hdev, skb);
/* LE Data Length Change event (Tizen variant).
 * Copies the negotiated TX/RX octets and times into the matching
 * hci_conn and notifies mgmt with the peer address.
 */
2358 static void hci_le_data_length_changed_complete_evt(struct hci_dev *hdev,
2360 struct sk_buff *skb)
2362 struct hci_ev_le_data_len_change *ev = (void *)skb->data;
2363 struct hci_conn *conn;
2365 BT_DBG("%s status", hdev->name);
2369 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2371 conn->tx_len = le16_to_cpu(ev->tx_len);
2372 conn->tx_time = le16_to_cpu(ev->tx_time);
2373 conn->rx_len = le16_to_cpu(ev->rx_len);
2374 conn->rx_time = le16_to_cpu(ev->rx_time);
/* Tizen-specific mgmt notification of the new data-length values. */
2376 mgmt_le_data_length_change_complete(hdev, &conn->dst,
2377 conn->tx_len, conn->tx_time,
2378 conn->rx_len, conn->rx_time);
2381 hci_dev_unlock(hdev);
/* Command Complete: Read RSSI.
 * Stores the reported RSSI on the connection identified by the handle.
 */
2385 static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data,
2386 struct sk_buff *skb)
2388 struct hci_rp_read_rssi *rp = data;
2389 struct hci_conn *conn;
2391 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2398 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
2400 conn->rssi = rp->rssi;
2402 hci_dev_unlock(hdev);
/* Command Complete: Read Transmit Power Level.
 * The sent command's type selects whether the reading is the current or
 * the maximum TX power; store it in the matching hci_conn field.
 */
2407 static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data,
2408 struct sk_buff *skb)
2410 struct hci_cp_read_tx_power *sent;
2411 struct hci_rp_read_tx_power *rp = data;
2412 struct hci_conn *conn;
2414 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2419 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
2425 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* Type 0x00 = current TX power, 0x01 = maximum (case labels elided). */
2429 switch (sent->type) {
2431 conn->tx_power = rp->tx_power;
2434 conn->max_tx_power = rp->tx_power;
2439 hci_dev_unlock(hdev);
/* Command Complete: Write SSP Debug Mode.
 * Caches the debug-mode value we just wrote in hdev->ssp_debug_mode.
 */
2443 static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data,
2444 struct sk_buff *skb)
2446 struct hci_ev_status *rp = data;
2449 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
2454 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
2456 hdev->ssp_debug_mode = *mode;
/* Command Status: Inquiry.
 * On acceptance marks HCI_INQUIRY active; also re-checks any pending
 * connection attempts.
 */
2461 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
2463 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2466 hci_conn_check_pending(hdev);
2470 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status: Create Connection (BR/EDR ACL).
 * On failure closes the pending connection (retrying once on 0x0c
 * "command disallowed" via conn->attempt); on success creates the conn
 * object if the controller initiated without one.
 */
2473 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
2475 struct hci_cp_create_conn *cp;
2476 struct hci_conn *conn;
2478 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2480 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
2486 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2488 bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn);
2491 if (conn && conn->state == BT_CONNECT) {
/* 0x0c = Command Disallowed: keep the conn around for up to two
 * attempts before giving up.
 */
2492 if (status != 0x0c || conn->attempt > 2) {
2493 conn->state = BT_CLOSED;
2494 hci_connect_cfm(conn, status);
2497 conn->state = BT_CONNECT2;
2501 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
2504 bt_dev_err(hdev, "no memory for new connection");
2508 hci_dev_unlock(hdev);
/* Command Status: Add SCO Connection.
 * On failure closes the SCO link attached to the parent ACL connection
 * and confirms the failure to upper layers.
 */
2511 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
2513 struct hci_cp_add_sco *cp;
2514 struct hci_conn *acl, *sco;
2517 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2522 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
2526 handle = __le16_to_cpu(cp->handle);
2528 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
/* The handle refers to the parent ACL link; the SCO hangs off it. */
2532 acl = hci_conn_hash_lookup_handle(hdev, handle);
2536 sco->state = BT_CLOSED;
2538 hci_connect_cfm(sco, status);
2543 hci_dev_unlock(hdev);
/* Command Status: Authentication Requested.
 * On failure during BT_CONFIG, confirms the failed connect and drops
 * the reference taken for the authentication attempt.
 */
2546 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
2548 struct hci_cp_auth_requested *cp;
2549 struct hci_conn *conn;
2551 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2556 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
2562 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2564 if (conn->state == BT_CONFIG) {
2565 hci_connect_cfm(conn, status);
2566 hci_conn_drop(conn);
2570 hci_dev_unlock(hdev);
/* Command Status: Set Connection Encryption.
 * Mirror of hci_cs_auth_requested: on failure during BT_CONFIG,
 * confirms the failed connect and drops the held reference.
 */
2573 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
2575 struct hci_cp_set_conn_encrypt *cp;
2576 struct hci_conn *conn;
2578 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2583 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
2589 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2591 if (conn->state == BT_CONFIG) {
2592 hci_connect_cfm(conn, status);
2593 hci_conn_drop(conn);
2597 hci_dev_unlock(hdev);
/* Decide whether an outgoing BR/EDR connection in BT_CONFIG still needs
 * an Authentication Requested command. Returns non-zero when
 * authentication should be initiated (boolean-style int).
 */
2600 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
2601 struct hci_conn *conn)
/* Only outgoing connections still in configuration qualify. */
2603 if (conn->state != BT_CONFIG || !conn->out)
/* SDP-level security never requires authentication. */
2606 if (conn->pending_sec_level == BT_SECURITY_SDP)
2609 /* Only request authentication for SSP connections or non-SSP
2610 * devices with sec_level MEDIUM or HIGH or if MITM protection
/* auth_type bit 0 encodes the MITM requirement. */
2613 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
2614 conn->pending_sec_level != BT_SECURITY_FIPS &&
2615 conn->pending_sec_level != BT_SECURITY_HIGH &&
2616 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue a Remote Name Request for an inquiry-cache entry, reusing the
 * page-scan parameters and clock offset learned during inquiry.
 * Returns the hci_send_cmd() result (0 on queued success).
 */
2622 static int hci_resolve_name(struct hci_dev *hdev,
2623 struct inquiry_entry *e)
2625 struct hci_cp_remote_name_req cp;
2627 memset(&cp, 0, sizeof(cp));
2629 bacpy(&cp.bdaddr, &e->data.bdaddr);
2630 cp.pscan_rep_mode = e->data.pscan_rep_mode;
2631 cp.pscan_mode = e->data.pscan_mode;
2632 cp.clock_offset = e->data.clock_offset;
2634 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next NAME_NEEDED inquiry entry.
 * Returns true when a Remote Name Request was started; bails out when
 * the resolve list is empty or the overall name-resolve budget
 * (discov->name_resolve_timeout) has been exhausted.
 */
2637 static bool hci_resolve_next_name(struct hci_dev *hdev)
2639 struct discovery_state *discov = &hdev->discovery;
2640 struct inquiry_entry *e;
2642 if (list_empty(&discov->resolve))
2645 /* We should stop if we already spent too much time resolving names. */
2646 if (time_after(jiffies, discov->name_resolve_timeout)) {
2647 bt_dev_warn_ratelimited(hdev, "Name resolve takes too long.");
2651 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2655 if (hci_resolve_name(hdev, e) == 0) {
2656 e->name_state = NAME_PENDING;
/* Handle a resolved (or failed) remote name: propagate it to mgmt,
 * update the inquiry-cache entry state, and either resolve the next
 * pending name or finish discovery.
 * @name may be NULL when resolution failed; @conn may be NULL.
 */
2663 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
2664 bdaddr_t *bdaddr, u8 *name, u8 name_len)
2666 struct discovery_state *discov = &hdev->discovery;
2667 struct inquiry_entry *e;
2670 /* Update the mgmt connected state if necessary. Be careful with
2671 * conn objects that exist but are not (yet) connected however.
2672 * Only those in BT_CONFIG or BT_CONNECTED states can be
2673 * considered connected.
2676 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
2677 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2678 mgmt_device_connected(hdev, conn, name, name_len);
2680 mgmt_device_name_update(hdev, bdaddr, name, name_len);
2684 (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
2685 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2686 mgmt_device_connected(hdev, conn, name, name_len);
2689 if (discov->state == DISCOVERY_STOPPED)
2692 if (discov->state == DISCOVERY_STOPPING)
2693 goto discov_complete;
2695 if (discov->state != DISCOVERY_RESOLVING)
2698 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
2699 /* If the device was not found in a list of found devices names of which
2700 * are pending. there is no need to continue resolving a next name as it
2701 * will be done upon receiving another Remote Name Request Complete
/* NULL name means resolution failed for this entry. */
2708 e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN;
2709 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi,
2712 if (hci_resolve_next_name(hdev))
2716 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status: Remote Name Request.
 * On failure notifies the pending-name machinery; otherwise, if the
 * outgoing connection still needs authentication, fires an
 * Authentication Requested command right away.
 */
2719 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
2721 struct hci_cp_remote_name_req *cp;
2722 struct hci_conn *conn;
2724 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2726 /* If successful wait for the name req complete event before
2727 * checking for the need to do authentication */
2731 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
2737 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2739 if (hci_dev_test_flag(hdev, HCI_MGMT))
/* NULL name: the request failed, mark the entry unresolved. */
2740 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
2745 if (!hci_outgoing_auth_needed(hdev, conn))
2748 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2749 struct hci_cp_auth_requested auth_cp;
2751 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2753 auth_cp.handle = __cpu_to_le16(conn->handle);
2754 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
2755 sizeof(auth_cp), &auth_cp);
2759 hci_dev_unlock(hdev);
/* Command Status: Read Remote Supported Features.
 * On failure during BT_CONFIG, confirms the failed connect and drops
 * the held reference.
 */
2762 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
2764 struct hci_cp_read_remote_features *cp;
2765 struct hci_conn *conn;
2767 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2772 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
2778 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2780 if (conn->state == BT_CONFIG) {
2781 hci_connect_cfm(conn, status);
2782 hci_conn_drop(conn);
2786 hci_dev_unlock(hdev);
/* Command Status: Read Remote Extended Features.
 * Same failure handling as hci_cs_read_remote_features.
 */
2789 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
2791 struct hci_cp_read_remote_ext_features *cp;
2792 struct hci_conn *conn;
2794 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2799 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
2805 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2807 if (conn->state == BT_CONFIG) {
2808 hci_connect_cfm(conn, status);
2809 hci_conn_drop(conn);
2813 hci_dev_unlock(hdev);
/* Command Status: Setup Synchronous Connection (SCO/eSCO).
 * On failure closes the SCO link hanging off the parent ACL and
 * confirms the failure upstream.
 */
2816 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2818 struct hci_cp_setup_sync_conn *cp;
2819 struct hci_conn *acl, *sco;
2822 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2827 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
2831 handle = __le16_to_cpu(cp->handle);
2833 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2837 acl = hci_conn_hash_lookup_handle(hdev, handle);
2841 sco->state = BT_CLOSED;
2843 hci_connect_cfm(sco, status);
2848 hci_dev_unlock(hdev);
/* Command Status: Enhanced Setup Synchronous Connection.
 * Enhanced-command twin of hci_cs_setup_sync_conn with identical
 * failure handling.
 */
2851 static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status)
2853 struct hci_cp_enhanced_setup_sync_conn *cp;
2854 struct hci_conn *acl, *sco;
2857 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2862 cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN);
2866 handle = __le16_to_cpu(cp->handle);
2868 bt_dev_dbg(hdev, "handle 0x%4.4x", handle);
2872 acl = hci_conn_hash_lookup_handle(hdev, handle);
2876 sco->state = BT_CLOSED;
2878 hci_connect_cfm(sco, status);
2883 hci_dev_unlock(hdev);
/* Command Status: Sniff Mode.
 * On failure clears the mode-change-pending flag and resumes any SCO
 * setup that was waiting for the mode change.
 */
2886 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
2888 struct hci_cp_sniff_mode *cp;
2889 struct hci_conn *conn;
2891 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2896 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
2902 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2904 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2906 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2907 hci_sco_setup(conn, status);
2910 hci_dev_unlock(hdev);
/* Command Status: Exit Sniff Mode.
 * Mirror of hci_cs_sniff_mode for leaving sniff mode.
 */
2913 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
2915 struct hci_cp_exit_sniff_mode *cp;
2916 struct hci_conn *conn;
2918 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2923 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2929 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2931 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2933 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2934 hci_sco_setup(conn, status);
2937 hci_dev_unlock(hdev);
/* Command Status: Disconnect.
 * Normally the cleanup happens on HCI_EV_DISCONN_COMPLETE; this path
 * runs only on failure status or while the device is suspended. It
 * reports the disconnect (or failure) to mgmt, updates auto-connect
 * parameters, removes stale link keys, re-enables advertising for LE
 * peripheral links, and confirms the disconnect to upper layers.
 */
2940 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2942 struct hci_cp_disconnect *cp;
2943 struct hci_conn_params *params;
2944 struct hci_conn *conn;
2947 bt_dev_dbg(hdev, "status 0x%2.2x", status);
2949 /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended
2950 * otherwise cleanup the connection immediately.
2952 if (!status && !hdev->suspended)
2955 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2961 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2966 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2967 conn->dst_type, status);
/* A peripheral LE link going away frees the adv instance: restart
 * advertising with the instance the connection was using.
 */
2969 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
2970 hdev->cur_adv_instance = conn->adv_instance;
2971 hci_enable_advertising(hdev);
2977 mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2979 if (conn->type == ACL_LINK) {
2980 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2981 hci_remove_link_key(hdev, &conn->dst);
2984 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2986 switch (params->auto_connect) {
2987 case HCI_AUTO_CONN_LINK_LOSS:
/* Only reconnect on genuine link loss, not deliberate closes. */
2988 if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2992 case HCI_AUTO_CONN_DIRECT:
2993 case HCI_AUTO_CONN_ALWAYS:
2994 list_del_init(&params->action);
2995 list_add(&params->action, &hdev->pend_le_conns);
3003 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3004 cp->reason, mgmt_conn);
3006 hci_disconn_cfm(conn, cp->reason);
3009 /* If the disconnection failed for any reason, the upper layer
3010 * does not retry to disconnect in current implementation.
3011 * Hence, we need to do some basic cleanup here and re-enable
3012 * advertising if necessary.
3016 hci_dev_unlock(hdev);
/* Map the controller-resolved address types (0x02/0x03) back to plain
 * public/random; *resolved (when non-NULL, per the elided branches)
 * records whether the controller resolved the address.
 */
3019 static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved)
3021 /* When using controller based address resolution, then the new
3022 * address types 0x02 and 0x03 are used. These types need to be
3023 * converted back into either public address or random address type
3026 case ADDR_LE_DEV_PUBLIC_RESOLVED:
3029 return ADDR_LE_DEV_PUBLIC;
3030 case ADDR_LE_DEV_RANDOM_RESOLVED:
3033 return ADDR_LE_DEV_RANDOM;
/* Common Command Status handling for LE (ext) create-connection.
 * Records the SMP-relevant initiator/responder addresses on the conn
 * and, when not connecting via the accept list, arms the LE connection
 * timeout.
 */
3041 static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr,
3042 u8 peer_addr_type, u8 own_address_type,
3045 struct hci_conn *conn;
3047 conn = hci_conn_hash_lookup_le(hdev, peer_addr,
/* Normalize controller-resolved own-address types (0x02/0x03). */
3052 own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL);
3054 /* Store the initiator and responder address information which
3055 * is needed for SMP. These values will not change during the
3056 * lifetime of the connection.
3058 conn->init_addr_type = own_address_type;
3059 if (own_address_type == ADDR_LE_DEV_RANDOM)
3060 bacpy(&conn->init_addr, &hdev->random_addr);
3062 bacpy(&conn->init_addr, &hdev->bdaddr);
3064 conn->resp_addr_type = peer_addr_type;
3065 bacpy(&conn->resp_addr, peer_addr);
3067 /* We don't want the connection attempt to stick around
3068 * indefinitely since LE doesn't have a page timeout concept
3069 * like BR/EDR. Set a timer for any connection that doesn't use
3070 * the accept list for connecting.
3072 if (filter_policy == HCI_LE_USE_PEER_ADDR)
3073 queue_delayed_work(conn->hdev->workqueue,
3074 &conn->le_conn_timeout,
3075 conn->conn_timeout);
/* Command Status: LE Create Connection (legacy).
 * Failure paths are handled via hci_conn_failed in the request
 * completion callbacks; here we only record address info for SMP.
 */
3078 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
3080 struct hci_cp_le_create_conn *cp;
3082 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3084 /* All connection failure handling is taken care of by the
3085 * hci_conn_failed function which is triggered by the HCI
3086 * request completion callbacks used for connecting.
3091 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
3097 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3098 cp->own_address_type, cp->filter_policy);
3100 hci_dev_unlock(hdev);
/* Command Status: LE Extended Create Connection.
 * Extended twin of hci_cs_le_create_conn (note the field is named
 * own_addr_type here rather than own_address_type).
 */
3103 static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status)
3105 struct hci_cp_le_ext_create_conn *cp;
3107 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3109 /* All connection failure handling is taken care of by the
3110 * hci_conn_failed function which is triggered by the HCI
3111 * request completion callbacks used for connecting.
3116 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN);
3122 cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type,
3123 cp->own_addr_type, cp->filter_policy);
3125 hci_dev_unlock(hdev);
/* Command Status: LE Read Remote Features.
 * On failure during BT_CONFIG, confirms the failed connect and drops
 * the held reference (LE twin of the BR/EDR handler above).
 */
3128 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
3130 struct hci_cp_le_read_remote_features *cp;
3131 struct hci_conn *conn;
3133 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3138 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
3144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3146 if (conn->state == BT_CONFIG) {
3147 hci_connect_cfm(conn, status);
3148 hci_conn_drop(conn);
3152 hci_dev_unlock(hdev);
/* Command Status: LE Start Encryption.
 * On failure, if the link is still BT_CONNECTED, disconnect it with
 * HCI_ERROR_AUTH_FAILURE since encryption cannot be established.
 */
3155 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
3157 struct hci_cp_le_start_enc *cp;
3158 struct hci_conn *conn;
3160 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3167 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
3171 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
3175 if (conn->state != BT_CONNECTED)
3178 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3179 hci_conn_drop(conn);
3182 hci_dev_unlock(hdev);
/* Command Status: Switch Role.
 * On failure clears the role-switch-pending flag so a new attempt can
 * be made later.
 */
3185 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
3187 struct hci_cp_switch_role *cp;
3188 struct hci_conn *conn;
3190 BT_DBG("%s status 0x%2.2x", hdev->name, status);
3195 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
3201 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
3203 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3205 hci_dev_unlock(hdev);
/* HCI Inquiry Complete event.
 * Clears HCI_INQUIRY, wakes any waiter on that bit, then either starts
 * resolving names from the inquiry cache or marks discovery stopped —
 * taking simultaneous LE scanning (HCI_QUIRK_SIMULTANEOUS_DISCOVERY)
 * into account.
 */
3208 static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data,
3209 struct sk_buff *skb)
3211 struct hci_ev_status *ev = data;
3212 struct discovery_state *discov = &hdev->discovery;
3213 struct inquiry_entry *e;
3215 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3217 hci_conn_check_pending(hdev);
3219 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
3222 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
3223 wake_up_bit(&hdev->flags, HCI_INQUIRY);
3225 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3230 if (discov->state != DISCOVERY_FINDING)
3233 if (list_empty(&discov->resolve)) {
3234 /* When BR/EDR inquiry is active and no LE scanning is in
3235 * progress, then change discovery state to indicate completion.
3237 * When running LE scanning and BR/EDR inquiry simultaneously
3238 * and the LE scan already finished, then change the discovery
3239 * state to indicate completion.
3241 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3242 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3243 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Names still need resolving: kick off the first request and move
 * discovery into the RESOLVING state with a fresh time budget.
 */
3247 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
3248 if (e && hci_resolve_name(hdev, e) == 0) {
3249 e->name_state = NAME_PENDING;
3250 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
3251 discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION;
3253 /* When BR/EDR inquiry is active and no LE scanning is in
3254 * progress, then change discovery state to indicate completion.
3256 * When running LE scanning and BR/EDR inquiry simultaneously
3257 * and the LE scan already finished, then change the discovery
3258 * state to indicate completion.
3260 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
3261 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
3262 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3266 hci_dev_unlock(hdev);
/* HCI Inquiry Result event.
 * Validates the event length against the advertised result count, then
 * feeds each result into the inquiry cache and reports it to mgmt with
 * an invalid RSSI (this event variant carries none). Periodic-inquiry
 * results are not reported.
 */
3269 static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,
3270 struct sk_buff *skb)
3272 struct hci_ev_inquiry_result *ev = edata;
3273 struct inquiry_data data;
/* Bail out if the skb is shorter than num * sizeof(info). */
3276 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT,
3277 flex_array_size(ev, info, ev->num)))
3280 bt_dev_dbg(hdev, "num %d", ev->num);
3285 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3290 for (i = 0; i < ev->num; i++) {
3291 struct inquiry_info *info = &ev->info[i];
3294 bacpy(&data.bdaddr, &info->bdaddr);
3295 data.pscan_rep_mode = info->pscan_rep_mode;
3296 data.pscan_period_mode = info->pscan_period_mode;
3297 data.pscan_mode = info->pscan_mode;
3298 memcpy(data.dev_class, info->dev_class, 3);
3299 data.clock_offset = info->clock_offset;
/* Basic inquiry results carry no RSSI or SSP capability info. */
3300 data.rssi = HCI_RSSI_INVALID;
3301 data.ssp_mode = 0x00;
3303 flags = hci_inquiry_cache_update(hdev, &data, false);
3305 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3306 info->dev_class, HCI_RSSI_INVALID,
3307 flags, NULL, 0, NULL, 0, 0);
3310 hci_dev_unlock(hdev);
/* HCI Connection Complete event handler (BR/EDR ACL, SCO and eSCO).
 *
 * Looks up (or, for event-filter auto-connections, creates) the
 * hci_conn matching the event, records the connection handle, moves the
 * connection to BT_CONFIG/BT_CONNECTED and kicks off follow-up work
 * (remote feature read, packet type change, sysfs/debugfs entries).
 * On error status the connection attempt is failed via hci_conn_failed().
 *
 * NOTE(review): locking, gotos and several branch bodies are elided in
 * this excerpt; comments describe only what is visible here.
 */
3313 static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
3314 struct sk_buff *skb)
3316 struct hci_ev_conn_complete *ev = data;
3317 struct hci_conn *conn;
3318 u8 status = ev->status;
3320 bt_dev_dbg(hdev, "status 0x%2.2x", status);
3324 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3326 /* In case of error status and there is no connection pending
3327 * just unlock as there is nothing to cleanup.
3332 /* Connection may not exist if auto-connected. Check the bredr
3333 * allowlist to see if this device is allowed to auto connect.
3334 * If link is an ACL type, create a connection class
3337 * Auto-connect will only occur if the event filter is
3338 * programmed with a given address. Right now, event filter is
3339 * only used during suspend.
3341 if (ev->link_type == ACL_LINK &&
3342 hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
3345 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3348 bt_dev_err(hdev, "no memory for new conn")
3352 if (ev->link_type != SCO_LINK)
/* SCO completion for a connection tracked as eSCO: retag it as SCO. */
3355 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK,
3360 conn->type = SCO_LINK;
3364 /* The HCI_Connection_Complete event is only sent once per connection.
3365 * Processing it more than once per connection can corrupt kernel memory.
3367 * As the connection handle is set here for the first time, it indicates
3368 * whether the connection is already set up.
3370 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
3371 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
3376 conn->handle = __le16_to_cpu(ev->handle);
/* Handles above HCI_CONN_HANDLE_MAX are spec-invalid; treat as error. */
3377 if (conn->handle > HCI_CONN_HANDLE_MAX) {
3378 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
3379 conn->handle, HCI_CONN_HANDLE_MAX);
3380 status = HCI_ERROR_INVALID_PARAMETERS;
3384 if (conn->type == ACL_LINK) {
3385 conn->state = BT_CONFIG;
3386 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) links without a stored key get the longer
 * pairing timeout; everything else the normal disconnect timeout.
 */
3388 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
3389 !hci_find_link_key(hdev, &ev->bdaddr))
3390 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3392 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3394 conn->state = BT_CONNECTED;
3396 hci_debugfs_create_conn(conn);
3397 hci_conn_add_sysfs(conn);
/* Mirror adapter-wide auth/encrypt mode onto the new connection. */
3399 if (test_bit(HCI_AUTH, &hdev->flags))
3400 set_bit(HCI_CONN_AUTH, &conn->flags);
3402 if (test_bit(HCI_ENCRYPT, &hdev->flags))
3403 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3405 /* Get remote features */
3406 if (conn->type == ACL_LINK) {
3407 struct hci_cp_read_remote_features cp;
3408 cp.handle = ev->handle;
3409 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
3412 hci_update_scan(hdev);
3415 /* Set packet type for incoming connection */
3416 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
3417 struct hci_cp_change_conn_ptype cp;
3418 cp.handle = ev->handle;
3419 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3420 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
3425 if (get_link_mode(conn) & HCI_LM_MASTER)
3426 hci_conn_change_supervision_timeout(conn,
3427 LINK_SUPERVISION_TIMEOUT);
3431 if (conn->type == ACL_LINK)
3432 hci_sco_setup(conn, ev->status);
3436 hci_conn_failed(conn, status);
3437 } else if (ev->link_type == SCO_LINK) {
3438 switch (conn->setting & SCO_AIRMODE_MASK) {
3439 case SCO_AIRMODE_CVSD:
/* Let the driver route CVSD audio over the transport if it cares. */
3441 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
3445 hci_connect_cfm(conn, status);
3449 hci_dev_unlock(hdev);
3451 hci_conn_check_pending(hdev);
3454 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
3456 struct hci_cp_reject_conn_req cp;
3458 bacpy(&cp.bdaddr, bdaddr);
3459 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
3460 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* HCI Connection Request event handler.
 *
 * Decides whether to accept an incoming BR/EDR connection. The request
 * is rejected when no L2CAP listener accepts it, when the peer is on the
 * reject list, when mgmt policy (HCI_CONNECTABLE / accept list) forbids
 * it, or when a SCO/eSCO link already exists. Otherwise an hci_conn is
 * looked up or created and the request is accepted (ACL or sync),
 * or deferred to userspace via hci_connect_cfm().
 *
 * NOTE(review): locking, gotos and some branch bodies are elided in this
 * excerpt; comments describe only what is visible here.
 */
3463 static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
3464 struct sk_buff *skb)
3466 struct hci_ev_conn_request *ev = data;
3467 int mask = hdev->link_mode;
3468 struct inquiry_entry *ie;
3469 struct hci_conn *conn;
3472 bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
/* Ask the protocol layers (L2CAP/SCO) whether anyone wants this link. */
3474 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
3477 if (!(mask & HCI_LM_ACCEPT)) {
3478 hci_reject_conn(hdev, &ev->bdaddr);
3484 if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr,
3486 hci_reject_conn(hdev, &ev->bdaddr);
3490 /* Require HCI_CONNECTABLE or an accept list entry to accept the
3491 * connection. These features are only touched through mgmt so
3492 * only do the checks if HCI_MGMT is set.
3494 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3495 !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
3496 !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr,
3498 hci_reject_conn(hdev, &ev->bdaddr);
3502 /* Connection accepted */
/* Refresh the cached device class while we have it on the wire. */
3504 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3506 memcpy(ie->data.dev_class, ev->dev_class, 3);
/* Only one SCO/eSCO link at a time: reject further sync requests. */
3509 if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
3510 hci_conn_hash_lookup_sco(hdev)) {
3511 struct hci_cp_reject_conn_req cp;
3513 bacpy(&cp.bdaddr, &ev->bdaddr);
3514 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
3515 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
3517 hci_dev_unlock(hdev);
3522 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
3525 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
3528 bt_dev_err(hdev, "no memory for new connection");
3533 memcpy(conn->dev_class, ev->dev_class, 3);
3535 hci_dev_unlock(hdev);
/* Plain ACL, or sync link on a controller without eSCO support and no
 * deferred setup requested: accept immediately.
 */
3537 if (ev->link_type == ACL_LINK ||
3538 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
3539 struct hci_cp_accept_conn_req cp;
3540 conn->state = BT_CONNECT;
3542 bacpy(&cp.bdaddr, &ev->bdaddr);
3544 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
3545 cp.role = 0x00; /* Become central */
3547 cp.role = 0x01; /* Remain peripheral */
3549 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
3550 } else if (!(flags & HCI_PROTO_DEFER)) {
/* eSCO-capable controller: accept with explicit sync parameters. */
3551 struct hci_cp_accept_sync_conn_req cp;
3552 conn->state = BT_CONNECT;
3554 bacpy(&cp.bdaddr, &ev->bdaddr);
3555 cp.pkt_type = cpu_to_le16(conn->pkt_type);
3557 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
3558 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
3559 cp.max_latency = cpu_to_le16(0xffff);
3560 cp.content_format = cpu_to_le16(hdev->voice_setting);
3561 cp.retrans_effort = 0xff;
3563 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
/* Deferred setup: let the socket layer confirm or reject later. */
3566 conn->state = BT_CONNECT2;
3567 hci_connect_cfm(conn, 0);
3572 hci_dev_unlock(hdev);
3575 static u8 hci_to_mgmt_reason(u8 err)
3578 case HCI_ERROR_CONNECTION_TIMEOUT:
3579 return MGMT_DEV_DISCONN_TIMEOUT;
3580 case HCI_ERROR_REMOTE_USER_TERM:
3581 case HCI_ERROR_REMOTE_LOW_RESOURCES:
3582 case HCI_ERROR_REMOTE_POWER_OFF:
3583 return MGMT_DEV_DISCONN_REMOTE;
3584 case HCI_ERROR_LOCAL_HOST_TERM:
3585 return MGMT_DEV_DISCONN_LOCAL_HOST;
3587 return MGMT_DEV_DISCONN_UNKNOWN;
/* HCI Disconnection Complete event handler.
 *
 * Marks the connection BT_CLOSED, reports the disconnect (and its mapped
 * mgmt reason) to userspace, and performs link-type specific cleanup:
 * link-key removal and scan update for ACL, auto-connect re-arming and
 * passive scan update for LE, plus re-enabling advertising for
 * peripheral-role LE links.
 *
 * Fix: lines 3643/3644 contained HTML-entity mojibake "¶ms" where the
 * source reads "&params"; restored so the list operations compile.
 *
 * NOTE(review): locking, gotos and some branch bodies are elided in this
 * excerpt; comments describe only what is visible here.
 */
3591 static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data,
3592 struct sk_buff *skb)
3594 struct hci_ev_disconn_complete *ev = data;
3596 struct hci_conn_params *params;
3597 struct hci_conn *conn;
3598 bool mgmt_connected;
3600 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3604 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status means the disconnect itself failed. */
3609 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
3610 conn->dst_type, ev->status);
3614 conn->state = BT_CLOSED;
3616 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
3618 if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags))
3619 reason = MGMT_DEV_DISCONN_AUTH_FAILURE;
3621 reason = hci_to_mgmt_reason(ev->reason);
3623 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
3624 reason, mgmt_connected);
3626 if (conn->type == ACL_LINK) {
3627 if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
3628 hci_remove_link_key(hdev, &conn->dst);
3630 hci_update_scan(hdev);
/* Re-arm auto-connection for this LE peer according to its policy. */
3633 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
3635 switch (params->auto_connect) {
3636 case HCI_AUTO_CONN_LINK_LOSS:
/* Reconnect on link loss only applies to actual supervision timeouts. */
3637 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
3641 case HCI_AUTO_CONN_DIRECT:
3642 case HCI_AUTO_CONN_ALWAYS:
3643 list_del_init(&params->action);
3644 list_add(&params->action, &hdev->pend_le_conns);
3645 hci_update_passive_scan(hdev);
3653 hci_disconn_cfm(conn, ev->reason);
3655 /* Re-enable advertising if necessary, since it might
3656 * have been disabled by the connection. From the
3657 * HCI_LE_Set_Advertise_Enable command description in
3658 * the core specification (v4.0):
3659 * "The Controller shall continue advertising until the Host
3660 * issues an LE_Set_Advertise_Enable command with
3661 * Advertising_Enable set to 0x00 (Advertising is disabled)
3662 * or until a connection is created or until the Advertising
3663 * is timed out due to Directed Advertising."
3665 if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) {
3666 hdev->cur_adv_instance = conn->adv_instance;
3667 hci_enable_advertising(hdev);
/* Last ACL link gone: make sure page scanning is back on so new
 * connections can reach us.
 */
3673 if (conn->type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
3677 iscan = test_bit(HCI_ISCAN, &hdev->flags);
3678 pscan = test_bit(HCI_PSCAN, &hdev->flags);
3679 if (!iscan && !pscan) {
3680 u8 scan_enable = SCAN_PAGE;
3682 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3683 sizeof(scan_enable), &scan_enable);
3689 hci_dev_unlock(hdev);
/* HCI Authentication Complete event handler.
 *
 * Updates the connection's auth state on success (or reports the failure
 * to mgmt), then continues the security sequence: for connections in
 * BT_CONFIG either starts encryption (SSP) or completes setup, and for
 * a pending encryption request issues HCI_Set_Connection_Encryption.
 *
 * NOTE(review): locking, gotos and some branch bodies are elided in this
 * excerpt; comments describe only what is visible here.
 */
3692 static void hci_auth_complete_evt(struct hci_dev *hdev, void *data,
3693 struct sk_buff *skb)
3695 struct hci_ev_auth_complete *ev = data;
3696 struct hci_conn *conn;
3698 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3702 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3707 /* PIN or Key Missing patch */
3708 BT_DBG("remote_auth %x, remote_cap %x, auth_type %x, io_capability %x",
3709 conn->remote_auth, conn->remote_cap,
3710 conn->auth_type, conn->io_capability);
/* Status 0x06 is "PIN or Key Missing": drop the stale link key and
 * retry authentication on SSP-enabled links.
 */
3712 if (ev->status == 0x06 && hci_conn_ssp_enabled(conn)) {
3713 struct hci_cp_auth_requested cp;
3715 BT_DBG("Pin or key missing");
3716 hci_remove_link_key(hdev, &conn->dst);
3717 cp.handle = cpu_to_le16(conn->handle);
3718 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
3725 clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
/* Legacy (pre-SSP) devices cannot re-authenticate an existing link. */
3727 if (!hci_conn_ssp_enabled(conn) &&
3728 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
3729 bt_dev_info(hdev, "re-auth of legacy device is not possible.");
3731 set_bit(HCI_CONN_AUTH, &conn->flags);
3732 conn->sec_level = conn->pending_sec_level;
3735 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3736 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3738 mgmt_auth_failed(conn, ev->status);
3741 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3742 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
3744 if (conn->state == BT_CONFIG) {
/* SSP links must also be encrypted before setup is complete. */
3745 if (!ev->status && hci_conn_ssp_enabled(conn)) {
3746 struct hci_cp_set_conn_encrypt cp;
3747 cp.handle = ev->handle;
3749 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3752 conn->state = BT_CONNECTED;
3753 hci_connect_cfm(conn, ev->status);
3754 hci_conn_drop(conn);
3757 hci_auth_cfm(conn, ev->status);
3759 hci_conn_hold(conn);
3760 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3761 hci_conn_drop(conn);
/* An encryption change was queued behind this authentication. */
3764 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
3766 struct hci_cp_set_conn_encrypt cp;
3767 cp.handle = ev->handle;
3769 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
3772 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3773 hci_encrypt_cfm(conn, ev->status);
3778 hci_dev_unlock(hdev);
/* HCI Remote Name Request Complete event handler.
 *
 * Forwards the resolved (or failed) remote name to the mgmt name
 * resolution machinery, then, if an outgoing connection still needs
 * authentication, issues HCI_Authentication_Requested.
 *
 * NOTE(review): locking and gotos are elided in this excerpt; comments
 * describe only what is visible here.
 */
3781 static void hci_remote_name_evt(struct hci_dev *hdev, void *data,
3782 struct sk_buff *skb)
3784 struct hci_ev_remote_name *ev = data;
3785 struct hci_conn *conn;
3787 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3789 hci_conn_check_pending(hdev);
3793 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Name resolution bookkeeping only matters when mgmt is in use. */
3795 if (!hci_dev_test_flag(hdev, HCI_MGMT))
3798 if (ev->status == 0)
3799 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
3800 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
3802 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
3808 if (!hci_outgoing_auth_needed(hdev, conn))
/* Start authentication exactly once; we are the initiating side. */
3811 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3812 struct hci_cp_auth_requested cp;
3814 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
3816 cp.handle = __cpu_to_le16(conn->handle);
3817 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
3821 hci_dev_unlock(hdev);
/* HCI Encryption Change event handler.
 *
 * Updates the connection's AUTH/ENCRYPT/FIPS/AES-CCM flags from the new
 * encryption state, re-checks link security requirements (disconnecting
 * on violation), queries the encryption key size for encrypted ACL
 * links, and programs the authenticated payload timeout where LMP/LE
 * ping is supported.
 *
 * NOTE(review): locking, gotos and some branch bodies are elided in this
 * excerpt; comments describe only what is visible here.
 */
3824 static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
3825 struct sk_buff *skb)
3827 struct hci_ev_encrypt_change *ev = data;
3828 struct hci_conn *conn;
3830 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3834 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3840 /* Encryption implies authentication */
3841 set_bit(HCI_CONN_AUTH, &conn->flags);
3842 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
3843 conn->sec_level = conn->pending_sec_level;
3845 /* P-256 authentication key implies FIPS */
3846 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
3847 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE is always AES-CCM. */
3849 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
3850 conn->type == LE_LINK)
3851 set_bit(HCI_CONN_AES_CCM, &conn->flags);
3853 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
3854 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
3858 /* We should disregard the current RPA and generate a new one
3859 * whenever the encryption procedure fails.
3861 if (ev->status && conn->type == LE_LINK) {
3862 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
3863 hci_adv_instances_set_rpa_expired(hdev, true);
3866 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3868 /* Check link security requirements are met */
3869 if (!hci_conn_check_link_mode(conn))
3870 ev->status = HCI_ERROR_AUTH_FAILURE;
3872 if (ev->status && conn->state == BT_CONNECTED) {
3873 if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING)
3874 set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags);
3876 /* Notify upper layers so they can cleanup before
3879 hci_encrypt_cfm(conn, ev->status);
3880 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3881 hci_conn_drop(conn);
3885 /* Try reading the encryption key size for encrypted ACL links */
3886 if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
3887 struct hci_cp_read_enc_key_size cp;
3889 /* Only send HCI_Read_Encryption_Key_Size if the
3890 * controller really supports it. If it doesn't, assume
3891 * the default size (16).
3893 if (!(hdev->commands[20] & 0x10)) {
3894 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3898 cp.handle = cpu_to_le16(conn->handle);
3899 if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE,
3901 bt_dev_err(hdev, "sending read key size failed");
3902 conn->enc_key_size = HCI_LINK_KEY_SIZE;
3909 /* Set the default Authenticated Payload Timeout after
3910 * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B
3911 * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be
3912 * sent when the link is active and Encryption is enabled, the conn
3913 * type can be either LE or ACL and controller must support LMP Ping.
3914 * Ensure for AES-CCM encryption as well.
3916 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) &&
3917 test_bit(HCI_CONN_AES_CCM, &conn->flags) &&
3918 ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) ||
3919 (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) {
3920 struct hci_cp_write_auth_payload_to cp;
3922 cp.handle = cpu_to_le16(conn->handle);
3923 cp.timeout = cpu_to_le16(hdev->auth_payload_timeout);
3924 hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO,
3929 hci_encrypt_cfm(conn, ev->status);
3932 hci_dev_unlock(hdev);
/* HCI Change Connection Link Key Complete event handler.
 *
 * Marks the link key as secure on success, clears the pending-auth flag
 * and notifies interested layers via hci_key_change_cfm().
 *
 * NOTE(review): locking and the !conn early-out are elided in this
 * excerpt.
 */
3935 static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data,
3936 struct sk_buff *skb)
3938 struct hci_ev_change_link_key_complete *ev = data;
3939 struct hci_conn *conn;
3941 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3945 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3948 set_bit(HCI_CONN_SECURE, &conn->flags);
3950 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
3952 hci_key_change_cfm(conn, ev->status);
3955 hci_dev_unlock(hdev);
/* HCI Read Remote Supported Features Complete event handler.
 *
 * Stores the remote feature page 0 and, while the connection is still in
 * BT_CONFIG, continues setup: read extended features when both sides
 * support them, otherwise request the remote name (for mgmt device
 * announcement) and finish the connection if no authentication is
 * needed.
 *
 * NOTE(review): locking and gotos are elided in this excerpt.
 */
3958 static void hci_remote_features_evt(struct hci_dev *hdev, void *data,
3959 struct sk_buff *skb)
3961 struct hci_ev_remote_features *ev = data;
3962 struct hci_conn *conn;
3964 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
3968 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3973 memcpy(conn->features[0], ev->features, 8);
3975 if (conn->state != BT_CONFIG)
3978 if (!ev->status && lmp_ext_feat_capable(hdev) &&
3979 lmp_ext_feat_capable(conn)) {
3980 struct hci_cp_read_remote_ext_features cp;
3981 cp.handle = ev->handle;
3983 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
/* Not yet announced to mgmt: fetch the name first so the device shows
 * up with it; otherwise announce the connection now.
 */
3988 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3989 struct hci_cp_remote_name_req cp;
3990 memset(&cp, 0, sizeof(cp));
3991 bacpy(&cp.bdaddr, &conn->dst);
3992 cp.pscan_rep_mode = 0x02;
3993 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3994 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3995 mgmt_device_connected(hdev, conn, NULL, 0);
3997 if (!hci_outgoing_auth_needed(hdev, conn)) {
3998 conn->state = BT_CONNECTED;
3999 hci_connect_cfm(conn, ev->status);
4000 hci_conn_drop(conn);
4004 hci_dev_unlock(hdev);
/* Update the outstanding-HCI-command accounting from an event's
 * Num_HCI_Command_Packets field: cancel the command timeout and, unless
 * a reset is in flight, either re-open the command window (ncmd != 0)
 * or arm the "controller stopped accepting commands" watchdog timer.
 *
 * NOTE(review): the ncmd == 0 else-branch structure is partially elided
 * in this excerpt.
 */
4007 static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)
4009 cancel_delayed_work(&hdev->cmd_timer);
4012 if (!test_bit(HCI_RESET, &hdev->flags)) {
4014 cancel_delayed_work(&hdev->ncmd_timer);
4015 atomic_set(&hdev->cmd_cnt, 1);
4017 if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4018 queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer,
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE_V2.
 *
 * Caches the LE ACL and ISO buffer parameters reported by the
 * controller and initialises the corresponding in-flight packet
 * counters.
 *
 * NOTE(review): the debug line prints hdev->acl_mtu/acl_pkts although
 * this handler updates le_mtu/le_pkts — looks like a copy-paste from
 * the BR/EDR buffer-size handler; confirm against upstream (later
 * kernels print le_mtu/le_pkts here).
 */
4025 static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data,
4026 struct sk_buff *skb)
4028 struct hci_rp_le_read_buffer_size_v2 *rp = data;
4030 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4035 hdev->le_mtu = __le16_to_cpu(rp->acl_mtu);
4036 hdev->le_pkts = rp->acl_max_pkt;
4037 hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu);
4038 hdev->iso_pkts = rp->iso_max_pkt;
4040 hdev->le_cnt = hdev->le_pkts;
4041 hdev->iso_cnt = hdev->iso_pkts;
4043 BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu,
4044 hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts);
/* Command Complete handler for HCI_OP_LE_SET_CIG_PARAMS.
 *
 * On failure, closes every connection belonging to the CIG and confirms
 * the error upward. On success, walks the connection hash assigning the
 * controller-allocated CIS handles in order, creating each CIS whose
 * parent LE link is already connected.
 *
 * NOTE(review): locking (incl. the RCU section around the hash walk)
 * and some statements are elided in this excerpt.
 */
4049 static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
4050 struct sk_buff *skb)
4052 struct hci_rp_le_set_cig_params *rp = data;
4053 struct hci_conn *conn;
4056 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Failure path: tear down every pending connection in this CIG. */
4061 while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
4062 conn->state = BT_CLOSED;
4063 hci_connect_cfm(conn, rp->status);
4071 list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
4072 if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id ||
4073 conn->state == BT_CONNECTED)
/* Handles come back in the same order the CIS were configured. */
4076 conn->handle = __le16_to_cpu(rp->handle[i++]);
4078 bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn,
4079 conn->handle, conn->link);
4081 /* Create CIS if LE is already connected */
4082 if (conn->link && conn->link->state == BT_CONNECTED) {
4084 hci_le_create_cis(conn->link);
4088 if (i == rp->num_handles)
4095 hci_dev_unlock(hdev);
/* Command Complete handler for HCI_OP_LE_SETUP_ISO_PATH.
 *
 * Correlates the reply with the sent command, looks up the CIS
 * connection and confirms it once the data path setup matching its QoS
 * direction(s) has completed (or immediately on error).
 *
 * NOTE(review): locking, early returns and the switch case labels for
 * cp->direction are elided in this excerpt.
 */
4100 static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data,
4101 struct sk_buff *skb)
4103 struct hci_rp_le_setup_iso_path *rp = data;
4104 struct hci_cp_le_setup_iso_path *cp;
4105 struct hci_conn *conn;
4107 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
/* Recover the parameters of the command this reply answers. */
4109 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH);
4115 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
4120 hci_connect_cfm(conn, rp->status);
4125 switch (cp->direction) {
4126 /* Input (Host to Controller) */
4128 /* Only confirm connection if output only */
4129 if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu)
4130 hci_connect_cfm(conn, rp->status);
4132 /* Output (Controller to Host) */
4134 /* Confirm connection since conn->iso_qos is always configured
4137 hci_connect_cfm(conn, rp->status);
4142 hci_dev_unlock(hdev);
/* Command Status handler for HCI_OP_LE_CREATE_BIG: only logs the
 * status; completion is handled via the LE Create BIG Complete event.
 */
4146 static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status)
4148 bt_dev_dbg(hdev, "status 0x%2.2x", status);
/* Command Complete handler for HCI_OP_LE_SET_PER_ADV_PARAMS.
 *
 * Correlates the status reply with the sent command parameters; state
 * propagation is still a TODO (see comment below).
 *
 * NOTE(review): status check and early returns are elided in this
 * excerpt.
 */
4151 static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data,
4152 struct sk_buff *skb)
4154 struct hci_ev_status *rp = data;
4155 struct hci_cp_le_set_per_adv_params *cp;
4157 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4162 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS);
4166 /* TODO: set the conn state */
/* Command Complete handler for HCI_OP_LE_SET_PER_ADV_ENABLE.
 *
 * Mirrors the enable/disable outcome into the HCI_LE_PER_ADV device
 * flag so the rest of the stack knows periodic advertising state.
 *
 * NOTE(review): status check and the enable/disable branch condition
 * are elided in this excerpt.
 */
4170 static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
4171 struct sk_buff *skb)
4173 struct hci_ev_status *rp = data;
4176 bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
4181 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE);
4188 hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
4190 hci_dev_clear_flag(hdev, HCI_LE_PER_ADV);
4192 hci_dev_unlock(hdev);
/* Table-entry helpers for hci_cc_table: HCI_CC_VL declares a Command
 * Complete handler accepting a [min, max] reply-length range, HCI_CC a
 * fixed reply length, and HCI_CC_STATUS a bare status reply
 * (struct hci_ev_status). (The HCI_CC_VL expansion body is elided in
 * this excerpt.)
 */
4197 #define HCI_CC_VL(_op, _func, _min, _max) \
4205 #define HCI_CC(_op, _func, _len) \
4206 HCI_CC_VL(_op, _func, _len, _len)
4208 #define HCI_CC_STATUS(_op, _func) \
4209 HCI_CC(_op, _func, sizeof(struct hci_ev_status))
/* Dispatch table mapping each HCI Command Complete opcode to its
 * handler together with the expected reply length (exact via HCI_CC, a
 * status byte via HCI_CC_STATUS, or a [min, max] range via HCI_CC_VL).
 * hci_cmd_complete_evt() scans this table linearly for every event.
 */
4211 static const struct hci_cc {
4213 u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
4216 } hci_cc_table[] = {
4217 HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel),
4218 HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq),
4219 HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq),
4220 HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL,
4221 hci_cc_remote_name_req_cancel),
4222 HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery,
4223 sizeof(struct hci_rp_role_discovery)),
4224 HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy,
4225 sizeof(struct hci_rp_read_link_policy)),
4226 HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy,
4227 sizeof(struct hci_rp_write_link_policy)),
4228 HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy,
4229 sizeof(struct hci_rp_read_def_link_policy)),
4230 HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY,
4231 hci_cc_write_def_link_policy),
4232 HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset),
4233 HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key,
4234 sizeof(struct hci_rp_read_stored_link_key)),
4235 HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key,
4236 sizeof(struct hci_rp_delete_stored_link_key)),
4237 HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name),
4238 HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name,
4239 sizeof(struct hci_rp_read_local_name)),
4240 HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable),
4241 HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode),
4242 HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable),
4243 HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter),
4244 HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev,
4245 sizeof(struct hci_rp_read_class_of_dev)),
4246 HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev),
4247 HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting,
4248 sizeof(struct hci_rp_read_voice_setting)),
4249 HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting),
4250 HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, hci_cc_read_num_supported_iac,
4251 sizeof(struct hci_rp_read_num_supported_iac)),
4252 HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode),
4253 HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support),
4254 HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout,
4255 sizeof(struct hci_rp_read_auth_payload_to)),
4256 HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout,
4257 sizeof(struct hci_rp_write_auth_payload_to)),
4258 HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version,
4259 sizeof(struct hci_rp_read_local_version)),
4260 HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands,
4261 sizeof(struct hci_rp_read_local_commands)),
4262 HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features,
4263 sizeof(struct hci_rp_read_local_features)),
4264 HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features,
4265 sizeof(struct hci_rp_read_local_ext_features)),
4266 HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size,
4267 sizeof(struct hci_rp_read_buffer_size)),
4268 HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr,
4269 sizeof(struct hci_rp_read_bd_addr)),
4270 HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts,
4271 sizeof(struct hci_rp_read_local_pairing_opts)),
4272 HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity,
4273 sizeof(struct hci_rp_read_page_scan_activity)),
4274 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
4275 hci_cc_write_page_scan_activity),
4276 HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type,
4277 sizeof(struct hci_rp_read_page_scan_type)),
4278 HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type),
4279 HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size,
4280 sizeof(struct hci_rp_read_data_block_size)),
4281 HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode,
4282 sizeof(struct hci_rp_read_flow_control_mode)),
4283 HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info,
4284 sizeof(struct hci_rp_read_local_amp_info)),
4285 HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,
4286 sizeof(struct hci_rp_read_clock)),
4287 HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size,
4288 sizeof(struct hci_rp_read_enc_key_size)),
4289 HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,
4290 sizeof(struct hci_rp_read_inq_rsp_tx_power)),
4291 HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4292 hci_cc_read_def_err_data_reporting,
4293 sizeof(struct hci_rp_read_def_err_data_reporting)),
4294 HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4295 hci_cc_write_def_err_data_reporting),
4296 HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply,
4297 sizeof(struct hci_rp_pin_code_reply)),
4298 HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply,
4299 sizeof(struct hci_rp_pin_code_neg_reply)),
4300 HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data,
4301 sizeof(struct hci_rp_read_local_oob_data)),
4302 HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data,
4303 sizeof(struct hci_rp_read_local_oob_ext_data)),
4304 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size,
4305 sizeof(struct hci_rp_le_read_buffer_size)),
4306 HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features,
4307 sizeof(struct hci_rp_le_read_local_features)),
4308 HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power,
4309 sizeof(struct hci_rp_le_read_adv_tx_power)),
4310 HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply,
4311 sizeof(struct hci_rp_user_confirm_reply)),
4312 HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply,
4313 sizeof(struct hci_rp_user_confirm_reply)),
4314 HCI_CC(HCI_OP_USER_PASSKEY_REPLY, hci_cc_user_passkey_reply,
4315 sizeof(struct hci_rp_user_confirm_reply)),
4316 HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply,
4317 sizeof(struct hci_rp_user_confirm_reply)),
4318 HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr),
4319 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable),
4320 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param),
4321 HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable),
4322 HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4323 hci_cc_le_read_accept_list_size,
4324 sizeof(struct hci_rp_le_read_accept_list_size)),
4325 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list),
4326 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST,
4327 hci_cc_le_add_to_accept_list),
4328 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
4329 hci_cc_le_del_from_accept_list),
4330 HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states,
4331 sizeof(struct hci_rp_le_read_supported_states)),
4332 HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len,
4333 sizeof(struct hci_rp_le_read_def_data_len)),
4334 HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN,
4335 hci_cc_le_write_def_data_len),
4336 HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST,
4337 hci_cc_le_add_to_resolv_list),
4338 HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST,
4339 hci_cc_le_del_from_resolv_list),
4340 HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST,
4341 hci_cc_le_clear_resolv_list),
4342 HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size,
4343 sizeof(struct hci_rp_le_read_resolv_list_size)),
4344 HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
4345 hci_cc_le_set_addr_resolution_enable),
4346 HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len,
4347 sizeof(struct hci_rp_le_read_max_data_len)),
4348 HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED,
4349 hci_cc_write_le_host_supported),
4350 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param),
4351 HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi,
4352 sizeof(struct hci_rp_read_rssi)),
4353 HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power,
4354 sizeof(struct hci_rp_read_tx_power)),
4355 HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode),
4356 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS,
4357 hci_cc_le_set_ext_scan_param),
4358 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE,
4359 hci_cc_le_set_ext_scan_enable),
4360 HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy),
4361 HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4362 hci_cc_le_read_num_adv_sets,
4363 sizeof(struct hci_rp_le_read_num_supported_adv_sets)),
4364 HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param,
4365 sizeof(struct hci_rp_le_set_ext_adv_params)),
4366 HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE,
4367 hci_cc_le_set_ext_adv_enable),
4368 HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
4369 hci_cc_le_set_adv_set_random_addr),
4370 HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set),
4371 HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets),
4372 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param),
4373 HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE,
4374 hci_cc_le_set_per_adv_enable),
4375 HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power,
4376 sizeof(struct hci_rp_le_read_transmit_power)),
4378 HCI_CC(HCI_OP_ENABLE_RSSI, hci_cc_enable_rssi,
4379 sizeof(struct hci_cc_rsp_enable_rssi)),
4380 HCI_CC(HCI_OP_GET_RAW_RSSI, hci_cc_get_raw_rssi,
4381 sizeof(struct hci_cc_rp_get_raw_rssi)),
4383 HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode),
4384 HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2,
4385 sizeof(struct hci_rp_le_read_buffer_size_v2)),
4386 HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params,
4387 sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE),
4388 HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, hci_cc_le_setup_iso_path,
4389 sizeof(struct hci_rp_le_setup_iso_path)),
4392 static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc,
4393 struct sk_buff *skb)
4397 if (skb->len < cc->min_len) {
4398 bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u",
4399 cc->op, skb->len, cc->min_len);
4400 return HCI_ERROR_UNSPECIFIED;
4403 /* Just warn if the length is over max_len size it still be possible to
4404 * partially parse the cc so leave to callback to decide if that is
4407 if (skb->len > cc->max_len)
4408 bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u",
4409 cc->op, skb->len, cc->max_len);
4411 data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len);
4413 return HCI_ERROR_UNSPECIFIED;
4415 return cc->func(hdev, data, skb);
/* HCI Command Complete event dispatcher.
 *
 * Looks the opcode up in hci_cc_table and runs the matching handler;
 * unknown opcodes fall back to treating reply byte 0 as the status.
 * Afterwards it updates command-window accounting, completes any
 * waiting request, and kicks the command work queue if more commands
 * are pending.
 */
4418 static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
4419 struct sk_buff *skb, u16 *opcode, u8 *status,
4420 hci_req_complete_t *req_complete,
4421 hci_req_complete_skb_t *req_complete_skb)
4423 struct hci_ev_cmd_complete *ev = data;
4426 *opcode = __le16_to_cpu(ev->opcode);
4428 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
/* Linear scan: the table is small and this path is not hot enough to
 * justify an index.
 */
4430 for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) {
4431 if (hci_cc_table[i].op == *opcode) {
4432 *status = hci_cc_func(hdev, &hci_cc_table[i], skb);
4437 if (i == ARRAY_SIZE(hci_cc_table)) {
4438 /* Unknown opcode, assume byte 0 contains the status, so
4439 * that e.g. __hci_cmd_sync() properly returns errors
4440 * for vendor specific commands send by HCI drivers.
4441 * If a vendor doesn't actually follow this convention we may
4442 * need to introduce a vendor CC table in order to properly set
4445 *status = skb->data[0];
4448 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4450 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
4453 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4455 "unexpected event for opcode 0x%4.4x", *opcode);
/* More room in the command window and work queued: send the next one. */
4459 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4460 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Command Status handler for LE Create CIS: on failure, close every
 * connection referenced in the sent command's CIS handle list.
 * NOTE(review): truncated extract — the early-return on success and the
 * hci_dev_lock() call are not visible here.
 */
4463 static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status)
4465 struct hci_cp_le_create_cis *cp;
4468 bt_dev_dbg(hdev, "status 0x%2.2x", status);
/* Recover the parameters of the command we originally sent. */
4473 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS);
4479 /* Remove connection if command failed */
4480 for (i = 0; cp->num_cis; cp->num_cis--, i++) {
4481 struct hci_conn *conn;
4484 handle = __le16_to_cpu(cp->cis[i].cis_handle);
4486 conn = hci_conn_hash_lookup_handle(hdev, handle);
4488 conn->state = BT_CLOSED;
4489 hci_connect_cfm(conn, status);
4494 hci_dev_unlock(hdev);
/* Dispatch table mapping HCI command opcodes to their Command Status
 * handlers; scanned linearly by hci_cmd_status_evt() below.
 * NOTE(review): the HCI_CS() macro body and the table's closing brace
 * are not visible in this truncated extract.
 */
4497 #define HCI_CS(_op, _func) \
4503 static const struct hci_cs {
4505 void (*func)(struct hci_dev *hdev, __u8 status);
4506 } hci_cs_table[] = {
4507 HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry),
4508 HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn),
4509 HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect),
4510 HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco),
4511 HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested),
4512 HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt),
4513 HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req),
4514 HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features),
4515 HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES,
4516 hci_cs_read_remote_ext_features),
4517 HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn),
4518 HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN,
4519 hci_cs_enhanced_setup_sync_conn),
4520 HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode),
4521 HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode),
4522 HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role),
4523 HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn),
4524 HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features),
4525 HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc),
4526 HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn),
4527 HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis),
4528 HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big),
/* Handle an HCI Command Status event: dispatch to the matching handler
 * in hci_cs_table, account for controller command credits, and flag
 * request completion where appropriate.
 * NOTE(review): truncated extract — some braces/lines are missing.
 */
4531 static void hci_cmd_status_evt(struct hci_dev *hdev, void *data,
4532 struct sk_buff *skb, u16 *opcode, u8 *status,
4533 hci_req_complete_t *req_complete,
4534 hci_req_complete_skb_t *req_complete_skb)
4536 struct hci_ev_cmd_status *ev = data;
4539 *opcode = __le16_to_cpu(ev->opcode);
4540 *status = ev->status;
4542 bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode);
/* Linear scan of the CS table; first matching opcode wins. */
4544 for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) {
4545 if (hci_cs_table[i].op == *opcode) {
4546 hci_cs_table[i].func(hdev, ev->status);
4551 handle_cmd_cnt_and_timer(hdev, ev->ncmd);
4553 /* Indicate request completion if the command failed. Also, if
4554 * we're not waiting for a special event and we get a success
4555 * command status we should try to flag the request as completed
4556 * (since for this kind of commands there will not be a command
4559 if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) {
4560 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
4562 if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
4563 bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x",
/* Kick the command work queue if credits and queued commands remain. */
4569 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
4570 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle an HCI Hardware Error event: notify the management interface,
 * record the error code and schedule an asynchronous controller reset.
 */
4573 static void hci_hardware_error_evt(struct hci_dev *hdev, void *data,
4574 struct sk_buff *skb)
4576 struct hci_ev_hardware_error *ev = data;
4578 bt_dev_dbg(hdev, "code 0x%2.2x", ev->code);
4582 mgmt_hardware_error(hdev, ev->code);
4583 hci_dev_unlock(hdev);
/* Stash the code for the error_reset worker to consume. */
4585 hdev->hw_error_code = ev->code;
4587 queue_work(hdev->req_workqueue, &hdev->error_reset);
/* Handle an HCI Role Change event: update the connection's role, clear
 * the pending role-switch flag and notify interested listeners.
 * NOTE(review): truncated extract — hci_dev_lock() and some braces are
 * not visible here.
 */
4590 static void hci_role_change_evt(struct hci_dev *hdev, void *data,
4591 struct sk_buff *skb)
4593 struct hci_ev_role_change *ev = data;
4594 struct hci_conn *conn;
4596 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4600 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4603 conn->role = ev->role;
4605 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
4607 hci_role_switch_cfm(conn, ev->status, ev->role);
/* On successful switch to master, apply the link supervision timeout. */
4609 if (!ev->status && (get_link_mode(conn) & HCI_LM_MASTER))
4610 hci_conn_change_supervision_timeout(conn,
4611 LINK_SUPERVISION_TIMEOUT);
4615 hci_dev_unlock(hdev);
/* Handle Number Of Completed Packets: return flow-control credits to
 * the per-link-type counters (ACL/LE/SCO/ISO) for each reported handle
 * and restart TX. Credits are clamped so a buggy controller cannot push
 * a counter above the advertised buffer count.
 * NOTE(review): truncated extract — case labels, breaks and some braces
 * are not visible here.
 */
4618 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
4619 struct sk_buff *skb)
4621 struct hci_ev_num_comp_pkts *ev = data;
/* Validate that the skb really holds ev->num handle/count entries. */
4624 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS,
4625 flex_array_size(ev, handles, ev->num)))
4628 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
4629 bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode);
4633 bt_dev_dbg(hdev, "num %d", ev->num);
4635 for (i = 0; i < ev->num; i++) {
4636 struct hci_comp_pkts_info *info = &ev->handles[i];
4637 struct hci_conn *conn;
4638 __u16 handle, count;
4640 handle = __le16_to_cpu(info->handle);
4641 count = __le16_to_cpu(info->count);
4643 conn = hci_conn_hash_lookup_handle(hdev, handle);
4647 conn->sent -= count;
4649 switch (conn->type) {
4651 hdev->acl_cnt += count;
4652 if (hdev->acl_cnt > hdev->acl_pkts)
4653 hdev->acl_cnt = hdev->acl_pkts;
/* LE links use the LE buffer pool if present, else share ACL. */
4657 if (hdev->le_pkts) {
4658 hdev->le_cnt += count;
4659 if (hdev->le_cnt > hdev->le_pkts)
4660 hdev->le_cnt = hdev->le_pkts;
4662 hdev->acl_cnt += count;
4663 if (hdev->acl_cnt > hdev->acl_pkts)
4664 hdev->acl_cnt = hdev->acl_pkts;
4669 hdev->sco_cnt += count;
4670 if (hdev->sco_cnt > hdev->sco_pkts)
4671 hdev->sco_cnt = hdev->sco_pkts;
/* ISO prefers its own pool, falls back to LE, then ACL. */
4675 if (hdev->iso_pkts) {
4676 hdev->iso_cnt += count;
4677 if (hdev->iso_cnt > hdev->iso_pkts)
4678 hdev->iso_cnt = hdev->iso_pkts;
4679 } else if (hdev->le_pkts) {
4680 hdev->le_cnt += count;
4681 if (hdev->le_cnt > hdev->le_pkts)
4682 hdev->le_cnt = hdev->le_pkts;
4684 hdev->acl_cnt += count;
4685 if (hdev->acl_cnt > hdev->acl_pkts)
4686 hdev->acl_cnt = hdev->acl_pkts;
4691 bt_dev_err(hdev, "unknown type %d conn %p",
/* Freed credits may unblock queued TX work. */
4697 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a connection handle to a hci_conn, taking the device type
 * into account (AMP controllers track channels rather than conns).
 * NOTE(review): truncated extract — case labels and return paths for
 * the channel branch are not visible here.
 */
4700 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
4703 struct hci_chan *chan;
4705 switch (hdev->dev_type) {
4707 return hci_conn_hash_lookup_handle(hdev, handle);
4709 chan = hci_chan_lookup_handle(hdev, handle);
4714 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
/* Handle Number Of Completed Data Blocks (block-based flow control):
 * return block credits for each reported handle, clamped to the
 * advertised block count, then restart TX.
 * NOTE(review): truncated extract — case labels/breaks are missing.
 */
4721 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data,
4722 struct sk_buff *skb)
4724 struct hci_ev_num_comp_blocks *ev = data;
/* Validate that the skb really holds ev->num_hndl entries. */
4727 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS,
4728 flex_array_size(ev, handles, ev->num_hndl)))
4731 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
4732 bt_dev_err(hdev, "wrong event for mode %d",
4733 hdev->flow_ctl_mode);
4737 bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks,
4740 for (i = 0; i < ev->num_hndl; i++) {
4741 struct hci_comp_blocks_info *info = &ev->handles[i];
4742 struct hci_conn *conn = NULL;
4743 __u16 handle, block_count;
4745 handle = __le16_to_cpu(info->handle);
4746 block_count = __le16_to_cpu(info->blocks);
4748 conn = __hci_conn_lookup_handle(hdev, handle);
4752 conn->sent -= block_count;
4754 switch (conn->type) {
4757 hdev->block_cnt += block_count;
4758 if (hdev->block_cnt > hdev->num_blocks)
4759 hdev->block_cnt = hdev->num_blocks;
4763 bt_dev_err(hdev, "unknown type %d conn %p",
4769 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle an HCI Mode Change event (active/sniff): update conn->mode,
 * maintain the power-save flag and complete any pending SCO setup.
 */
4772 static void hci_mode_change_evt(struct hci_dev *hdev, void *data,
4773 struct sk_buff *skb)
4775 struct hci_ev_mode_change *ev = data;
4776 struct hci_conn *conn;
4778 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
4782 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4784 conn->mode = ev->mode;
/* Only a remote-initiated change updates the power-save flag. */
4786 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
4788 if (conn->mode == HCI_CM_ACTIVE)
4789 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4791 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
4794 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
4795 hci_sco_setup(conn, ev->status);
4798 hci_dev_unlock(hdev);
/* Handle an HCI PIN Code Request: reject if we are non-bondable and not
 * the auth initiator, otherwise forward the request to user space via
 * mgmt (flagging whether a 16-digit secure PIN is required).
 */
4801 static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data,
4802 struct sk_buff *skb)
4804 struct hci_ev_pin_code_req *ev = data;
4805 struct hci_conn *conn;
4807 bt_dev_dbg(hdev, "");
4811 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* Extend the disconnect timeout while pairing is in progress. */
4815 if (conn->state == BT_CONNECTED) {
4816 hci_conn_hold(conn);
4817 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
4818 hci_conn_drop(conn);
4821 if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
4822 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
4823 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
4824 sizeof(ev->bdaddr), &ev->bdaddr);
4825 } else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
4828 if (conn->pending_sec_level == BT_SECURITY_HIGH)
4833 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
4837 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type.
 * NOTE(review): truncated extract — switch header, break statements and
 * the condition guarding BT_SECURITY_HIGH for combination keys are not
 * visible here.
 */
4840 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
4842 if (key_type == HCI_LK_CHANGED_COMBINATION)
4845 conn->pin_length = pin_len;
4846 conn->key_type = key_type;
4849 case HCI_LK_LOCAL_UNIT:
4850 case HCI_LK_REMOTE_UNIT:
4851 case HCI_LK_DEBUG_COMBINATION:
4853 case HCI_LK_COMBINATION:
4855 conn->pending_sec_level = BT_SECURITY_HIGH;
4857 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4859 case HCI_LK_UNAUTH_COMBINATION_P192:
4860 case HCI_LK_UNAUTH_COMBINATION_P256:
4861 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4863 case HCI_LK_AUTH_COMBINATION_P192:
4864 conn->pending_sec_level = BT_SECURITY_HIGH;
/* P-256 authenticated keys qualify for FIPS-level security. */
4866 case HCI_LK_AUTH_COMBINATION_P256:
4867 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle an HCI Link Key Request: look up the stored key for the peer,
 * refuse keys that are too weak for the pending security level, and
 * reply with either the key or a negative reply.
 * NOTE(review): truncated extract — goto labels and some braces are not
 * visible here.
 */
4872 static void hci_link_key_request_evt(struct hci_dev *hdev, void *data,
4873 struct sk_buff *skb)
4875 struct hci_ev_link_key_req *ev = data;
4876 struct hci_cp_link_key_reply cp;
4877 struct hci_conn *conn;
4878 struct link_key *key;
4880 bt_dev_dbg(hdev, "");
4882 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4887 key = hci_find_link_key(hdev, &ev->bdaddr);
4889 bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr);
4893 bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr);
4895 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4897 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* An unauthenticated key cannot satisfy a request for MITM auth. */
4899 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
4900 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
4901 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
4902 bt_dev_dbg(hdev, "ignoring unauthenticated key");
/* Short-PIN combination keys are too weak for high/FIPS security. */
4906 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
4907 (conn->pending_sec_level == BT_SECURITY_HIGH ||
4908 conn->pending_sec_level == BT_SECURITY_FIPS)) {
4909 bt_dev_dbg(hdev, "ignoring key unauthenticated for high security");
4913 conn_set_key(conn, key->type, key->pin_len);
4916 bacpy(&cp.bdaddr, &ev->bdaddr);
4917 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
4919 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
4921 hci_dev_unlock(hdev);
/* Negative reply path: no usable key for this peer. */
4926 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
4927 hci_dev_unlock(hdev);
/* Handle an HCI Link Key Notification: store the new key, update the
 * connection's key metadata, notify mgmt, and drop debug keys unless
 * HCI_KEEP_DEBUG_KEYS is set.
 * NOTE(review): truncated extract — declarations of pin_len/persistent
 * and several guards are not visible; pin_len's origin cannot be
 * confirmed from this view.
 */
4930 static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
4931 struct sk_buff *skb)
4933 struct hci_ev_link_key_notify *ev = data;
4934 struct hci_conn *conn;
4935 struct link_key *key;
4939 bt_dev_dbg(hdev, "");
4943 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4947 hci_conn_hold(conn);
4948 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4949 hci_conn_drop(conn);
4951 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
4952 conn_set_key(conn, ev->key_type, conn->pin_length);
4954 if (!hci_dev_test_flag(hdev, HCI_MGMT))
4957 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
4958 ev->key_type, pin_len, &persistent);
4962 /* Update connection information since adding the key will have
4963 * fixed up the type in the case of changed combination keys.
4965 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
4966 conn_set_key(conn, key->type, key->pin_len);
4968 mgmt_new_link_key(hdev, key, persistent);
4970 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
4971 * is set. If it's not set simply remove the key from the kernel
4972 * list (we've still notified user space about it but with
4973 * store_hint being 0).
4975 if (key->type == HCI_LK_DEBUG_COMBINATION &&
4976 !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
4977 list_del_rcu(&key->list);
4978 kfree_rcu(key, rcu);
/* Persistence of the key decides whether it is flushed on disconnect. */
4983 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4985 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
4988 hci_dev_unlock(hdev);
/* Handle an HCI Read Clock Offset Complete event: cache the reported
 * clock offset in the inquiry cache entry for the peer, if present.
 */
4991 static void hci_clock_offset_evt(struct hci_dev *hdev, void *data,
4992 struct sk_buff *skb)
4994 struct hci_ev_clock_offset *ev = data;
4995 struct hci_conn *conn;
4997 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5001 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5002 if (conn && !ev->status) {
5003 struct inquiry_entry *ie;
5005 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
5007 ie->data.clock_offset = ev->clock_offset;
5008 ie->timestamp = jiffies;
5012 hci_dev_unlock(hdev);
/* Handle an HCI Connection Packet Type Changed event: record the new
 * packet type on the connection when the change succeeded.
 */
5015 static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data,
5016 struct sk_buff *skb)
5018 struct hci_ev_pkt_type_change *ev = data;
5019 struct hci_conn *conn;
5021 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5025 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5026 if (conn && !ev->status)
5027 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
5029 hci_dev_unlock(hdev);
/* Handle an HCI Page Scan Repetition Mode Change event: refresh the
 * cached page-scan mode in the inquiry cache entry for the peer.
 */
5032 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data,
5033 struct sk_buff *skb)
5035 struct hci_ev_pscan_rep_mode *ev = data;
5036 struct inquiry_entry *ie;
5038 bt_dev_dbg(hdev, "");
5042 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5044 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
5045 ie->timestamp = jiffies;
5048 hci_dev_unlock(hdev);
/* Handle Inquiry Result with RSSI: the event comes in two wire formats
 * (with or without a pscan_mode byte), distinguished here by total
 * payload size. Each entry is pulled with bounds checking, cached and
 * reported to mgmt as a found device.
 * NOTE(review): truncated extract — some declarations, braces and the
 * exact sizes passed to hci_ev_skb_pull() are not visible here.
 */
5051 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,
5052 struct sk_buff *skb)
5054 struct hci_ev_inquiry_result_rssi *ev = edata;
5055 struct inquiry_data data;
5058 bt_dev_dbg(hdev, "num_rsp %d", ev->num);
/* Results during periodic inquiry are ignored. */
5063 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
/* Format 1: entries that include the legacy pscan_mode byte. */
5068 if (skb->len == array_size(ev->num,
5069 sizeof(struct inquiry_info_rssi_pscan))) {
5070 struct inquiry_info_rssi_pscan *info;
5072 for (i = 0; i < ev->num; i++) {
5075 info = hci_ev_skb_pull(hdev, skb,
5076 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
5079 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
5080 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
5084 bacpy(&data.bdaddr, &info->bdaddr);
5085 data.pscan_rep_mode = info->pscan_rep_mode;
5086 data.pscan_period_mode = info->pscan_period_mode;
5087 data.pscan_mode = info->pscan_mode;
5088 memcpy(data.dev_class, info->dev_class, 3);
5089 data.clock_offset = info->clock_offset;
5090 data.rssi = info->rssi;
5091 data.ssp_mode = 0x00;
5093 flags = hci_inquiry_cache_update(hdev, &data, false);
5095 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5096 info->dev_class, info->rssi,
5097 flags, NULL, 0, NULL, 0, 0);
/* Format 2: entries without the pscan_mode byte. */
5099 } else if (skb->len == array_size(ev->num,
5100 sizeof(struct inquiry_info_rssi))) {
5101 struct inquiry_info_rssi *info;
5103 for (i = 0; i < ev->num; i++) {
5106 info = hci_ev_skb_pull(hdev, skb,
5107 HCI_EV_INQUIRY_RESULT_WITH_RSSI,
5110 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
5111 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
5115 bacpy(&data.bdaddr, &info->bdaddr);
5116 data.pscan_rep_mode = info->pscan_rep_mode;
5117 data.pscan_period_mode = info->pscan_period_mode;
5118 data.pscan_mode = 0x00;
5119 memcpy(data.dev_class, info->dev_class, 3);
5120 data.clock_offset = info->clock_offset;
5121 data.rssi = info->rssi;
5122 data.ssp_mode = 0x00;
5124 flags = hci_inquiry_cache_update(hdev, &data, false);
5126 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5127 info->dev_class, info->rssi,
5128 flags, NULL, 0, NULL, 0, 0);
/* Neither size matched: malformed event. */
5131 bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x",
5132 HCI_EV_INQUIRY_RESULT_WITH_RSSI);
5135 hci_dev_unlock(hdev);
/* Handle Remote Extended Features Complete: store the feature page,
 * track the remote host's SSP/SC support on page 1, and continue BR/EDR
 * connection setup (name request or connected notification).
 * NOTE(review): truncated extract — goto labels and some braces are not
 * visible here.
 */
5138 static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data,
5139 struct sk_buff *skb)
5141 struct hci_ev_remote_ext_features *ev = data;
5142 struct hci_conn *conn;
5144 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
5148 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Bounds-check the page index before caching the feature bytes. */
5152 if (ev->page < HCI_MAX_PAGES)
5153 memcpy(conn->features[ev->page], ev->features, 8);
5155 if (!ev->status && ev->page == 0x01) {
5156 struct inquiry_entry *ie;
5158 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
5160 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5162 if (ev->features[0] & LMP_HOST_SSP) {
5163 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5165 /* It is mandatory by the Bluetooth specification that
5166 * Extended Inquiry Results are only used when Secure
5167 * Simple Pairing is enabled, but some devices violate
5170 * To make these devices work, the internal SSP
5171 * enabled flag needs to be cleared if the remote host
5172 * features do not indicate SSP support */
5173 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
5176 if (ev->features[0] & LMP_HOST_SC)
5177 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
5180 if (conn->state != BT_CONFIG)
/* Not yet known to mgmt: ask for the remote name before reporting. */
5183 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
5184 struct hci_cp_remote_name_req cp;
5185 memset(&cp, 0, sizeof(cp));
5186 bacpy(&cp.bdaddr, &conn->dst);
5187 cp.pscan_rep_mode = 0x02;
5188 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
5189 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
5190 mgmt_device_connected(hdev, conn, NULL, 0);
5192 if (!hci_outgoing_auth_needed(hdev, conn)) {
5193 conn->state = BT_CONNECTED;
5194 hci_connect_cfm(conn, ev->status);
5195 hci_conn_drop(conn);
5199 hci_dev_unlock(hdev);
/* Handle Synchronous Connection Complete (SCO/eSCO): validate the link
 * type, guard against duplicate events for the same connection, assign
 * and range-check the handle, retry eSCO setup with degraded packet
 * types on known negotiation failures, and notify the SCO driver of
 * the negotiated air mode.
 * NOTE(review): truncated extract — case labels, gotos and some braces
 * are not visible here.
 */
5202 static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data,
5203 struct sk_buff *skb)
5205 struct hci_ev_sync_conn_complete *ev = data;
5206 struct hci_conn *conn;
5207 u8 status = ev->status;
5209 switch (ev->link_type) {
5214 /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type
5215 * for HCI_Synchronous_Connection_Complete is limited to
5216 * either SCO or eSCO
5218 bt_dev_err(hdev, "Ignoring connect complete event for invalid link type");
5222 bt_dev_dbg(hdev, "status 0x%2.2x", status);
5226 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
5228 if (ev->link_type == ESCO_LINK)
5231 /* When the link type in the event indicates SCO connection
5232 * and lookup of the connection object fails, then check
5233 * if an eSCO connection object exists.
5235 * The core limits the synchronous connections to either
5236 * SCO or eSCO. The eSCO connection is preferred and tried
5237 * to be setup first and until successfully established,
5238 * the link type will be hinted as eSCO.
5240 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
5245 /* The HCI_Synchronous_Connection_Complete event is only sent once per connection.
5246 * Processing it more than once per connection can corrupt kernel memory.
5248 * As the connection handle is set here for the first time, it indicates
5249 * whether the connection is already set up.
5251 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
5252 bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection");
5258 conn->handle = __le16_to_cpu(ev->handle);
5259 if (conn->handle > HCI_CONN_HANDLE_MAX) {
5260 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x",
5261 conn->handle, HCI_CONN_HANDLE_MAX);
5262 status = HCI_ERROR_INVALID_PARAMETERS;
5263 conn->state = BT_CLOSED;
5267 conn->state = BT_CONNECTED;
5268 conn->type = ev->link_type;
5270 hci_debugfs_create_conn(conn);
5271 hci_conn_add_sysfs(conn);
/* Known eSCO negotiation failures: retry with reduced packet types. */
5274 case 0x10: /* Connection Accept Timeout */
5275 case 0x0d: /* Connection Rejected due to Limited Resources */
5276 case 0x11: /* Unsupported Feature or Parameter Value */
5277 case 0x1c: /* SCO interval rejected */
5278 case 0x1a: /* Unsupported Remote Feature */
5279 case 0x1e: /* Invalid LMP Parameters */
5280 case 0x1f: /* Unspecified error */
5281 case 0x20: /* Unsupported LMP Parameter value */
5283 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
5284 (hdev->esco_type & EDR_ESCO_MASK);
5285 if (hci_setup_sync(conn, conn->link->handle))
5291 conn->state = BT_CLOSED;
5295 bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode);
5296 /* Notify only in case of SCO over HCI transport data path which
5297 * is zero and non-zero value shall be non-HCI transport data path
5299 if (conn->codec.data_path == 0 && hdev->notify) {
5300 switch (ev->air_mode) {
5302 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD);
5305 hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP);
5310 hci_connect_cfm(conn, status);
5315 hci_dev_unlock(hdev);
/* Walk EIR (Extended Inquiry Response) TLV fields to find the length of
 * the meaningful data, stopping within eir_len bytes.
 * NOTE(review): truncated extract — the zero-length-field termination
 * check and the return statement are not visible here.
 */
5318 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
5322 while (parsed < eir_len) {
5323 u8 field_len = eir[0];
5328 parsed += field_len + 1;
5329 eir += field_len + 1;
/* Handle an Extended Inquiry Result event: length-check the entry
 * array, update the inquiry cache per entry and forward each found
 * device (with its EIR data) to mgmt.
 * NOTE(review): truncated extract — some declarations and braces are
 * not visible here.
 */
5335 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,
5336 struct sk_buff *skb)
5338 struct hci_ev_ext_inquiry_result *ev = edata;
5339 struct inquiry_data data;
/* Validate that the skb really holds ev->num result entries. */
5343 if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT,
5344 flex_array_size(ev, info, ev->num)))
5347 bt_dev_dbg(hdev, "num %d", ev->num);
5352 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
5357 for (i = 0; i < ev->num; i++) {
5358 struct extended_inquiry_info *info = &ev->info[i];
5362 bacpy(&data.bdaddr, &info->bdaddr);
5363 data.pscan_rep_mode = info->pscan_rep_mode;
5364 data.pscan_period_mode = info->pscan_period_mode;
5365 data.pscan_mode = 0x00;
5366 memcpy(data.dev_class, info->dev_class, 3);
5367 data.clock_offset = info->clock_offset;
5368 data.rssi = info->rssi;
/* EIR results imply SSP support on the remote. */
5369 data.ssp_mode = 0x01;
5371 if (hci_dev_test_flag(hdev, HCI_MGMT))
5372 name_known = eir_get_data(info->data,
5374 EIR_NAME_COMPLETE, NULL);
5378 flags = hci_inquiry_cache_update(hdev, &data, name_known);
5380 eir_len = eir_get_length(info->data, sizeof(info->data));
5382 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
5383 info->dev_class, info->rssi,
5384 flags, info->data, eir_len, NULL, 0, 0);
5387 hci_dev_unlock(hdev);
/* Handle Encryption Key Refresh Complete: for LE links, promote the
 * pending security level on success, disconnect on failure, and confirm
 * the connection or the authentication to upper layers.
 */
5390 static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data,
5391 struct sk_buff *skb)
5393 struct hci_ev_key_refresh_complete *ev = data;
5394 struct hci_conn *conn;
5396 bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status,
5397 __le16_to_cpu(ev->handle));
5401 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5405 /* For BR/EDR the necessary steps are taken through the
5406 * auth_complete event.
5408 if (conn->type != LE_LINK)
5412 conn->sec_level = conn->pending_sec_level;
5414 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
/* Key refresh failure on an established link: force disconnect. */
5416 if (ev->status && conn->state == BT_CONNECTED) {
5417 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
5418 hci_conn_drop(conn);
5422 if (conn->state == BT_CONFIG) {
5424 conn->state = BT_CONNECTED;
5426 hci_connect_cfm(conn, ev->status);
5427 hci_conn_drop(conn);
5429 hci_auth_cfm(conn, ev->status);
5431 hci_conn_hold(conn);
5432 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
5433 hci_conn_drop(conn);
5437 hci_dev_unlock(hdev);
/* Derive the authentication requirement to send in an IO Capability
 * Reply, reconciling the remote's request with our own auth type and
 * both sides' IO capabilities (MITM only when both can support it).
 */
5440 static u8 hci_get_auth_req(struct hci_conn *conn)
5443 if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
5444 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5445 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5446 return HCI_AT_GENERAL_BONDING_MITM;
5450 /* If remote requests no-bonding follow that lead */
5451 if (conn->remote_auth == HCI_AT_NO_BONDING ||
5452 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
5453 return conn->remote_auth | (conn->auth_type & 0x01);
5455 /* If both remote and local have enough IO capabilities, require
5458 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
5459 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
5460 return conn->remote_auth | 0x01;
5462 /* No MITM protection possible so ignore remote requirement */
5463 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Report whether usable BR/EDR OOB pairing data is stored for the
 * peer, honoring Secure Connections and SC-Only mode constraints.
 * NOTE(review): truncated extract — the early return when no OOB data
 * exists and the final return values are not visible here.
 */
5466 static u8 bredr_oob_data_present(struct hci_conn *conn)
5468 struct hci_dev *hdev = conn->hdev;
5469 struct oob_data *data;
5471 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
5475 if (bredr_sc_enabled(hdev)) {
5476 /* When Secure Connections is enabled, then just
5477 * return the present value stored with the OOB
5478 * data. The stored value contains the right present
5479 * information. However it can only be trusted when
5480 * not in Secure Connection Only mode.
5482 if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
5483 return data->present;
5485 /* When Secure Connections Only mode is enabled, then
5486 * the P-256 values are required. If they are not
5487 * available, then do not declare that OOB data is
5490 if (!memcmp(data->rand256, ZERO_KEY, 16) ||
5491 !memcmp(data->hash256, ZERO_KEY, 16))
5497 /* When Secure Connections is not enabled or actually
5498 * not supported by the hardware, then check that if
5499 * P-192 data values are present.
5501 if (!memcmp(data->rand192, ZERO_KEY, 16) ||
5502 !memcmp(data->hash192, ZERO_KEY, 16))
/* Handle an HCI IO Capability Request: if pairing is allowed, reply
 * with our (possibly downgraded) IO capability, derived authentication
 * requirement and OOB-data presence; otherwise send a negative reply.
 */
5508 static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
5509 struct sk_buff *skb)
5511 struct hci_ev_io_capa_request *ev = data;
5512 struct hci_conn *conn;
5514 bt_dev_dbg(hdev, "");
5518 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5522 hci_conn_hold(conn);
5524 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5527 /* Allow pairing if we're pairable, the initiators of the
5528 * pairing or if the remote is not requesting bonding.
5530 if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
5531 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
5532 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
5533 struct hci_cp_io_capability_reply cp;
5535 bacpy(&cp.bdaddr, &ev->bdaddr);
5536 /* Change the IO capability from KeyboardDisplay
5537 * to DisplayYesNo as it is not supported by BT spec. */
5538 cp.capability = (conn->io_capability == 0x04) ?
5539 HCI_IO_DISPLAY_YESNO : conn->io_capability;
5541 /* If we are initiators, there is no remote information yet */
5542 if (conn->remote_auth == 0xff) {
5543 /* Request MITM protection if our IO caps allow it
5544 * except for the no-bonding case.
5546 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5547 conn->auth_type != HCI_AT_NO_BONDING)
5548 conn->auth_type |= 0x01;
5550 conn->auth_type = hci_get_auth_req(conn);
5553 /* If we're not bondable, force one of the non-bondable
5554 * authentication requirement values.
5556 if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
5557 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
5559 cp.authentication = conn->auth_type;
5560 cp.oob_data = bredr_oob_data_present(conn);
5562 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
5565 struct hci_cp_io_capability_neg_reply cp;
5567 bacpy(&cp.bdaddr, &ev->bdaddr);
5568 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
5570 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
5575 hci_dev_unlock(hdev);
/* Handle an HCI IO Capability Response: cache the remote's IO
 * capability and authentication requirement on the connection.
 */
5578 static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data,
5579 struct sk_buff *skb)
5581 struct hci_ev_io_capa_reply *ev = data;
5582 struct hci_conn *conn;
5584 bt_dev_dbg(hdev, "");
5588 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5592 conn->remote_cap = ev->capability;
5593 conn->remote_auth = ev->authentication;
5596 hci_dev_unlock(hdev);
/* Handle a User Confirmation Request (numeric comparison): reject when
 * our MITM requirement cannot be met, auto-accept when neither side
 * requires MITM (optionally after a configured delay), and otherwise
 * defer the decision to user space via mgmt.
 * NOTE(review): truncated extract — goto labels and some braces are not
 * visible here.
 */
5599 static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data,
5600 struct sk_buff *skb)
5602 struct hci_ev_user_confirm_req *ev = data;
5603 int loc_mitm, rem_mitm, confirm_hint = 0;
5604 struct hci_conn *conn;
5606 bt_dev_dbg(hdev, "");
5610 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5613 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5617 loc_mitm = (conn->auth_type & 0x01);
5618 rem_mitm = (conn->remote_auth & 0x01);
5620 /* If we require MITM but the remote device can't provide that
5621 * (it has NoInputNoOutput) then reject the confirmation
5622 * request. We check the security level here since it doesn't
5623 * necessarily match conn->auth_type.
5625 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
5626 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
5627 bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM");
5628 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
5629 sizeof(ev->bdaddr), &ev->bdaddr);
5633 /* If no side requires MITM protection; auto-accept */
5634 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
5635 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
5637 /* If we're not the initiators request authorization to
5638 * proceed from user space (mgmt_user_confirm with
5639 * confirm_hint set to 1). The exception is if neither
5640 * side had MITM or if the local IO capability is
5641 * NoInputNoOutput, in which case we do auto-accept
5643 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
5644 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
5645 (loc_mitm || rem_mitm)) {
5646 bt_dev_dbg(hdev, "Confirming auto-accept as acceptor");
5651 /* If there already exists link key in local host, leave the
5652 * decision to user space since the remote device could be
5653 * legitimate or malicious.
5655 if (hci_find_link_key(hdev, &ev->bdaddr)) {
5656 bt_dev_dbg(hdev, "Local host already has link key");
5661 BT_DBG("Auto-accept of user confirmation with %ums delay",
5662 hdev->auto_accept_delay);
5664 if (hdev->auto_accept_delay > 0) {
5665 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
5666 queue_delayed_work(conn->hdev->workqueue,
5667 &conn->auto_accept_work, delay);
5671 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
5672 sizeof(ev->bdaddr), &ev->bdaddr);
/* Let user space decide, passing the passkey and confirm hint. */
5677 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
5678 le32_to_cpu(ev->passkey), confirm_hint);
5681 hci_dev_unlock(hdev);
/* Handle a User Passkey Request: forward it to user space via mgmt
 * when the management interface is active.
 */
5684 static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data,
5685 struct sk_buff *skb)
5687 struct hci_ev_user_passkey_req *ev = data;
5689 bt_dev_dbg(hdev, "");
5691 if (hci_dev_test_flag(hdev, HCI_MGMT))
5692 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle a User Passkey Notification: record the passkey to display,
 * reset the entered-digit counter and notify user space via mgmt.
 */
5695 static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data,
5696 struct sk_buff *skb)
5698 struct hci_ev_user_passkey_notify *ev = data;
5699 struct hci_conn *conn;
5701 bt_dev_dbg(hdev, "");
5703 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5707 conn->passkey_notify = __le32_to_cpu(ev->passkey);
5708 conn->passkey_entered = 0;
5710 if (hci_dev_test_flag(hdev, HCI_MGMT))
5711 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5712 conn->dst_type, conn->passkey_notify,
5713 conn->passkey_entered);
/* Handle a Keypress Notification during passkey entry: keep the
 * entered-digit counter in sync with the remote keyboard and report
 * progress to user space via mgmt.
 * NOTE(review): truncated extract — the switch header and break
 * statements are not visible here.
 */
5716 static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data,
5717 struct sk_buff *skb)
5719 struct hci_ev_keypress_notify *ev = data;
5720 struct hci_conn *conn;
5722 bt_dev_dbg(hdev, "");
5724 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5729 case HCI_KEYPRESS_STARTED:
5730 conn->passkey_entered = 0;
5733 case HCI_KEYPRESS_ENTERED:
5734 conn->passkey_entered++;
5737 case HCI_KEYPRESS_ERASED:
5738 conn->passkey_entered--;
5741 case HCI_KEYPRESS_CLEARED:
5742 conn->passkey_entered = 0;
5745 case HCI_KEYPRESS_COMPLETED:
5749 if (hci_dev_test_flag(hdev, HCI_MGMT))
5750 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
5751 conn->dst_type, conn->passkey_notify,
5752 conn->passkey_entered);
/* Handle Simple Pairing Complete: reset the remote auth requirement and
 * report authentication failure to mgmt when we were not the initiator.
 */
5755 static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
5756 struct sk_buff *skb)
5758 struct hci_ev_simple_pair_complete *ev = data;
5759 struct hci_conn *conn;
5761 bt_dev_dbg(hdev, "");
5765 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5769 /* Reset the authentication requirement to unknown */
5770 conn->remote_auth = 0xff;
5772 /* To avoid duplicate auth_failed events to user space we check
5773 * the HCI_CONN_AUTH_PEND flag which will be set if we
5774 * initiated the authentication. A traditional auth_complete
5775 * event gets always produced as initiator and is also mapped to
5776 * the mgmt_auth_failed event */
5777 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
5778 mgmt_auth_failed(conn, ev->status);
5780 hci_conn_drop(conn);
5783 hci_dev_unlock(hdev);
/* Handle Remote Host Supported Features Notification: cache the host
 * feature page on the connection and record SSP support in the inquiry
 * cache entry for the peer.
 */
5786 static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data,
5787 struct sk_buff *skb)
5789 struct hci_ev_remote_host_features *ev = data;
5790 struct inquiry_entry *ie;
5791 struct hci_conn *conn;
5793 bt_dev_dbg(hdev, "");
5797 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
5799 memcpy(conn->features[1], ev->features, 8);
5801 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
5803 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
5805 hci_dev_unlock(hdev);
/* HCI Remote OOB Data Request event: replies with locally stored out-of-band
 * pairing data for the requesting peer, or sends a negative reply when no OOB
 * data is stored. Uses the extended (P-256) reply when BR/EDR Secure
 * Connections is enabled, zeroing the P-192 values in SC-only mode.
 * NOTE(review): several if/else and brace lines are elided in this extract.
 */
5808 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata,
5809 struct sk_buff *skb)
5811 struct hci_ev_remote_oob_data_request *ev = edata;
5812 struct oob_data *data;
5814 bt_dev_dbg(hdev, "");
/* OOB pairing data is only managed when the mgmt interface is in use. */
5818 if (!hci_dev_test_flag(hdev, HCI_MGMT))
5821 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data for this peer: send the negative reply. */
5823 struct hci_cp_remote_oob_data_neg_reply cp;
5825 bacpy(&cp.bdaddr, &ev->bdaddr);
5826 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
/* Secure Connections capable: reply with both P-192 and P-256 values. */
5831 if (bredr_sc_enabled(hdev)) {
5832 struct hci_cp_remote_oob_ext_data_reply cp;
5834 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-only mode the legacy P-192 values must not be used. */
5835 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
5836 memset(cp.hash192, 0, sizeof(cp.hash192));
5837 memset(cp.rand192, 0, sizeof(cp.rand192));
5839 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
5840 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
5842 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
5843 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
5845 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
/* Legacy reply path: only the P-192 hash and randomizer are sent. */
5848 struct hci_cp_remote_oob_data_reply cp;
5850 bacpy(&cp.bdaddr, &ev->bdaddr);
5851 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
5852 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
5854 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
5859 hci_dev_unlock(hdev);
5862 #if IS_ENABLED(CONFIG_BT_HS)
/* AMP Channel Selected event (CONFIG_BT_HS): looks up the physical-link
 * connection by handle and kicks off reading the local AMP association's
 * final data for it.
 */
5863 static void hci_chan_selected_evt(struct hci_dev *hdev, void *data,
5864 struct sk_buff *skb)
5866 struct hci_ev_channel_selected *ev = data;
5867 struct hci_conn *hcon;
5869 bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle);
5871 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5875 amp_read_loc_assoc_final_data(hdev, hcon);
/* AMP Physical Link Complete event: marks the AMP connection as connected,
 * copies the peer address from the underlying BR/EDR connection, registers
 * debugfs/sysfs entries and confirms the physical link to the AMP manager.
 * NOTE(review): error-status and null-check lines are elided in this extract.
 */
5878 static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data,
5879 struct sk_buff *skb)
5881 struct hci_ev_phy_link_complete *ev = data;
5882 struct hci_conn *hcon, *bredr_hcon;
5884 bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle,
5889 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* The AMP link piggybacks on the BR/EDR connection owned by the AMP mgr. */
5901 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
5903 hcon->state = BT_CONNECTED;
5904 bacpy(&hcon->dst, &bredr_hcon->dst);
/* Hold+drop arms the disconnect timeout without keeping a reference. */
5906 hci_conn_hold(hcon);
5907 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
5908 hci_conn_drop(hcon);
5910 hci_debugfs_create_conn(hcon);
5911 hci_conn_add_sysfs(hcon);
5913 amp_physical_cfm(bredr_hcon, hcon);
5916 hci_dev_unlock(hdev);
/* AMP Logical Link Complete event: creates an hci_chan for the new logical
 * link, records its handle, and if the AMP manager has a pending BR/EDR
 * L2CAP channel, confirms the logical link to L2CAP with the AMP block MTU.
 * NOTE(review): null-check and closing-brace lines are elided in this extract.
 */
5919 static void hci_loglink_complete_evt(struct hci_dev *hdev, void *data,
5920 struct sk_buff *skb)
5922 struct hci_ev_logical_link_complete *ev = data;
5923 struct hci_conn *hcon;
5924 struct hci_chan *hchan;
5925 struct amp_mgr *mgr;
5927 bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
5928 le16_to_cpu(ev->handle), ev->phy_handle, ev->status);
5930 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
/* Create AMP hchan */
5935 hchan = hci_chan_create(hcon);
5939 hchan->handle = le16_to_cpu(ev->handle);
5942 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
5944 mgr = hcon->amp_mgr;
5945 if (mgr && mgr->bredr_chan) {
5946 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
5948 l2cap_chan_lock(bredr_chan);
/* AMP data path uses the controller's block MTU. */
5950 bredr_chan->conn->mtu = hdev->block_mtu;
5951 l2cap_logical_cfm(bredr_chan, hchan, 0);
5952 hci_conn_hold(hcon);
5954 l2cap_chan_unlock(bredr_chan);
/* AMP Disconnection Logical Link Complete event: finds the AMP channel by
 * logical-link handle and tears it down with the reported reason.
 */
5958 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data,
5959 struct sk_buff *skb)
5961 struct hci_ev_disconn_logical_link_complete *ev = data;
5962 struct hci_chan *hchan;
5964 bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x",
5965 le16_to_cpu(ev->handle), ev->status);
5972 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
/* Only AMP channels are handled here. */
5973 if (!hchan || !hchan->amp)
5976 amp_destroy_logical_link(hchan, ev->reason);
5979 hci_dev_unlock(hdev);
/* AMP Disconnection Physical Link Complete event: closes the matching AMP
 * connection and notifies the protocol layers of the disconnect reason.
 */
5982 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data,
5983 struct sk_buff *skb)
5985 struct hci_ev_disconn_phy_link_complete *ev = data;
5986 struct hci_conn *hcon;
5988 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
/* Only act on connections that are actually AMP physical links. */
5995 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5996 if (hcon && hcon->type == AMP_LINK) {
5997 hcon->state = BT_CLOSED;
5998 hci_disconn_cfm(hcon, ev->reason);
6002 hci_dev_unlock(hdev);
/* Fill in the initiator/responder address pair on a newly established LE
 * connection. The first branch handles the central (outgoing) role where the
 * peer is the responder; the later branch handles the peripheral (incoming)
 * role where the local adapter is the responder. A controller-provided Local
 * RPA, when non-zero, takes precedence over hdev->rpa / the static address.
 * NOTE(review): the role test and several braces are elided in this extract.
 */
6006 static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr,
6007 u8 bdaddr_type, bdaddr_t *local_rpa)
/* Outgoing connection: the peer (bdaddr) is the responder. */
6010 conn->dst_type = bdaddr_type;
6011 conn->resp_addr_type = bdaddr_type;
6012 bacpy(&conn->resp_addr, bdaddr);
6014 /* Check if the controller has set a Local RPA then it must be
6015 * used instead or hdev->rpa.
6017 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
6018 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
6019 bacpy(&conn->init_addr, local_rpa);
6020 } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) {
6021 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
6022 bacpy(&conn->init_addr, &conn->hdev->rpa);
/* No privacy: fall back to the adapter's identity address. */
6024 hci_copy_identity_address(conn->hdev, &conn->init_addr,
6025 &conn->init_addr_type);
/* Incoming connection: the local adapter is the responder. */
6028 conn->resp_addr_type = conn->hdev->adv_addr_type;
6029 /* Check if the controller has set a Local RPA then it must be
6030 * used instead or hdev->rpa.
6032 if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) {
6033 conn->resp_addr_type = ADDR_LE_DEV_RANDOM;
6034 bacpy(&conn->resp_addr, local_rpa);
6035 } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) {
6036 /* In case of ext adv, resp_addr will be updated in
6037 * Adv Terminated event.
6039 if (!ext_adv_capable(conn->hdev))
6040 bacpy(&conn->resp_addr,
6041 &conn->hdev->random_addr);
6043 bacpy(&conn->resp_addr, &conn->hdev->bdaddr);
6046 conn->init_addr_type = bdaddr_type;
6047 bacpy(&conn->init_addr, bdaddr);
6049 /* For incoming connections, set the default minimum
6050 * and maximum connection interval. They will be used
6051 * to check if the parameters are in range and if not
6052 * trigger the connection update procedure.
6054 conn->le_conn_min_interval = conn->hdev->le_conn_min_interval;
6055 conn->le_conn_max_interval = conn->hdev->le_conn_max_interval;
/* Common worker for the LE Connection Complete and LE Enhanced Connection
 * Complete events. Finds or creates the hci_conn for the peer, resolves the
 * address via stored IRKs, validates the handle, marks the connection up,
 * optionally requests remote features, and cleans up any pending-connection
 * parameters that triggered the connect.
 * NOTE(review): many guard/label/brace lines are elided in this extract;
 * treat the visible statements as a partial view of the real function.
 */
6059 static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
6060 bdaddr_t *bdaddr, u8 bdaddr_type,
6061 bdaddr_t *local_rpa, u8 role, u16 handle,
6062 u16 interval, u16 latency,
6063 u16 supervision_timeout)
6065 struct hci_conn_params *params;
6066 struct hci_conn *conn;
6067 struct smp_irk *irk;
6072 /* All controllers implicitly stop advertising in the event of a
6073 * connection, so ensure that the state bit is cleared.
6075 hci_dev_clear_flag(hdev, HCI_LE_ADV);
6077 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
6079 /* In case of error status and there is no connection pending
6080 * just unlock as there is nothing to cleanup.
/* No pending object: allocate a fresh connection for this peer. */
6085 conn = hci_conn_add(hdev, LE_LINK, bdaddr, role);
6087 bt_dev_err(hdev, "no memory for new connection");
6091 conn->dst_type = bdaddr_type;
6093 /* If we didn't have a hci_conn object previously
6094 * but we're in central role this must be something
6095 * initiated using an accept list. Since accept list based
6096 * connections are not "first class citizens" we don't
6097 * have full tracking of them. Therefore, we go ahead
6098 * with a "best effort" approach of determining the
6099 * initiator address based on the HCI_PRIVACY flag.
6102 conn->resp_addr_type = bdaddr_type;
6103 bacpy(&conn->resp_addr, bdaddr);
6104 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6105 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
6106 bacpy(&conn->init_addr, &hdev->rpa);
6108 hci_copy_identity_address(hdev,
6110 &conn->init_addr_type);
6115 /* LE auto connect */
6116 bacpy(&conn->dst, bdaddr);
/* The connect attempt completed; stop its timeout work. */
6118 cancel_delayed_work(&conn->le_conn_timeout);
6121 /* The HCI_LE_Connection_Complete event is only sent once per connection.
6122 * Processing it more than once per connection can corrupt kernel memory.
6124 * As the connection handle is set here for the first time, it indicates
6125 * whether the connection is already set up.
6127 if (conn->handle != HCI_CONN_HANDLE_UNSET) {
6128 bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection");
6132 le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa);
6134 /* Lookup the identity address from the stored connection
6135 * address and address type.
6137 * When establishing connections to an identity address, the
6138 * connection procedure will store the resolvable random
6139 * address first. Now if it can be converted back into the
6140 * identity address, start using the identity address from
6143 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
6145 bacpy(&conn->dst, &irk->bdaddr);
6146 conn->dst_type = irk->addr_type;
6149 conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL);
/* Controller gave an out-of-range handle: treat as a failed connect. */
6151 if (handle > HCI_CONN_HANDLE_MAX) {
6152 bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle,
6153 HCI_CONN_HANDLE_MAX);
6154 status = HCI_ERROR_INVALID_PARAMETERS;
6157 /* All connection failure handling is taken care of by the
6158 * hci_conn_failed function which is triggered by the HCI
6159 * request completion callbacks used for connecting.
6164 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
6165 addr_type = BDADDR_LE_PUBLIC;
6167 addr_type = BDADDR_LE_RANDOM;
6169 /* Drop the connection if the device is blocked */
6170 if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) {
6171 hci_conn_drop(conn);
6175 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
6176 mgmt_device_connected(hdev, conn, NULL, 0);
6178 conn->sec_level = BT_SECURITY_LOW;
6179 conn->handle = handle;
6180 conn->state = BT_CONFIG;
6182 /* Store current advertising instance as connection advertising instance
6183 * when sotfware rotation is in use so it can be re-enabled when
6186 if (!ext_adv_capable(hdev))
6187 conn->adv_instance = hdev->cur_adv_instance;
6189 conn->le_conn_interval = interval;
6190 conn->le_conn_latency = latency;
6191 conn->le_supv_timeout = supervision_timeout;
6193 hci_debugfs_create_conn(conn);
6194 hci_conn_add_sysfs(conn);
6196 /* The remote features procedure is defined for central
6197 * role only. So only in case of an initiated connection
6198 * request the remote features.
6200 * If the local controller supports peripheral-initiated features
6201 * exchange, then requesting the remote features in peripheral
6202 * role is possible. Otherwise just transition into the
6203 * connected state without requesting the remote features.
6206 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) {
6207 struct hci_cp_le_read_remote_features cp;
6209 cp.handle = __cpu_to_le16(conn->handle);
6211 hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
6214 hci_conn_hold(conn);
/* No feature exchange: go straight to connected and notify users. */
6216 conn->state = BT_CONNECTED;
6217 hci_connect_cfm(conn, status);
/* Release the pending-connection bookkeeping that triggered this. */
6220 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
6223 list_del_init(&params->action);
6225 hci_conn_drop(params->conn);
6226 hci_conn_put(params->conn);
6227 params->conn = NULL;
6232 hci_update_passive_scan(hdev);
6233 hci_dev_unlock(hdev);
/* LE Connection Complete event: thin wrapper that converts the little-endian
 * event fields and delegates to le_conn_complete_evt() (no Local RPA in the
 * legacy event, hence the NULL).
 */
6236 static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data,
6237 struct sk_buff *skb)
6239 struct hci_ev_le_conn_complete *ev = data;
6241 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6243 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6244 NULL, ev->role, le16_to_cpu(ev->handle),
6245 le16_to_cpu(ev->interval),
6246 le16_to_cpu(ev->latency),
6247 le16_to_cpu(ev->supervision_timeout));
/* LE Enhanced Connection Complete event: same as the legacy variant but the
 * event additionally carries the controller's Local RPA, which is forwarded
 * to le_conn_complete_evt() for address bookkeeping.
 */
6250 static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data,
6251 struct sk_buff *skb)
6253 struct hci_ev_le_enh_conn_complete *ev = data;
6255 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6257 le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type,
6258 &ev->local_rpa, ev->role, le16_to_cpu(ev->handle),
6259 le16_to_cpu(ev->interval),
6260 le16_to_cpu(ev->latency),
6261 le16_to_cpu(ev->supervision_timeout));
/* LE Advertising Set Terminated event: removes or disables the terminated
 * advertising instance, clears HCI_LE_ADV when nothing is left advertising,
 * and records the instance / responder address on the connection that
 * terminated it (for extended advertising the resp_addr is only known here).
 * NOTE(review): several status checks, braces and goto labels are elided in
 * this extract.
 */
6264 static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data,
6265 struct sk_buff *skb)
6267 struct hci_evt_le_ext_adv_set_term *ev = data;
6268 struct hci_conn *conn;
6269 struct adv_info *adv, *n;
6271 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6273 /* The Bluetooth Core 5.3 specification clearly states that this event
6274 * shall not be sent when the Host disables the advertising set. So in
6275 * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event.
6277 * When the Host disables an advertising set, all cleanup is done via
6278 * its command callback and not needed to be duplicated here.
6280 if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) {
6281 bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event");
6287 adv = hci_find_adv_instance(hdev, ev->handle);
6293 /* Remove advertising as it has been terminated */
6294 hci_remove_adv_instance(hdev, ev->handle);
6295 mgmt_advertising_removed(NULL, hdev, ev->handle);
/* If no other instance is still enabled, advertising has fully stopped. */
6297 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
6302 /* We are no longer advertising, clear HCI_LE_ADV */
6303 hci_dev_clear_flag(hdev, HCI_LE_ADV);
6308 adv->enabled = false;
6310 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle));
6312 /* Store handle in the connection so the correct advertising
6313 * instance can be re-enabled when disconnected.
6315 conn->adv_instance = ev->handle;
/* resp_addr only needs fixing up for a random own-address still unset. */
6317 if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM ||
6318 bacmp(&conn->resp_addr, BDADDR_ANY))
6322 bacpy(&conn->resp_addr, &hdev->random_addr);
6327 bacpy(&conn->resp_addr, &adv->random_addr);
6331 hci_dev_unlock(hdev);
/* LE Connection Update Complete event: stores the new connection interval,
 * latency and supervision timeout on the connection and reports the outcome
 * (success or failure) to the management interface.
 * NOTE(review): null-check/brace lines are elided in this extract.
 */
6334 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
6335 struct sk_buff *skb)
6337 struct hci_ev_le_conn_update_complete *ev = data;
6338 struct hci_conn *conn;
6340 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6347 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* On failure only notify userspace; the old parameters stay in effect. */
6351 hci_dev_unlock(hdev);
6352 mgmt_le_conn_update_failed(hdev, &conn->dst,
6353 conn->type, conn->dst_type, ev->status);
6357 conn->le_conn_interval = le16_to_cpu(ev->interval);
6358 conn->le_conn_latency = le16_to_cpu(ev->latency);
6359 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
6362 hci_dev_unlock(hdev);
6365 mgmt_le_conn_updated(hdev, &conn->dst, conn->type,
6366 conn->dst_type, conn->le_conn_interval,
6367 conn->le_conn_latency, conn->le_supv_timeout);
6371 /* This function requires the caller holds hdev->lock */
/* Decide, based on an incoming advertising report, whether we should attempt
 * an LE connection to the advertiser. Only connectable PDUs are considered;
 * blocked devices, suspended adapters and controllers that cannot open a new
 * central connection while in peripheral role are filtered out. A matching
 * pend_le_conns entry with a suitable auto_connect policy triggers
 * hci_connect_le(); the created conn may be stored back on the params so the
 * attempt can be aborted if the params disappear.
 * NOTE(review): several early returns, braces and goto targets are elided in
 * this extract.
 */
6372 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
6374 u8 addr_type, bool addr_resolved,
6377 struct hci_conn *conn;
6378 struct hci_conn_params *params;
6380 /* If the event is not connectable don't proceed further */
6381 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
6384 /* Ignore if the device is blocked or hdev is suspended */
6385 if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) ||
6389 /* Most controller will fail if we try to create new connections
6390 * while we have an existing one in peripheral role.
6392 if (hdev->conn_hash.le_num_peripheral > 0 &&
6393 (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) ||
6394 !(hdev->le_states[3] & 0x10)))
6397 /* If we're not connectable only connect devices that we have in
6398 * our pend_le_conns list.
6400 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr,
6405 if (!params->explicit_connect) {
6406 switch (params->auto_connect) {
6407 case HCI_AUTO_CONN_DIRECT:
6408 /* Only devices advertising with ADV_DIRECT_IND are
6409 * triggering a connection attempt. This is allowing
6410 * incoming connections from peripheral devices.
6412 if (adv_type != LE_ADV_DIRECT_IND)
6415 case HCI_AUTO_CONN_ALWAYS:
6416 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
6417 * are triggering a connection attempt. This means
6418 * that incoming connections from peripheral device are
6419 * accepted and also outgoing connections to peripheral
6420 * devices are established when found.
6428 conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
6429 BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
6431 if (!IS_ERR(conn)) {
6432 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
6433 * by higher layer that tried to connect, if no then
6434 * store the pointer since we don't really have any
6435 * other owner of the object besides the params that
6436 * triggered it. This way we can abort the connection if
6437 * the parameters get removed and keep the reference
6438 * count consistent once the connection is established.
6441 if (!params->explicit_connect)
6442 params->conn = hci_conn_get(conn);
6447 switch (PTR_ERR(conn)) {
6449 /* If hci_connect() returns -EBUSY it means there is already
6450 * an LE connection attempt going on. Since controllers don't
6451 * support more than one connection attempt at the time, we
6452 * don't consider this an error case.
6456 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Core LE advertising-report processor shared by the legacy, extended and
 * directed report handlers. It validates the PDU type and length, resolves
 * RPAs via stored IRKs, may trigger a pending connection attempt, and then
 * delivers (or merges and delivers) device-found events to the management
 * layer, handling mesh, passive-scan and SCAN_RSP-merging policies.
 * NOTE(review): numerous braces, early returns and a few statements are
 * elided in this extract; treat the flow below as a partial view.
 */
6463 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
6464 u8 bdaddr_type, bdaddr_t *direct_addr,
6465 u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
6466 bool ext_adv, bool ctl_time, u64 instant)
6468 struct discovery_state *d = &hdev->discovery;
6469 struct smp_irk *irk;
6470 struct hci_conn *conn;
6471 bool match, bdaddr_resolved;
/* Only known advertising PDU types are accepted. */
6477 case LE_ADV_DIRECT_IND:
6478 case LE_ADV_SCAN_IND:
6479 case LE_ADV_NONCONN_IND:
6480 case LE_ADV_SCAN_RSP:
6483 bt_dev_err_ratelimited(hdev, "unknown advertising packet "
6484 "type: 0x%02x", type);
/* Legacy advertising data is limited to 31 bytes by the spec. */
6488 if (!ext_adv && len > HCI_MAX_AD_LENGTH) {
6489 bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes");
6493 /* Find the end of the data in case the report contains padded zero
6494 * bytes at the end causing an invalid length value.
6496 * When data is NULL, len is 0 so there is no need for extra ptr
6497 * check as 'ptr < data + 0' is already false in such case.
6499 for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) {
6500 if (ptr + 1 + *ptr > data + len)
6504 /* Adjust for actual length. This handles the case when remote
6505 * device is advertising with incorrect data length.
6509 /* If the direct address is present, then this report is from
6510 * a LE Direct Advertising Report event. In that case it is
6511 * important to see if the address is matching the local
6512 * controller address.
6514 if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {
6515 direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,
6518 /* Only resolvable random addresses are valid for these
6519 * kind of reports and others can be ignored.
6521 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
6524 /* If the controller is not using resolvable random
6525 * addresses, then this report can be ignored.
6527 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
6530 /* If the local IRK of the controller does not match
6531 * with the resolvable random address provided, then
6532 * this report can be ignored.
6534 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
6538 /* Check if we need to convert to identity address */
6539 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
6541 bdaddr = &irk->bdaddr;
6542 bdaddr_type = irk->addr_type;
6545 bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved);
6547 /* Check if we have been requested to connect to this device.
6549 * direct_addr is set only for directed advertising reports (it is NULL
6550 * for advertising reports) and is already verified to be RPA above.
6552 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
6554 if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) {
6555 /* Store report for later inclusion by
6556 * mgmt_device_connected
6558 memcpy(conn->le_adv_data, data, len);
6559 conn->le_adv_data_len = len;
/* Non-connectable PDU types are flagged as such towards userspace. */
6562 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
6563 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6567 /* All scan results should be sent up for Mesh systems */
6568 if (hci_dev_test_flag(hdev, HCI_MESH)) {
6569 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6570 rssi, flags, data, len, NULL, 0, instant);
6574 /* Passive scanning shouldn't trigger any device found events,
6575 * except for devices marked as CONN_REPORT for which we do send
6576 * device found events, or advertisement monitoring requested.
6578 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
6579 if (type == LE_ADV_DIRECT_IND)
6583 /* Handle all adv packet in platform */
6584 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
6585 bdaddr, bdaddr_type) &&
6586 idr_is_empty(&hdev->adv_monitors_idr))
6591 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6592 rssi, flags, data, len, NULL, 0, type);
6594 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6595 rssi, flags, data, len, NULL, 0, 0);
6600 /* When receiving non-connectable or scannable undirected
6601 * advertising reports, this means that the remote device is
6602 * not connectable and then clearly indicate this in the
6603 * device found event.
6605 * When receiving a scan response, then there is no way to
6606 * know if the remote device is connectable or not. However
6607 * since scan responses are merged with a previously seen
6608 * advertising report, the flags field from that report
6611 * In the really unlikely case that a controller get confused
6612 * and just sends a scan response event, then it is marked as
6613 * not connectable as well.
6615 if (type == LE_ADV_SCAN_RSP)
6616 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
6619 /* Disable adv ind and scan rsp merging */
6620 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6621 rssi, flags, data, len, NULL, 0, type);
6623 /* If there's nothing pending either store the data from this
6624 * event or send an immediate device found event if the data
6625 * should not be stored for later.
6627 if (!ext_adv && !has_pending_adv_report(hdev)) {
6628 /* If the report will trigger a SCAN_REQ store it for
6631 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
6632 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6633 rssi, flags, data, len);
6637 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6638 rssi, flags, data, len, NULL, 0, 0);
6642 /* Check if the pending report is for the same device as the new one */
6643 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
6644 bdaddr_type == d->last_adv_addr_type);
6646 /* If the pending data doesn't match this report or this isn't a
6647 * scan response (e.g. we got a duplicate ADV_IND) then force
6648 * sending of the pending data.
6650 if (type != LE_ADV_SCAN_RSP || !match) {
6651 /* Send out whatever is in the cache, but skip duplicates */
6653 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6654 d->last_adv_addr_type, NULL,
6655 d->last_adv_rssi, d->last_adv_flags,
6657 d->last_adv_data_len, NULL, 0, 0);
6659 /* If the new report will trigger a SCAN_REQ store it for
6662 if (!ext_adv && (type == LE_ADV_IND ||
6663 type == LE_ADV_SCAN_IND)) {
6664 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
6665 rssi, flags, data, len);
6669 /* The advertising reports cannot be merged, so clear
6670 * the pending report and send out a device found event.
6672 clear_pending_adv_report(hdev);
6673 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
6674 rssi, flags, data, len, NULL, 0, 0);
6678 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
6679 * the new event is a SCAN_RSP. We can therefore proceed with
6680 * sending a merged device found event.
6682 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
6683 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
6684 d->last_adv_data, d->last_adv_data_len, data, len, 0);
6685 clear_pending_adv_report(hdev);
/* LE Advertising Report event: walks each report in the packet, pulling the
 * fixed-size info header, the variable-length data and the trailing RSSI
 * byte, then feeds each valid report into process_adv_report().
 * NOTE(review): the loop head and some brace lines are elided in this
 * extract.
 */
6689 static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
6690 struct sk_buff *skb)
6692 struct hci_ev_le_advertising_report *ev = data;
6693 u64 instant = jiffies;
6701 struct hci_ev_le_advertising_info *info;
/* Pull the per-report header; bails out on a malformed event. */
6704 info = hci_le_ev_skb_pull(hdev, skb,
6705 HCI_EV_LE_ADVERTISING_REPORT,
/* Pull the report's variable-length data (plus RSSI byte). */
6710 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT,
6714 if (info->length <= HCI_MAX_AD_LENGTH) {
/* The RSSI byte immediately follows the advertising data. */
6715 rssi = info->data[info->length];
6716 process_adv_report(hdev, info->type, &info->bdaddr,
6717 info->bdaddr_type, NULL, 0, rssi,
6718 info->data, info->length, false,
6721 bt_dev_err(hdev, "Dropping invalid advertising data");
6725 hci_dev_unlock(hdev);
/* Translate an extended advertising report's event type bitfield into the
 * corresponding legacy PDU type, so extended reports can be funneled through
 * the common process_adv_report() path. Returns LE_ADV_INVALID for
 * combinations that do not map to any legacy PDU.
 * NOTE(review): the switch head, some returns and braces are elided in this
 * extract.
 */
6728 static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type)
/* Legacy PDUs carried in an extended report map 1:1 to legacy types. */
6730 if (evt_type & LE_EXT_ADV_LEGACY_PDU) {
6732 case LE_LEGACY_ADV_IND:
6734 case LE_LEGACY_ADV_DIRECT_IND:
6735 return LE_ADV_DIRECT_IND;
6736 case LE_LEGACY_ADV_SCAN_IND:
6737 return LE_ADV_SCAN_IND;
6738 case LE_LEGACY_NONCONN_IND:
6739 return LE_ADV_NONCONN_IND;
6740 case LE_LEGACY_SCAN_RSP_ADV:
6741 case LE_LEGACY_SCAN_RSP_ADV_SCAN:
6742 return LE_ADV_SCAN_RSP;
/* True extended PDUs: derive the closest legacy type from the flag bits. */
6748 if (evt_type & LE_EXT_ADV_CONN_IND) {
6749 if (evt_type & LE_EXT_ADV_DIRECT_IND)
6750 return LE_ADV_DIRECT_IND;
6755 if (evt_type & LE_EXT_ADV_SCAN_RSP)
6756 return LE_ADV_SCAN_RSP;
6758 if (evt_type & LE_EXT_ADV_SCAN_IND)
6759 return LE_ADV_SCAN_IND;
6761 if (evt_type == LE_EXT_ADV_NON_CONN_IND ||
6762 evt_type & LE_EXT_ADV_DIRECT_IND)
6763 return LE_ADV_NONCONN_IND;
6766 bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x",
6769 return LE_ADV_INVALID;
/* LE Extended Advertising Report event: iterates over the reports, maps each
 * extended event type to a legacy PDU type and forwards valid reports to
 * process_adv_report(), marking them as extended when they are not legacy
 * PDUs carried inside an extended report.
 * NOTE(review): the loop head and brace lines are elided in this extract.
 */
6772 static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
6773 struct sk_buff *skb)
6775 struct hci_ev_le_ext_adv_report *ev = data;
6776 u64 instant = jiffies;
6784 struct hci_ev_le_ext_adv_info *info;
/* Pull the per-report header, then its variable-length data. */
6788 info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6793 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT,
6797 evt_type = __le16_to_cpu(info->type);
6798 legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type);
6799 if (legacy_evt_type != LE_ADV_INVALID) {
6800 process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
6801 info->bdaddr_type, NULL, 0,
6802 info->rssi, info->data, info->length,
6803 !(evt_type & LE_EXT_ADV_LEGACY_PDU),
6808 hci_dev_unlock(hdev);
/* Send LE Periodic Advertising Terminate Sync for the given sync handle.
 * Returns the result of hci_send_cmd().
 */
6811 static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle)
6813 struct hci_cp_le_pa_term_sync cp;
6815 memset(&cp, 0, sizeof(cp));
6818 return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp);
/* LE Periodic Advertising Sync Established event: clears the pending
 * HCI_PA_SYNC flag, asks the ISO protocol layer whether the sync should be
 * accepted, and terminates the sync if nothing accepts it.
 * (Function name keeps the upstream "estabilished" spelling.)
 */
6821 static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
6822 struct sk_buff *skb)
6824 struct hci_ev_le_pa_sync_established *ev = data;
6825 int mask = hdev->link_mode;
6828 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6835 hci_dev_clear_flag(hdev, HCI_PA_SYNC);
/* Let ISO listeners veto or accept the sync. */
6837 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags);
6838 if (!(mask & HCI_LM_ACCEPT))
6839 hci_le_pa_term_sync(hdev, ev->handle);
6841 hci_dev_unlock(hdev);
/* LE Read Remote Features Complete event: stores the remote LE feature page
 * and, if the connection was waiting in BT_CONFIG, transitions it to
 * BT_CONNECTED. Error 0x1a (unsupported remote feature) from a peer is
 * tolerated on peripheral-initiated feature exchange.
 * NOTE(review): null-check/brace lines are elided in this extract.
 */
6844 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data,
6845 struct sk_buff *skb)
6847 struct hci_ev_le_remote_feat_complete *ev = data;
6848 struct hci_conn *conn;
6850 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
6854 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6857 memcpy(conn->features[0], ev->features, 8);
6859 if (conn->state == BT_CONFIG) {
6862 /* If the local controller supports peripheral-initiated
6863 * features exchange, but the remote controller does
6864 * not, then it is possible that the error code 0x1a
6865 * for unsupported remote feature gets returned.
6867 * In this specific case, allow the connection to
6868 * transition into connected state and mark it as
6871 if (!conn->out && ev->status == 0x1a &&
6872 (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES))
6875 status = ev->status;
6877 conn->state = BT_CONNECTED;
6878 hci_connect_cfm(conn, status);
6879 hci_conn_drop(conn);
6883 hci_dev_unlock(hdev);
/* LE Long Term Key Request event: looks up the stored LTK for the peer,
 * validates EDiv/Rand (both must be zero for Secure Connections keys), and
 * replies with the key, padding it to full length. An STK is single-use and
 * removed after being handed to the controller. Falls back to a negative
 * reply when no matching key exists.
 * NOTE(review): null checks, goto labels and braces are elided in this
 * extract.
 */
6886 static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data,
6887 struct sk_buff *skb)
6889 struct hci_ev_le_ltk_req *ev = data;
6890 struct hci_cp_le_ltk_reply cp;
6891 struct hci_cp_le_ltk_neg_reply neg;
6892 struct hci_conn *conn;
6893 struct smp_ltk *ltk;
6895 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6899 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
6903 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
6907 if (smp_ltk_is_sc(ltk)) {
6908 /* With SC both EDiv and Rand are set to zero */
6909 if (ev->ediv || ev->rand)
6912 /* For non-SC keys check that EDiv and Rand match */
6913 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
/* Copy the key and zero-pad beyond the negotiated encryption size. */
6917 memcpy(cp.ltk, ltk->val, ltk->enc_size);
6918 memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
6919 cp.handle = cpu_to_le16(conn->handle);
6921 conn->pending_sec_level = smp_ltk_sec_level(ltk);
6923 conn->enc_key_size = ltk->enc_size;
6925 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
6927 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
6928 * temporary key used to encrypt a connection following
6929 * pairing. It is used during the Encrypted Session Setup to
6930 * distribute the keys. Later, security can be re-established
6931 * using a distributed LTK.
6933 if (ltk->type == SMP_STK) {
6934 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6935 list_del_rcu(&ltk->list);
6936 kfree_rcu(ltk, rcu);
6938 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
6941 hci_dev_unlock(hdev);
/* Failure path: no usable key, send the negative reply. */
6946 neg.handle = ev->handle;
6947 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
6948 hci_dev_unlock(hdev);
/* Send LE Remote Connection Parameter Request Negative Reply for the given
 * handle with the supplied rejection reason.
 */
6951 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
6954 struct hci_cp_le_conn_param_req_neg_reply cp;
6956 cp.handle = cpu_to_le16(handle);
6959 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* LE Remote Connection Parameter Request event: rejects requests for unknown
 * or invalid connections/parameters; otherwise, when we are central, records
 * the requested parameters in the stored connection params and notifies
 * userspace, then echoes the request back as a positive reply.
 * NOTE(review): several brace/guard lines are elided in this extract.
 */
6963 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data,
6964 struct sk_buff *skb)
6966 struct hci_ev_le_remote_conn_param_req *ev = data;
6967 struct hci_cp_le_conn_param_req_reply cp;
6968 struct hci_conn *hcon;
6969 u16 handle, min, max, latency, timeout;
6971 bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle));
6973 handle = le16_to_cpu(ev->handle);
6974 min = le16_to_cpu(ev->interval_min);
6975 max = le16_to_cpu(ev->interval_max);
6976 latency = le16_to_cpu(ev->latency);
6977 timeout = le16_to_cpu(ev->timeout);
6979 hcon = hci_conn_hash_lookup_handle(hdev, handle);
6980 if (!hcon || hcon->state != BT_CONNECTED)
6981 return send_conn_param_neg_reply(hdev, handle,
6982 HCI_ERROR_UNKNOWN_CONN_ID);
/* Reject out-of-spec parameter combinations outright. */
6984 if (hci_check_conn_params(min, max, latency, timeout))
6985 return send_conn_param_neg_reply(hdev, handle,
6986 HCI_ERROR_INVALID_LL_PARAMS);
6988 if (hcon->role == HCI_ROLE_MASTER) {
6989 struct hci_conn_params *params;
/* Persist the peer's request in our stored connection params. */
6994 params = hci_conn_params_lookup(hdev, &hcon->dst,
6997 params->conn_min_interval = min;
6998 params->conn_max_interval = max;
6999 params->conn_latency = latency;
7000 params->supervision_timeout = timeout;
7006 hci_dev_unlock(hdev);
7008 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
7009 store_hint, min, max, latency, timeout);
/* Accept: echo the requested values back in the positive reply. */
7012 cp.handle = ev->handle;
7013 cp.interval_min = ev->interval_min;
7014 cp.interval_max = ev->interval_max;
7015 cp.latency = ev->latency;
7016 cp.timeout = ev->timeout;
7020 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* LE Direct Advertising Report event: validates the flexible array of
 * reports against the skb length, then feeds each report (which carries a
 * direct/target address but no advertising data) into process_adv_report().
 */
7023 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
7024 struct sk_buff *skb)
7026 struct hci_ev_le_direct_adv_report *ev = data;
7027 u64 instant = jiffies;
/* Ensure the event really contains ev->num report entries. */
7030 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT,
7031 flex_array_size(ev, info, ev->num)))
7039 for (i = 0; i < ev->num; i++) {
7040 struct hci_ev_le_direct_adv_info *info = &ev->info[i];
7042 process_adv_report(hdev, info->type, &info->bdaddr,
7043 info->bdaddr_type, &info->direct_addr,
7044 info->direct_addr_type, info->rssi, NULL, 0,
7045 false, false, instant);
7048 hci_dev_unlock(hdev);
/* LE PHY Update Complete event: records the connection's new TX and RX PHYs.
 * NOTE(review): status/null checks are elided in this extract.
 */
7051 static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data,
7052 struct sk_buff *skb)
7054 struct hci_ev_le_phy_update_complete *ev = data;
7055 struct hci_conn *conn;
7057 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7064 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
7068 conn->le_tx_phy = ev->tx_phy;
7069 conn->le_rx_phy = ev->rx_phy;
7072 hci_dev_unlock(hdev);
/* LE CIS Established event (subevent 0x19).
 *
 * Looks up the ISO connection for the CIS handle, fills in the
 * negotiated QoS on the peripheral side and, on success, marks the
 * connection connected and sets up the ISO data path; on failure the
 * status is propagated via hci_connect_cfm().
 * ("estabilished" is a historical typo; the table entry uses the same
 * spelling, so renaming would have to touch both.)
 */
7075 static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
7076 struct sk_buff *skb)
7078 struct hci_evt_le_cis_established *ev = data;
7079 struct hci_conn *conn;
7080 u16 handle = __le16_to_cpu(ev->handle);
7082 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
7086 conn = hci_conn_hash_lookup_handle(hdev, handle);
7089 "Unable to find connection with handle 0x%4.4x",
7094 if (conn->type != ISO_LINK) {
7096 "Invalid connection link type handle 0x%4.4x",
/* Only the peripheral learns the QoS from the event; the central
 * already knows what it configured via Set CIG Parameters.
 */
7101 if (conn->role == HCI_ROLE_SLAVE) {
7104 memset(&interval, 0, sizeof(interval));
/* c_latency/p_latency are 24-bit little-endian values; widen them
 * through a zeroed 32-bit temporary before byte-order conversion.
 */
7106 memcpy(&interval, ev->c_latency, sizeof(ev->c_latency));
7107 conn->iso_qos.in.interval = le32_to_cpu(interval);
7108 memcpy(&interval, ev->p_latency, sizeof(ev->p_latency));
7109 conn->iso_qos.out.interval = le32_to_cpu(interval);
/* NOTE(review): unlike hci_le_big_sync_established_evt() below, the
 * ISO interval (1.25 ms units) is stored into .latency without a
 * "* 125 / 100" conversion to ms — verify the intended units.
 */
7110 conn->iso_qos.in.latency = le16_to_cpu(ev->interval);
7111 conn->iso_qos.out.latency = le16_to_cpu(ev->interval);
7112 conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu);
7113 conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu);
7114 conn->iso_qos.in.phy = ev->c_phy;
7115 conn->iso_qos.out.phy = ev->p_phy;
/* Success path: bring the connection up and configure the data path. */
7119 conn->state = BT_CONNECTED;
7120 hci_debugfs_create_conn(conn);
7121 hci_conn_add_sysfs(conn);
7122 hci_iso_setup_path(conn);
7126 hci_connect_cfm(conn, ev->status);
7130 hci_dev_unlock(hdev);
/* Send HCI LE Reject CIS Request for @handle with reason
 * HCI_ERROR_REJ_BAD_ADDR.
 * NOTE(review): @handle is never copied into cp in this copy, so the
 * command would go out with handle 0 from the memset — confirm the
 * elided line sets cp.handle = handle.
 */
7133 static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle)
7135 struct hci_cp_le_reject_cis cp;
7137 memset(&cp, 0, sizeof(cp));
7139 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
7140 hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp);
/* Send HCI LE Accept CIS Request for @handle.
 * NOTE(review): as with hci_le_reject_cis() above, this copy never
 * assigns cp.handle — confirm the elided line sets cp.handle = handle.
 */
7143 static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle)
7145 struct hci_cp_le_accept_cis cp;
7147 memset(&cp, 0, sizeof(cp));
7149 hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp);
/* LE CIS Request event (subevent 0x1a).
 *
 * A connected central asked to create a CIS with us: ask the ISO
 * protocol whether the request is acceptable; if not, reject it,
 * otherwise create (or reuse) an ISO connection for the CIS handle and
 * either accept immediately or defer the decision to userspace.
 */
7152 static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data,
7153 struct sk_buff *skb)
7155 struct hci_evt_le_cis_req *ev = data;
7156 u16 acl_handle, cis_handle;
7157 struct hci_conn *acl, *cis;
7161 acl_handle = __le16_to_cpu(ev->acl_handle);
7162 cis_handle = __le16_to_cpu(ev->cis_handle);
7164 bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x",
7165 acl_handle, cis_handle, ev->cig_id, ev->cis_id);
/* The CIS request must reference an existing ACL connection. */
7169 acl = hci_conn_hash_lookup_handle(hdev, acl_handle);
7173 mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags);
7174 if (!(mask & HCI_LM_ACCEPT)) {
7175 hci_le_reject_cis(hdev, ev->cis_handle);
/* Reuse an existing ISO conn for this CIS handle or create one. */
7179 cis = hci_conn_hash_lookup_handle(hdev, cis_handle);
7181 cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE);
7183 hci_le_reject_cis(hdev, ev->cis_handle);
7186 cis->handle = cis_handle;
7189 cis->iso_qos.cig = ev->cig_id;
7190 cis->iso_qos.cis = ev->cis_id;
/* Without HCI_PROTO_DEFER accept right away; otherwise park the
 * connection in BT_CONNECT2 and let the socket layer decide.
 */
7192 if (!(flags & HCI_PROTO_DEFER)) {
7193 hci_le_accept_cis(hdev, ev->cis_handle);
7195 cis->state = BT_CONNECT2;
7196 hci_connect_cfm(cis, 0);
7200 hci_dev_unlock(hdev);
/* LE Create BIG Complete event (subevent 0x1b).
 *
 * Re-validates the variable-length BIS handle list, looks up the ISO
 * connection for the BIG and, on success, binds it to the first BIS
 * handle, brings it up and sets up the ISO data path.
 * NOTE(review): uses BT_DBG() where the sibling handlers in this file
 * use bt_dev_dbg() — candidate for consistency cleanup.
 */
7203 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
7204 struct sk_buff *skb)
7206 struct hci_evt_le_create_big_complete *ev = data;
7207 struct hci_conn *conn;
7209 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
/* Length must cover num_bis trailing BIS connection handles. */
7211 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE,
7212 flex_array_size(ev, bis_handle, ev->num_bis)))
7217 conn = hci_conn_hash_lookup_big(hdev, ev->handle);
7221 if (conn->type != ISO_LINK) {
7223 "Invalid connection link type handle 0x%2.2x",
/* Adopt the first BIS handle as the connection handle. */
7229 conn->handle = __le16_to_cpu(ev->bis_handle[0]);
7232 conn->state = BT_CONNECTED;
7233 hci_debugfs_create_conn(conn);
7234 hci_conn_add_sysfs(conn);
7235 hci_iso_setup_path(conn);
7239 hci_connect_cfm(conn, ev->status);
7243 hci_dev_unlock(hdev);
/* LE BIG Sync Established event (subevent 0x1d).
 *
 * For every BIS handle carried in the event, find or create an ISO
 * connection, record the BIG handle and the input QoS derived from the
 * event, then notify the upper layer via hci_connect_cfm() with the
 * event status.
 */
7246 static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
7247 struct sk_buff *skb)
7249 struct hci_evt_le_big_sync_estabilished *ev = data;
7250 struct hci_conn *bis;
7253 bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
/* Length must cover num_bis trailing BIS connection handles. */
7255 if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7256 flex_array_size(ev, bis, ev->num_bis)))
7264 for (i = 0; i < ev->num_bis; i++) {
7265 u16 handle = le16_to_cpu(ev->bis[i]);
7268 bis = hci_conn_hash_lookup_handle(hdev, handle);
/* Broadcast source address is unknown here, hence BDADDR_ANY. */
7270 bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
7274 bis->handle = handle;
7277 bis->iso_qos.big = ev->handle;
7278 memset(&interval, 0, sizeof(interval));
/* ev->latency is a 24-bit little-endian transport latency; widen it
 * through a zeroed 32-bit temporary before byte-order conversion.
 */
7279 memcpy(&interval, ev->latency, sizeof(ev->latency));
7280 bis->iso_qos.in.interval = le32_to_cpu(interval);
7281 /* Convert ISO Interval (1.25 ms slots) to latency (ms) */
7282 bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
7283 bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
7285 hci_connect_cfm(bis, ev->status);
7288 hci_dev_unlock(hdev);
/* LE BIG Info Advertising Report event (subevent 0x22).
 *
 * Asks the ISO protocol whether anyone is listening for broadcast ISO
 * on this controller; if nobody accepts, terminate the periodic
 * advertising sync the report arrived on to stop further reports.
 */
7291 static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
7292 struct sk_buff *skb)
7294 struct hci_evt_le_big_info_adv_report *ev = data;
7295 int mask = hdev->link_mode;
7298 bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
7302 mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
7303 if (!(mask & HCI_LM_ACCEPT))
7304 hci_le_pa_term_sync(hdev, ev->sync_handle);
7306 hci_dev_unlock(hdev);
/* Table-entry helpers for hci_le_ev_table[] below:
 * HCI_LE_EV_VL     - subevent with a variable-length payload
 *                    (separate min/max bounds)
 * HCI_LE_EV        - fixed-length subevent (min == max)
 * HCI_LE_EV_STATUS - subevent whose payload is a single status byte
 * (No comments are inserted between the continuation lines because the
 * macro bodies continue via trailing backslashes.)
 */
7309 #define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
7312 .min_len = _min_len, \
7313 .max_len = _max_len, \
7316 #define HCI_LE_EV(_op, _func, _len) \
7317 HCI_LE_EV_VL(_op, _func, _len, _len)
7319 #define HCI_LE_EV_STATUS(_op, _func) \
7320 HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
7322 /* Entries in this table shall have their position according to the subevent
7323 * opcode they handle so the use of the macros above is recommended since it does
7324 * attempt to initialize at its proper index using Designated Initializers that
7325 * way events without a callback function can be omitted.
7327 static const struct hci_le_ev {
/* Handler plus the expected payload bounds for one LE subevent. */
7328 void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb);
7331 } hci_le_ev_table[U8_MAX + 1] = {
7332 /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */
7333 HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
7334 sizeof(struct hci_ev_le_conn_complete)),
7335 /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */
7336 HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt,
7337 sizeof(struct hci_ev_le_advertising_report),
7338 HCI_MAX_EVENT_SIZE),
7339 /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */
7340 HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE,
7341 hci_le_conn_update_complete_evt,
7342 sizeof(struct hci_ev_le_conn_update_complete)),
7343 /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */
7344 HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE,
7345 hci_le_remote_feat_complete_evt,
7346 sizeof(struct hci_ev_le_remote_feat_complete)),
7347 /* [0x05 = HCI_EV_LE_LTK_REQ] */
7348 HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt,
7349 sizeof(struct hci_ev_le_ltk_req)),
7350 /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */
7351 HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ,
7352 hci_le_remote_conn_param_req_evt,
7353 sizeof(struct hci_ev_le_remote_conn_param_req)),
7355 /* [0x07 = HCI_EV_LE_DATA_LEN_CHANGE] */
7356 HCI_LE_EV(HCI_EV_LE_DATA_LEN_CHANGE,
7357 hci_le_data_length_changed_complete_evt,
7358 sizeof(struct hci_ev_le_data_len_change)),
7360 /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */
7361 HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE,
7362 hci_le_enh_conn_complete_evt,
7363 sizeof(struct hci_ev_le_enh_conn_complete)),
7364 /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */
7365 HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt,
7366 sizeof(struct hci_ev_le_direct_adv_report),
7367 HCI_MAX_EVENT_SIZE),
7368 /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */
7369 HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt,
7370 sizeof(struct hci_ev_le_phy_update_complete)),
7371 /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */
7372 HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt,
7373 sizeof(struct hci_ev_le_ext_adv_report),
7374 HCI_MAX_EVENT_SIZE),
7375 /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */
7376 HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED,
7377 hci_le_pa_sync_estabilished_evt,
7378 sizeof(struct hci_ev_le_pa_sync_established)),
7379 /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */
7380 HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt,
7381 sizeof(struct hci_evt_le_ext_adv_set_term)),
7382 /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */
7383 HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt,
7384 sizeof(struct hci_evt_le_cis_established)),
7385 /* [0x1a = HCI_EVT_LE_CIS_REQ] */
7386 HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt,
7387 sizeof(struct hci_evt_le_cis_req)),
7388 /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */
7389 HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE,
7390 hci_le_create_big_complete_evt,
7391 sizeof(struct hci_evt_le_create_big_complete),
7392 HCI_MAX_EVENT_SIZE),
7393 /* [0x1d = HCI_EVT_LE_BIG_SYNC_ESTABILISHED] */
7394 HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
7395 hci_le_big_sync_established_evt,
7396 sizeof(struct hci_evt_le_big_sync_estabilished),
7397 HCI_MAX_EVENT_SIZE),
7398 /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */
7399 HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT,
7400 hci_le_big_info_adv_report_evt,
7401 sizeof(struct hci_evt_le_big_info_adv_report),
7402 HCI_MAX_EVENT_SIZE),
/* HCI LE Meta event (0x3e) demultiplexer.
 *
 * Completes any pending LE command that was waiting on this subevent,
 * then validates the payload length against hci_le_ev_table[] and
 * invokes the registered subevent handler.
 */
7405 static void hci_le_meta_evt(struct hci_dev *hdev, void *data,
7406 struct sk_buff *skb, u16 *opcode, u8 *status,
7407 hci_req_complete_t *req_complete,
7408 hci_req_complete_skb_t *req_complete_skb)
7410 struct hci_ev_le_meta *ev = data;
7411 const struct hci_le_ev *subev;
7413 bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent);
7415 /* Only match event if command OGF is for LE */
7416 if (hdev->sent_cmd &&
7417 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 &&
7418 hci_skb_event(hdev->sent_cmd) == ev->subevent) {
7419 *opcode = hci_skb_opcode(hdev->sent_cmd);
7420 hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
/* Table is indexed directly by the subevent code. */
7424 subev = &hci_le_ev_table[ev->subevent];
/* Shorter than min_len means the event is unparseable: hard error. */
7428 if (skb->len < subev->min_len) {
7429 bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
7430 ev->subevent, skb->len, subev->min_len);
7434 /* Just warn if the length is over max_len: it may still be
7435 * possible to partially parse the event, so leave it to the callback
7436 * to decide if that is acceptable.
7438 if (skb->len > subev->max_len)
7439 bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
7440 ev->subevent, skb->len, subev->max_len);
7441 data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
7445 subev->func(hdev, data, skb);
/* Check whether @skb (a pristine clone of the received event) is the
 * Command Complete event for @opcode, so callers know whether it may
 * be handed to a req_complete_skb callback.
 * NOTE(review): the return statements are elided in this copy —
 * presumably false on mismatch/short data and true on a match; verify
 * against the original.
 */
7448 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
7449 u8 event, struct sk_buff *skb)
7451 struct hci_ev_cmd_complete *ev;
7452 struct hci_event_hdr *hdr;
7457 hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
7462 if (hdr->evt != event)
7467 /* Check if request ended in Command Status - no way to retrieve
7468 * any extra parameters in this case.
7470 if (hdr->evt == HCI_EV_CMD_STATUS)
7473 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
7474 bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
7479 ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
/* The completed opcode carried in the event must match the request. */
7483 if (opcode != __le16_to_cpu(ev->opcode)) {
7484 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
7485 __le16_to_cpu(ev->opcode));
7492 static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
7493 struct sk_buff *skb)
7495 struct hci_ev_le_advertising_info *adv;
7496 struct hci_ev_le_direct_adv_info *direct_adv;
7497 struct hci_ev_le_ext_adv_info *ext_adv;
7498 const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
7499 const struct hci_ev_conn_request *conn_request = (void *)skb->data;
7503 /* If we are currently suspended and this is the first BT event seen,
7504 * save the wake reason associated with the event.
7506 if (!hdev->suspended || hdev->wake_reason)
7509 /* Default to remote wake. Values for wake_reason are documented in the
7510 * Bluez mgmt api docs.
7512 hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
7514 /* Once configured for remote wakeup, we should only wake up for
7515 * reconnections. It's useful to see which device is waking us up so
7516 * keep track of the bdaddr of the connection event that woke us up.
7518 if (event == HCI_EV_CONN_REQUEST) {
7519 bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
7520 hdev->wake_addr_type = BDADDR_BREDR;
7521 } else if (event == HCI_EV_CONN_COMPLETE) {
7522 bacpy(&hdev->wake_addr, &conn_request->bdaddr);
7523 hdev->wake_addr_type = BDADDR_BREDR;
7524 } else if (event == HCI_EV_LE_META) {
7525 struct hci_ev_le_meta *le_ev = (void *)skb->data;
7526 u8 subevent = le_ev->subevent;
7527 u8 *ptr = &skb->data[sizeof(*le_ev)];
7528 u8 num_reports = *ptr;
7530 if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
7531 subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
7532 subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
7534 adv = (void *)(ptr + 1);
7535 direct_adv = (void *)(ptr + 1);
7536 ext_adv = (void *)(ptr + 1);
7539 case HCI_EV_LE_ADVERTISING_REPORT:
7540 bacpy(&hdev->wake_addr, &adv->bdaddr);
7541 hdev->wake_addr_type = adv->bdaddr_type;
7543 case HCI_EV_LE_DIRECT_ADV_REPORT:
7544 bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
7545 hdev->wake_addr_type = direct_adv->bdaddr_type;
7547 case HCI_EV_LE_EXT_ADV_REPORT:
7548 bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
7549 hdev->wake_addr_type = ext_adv->bdaddr_type;
7554 hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
7558 hci_dev_unlock(hdev);
/* Table-entry helpers for hci_ev_table[] below:
 * HCI_EV_VL     - event with a variable-length payload (min/max bounds)
 * HCI_EV        - fixed-length event (min == max)
 * HCI_EV_STATUS - event whose payload is a single status byte
 * HCI_EV_REQ_VL / HCI_EV_REQ - events dispatched through the
 *               request-aware callback (func_req) instead of func
 * (No comments are inserted between the continuation lines because the
 * macro bodies continue via trailing backslashes.)
 */
7561 #define HCI_EV_VL(_op, _func, _min_len, _max_len) \
7565 .min_len = _min_len, \
7566 .max_len = _max_len, \
7569 #define HCI_EV(_op, _func, _len) \
7570 HCI_EV_VL(_op, _func, _len, _len)
7572 #define HCI_EV_STATUS(_op, _func) \
7573 HCI_EV(_op, _func, sizeof(struct hci_ev_status))
7575 #define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
7578 .func_req = _func, \
7579 .min_len = _min_len, \
7580 .max_len = _max_len, \
7583 #define HCI_EV_REQ(_op, _func, _len) \
7584 HCI_EV_REQ_VL(_op, _func, _len, _len)
7586 /* Entries in this table shall have their position according to the event opcode
7587 * they handle so the use of the macros above is recommended since it does attempt
7588 * to initialize at its proper index using Designated Initializers that way
7589 * events without a callback function don't need to be entered.
7591 static const struct hci_ev {
/* Plain handler and request-aware handler; only one is set per entry. */
7594 void (*func)(struct hci_dev *hdev, void *data,
7595 struct sk_buff *skb);
7596 void (*func_req)(struct hci_dev *hdev, void *data,
7597 struct sk_buff *skb, u16 *opcode, u8 *status,
7598 hci_req_complete_t *req_complete,
7599 hci_req_complete_skb_t *req_complete_skb);
7603 } hci_ev_table[U8_MAX + 1] = {
7604 /* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
7605 HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
7606 /* [0x02 = HCI_EV_INQUIRY_RESULT] */
7607 HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
7608 sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
7609 /* [0x03 = HCI_EV_CONN_COMPLETE] */
7610 HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
7611 sizeof(struct hci_ev_conn_complete)),
7612 /* [0x04 = HCI_EV_CONN_REQUEST] */
7613 HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
7614 sizeof(struct hci_ev_conn_request)),
7615 /* [0x05 = HCI_EV_DISCONN_COMPLETE] */
7616 HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
7617 sizeof(struct hci_ev_disconn_complete)),
7618 /* [0x06 = HCI_EV_AUTH_COMPLETE] */
7619 HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
7620 sizeof(struct hci_ev_auth_complete)),
7621 /* [0x07 = HCI_EV_REMOTE_NAME] */
7622 HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
7623 sizeof(struct hci_ev_remote_name)),
7624 /* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
7625 HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
7626 sizeof(struct hci_ev_encrypt_change)),
7627 /* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
7628 HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
7629 hci_change_link_key_complete_evt,
7630 sizeof(struct hci_ev_change_link_key_complete)),
7631 /* [0x0b = HCI_EV_REMOTE_FEATURES] */
7632 HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
7633 sizeof(struct hci_ev_remote_features)),
7634 /* [0x0e = HCI_EV_CMD_COMPLETE] */
7635 HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
7636 sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
7637 /* [0x0f = HCI_EV_CMD_STATUS] */
7638 HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
7639 sizeof(struct hci_ev_cmd_status)),
7640 /* [0x10 = HCI_EV_HARDWARE_ERROR] */
7641 HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
7642 sizeof(struct hci_ev_hardware_error)),
7643 /* [0x12 = HCI_EV_ROLE_CHANGE] */
7644 HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
7645 sizeof(struct hci_ev_role_change)),
7646 /* [0x13 = HCI_EV_NUM_COMP_PKTS] */
7647 HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
7648 sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
7649 /* [0x14 = HCI_EV_MODE_CHANGE] */
7650 HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
7651 sizeof(struct hci_ev_mode_change)),
7652 /* [0x16 = HCI_EV_PIN_CODE_REQ] */
7653 HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
7654 sizeof(struct hci_ev_pin_code_req)),
7655 /* [0x17 = HCI_EV_LINK_KEY_REQ] */
7656 HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
7657 sizeof(struct hci_ev_link_key_req)),
7658 /* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
7659 HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
7660 sizeof(struct hci_ev_link_key_notify)),
7661 /* [0x1c = HCI_EV_CLOCK_OFFSET] */
7662 HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
7663 sizeof(struct hci_ev_clock_offset)),
7664 /* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
7665 HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
7666 sizeof(struct hci_ev_pkt_type_change)),
7667 /* [0x20 = HCI_EV_PSCAN_REP_MODE] */
7668 HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
7669 sizeof(struct hci_ev_pscan_rep_mode)),
7670 /* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
7671 HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
7672 hci_inquiry_result_with_rssi_evt,
7673 sizeof(struct hci_ev_inquiry_result_rssi),
7674 HCI_MAX_EVENT_SIZE),
7675 /* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
7676 HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
7677 sizeof(struct hci_ev_remote_ext_features)),
7678 /* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
7679 HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
7680 sizeof(struct hci_ev_sync_conn_complete)),
7681 /* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
7682 HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
7683 hci_extended_inquiry_result_evt,
7684 sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
7685 /* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
7686 HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
7687 sizeof(struct hci_ev_key_refresh_complete)),
7688 /* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
7689 HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
7690 sizeof(struct hci_ev_io_capa_request)),
7691 /* [0x32 = HCI_EV_IO_CAPA_REPLY] */
7692 HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
7693 sizeof(struct hci_ev_io_capa_reply)),
7694 /* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
7695 HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
7696 sizeof(struct hci_ev_user_confirm_req)),
7697 /* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
7698 HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
7699 sizeof(struct hci_ev_user_passkey_req)),
7700 /* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
7701 HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
7702 sizeof(struct hci_ev_remote_oob_data_request)),
7703 /* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
7704 HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
7705 sizeof(struct hci_ev_simple_pair_complete)),
7706 /* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
7707 HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
7708 sizeof(struct hci_ev_user_passkey_notify)),
7709 /* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
7710 HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
7711 sizeof(struct hci_ev_keypress_notify)),
7712 /* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
7713 HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
7714 sizeof(struct hci_ev_remote_host_features)),
7715 /* [0x3e = HCI_EV_LE_META] */
7716 HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
7717 sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
7718 #if IS_ENABLED(CONFIG_BT_HS)
7719 /* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
7720 HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
7721 sizeof(struct hci_ev_phy_link_complete)),
7722 /* [0x41 = HCI_EV_CHANNEL_SELECTED] */
7723 HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
7724 sizeof(struct hci_ev_channel_selected)),
7725 /* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
7726 HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
7727 hci_disconn_loglink_complete_evt,
7728 sizeof(struct hci_ev_disconn_logical_link_complete)),
7729 /* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
7730 HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
7731 sizeof(struct hci_ev_logical_link_complete)),
7732 /* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
7733 HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
7734 hci_disconn_phylink_complete_evt,
7735 sizeof(struct hci_ev_disconn_phy_link_complete)),
7737 /* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
7738 HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
7739 sizeof(struct hci_ev_num_comp_blocks)),
/* NOTE(review): the next two entries both appear to target event 0xff
 * (vendor); with designated initializers the later one silently
 * overrides the earlier — verify which handler is intended to win.
 */
7741 /* [0xFF = HCI_EV_VENDOR_SPECIFIC] */
7742 HCI_EV(HCI_EV_VENDOR_SPECIFIC, hci_vendor_specific_evt,
7743 sizeof(struct hci_ev_vendor_specific)),
7745 /* [0xff = HCI_EV_VENDOR] */
7746 HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
/* Dispatch a single HCI event.
 *
 * Validates the payload length against the hci_ev_table[] entry (hard
 * error below min_len, rate-limited warning above max_len), pulls the
 * fixed-size header and invokes either the request-aware callback
 * (func_req, for Command Complete/Status and LE Meta) or the plain
 * callback (func).
 */
7750 static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
7751 u16 *opcode, u8 *status,
7752 hci_req_complete_t *req_complete,
7753 hci_req_complete_skb_t *req_complete_skb)
7755 const struct hci_ev *ev = &hci_ev_table[event];
/* Shorter than min_len means the event is unparseable: hard error. */
7761 if (skb->len < ev->min_len) {
7762 bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
7763 event, skb->len, ev->min_len);
7767 /* Just warn if the length is over max_len: it may still be
7768 * possible to partially parse the event, so leave it to the callback
7769 * to decide if that is acceptable.
7771 if (skb->len > ev->max_len)
7772 bt_dev_warn_ratelimited(hdev,
7773 "unexpected event 0x%2.2x length: %u > %u",
7774 event, skb->len, ev->max_len);
7776 data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
7781 ev->func_req(hdev, data, skb, opcode, status, req_complete,
7784 ev->func(hdev, data, skb);
7787 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
7789 struct hci_event_hdr *hdr = (void *) skb->data;
7790 hci_req_complete_t req_complete = NULL;
7791 hci_req_complete_skb_t req_complete_skb = NULL;
7792 struct sk_buff *orig_skb = NULL;
7793 u8 status = 0, event, req_evt = 0;
7794 u16 opcode = HCI_OP_NOP;
7796 if (skb->len < sizeof(*hdr)) {
7797 bt_dev_err(hdev, "Malformed HCI Event");
7801 kfree_skb(hdev->recv_event);
7802 hdev->recv_event = skb_clone(skb, GFP_KERNEL);
7806 bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
7811 /* Only match event if command OGF is not for LE */
7812 if (hdev->sent_cmd &&
7813 hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
7814 hci_skb_event(hdev->sent_cmd) == event) {
7815 hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
7816 status, &req_complete, &req_complete_skb);
7820 /* If it looks like we might end up having to call
7821 * req_complete_skb, store a pristine copy of the skb since the
7822 * various handlers may modify the original one through
7823 * skb_pull() calls, etc.
7825 if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
7826 event == HCI_EV_CMD_COMPLETE)
7827 orig_skb = skb_clone(skb, GFP_KERNEL);
7829 skb_pull(skb, HCI_EVENT_HDR_SIZE);
7831 /* Store wake reason if we're suspended */
7832 hci_store_wake_reason(hdev, event, skb);
7834 bt_dev_dbg(hdev, "event 0x%2.2x", event);
7836 hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
7840 req_complete(hdev, status, opcode);
7841 } else if (req_complete_skb) {
7842 if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
7843 kfree_skb(orig_skb);
7846 req_complete_skb(hdev, status, opcode, orig_skb);
7850 kfree_skb(orig_skb);
7852 hdev->stat.evt_rx++;