2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI event handling. */
27 #include <asm/unaligned.h>
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
42 /* Handle HCI Event packets */
44 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
46 __u8 status = *((__u8 *) skb->data);
48 BT_DBG("%s status 0x%2.2x", hdev->name, status);
53 clear_bit(HCI_INQUIRY, &hdev->flags);
54 #ifdef CONFIG_TIZEN_WIP
55 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
57 /* In latest kernel, smp_mb__after_clear_bit is replaced with
58 * smp_mb__after_atomic. So, if kernel is migrated to latest,
59 * then below code should be enabled
61 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
63 wake_up_bit(&hdev->flags, HCI_INQUIRY);
66 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
69 hci_conn_check_pending(hdev);
72 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 __u8 status = *((__u8 *) skb->data);
76 BT_DBG("%s status 0x%2.2x", hdev->name, status);
81 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
84 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
86 __u8 status = *((__u8 *) skb->data);
88 BT_DBG("%s status 0x%2.2x", hdev->name, status);
93 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
95 hci_conn_check_pending(hdev);
/* CC handler for HCI_Remote_Name_Request_Cancel: debug trace only. */
98 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
101 BT_DBG("%s", hdev->name);
/* CC handler for HCI_Role_Discovery: caches the reported role
 * (central/peripheral) on the connection matching the returned handle.
 */
104 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
106 struct hci_rp_role_discovery *rp = (void *) skb->data;
107 struct hci_conn *conn;
109 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
116 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
118 conn->role = rp->role;
120 hci_dev_unlock(hdev);
/* CC handler for HCI_Read_Link_Policy: caches the per-connection link
 * policy settings returned by the controller.
 */
123 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
125 struct hci_rp_read_link_policy *rp = (void *) skb->data;
126 struct hci_conn *conn;
128 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
137 conn->link_policy = __le16_to_cpu(rp->policy);
139 hci_dev_unlock(hdev);
/* CC handler for HCI_Write_Link_Policy: mirrors the policy just written
 * (taken from the sent command payload) into the connection object.
 * Tizen addition: if an SCO link to the same peer exists and sniff mode
 * was just enabled, immediately rewrite the policy with sniff stripped.
 */
142 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
144 struct hci_rp_write_link_policy *rp = (void *) skb->data;
145 #ifdef CONFIG_TIZEN_WIP
146 struct hci_cp_write_link_policy cp;
147 struct hci_conn *sco_conn;
149 struct hci_conn *conn;
152 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
157 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
163 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* sent + 2 skips the 2-byte handle field of the sent command to
 * reach the policy field.
 */
165 conn->link_policy = get_unaligned_le16(sent + 2);
166 #ifdef CONFIG_TIZEN_WIP
167 sco_conn = hci_conn_hash_lookup_sco(hdev);
168 if (sco_conn && conn && bacmp(&sco_conn->dst, &conn->dst) == 0 &&
169 conn->link_policy & HCI_LP_SNIFF) {
170 BT_ERR("SNIFF is not allowed during sco connection");
171 cp.handle = __cpu_to_le16(conn->handle);
172 cp.policy = __cpu_to_le16(conn->link_policy & ~HCI_LP_SNIFF);
173 hci_send_cmd(hdev, HCI_OP_WRITE_LINK_POLICY, sizeof(cp), &cp);
177 hci_dev_unlock(hdev);
/* CC handler for HCI_Read_Default_Link_Policy_Settings: caches the
 * adapter-wide default link policy.
 */
180 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
183 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
185 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
190 hdev->link_policy = __le16_to_cpu(rp->policy);
/* CC handler for HCI_Write_Default_Link_Policy_Settings: mirrors the value
 * just written (from the sent command) into hdev->link_policy.
 */
193 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
196 __u8 status = *((__u8 *) skb->data);
199 BT_DBG("%s status 0x%2.2x", hdev->name, status);
204 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
208 hdev->link_policy = get_unaligned_le16(sent);
/* CC handler for HCI_Reset: clears the reset-in-progress flag and restores
 * all volatile adapter state to power-on defaults (flags, discovery state,
 * TX powers, advertising/scan-response data, scan type, SSP debug mode,
 * and the LE white list).
 */
211 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
213 __u8 status = *((__u8 *) skb->data);
215 BT_DBG("%s status 0x%2.2x", hdev->name, status);
217 clear_bit(HCI_RESET, &hdev->flags);
222 /* Reset all non-persistent flags */
223 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
225 hdev->discovery.state = DISCOVERY_STOPPED;
226 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
227 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
229 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
230 hdev->adv_data_len = 0;
232 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
233 hdev->scan_rsp_data_len = 0;
235 hdev->le_scan_type = LE_SCAN_PASSIVE;
237 hdev->ssp_debug_mode = 0;
239 hci_bdaddr_list_clear(&hdev->le_white_list);
/* CC handler for HCI_Read_Stored_Link_Key: caches key counts, but only
 * when the originating command asked for all keys (read_all == 0x01).
 */
242 static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
245 struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
246 struct hci_cp_read_stored_link_key *sent;
248 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
250 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
254 if (!rp->status && sent->read_all == 0x01) {
255 hdev->stored_max_keys = rp->max_keys;
256 hdev->stored_num_keys = rp->num_keys;
/* CC handler for HCI_Delete_Stored_Link_Key: decrements the cached stored
 * key count, clamping at zero to avoid underflow.
 */
260 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
263 struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;
265 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
270 if (rp->num_keys <= hdev->stored_num_keys)
271 hdev->stored_num_keys -= rp->num_keys;
273 hdev->stored_num_keys = 0;
/* CC handler for HCI_Write_Local_Name: notifies the management interface
 * of completion and mirrors the name just written into hdev->dev_name.
 */
276 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
278 __u8 status = *((__u8 *) skb->data);
281 BT_DBG("%s status 0x%2.2x", hdev->name, status);
283 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
289 if (test_bit(HCI_MGMT, &hdev->dev_flags))
290 mgmt_set_local_name_complete(hdev, sent, status);
292 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
294 hci_dev_unlock(hdev);
/* CC handler for HCI_Read_Local_Name: caches the controller name, but only
 * during initial setup/config so a user-set name is not overwritten.
 */
297 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
299 struct hci_rp_read_local_name *rp = (void *) skb->data;
301 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
306 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
307 test_bit(HCI_CONFIG, &hdev->dev_flags))
308 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
/* CC handler for HCI_Write_Authentication_Enable: updates the HCI_AUTH
 * flag to match the parameter just written and informs mgmt.
 */
311 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
313 __u8 status = *((__u8 *) skb->data);
316 BT_DBG("%s status 0x%2.2x", hdev->name, status);
318 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
325 __u8 param = *((__u8 *) sent);
327 if (param == AUTH_ENABLED)
328 set_bit(HCI_AUTH, &hdev->flags);
330 clear_bit(HCI_AUTH, &hdev->flags);
333 if (test_bit(HCI_MGMT, &hdev->dev_flags))
334 mgmt_auth_enable_complete(hdev, status);
336 hci_dev_unlock(hdev);
/* CC handler for HCI_Write_Encryption_Mode: updates the HCI_ENCRYPT flag
 * to match the mode just written.
 */
339 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
341 __u8 status = *((__u8 *) skb->data);
345 BT_DBG("%s status 0x%2.2x", hdev->name, status);
350 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
354 param = *((__u8 *) sent);
357 set_bit(HCI_ENCRYPT, &hdev->flags);
359 clear_bit(HCI_ENCRYPT, &hdev->flags);
/* CC handler for HCI_Write_Scan_Enable: synchronizes the HCI_ISCAN and
 * HCI_PSCAN flags with the scan bits just written; a disabled scan also
 * resets the discoverable timeout (per the elided branch around line 379).
 */
362 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
364 __u8 status = *((__u8 *) skb->data);
368 BT_DBG("%s status 0x%2.2x", hdev->name, status);
370 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
374 param = *((__u8 *) sent);
379 hdev->discov_timeout = 0;
383 if (param & SCAN_INQUIRY)
384 set_bit(HCI_ISCAN, &hdev->flags);
386 clear_bit(HCI_ISCAN, &hdev->flags);
388 if (param & SCAN_PAGE)
389 set_bit(HCI_PSCAN, &hdev->flags);
391 clear_bit(HCI_PSCAN, &hdev->flags);
394 hci_dev_unlock(hdev);
/* CC handler for HCI_Read_Class_of_Device: caches the 3-byte class and
 * logs it most-significant byte first.
 */
397 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
399 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
401 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
406 memcpy(hdev->dev_class, rp->dev_class, 3);
408 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
409 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
/* CC handler for HCI_Write_Class_of_Device: mirrors the class just written
 * and notifies mgmt of completion.
 */
412 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
414 __u8 status = *((__u8 *) skb->data);
417 BT_DBG("%s status 0x%2.2x", hdev->name, status);
419 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
426 memcpy(hdev->dev_class, sent, 3);
428 if (test_bit(HCI_MGMT, &hdev->dev_flags))
429 mgmt_set_class_of_dev_complete(hdev, sent, status);
431 hci_dev_unlock(hdev);
/* CC handler for HCI_Read_Voice_Setting: caches the setting and, when it
 * changed, notifies the driver via hdev->notify.
 */
434 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
436 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
439 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
444 setting = __le16_to_cpu(rp->voice_setting);
446 if (hdev->voice_setting == setting)
449 hdev->voice_setting = setting;
451 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
454 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* CC handler for HCI_Write_Voice_Setting: same as the read path but takes
 * the new value from the sent command payload.
 */
457 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
460 __u8 status = *((__u8 *) skb->data);
464 BT_DBG("%s status 0x%2.2x", hdev->name, status);
469 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
473 setting = get_unaligned_le16(sent);
475 if (hdev->voice_setting == setting)
478 hdev->voice_setting = setting;
480 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
483 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
/* CC handler for HCI_Read_Number_Of_Supported_IAC: caches how many
 * inquiry access codes the controller supports simultaneously.
 */
486 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
489 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
491 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
496 hdev->num_iac = rp->num_iac;
498 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
/* CC handler for HCI_Write_Simple_Pairing_Mode: updates the host-SSP
 * feature bit and HCI_SSP_ENABLED flag to match the mode just written,
 * and informs mgmt of the result.
 */
501 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
503 __u8 status = *((__u8 *) skb->data);
504 struct hci_cp_write_ssp_mode *sent;
506 BT_DBG("%s status 0x%2.2x", hdev->name, status);
508 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
516 hdev->features[1][0] |= LMP_HOST_SSP;
518 hdev->features[1][0] &= ~LMP_HOST_SSP;
521 if (test_bit(HCI_MGMT, &hdev->dev_flags))
522 mgmt_ssp_enable_complete(hdev, sent->mode, status);
525 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
527 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
530 hci_dev_unlock(hdev);
/* CC handler for HCI_Write_Secure_Connections_Host_Support: updates the
 * host-SC feature bit; the HCI_SC_ENABLED flag is only touched when mgmt
 * is not active (mgmt manages it itself otherwise).
 */
533 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
535 u8 status = *((u8 *) skb->data);
536 struct hci_cp_write_sc_support *sent;
538 BT_DBG("%s status 0x%2.2x", hdev->name, status);
540 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
548 hdev->features[1][0] |= LMP_HOST_SC;
550 hdev->features[1][0] &= ~LMP_HOST_SC;
553 if (!test_bit(HCI_MGMT, &hdev->dev_flags) && !status) {
555 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
557 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
560 hci_dev_unlock(hdev);
/* CC handler for HCI_Read_Local_Version_Information: caches HCI/LMP
 * versions and manufacturer, only during setup/config so runtime reads
 * cannot clobber established values.
 */
563 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
565 struct hci_rp_read_local_version *rp = (void *) skb->data;
567 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
572 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
573 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
574 hdev->hci_ver = rp->hci_ver;
575 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
576 hdev->lmp_ver = rp->lmp_ver;
577 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
578 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
/* CC handler for HCI_Read_Local_Supported_Commands: caches the supported
 * command bitmap, again restricted to setup/config phases.
 */
582 static void hci_cc_read_local_commands(struct hci_dev *hdev,
585 struct hci_rp_read_local_commands *rp = (void *) skb->data;
587 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
593 test_bit(HCI_CONFIG, &hdev->dev_flags))
594 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
/* CC handler for HCI_Read_Local_Supported_Features: caches the LMP feature
 * page 0 and derives the supported ACL packet types and (e)SCO link types
 * from the individual feature bits.
 */
597 static void hci_cc_read_local_features(struct hci_dev *hdev,
600 struct hci_rp_read_local_features *rp = (void *) skb->data;
602 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
607 memcpy(hdev->features, rp->features, 8);
609 /* Adjust default settings according to features
610 * supported by device. */
612 if (hdev->features[0][0] & LMP_3SLOT)
613 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
615 if (hdev->features[0][0] & LMP_5SLOT)
616 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
618 if (hdev->features[0][1] & LMP_HV2) {
619 hdev->pkt_type |= (HCI_HV2);
620 hdev->esco_type |= (ESCO_HV2);
623 if (hdev->features[0][1] & LMP_HV3) {
624 hdev->pkt_type |= (HCI_HV3);
625 hdev->esco_type |= (ESCO_HV3);
628 if (lmp_esco_capable(hdev))
629 hdev->esco_type |= (ESCO_EV3);
631 if (hdev->features[0][4] & LMP_EV4)
632 hdev->esco_type |= (ESCO_EV4);
634 if (hdev->features[0][4] & LMP_EV5)
635 hdev->esco_type |= (ESCO_EV5);
637 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
638 hdev->esco_type |= (ESCO_2EV3);
640 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
641 hdev->esco_type |= (ESCO_3EV3);
643 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
644 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
/* CC handler for HCI_Read_Local_Extended_Features: tracks the highest
 * feature page and caches the returned page's feature bits.
 */
647 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
650 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
652 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
657 if (hdev->max_page < rp->max_page)
658 hdev->max_page = rp->max_page;
660 if (rp->page < HCI_MAX_PAGES)
661 memcpy(hdev->features[rp->page], rp->features, 8);
/* CC handler for HCI_Read_Flow_Control_Mode: caches the controller's data
 * flow control mode (packet-based vs block-based).
 */
664 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
667 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
669 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
674 hdev->flow_ctl_mode = rp->mode;
/* CC handler for HCI_Read_Buffer_Size: caches ACL/SCO MTUs and packet
 * counts and initializes the outstanding-packet credit counters.
 */
677 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
679 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
681 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
686 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
687 hdev->sco_mtu = rp->sco_mtu;
688 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
689 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
/* Quirk branch (interior lines elided): overrides buggy controller
 * buffer-size reports.
 */
691 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
696 hdev->acl_cnt = hdev->acl_pkts;
697 hdev->sco_cnt = hdev->sco_pkts;
699 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
700 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
/* CC handler for HCI_Read_BD_ADDR: records the controller address during
 * init, and additionally saves the original setup-time address while in
 * the SETUP phase.
 */
703 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
705 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
707 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
712 if (test_bit(HCI_INIT, &hdev->flags))
713 bacpy(&hdev->bdaddr, &rp->bdaddr);
715 if (test_bit(HCI_SETUP, &hdev->dev_flags))
716 bacpy(&hdev->setup_addr, &rp->bdaddr);
/* CC handler for HCI_Read_Page_Scan_Activity: caches interval/window, but
 * only while the adapter is initializing.
 */
719 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
722 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
729 if (test_bit(HCI_INIT, &hdev->flags)) {
730 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
731 hdev->page_scan_window = __le16_to_cpu(rp->window);
/* CC handler for HCI_Write_Page_Scan_Activity: mirrors the interval and
 * window just written into the adapter state.
 */
735 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
738 u8 status = *((u8 *) skb->data);
739 struct hci_cp_write_page_scan_activity *sent;
741 BT_DBG("%s status 0x%2.2x", hdev->name, status);
746 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
750 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
751 hdev->page_scan_window = __le16_to_cpu(sent->window);
/* CC handler for HCI_Read_Page_Scan_Type: caches the scan type during
 * initialization only.
 */
754 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
757 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
759 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
764 if (test_bit(HCI_INIT, &hdev->flags))
765 hdev->page_scan_type = rp->type;
/* CC handler for HCI_Write_Page_Scan_Type: mirrors the type just written. */
768 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
771 u8 status = *((u8 *) skb->data);
774 BT_DBG("%s status 0x%2.2x", hdev->name, status);
779 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
781 hdev->page_scan_type = *type;
/* CC handler for HCI_Read_Data_Block_Size: caches block-based flow-control
 * parameters and initializes the free-block counter.
 */
784 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
787 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
789 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
794 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
795 hdev->block_len = __le16_to_cpu(rp->block_len);
796 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
798 hdev->block_cnt = hdev->num_blocks;
800 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
801 hdev->block_cnt, hdev->block_len);
/* CC handler for HCI_Read_Clock: validates the response length, then
 * stores either the local clock (which == 0x00) on hdev or the piconet
 * clock and accuracy on the connection matching the handle.
 */
804 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
806 struct hci_rp_read_clock *rp = (void *) skb->data;
807 struct hci_cp_read_clock *cp;
808 struct hci_conn *conn;
810 BT_DBG("%s", hdev->name);
/* Guard against short responses before touching rp fields. */
812 if (skb->len < sizeof(*rp))
820 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
824 if (cp->which == 0x00) {
825 hdev->clock = le32_to_cpu(rp->clock);
829 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
831 conn->clock = le32_to_cpu(rp->clock);
832 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
836 hci_dev_unlock(hdev);
/* CC handler for HCI_Read_Local_AMP_Info: caches all AMP controller
 * capabilities and forwards the result to A2MP.
 */
839 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
842 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
844 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
849 hdev->amp_status = rp->amp_status;
850 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
851 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
852 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
853 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
854 hdev->amp_type = rp->amp_type;
855 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
856 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
857 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
858 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
861 a2mp_send_getinfo_rsp(hdev);
/* CC handler for HCI_Read_Local_AMP_ASSOC: reassembles the AMP assoc data
 * from fragments; requests further fragments while rem_len exceeds the
 * fragment just received, and sends the A2MP responses once complete.
 */
864 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
867 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
868 struct amp_assoc *assoc = &hdev->loc_assoc;
869 size_t rem_len, frag_len;
871 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
876 frag_len = skb->len - sizeof(*rp);
877 rem_len = __le16_to_cpu(rp->rem_len);
879 if (rem_len > frag_len) {
880 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
882 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
883 assoc->offset += frag_len;
885 /* Read other fragments */
886 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
891 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
892 assoc->len = assoc->offset + rem_len;
896 /* Send A2MP Rsp when all fragments are received */
897 a2mp_send_getampassoc_rsp(hdev, rp->status);
898 a2mp_send_create_phy_link_req(hdev, rp->status);
/* CC handler for HCI_Read_Inquiry_Response_TX_Power: caches the level. */
901 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
904 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
911 hdev->inq_tx_power = rp->tx_power;
/* CC handler for HCI_PIN_Code_Request_Reply: notifies mgmt and records the
 * PIN length on the ACL connection to the replied peer.
 */
914 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
916 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
917 struct hci_cp_pin_code_reply *cp;
918 struct hci_conn *conn;
920 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
924 if (test_bit(HCI_MGMT, &hdev->dev_flags))
925 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
930 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
934 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
936 conn->pin_length = cp->pin_len;
939 hci_dev_unlock(hdev);
/* CC handler for HCI_PIN_Code_Request_Negative_Reply: mgmt notification
 * only.
 */
942 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
944 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
946 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
950 if (test_bit(HCI_MGMT, &hdev->dev_flags))
951 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
954 hci_dev_unlock(hdev);
/* CC handler for HCI_LE_Read_Buffer_Size: caches LE MTU and packet count
 * and initializes the LE credit counter.
 */
957 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
960 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
962 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
967 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
968 hdev->le_pkts = rp->le_max_pkt;
970 hdev->le_cnt = hdev->le_pkts;
972 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
/* CC handler for HCI_LE_Read_Local_Supported_Features: caches the 8-byte
 * LE feature mask.
 */
975 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
978 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
980 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
985 memcpy(hdev->le_features, rp->features, 8);
/* CC handler for HCI_LE_Read_Advertising_Channel_TX_Power: caches it. */
988 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
991 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
993 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
998 hdev->adv_tx_power = rp->tx_power;
/* CC handler for HCI_User_Confirmation_Request_Reply: forwards completion
 * status to mgmt for the BR/EDR (ACL_LINK) address.
 */
1001 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1003 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1005 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1009 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1010 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1013 hci_dev_unlock(hdev);
/* CC handler for HCI_User_Confirmation_Request_Negative_Reply: forwards
 * completion status to mgmt.
 */
1016 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1017 struct sk_buff *skb)
1019 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1021 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1025 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1026 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1027 ACL_LINK, 0, rp->status);
1029 hci_dev_unlock(hdev);
/* CC handler for HCI_User_Passkey_Request_Reply: forwards completion to
 * mgmt. (Note: reuses struct hci_rp_user_confirm_reply — same layout.)
 */
1032 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1034 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1036 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1040 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1041 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1044 hci_dev_unlock(hdev);
/* CC handler for HCI_User_Passkey_Request_Negative_Reply: forwards
 * completion to mgmt.
 */
1047 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1048 struct sk_buff *skb)
1050 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1052 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1056 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1057 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1058 ACL_LINK, 0, rp->status);
1060 hci_dev_unlock(hdev);
/* CC handler for HCI_Read_Local_OOB_Data (legacy SSP): forwards the P-192
 * hash/randomizer to mgmt; P-256 fields are passed as NULL.
 */
1063 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1064 struct sk_buff *skb)
1066 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1068 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1071 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1073 hci_dev_unlock(hdev);
/* CC handler for HCI_LE_Set_Random_Address: mirrors the address just
 * programmed into hdev->random_addr.
 */
1091 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1093 __u8 status = *((__u8 *) skb->data);
1096 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1101 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1107 bacpy(&hdev->random_addr, sent);
1109 hci_dev_unlock(hdev);
/* CC handler for HCI_LE_Set_Advertise_Enable: toggles the HCI_LE_ADV flag;
 * when advertising comes up while a peripheral-role connection attempt is
 * outstanding, arms the LE connection timeout to bound the wait.
 */
1112 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1114 __u8 *sent, status = *((__u8 *) skb->data);
1116 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1121 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1127 /* If we're doing connection initiation as peripheral. Set a
1128 * timeout in case something goes wrong.
1131 struct hci_conn *conn;
1133 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1135 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1137 queue_delayed_work(hdev->workqueue,
1138 &conn->le_conn_timeout,
1139 conn->conn_timeout);
1141 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1144 hci_dev_unlock(hdev);
/* CC handler for HCI_LE_Set_Scan_Parameters: records the scan type
 * (active/passive) just configured.
 */
1147 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1149 struct hci_cp_le_set_scan_param *cp;
1150 __u8 status = *((__u8 *) skb->data);
1152 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1157 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1163 hdev->le_scan_type = cp->type;
1165 hci_dev_unlock(hdev);
/* Returns non-zero when a deferred advertising report is buffered in the
 * discovery state (last_adv_addr differs from BDADDR_ANY).
 */
1168 static bool has_pending_adv_report(struct hci_dev *hdev)
1170 struct discovery_state *d = &hdev->discovery;
1172 return bacmp(&d->last_adv_addr, BDADDR_ANY);
/* Drops any buffered advertising report by resetting the address marker
 * and data length.
 */
1175 static void clear_pending_adv_report(struct hci_dev *hdev)
1177 struct discovery_state *d = &hdev->discovery;
1179 bacpy(&d->last_adv_addr, BDADDR_ANY);
1180 d->last_adv_data_len = 0;
/* Buffers one advertising report (address, type, RSSI, flags, data) in
 * the discovery state so it can be merged with a following scan response.
 * Compiled out under CONFIG_TIZEN_WIP.
 */
1183 #ifndef CONFIG_TIZEN_WIP
1184 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1185 u8 bdaddr_type, s8 rssi, u32 flags,
1188 struct discovery_state *d = &hdev->discovery;
1190 bacpy(&d->last_adv_addr, bdaddr);
1191 d->last_adv_addr_type = bdaddr_type;
1192 d->last_adv_rssi = rssi;
1193 d->last_adv_flags = flags;
1194 memcpy(d->last_adv_data, data, len);
1195 d->last_adv_data_len = len;
/* CC handler for HCI_LE_Set_Scan_Enable: keeps the HCI_LE_SCAN flag in
 * sync with the command just completed. On disable it also flushes any
 * buffered advertising report to mgmt, cancels the scan-disable timer,
 * and either marks discovery stopped (scan interrupted by a connect
 * request) or re-enables advertising that active scanning had suppressed.
 */
1199 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1200 struct sk_buff *skb)
1202 struct hci_cp_le_set_scan_enable *cp;
1203 __u8 status = *((__u8 *) skb->data);
1205 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1210 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1216 switch (cp->enable) {
1217 case LE_SCAN_ENABLE:
1218 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1219 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1220 clear_pending_adv_report(hdev);
1223 case LE_SCAN_DISABLE:
1224 /* We do this here instead of when setting DISCOVERY_STOPPED
1225 * since the latter would potentially require waiting for
1226 * inquiry to stop too.
1228 if (has_pending_adv_report(hdev)) {
1229 struct discovery_state *d = &hdev->discovery;
1231 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1232 d->last_adv_addr_type, NULL,
1233 d->last_adv_rssi, d->last_adv_flags,
1235 d->last_adv_data_len, NULL, 0);
1238 /* Cancel this timer so that we don't try to disable scanning
1239 * when it's already disabled.
1241 cancel_delayed_work(&hdev->le_scan_disable);
1243 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1245 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1246 * interrupted scanning due to a connect request. Mark
1247 * therefore discovery as stopped. If this was not
1248 * because of a connect request advertising might have
1249 * been disabled because of active scanning, so
1250 * re-enable it again if necessary.
1252 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1254 #ifndef CONFIG_TIZEN_WIP /* The below line is kernel bug. */
1255 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Tizen variant uses an LE-specific discovery state machine. */
1257 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
1259 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1260 hdev->discovery.state == DISCOVERY_FINDING)
1261 mgmt_reenable_advertising(hdev);
/* Any other enable value is reserved by the spec — log it. */
1266 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1270 hci_dev_unlock(hdev);
/* CC handler for HCI_LE_Read_White_List_Size: caches the capacity. */
1273 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1274 struct sk_buff *skb)
1276 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1278 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1283 hdev->le_white_list_size = rp->size;
/* CC handler for HCI_LE_Clear_White_List: drops the host-side mirror of
 * the controller's white list.
 */
1286 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1287 struct sk_buff *skb)
1289 __u8 status = *((__u8 *) skb->data);
1291 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1296 hci_bdaddr_list_clear(&hdev->le_white_list);
/* CC handler for HCI_LE_Add_Device_To_White_List: mirrors the addition in
 * the host-side list, using the address from the sent command.
 */
1299 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1300 struct sk_buff *skb)
1302 struct hci_cp_le_add_to_white_list *sent;
1303 __u8 status = *((__u8 *) skb->data);
1305 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1310 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1314 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
/* CC handler for HCI_LE_Remove_Device_From_White_List: mirrors the
 * removal in the host-side list.
 */
1318 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1319 struct sk_buff *skb)
1321 struct hci_cp_le_del_from_white_list *sent;
1322 __u8 status = *((__u8 *) skb->data);
1324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1329 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1333 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
/* CC handler for HCI_LE_Read_Supported_States: caches the 8-byte LE state
 * combination bitmap.
 */
1337 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1338 struct sk_buff *skb)
1340 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1342 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1347 memcpy(hdev->le_states, rp->le_states, 8);
/* CC handler for HCI_LE_Read_Suggested_Default_Data_Length: caches the
 * suggested TX octets/time defaults.
 */
1350 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1351 struct sk_buff *skb)
1353 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1355 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1360 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1361 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
/* CC handler for HCI_LE_Write_Suggested_Default_Data_Length: mirrors the
 * values just written; the Tizen build additionally reports completion to
 * mgmt (the #ifndef/#ifdef pairs select upstream vs Tizen paths; their
 * matching lines are elided in this extract).
 */
1364 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1365 struct sk_buff *skb)
1367 struct hci_cp_le_write_def_data_len *sent;
1368 __u8 status = *((__u8 *) skb->data);
1370 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1373 #ifndef CONFIG_TIZEN_WIP
1379 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1381 #ifndef CONFIG_TIZEN_WIP
1387 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1388 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1390 #ifdef CONFIG_TIZEN_WIP
1392 mgmt_le_write_host_suggested_data_length_complete(hdev, status);
/* CC handler for HCI_LE_Read_Maximum_Data_Length: caches max TX/RX
 * octets and times; the Tizen build also forwards the result to mgmt.
 */
1396 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1397 struct sk_buff *skb)
1399 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1401 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1403 #ifndef CONFIG_TIZEN_WIP
1410 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1411 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1412 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1413 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1415 #ifdef CONFIG_TIZEN_WIP
1416 mgmt_le_read_maximum_data_length_complete(hdev, rp->status);
1417 hci_dev_unlock(hdev);
/* CC handler for HCI_Write_LE_Host_Supported: keeps the host feature bits
 * (LE and simultaneous LE/BR-EDR) and the HCI_LE_ENABLED flag in sync with
 * the values just written; disabling LE also clears HCI_ADVERTISING.
 */
1421 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1422 struct sk_buff *skb)
1424 struct hci_cp_write_le_host_supported *sent;
1425 __u8 status = *((__u8 *) skb->data);
1427 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1432 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1439 hdev->features[1][0] |= LMP_HOST_LE;
1440 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1442 hdev->features[1][0] &= ~LMP_HOST_LE;
1443 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1444 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1448 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1450 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1452 hci_dev_unlock(hdev);
1455 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1457 struct hci_cp_le_set_adv_param *cp;
1458 u8 status = *((u8 *) skb->data);
1460 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1465 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1470 hdev->adv_addr_type = cp->own_address_type;
1471 hci_dev_unlock(hdev);
1474 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1475 struct sk_buff *skb)
1477 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1479 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1480 hdev->name, rp->status, rp->phy_handle);
1485 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1487 #ifdef CONFIG_TIZEN_WIP
1488 /* BEGIN TIZEN_Bluetooth :: Handle RSSI monitoring */
/* Tizen vendor extension: completion of the "enable RSSI monitoring"
 * vendor command — forward the raw response to the mgmt layer.
 */
1489 static void hci_cc_enable_rssi(struct hci_dev *hdev,
1490 struct sk_buff *skb)
1492 struct hci_cc_rsp_enable_rssi *rp = (void *) skb->data;
1494 BT_DBG("hci_cc_enable_rssi - %s status 0x%2.2x Event_LE_ext_Opcode 0x%2.2x",
1495 hdev->name, rp->status, rp->le_ext_opcode);
1497 mgmt_enable_rssi_cc(hdev, rp, rp->status);
/* Tizen vendor extension: completion of the "get raw RSSI" vendor command —
 * pass connection handle + RSSI (dBm) through to the mgmt layer.
 */
1500 static void hci_cc_get_raw_rssi(struct hci_dev *hdev,
1501 struct sk_buff *skb)
1503 struct hci_cc_rp_get_raw_rssi *rp = (void *) skb->data;
1505 BT_DBG("hci_cc_get_raw_rssi- %s Get Raw Rssi Response[%2.2x %4.4x %2.2X]",
1506 hdev->name, rp->status, rp->conn_handle, rp->rssi_dbm);
1508 mgmt_raw_rssi_response(hdev, rp, rp->status);
1510 /* END TIZEN_Bluetooth :: Handle RSSI monitoring */
/* Command Complete for HCI_OP_READ_RSSI: cache the reported RSSI on the
 * matching hci_conn (looked up by connection handle).
 * NOTE(review): the status / conn-NULL guards are elided from this view.
 */
1512 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1514 struct hci_rp_read_rssi *rp = (void *) skb->data;
1515 struct hci_conn *conn;
1517 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1524 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1526 conn->rssi = rp->rssi;
1528 hci_dev_unlock(hdev);
/* Command Complete for HCI_OP_READ_TX_POWER: store the reported power on
 * the connection; the sent command's 'type' field selects whether this was
 * the current or the maximum TX power read.
 */
1531 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1533 struct hci_cp_read_tx_power *sent;
1534 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1535 struct hci_conn *conn;
1537 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1542 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1548 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
/* sent->type: 0x00 = current TX power, 0x01 = maximum TX power
 * (case labels elided from this view). */
1552 switch (sent->type) {
1554 conn->tx_power = rp->tx_power;
1557 conn->max_tx_power = rp->tx_power;
1562 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_INQUIRY: on success mark HCI_INQUIRY active;
 * on failure re-check any pending connection attempts that were deferred
 * while inquiry was outstanding.
 */
1565 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1567 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1570 hci_conn_check_pending(hdev);
1574 set_bit(HCI_INQUIRY, &hdev->flags);
/* Command Status for HCI_OP_CREATE_CONN (BR/EDR ACL).
 * On failure: close the pending conn and notify the protocol layer — except
 * that a page-timeout (0x0c) on an early attempt demotes to BT_CONNECT2 for
 * a retry, and the Tizen branch additionally ignores 0x0b ("ACL connection
 * already exists").  On success with no existing conn, allocate one.
 * NOTE(review): several guard/brace lines are elided from this view.
 */
1577 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1579 struct hci_cp_create_conn *cp;
1580 struct hci_conn *conn;
1582 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1584 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1590 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1592 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1595 #ifdef CONFIG_TIZEN_WIP
1597 BT_ERR("ACL Connection Already Exists on cs_create_con");
/* Tizen: treat status 0x0b (connection already exists) as non-fatal. */
1599 if (conn && conn->state == BT_CONNECT && status != 0x0b) {
1601 if (conn && conn->state == BT_CONNECT) {
/* Page timeout (0x0c) with attempts left -> retry; otherwise close. */
1603 if (status != 0x0c || conn->attempt > 2) {
1604 conn->state = BT_CLOSED;
1605 hci_proto_connect_cfm(conn, status);
1608 conn->state = BT_CONNECT2;
1612 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1615 BT_ERR("No memory for new connection");
1619 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_ADD_SCO: on failure, find the SCO conn hanging
 * off the ACL with the given handle, close it and notify the proto layer.
 * NOTE(review): the status==0 early return and the acl/sco NULL checks are
 * elided from this view.
 */
1622 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1624 struct hci_cp_add_sco *cp;
1625 struct hci_conn *acl, *sco;
1628 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1633 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1637 handle = __le16_to_cpu(cp->handle);
1639 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1643 acl = hci_conn_hash_lookup_handle(hdev, handle);
1647 sco->state = BT_CLOSED;
1649 hci_proto_connect_cfm(sco, status);
1654 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_AUTH_REQUESTED: on failure during connection
 * setup (BT_CONFIG), report the error to the protocol layer and drop the
 * reference taken for the authentication attempt.
 */
1657 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1659 struct hci_cp_auth_requested *cp;
1660 struct hci_conn *conn;
1662 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1667 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1673 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1675 if (conn->state == BT_CONFIG) {
1676 hci_proto_connect_cfm(conn, status);
1677 hci_conn_drop(conn);
1681 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SET_CONN_ENCRYPT: mirror of the auth-requested
 * handler — on failure while still configuring, confirm the error and drop
 * the connection reference.
 */
1684 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1686 struct hci_cp_set_conn_encrypt *cp;
1687 struct hci_conn *conn;
1689 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1694 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1700 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1702 if (conn->state == BT_CONFIG) {
1703 hci_proto_connect_cfm(conn, status);
1704 hci_conn_drop(conn);
1708 hci_dev_unlock(hdev);
/* Decide whether an outgoing connection still needs authentication before
 * it can be reported connected: only for outgoing conns in BT_CONFIG, never
 * for SDP-only security, and for legacy (non-SSP) links only when the
 * security level or MITM requirement demands it.
 * Returns non-zero when authentication should be requested (the return
 * statements themselves are elided from this view).
 */
1711 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1712 struct hci_conn *conn)
1714 if (conn->state != BT_CONFIG || !conn->out)
1717 if (conn->pending_sec_level == BT_SECURITY_SDP)
1720 /* Only request authentication for SSP connections or non-SSP
1721 * devices with sec_level MEDIUM or HIGH or if MITM protection
1724 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1725 conn->pending_sec_level != BT_SECURITY_FIPS &&
1726 conn->pending_sec_level != BT_SECURITY_HIGH &&
1727 conn->pending_sec_level != BT_SECURITY_MEDIUM)
/* Issue an HCI Remote Name Request for one inquiry-cache entry, copying the
 * page-scan parameters discovered during inquiry so the controller can page
 * the device efficiently.  Returns the result of hci_send_cmd().
 */
1733 static int hci_resolve_name(struct hci_dev *hdev,
1734 struct inquiry_entry *e)
1736 struct hci_cp_remote_name_req cp;
1738 memset(&cp, 0, sizeof(cp));
1740 bacpy(&cp.bdaddr, &e->data.bdaddr);
1741 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1742 cp.pscan_mode = e->data.pscan_mode;
1743 cp.clock_offset = e->data.clock_offset;
1745 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
/* Kick off name resolution for the next discovered device that still needs
 * its name.  Returns true when a name request was successfully queued
 * (entry moved to NAME_PENDING), false when the resolve list is exhausted.
 * NOTE(review): the return statements are elided from this view.
 */
1748 static bool hci_resolve_next_name(struct hci_dev *hdev)
1750 struct discovery_state *discov = &hdev->discovery;
1751 struct inquiry_entry *e;
1753 if (list_empty(&discov->resolve))
1756 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1760 if (hci_resolve_name(hdev, e) == 0) {
1761 e->name_state = NAME_PENDING;
/* Handle a (possibly failed) remote-name result during discovery: update
 * mgmt "connected"/name state for any matching conn, then advance the
 * discovery state machine — mark the cache entry NAME_KNOWN/NAME_NOT_KNOWN,
 * resolve the next pending name, and stop discovery when done.
 * name may be NULL (name request failed); name_len is its length.
 */
1768 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1769 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1771 struct discovery_state *discov = &hdev->discovery;
1772 struct inquiry_entry *e;
1774 /* BEGIN TIZEN_Bluetooth :: name update changes */
1775 #ifdef CONFIG_TIZEN_WIP
1776 /* Update the mgmt connected state if necessary. Be careful with
1777 * conn objects that exist but are not (yet) connected however.
1778 * Only those in BT_CONFIG or BT_CONNECTED states can be
1779 * considered connected.
1781 if (conn && (conn->state == BT_CONFIG || conn->state == BT_CONNECTED)) {
1782 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1783 mgmt_device_connected(hdev, conn, 0, name, name_len);
/* Tizen: also push a plain name update when already mgmt-connected. */
1785 mgmt_device_name_update(hdev, bdaddr, name, name_len);
/* Upstream (non-Tizen) variant of the mgmt-connected notification. */
1789 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1790 mgmt_device_connected(hdev, conn, 0, name, name_len);
1791 /* END TIZEN_Bluetooth :: name update changes */
1793 if (discov->state == DISCOVERY_STOPPED)
1796 if (discov->state == DISCOVERY_STOPPING)
1797 goto discov_complete;
1799 if (discov->state != DISCOVERY_RESOLVING)
1802 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1803 /* If the device was not found in a list of found devices names of which
1804 * are pending. there is no need to continue resolving a next name as it
1805 * will be done upon receiving another Remote Name Request Complete
1812 e->name_state = NAME_KNOWN;
1813 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1814 e->data.rssi, name, name_len);
1816 e->name_state = NAME_NOT_KNOWN;
1819 if (hci_resolve_next_name(hdev))
1823 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Command Status for HCI_OP_REMOTE_NAME_REQ.  On success we simply wait for
 * the Remote Name Request Complete event.  On failure: feed a NULL name to
 * the discovery machinery (if mgmt is active) and, for an outgoing conn
 * that still needs it, fall back to requesting authentication directly.
 */
1826 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1828 struct hci_cp_remote_name_req *cp;
1829 struct hci_conn *conn;
1831 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1833 /* If successful wait for the name req complete event before
1834 * checking for the need to do authentication */
1838 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1844 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1846 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1847 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1852 if (!hci_outgoing_auth_needed(hdev, conn))
1855 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1856 struct hci_cp_auth_requested auth_cp;
1858 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1860 auth_cp.handle = __cpu_to_le16(conn->handle);
1861 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1862 sizeof(auth_cp), &auth_cp);
1866 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_READ_REMOTE_FEATURES: on failure while the
 * connection is still in BT_CONFIG, report the error upward and drop the
 * connection reference.
 */
1869 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1871 struct hci_cp_read_remote_features *cp;
1872 struct hci_conn *conn;
1874 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1879 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1885 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1887 if (conn->state == BT_CONFIG) {
1888 hci_proto_connect_cfm(conn, status);
1889 hci_conn_drop(conn);
1893 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_READ_REMOTE_EXT_FEATURES: identical failure
 * handling to hci_cs_read_remote_features — confirm the error and drop the
 * reference when still configuring.
 */
1896 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1898 struct hci_cp_read_remote_ext_features *cp;
1899 struct hci_conn *conn;
1901 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1906 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1912 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1914 if (conn->state == BT_CONFIG) {
1915 hci_proto_connect_cfm(conn, status);
1916 hci_conn_drop(conn);
1920 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SETUP_SYNC_CONN (eSCO): on failure, locate the
 * SCO link attached to the ACL with this handle, close it and notify the
 * protocol layer.  Same pattern as hci_cs_add_sco.
 */
1923 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1925 struct hci_cp_setup_sync_conn *cp;
1926 struct hci_conn *acl, *sco;
1929 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1934 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1938 handle = __le16_to_cpu(cp->handle);
1940 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1944 acl = hci_conn_hash_lookup_handle(hdev, handle);
1948 sco->state = BT_CLOSED;
1950 hci_proto_connect_cfm(sco, status);
1955 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SNIFF_MODE: on failure, clear the pending
 * mode-change flag and run any deferred SCO setup for the connection.
 */
1958 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1960 struct hci_cp_sniff_mode *cp;
1961 struct hci_conn *conn;
1963 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1968 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1974 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1976 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1978 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1979 hci_sco_setup(conn, status);
1982 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_EXIT_SNIFF_MODE: same cleanup as the sniff-mode
 * handler — clear the pending mode change and run deferred SCO setup.
 */
1985 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1987 struct hci_cp_exit_sniff_mode *cp;
1988 struct hci_conn *conn;
1990 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1995 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
2001 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2003 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
2005 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2006 hci_sco_setup(conn, status);
2009 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_DISCONNECT: on failure, tell the mgmt layer the
 * disconnect attempt failed so userspace gets a proper error.
 */
2012 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
2014 struct hci_cp_disconnect *cp;
2015 struct hci_conn *conn;
2020 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
2026 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2028 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2029 conn->dst_type, status);
2031 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_CREATE_PHY_LINK (AMP).  On failure the phy-link
 * hcon is looked up for cleanup; on success, continue by writing the remote
 * AMP assoc for the new physical link.
 * NOTE(review): the status branches and cleanup lines are elided from view.
 */
2034 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
2036 struct hci_cp_create_phy_link *cp;
2038 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2040 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
2047 struct hci_conn *hcon;
2049 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
2053 amp_write_remote_assoc(hdev, cp->phy_handle);
2056 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_ACCEPT_PHY_LINK (AMP): on success, proceed to
 * write the remote AMP assoc for the accepted physical link.
 */
2059 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
2061 struct hci_cp_accept_phy_link *cp;
2063 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2068 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
2072 amp_write_remote_assoc(hdev, cp->phy_handle);
/* Command Status for HCI_OP_LE_CREATE_CONN.  Failures are handled elsewhere
 * (hci_le_conn_failed via the request callbacks); here, on success, record
 * the initiator/responder addresses needed later by SMP, and arm a
 * connection timeout when not connecting via the white list.
 */
2075 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
2077 struct hci_cp_le_create_conn *cp;
2078 struct hci_conn *conn;
2080 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2082 /* All connection failure handling is taken care of by the
2083 * hci_le_conn_failed function which is triggered by the HCI
2084 * request completion callbacks used for connecting.
2089 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
2095 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
2099 /* Store the initiator and responder address information which
2100 * is needed for SMP. These values will not change during the
2101 * lifetime of the connection.
2103 conn->init_addr_type = cp->own_address_type;
2104 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
2105 bacpy(&conn->init_addr, &hdev->random_addr);
2107 bacpy(&conn->init_addr, &hdev->bdaddr);
2109 conn->resp_addr_type = cp->peer_addr_type;
2110 bacpy(&conn->resp_addr, &cp->peer_addr);
2112 /* We don't want the connection attempt to stick around
2113 * indefinitely since LE doesn't have a page timeout concept
2114 * like BR/EDR. Set a timer for any connection that doesn't use
2115 * the white list for connecting.
2117 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
2118 queue_delayed_work(conn->hdev->workqueue,
2119 &conn->le_conn_timeout,
2120 conn->conn_timeout);
2123 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_LE_START_ENC: if starting encryption failed on
 * a live connection, tear it down with an authentication-failure disconnect
 * and drop the reference.
 */
2126 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2128 struct hci_cp_le_start_enc *cp;
2129 struct hci_conn *conn;
2131 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2138 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2142 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2146 if (conn->state != BT_CONNECTED)
2149 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2150 hci_conn_drop(conn);
2153 hci_dev_unlock(hdev);
/* Command Status for HCI_OP_SWITCH_ROLE: on failure, clear the pending
 * role-switch flag so future switch attempts aren't blocked.
 */
2156 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2158 struct hci_cp_switch_role *cp;
2159 struct hci_conn *conn;
2161 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2166 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2172 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2174 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2176 hci_dev_unlock(hdev);
/* Inquiry Complete event: clear HCI_INQUIRY (with the barrier required
 * before wake_up_bit), wake any waiters, and drive the mgmt discovery state
 * machine — start name resolution if entries still need names, otherwise
 * mark discovery stopped.
 */
2179 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2181 __u8 status = *((__u8 *) skb->data);
2182 struct discovery_state *discov = &hdev->discovery;
2183 struct inquiry_entry *e;
2185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2187 hci_conn_check_pending(hdev);
2189 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2192 #ifdef CONFIG_TIZEN_WIP
/* Tizen builds on an older kernel use the pre-rename barrier helper. */
2193 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
2195 /* In latest kernel, smp_mb__after_clear_bit is replaced with
2196 * smp_mb__after_atomic. So, if kernel is migrated to latest,
2197 * then below code should be enabled
2199 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2201 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2203 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2208 if (discov->state != DISCOVERY_FINDING)
2211 if (list_empty(&discov->resolve)) {
2212 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2216 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2217 if (e && hci_resolve_name(hdev, e) == 0) {
2218 e->name_state = NAME_PENDING;
2219 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2221 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2225 hci_dev_unlock(hdev);
/* Inquiry Result event (no RSSI variant): for each response, update the
 * inquiry cache and report the device to mgmt with RSSI marked invalid.
 * Results are ignored during periodic inquiry.
 * NOTE(review): a bounds check on num_rsp vs. skb length is not visible in
 * this elided listing — confirm against the full source.
 */
2228 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2230 struct inquiry_data data;
2231 struct inquiry_info *info = (void *) (skb->data + 1);
2232 int num_rsp = *((__u8 *) skb->data);
2234 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2239 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2244 for (; num_rsp; num_rsp--, info++) {
2247 bacpy(&data.bdaddr, &info->bdaddr);
2248 data.pscan_rep_mode = info->pscan_rep_mode;
2249 data.pscan_period_mode = info->pscan_period_mode;
2250 data.pscan_mode = info->pscan_mode;
2251 memcpy(data.dev_class, info->dev_class, 3);
2252 data.clock_offset = info->clock_offset;
2253 data.rssi = HCI_RSSI_INVALID;
2254 data.ssp_mode = 0x00;
2256 flags = hci_inquiry_cache_update(hdev, &data, false);
2258 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2259 info->dev_class, HCI_RSSI_INVALID,
2260 flags, NULL, 0, NULL, 0);
2263 hci_dev_unlock(hdev);
/* Connection Complete event (BR/EDR ACL and SCO).
 * On success: record the handle, move the conn to BT_CONFIG (ACL, then read
 * remote features) or BT_CONNECTED (SCO), create sysfs/debugfs entries, and
 * propagate the adapter's auth/encrypt policy.  Tizen additionally bumps the
 * link supervision timeout when we are master, and treats status 0x0b
 * ("connection already exists") as ignorable rather than fatal.
 * On failure: close the conn and notify mgmt/protocol layers.
 */
2266 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2268 struct hci_ev_conn_complete *ev = (void *) skb->data;
2269 struct hci_conn *conn;
2271 BT_DBG("%s", hdev->name);
2275 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
/* Some controllers report eSCO setup with link_type SCO; retry the
 * lookup as ESCO and coerce the type. */
2277 if (ev->link_type != SCO_LINK)
2280 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2284 conn->type = SCO_LINK;
2288 conn->handle = __le16_to_cpu(ev->handle);
2290 if (conn->type == ACL_LINK) {
2291 conn->state = BT_CONFIG;
2292 hci_conn_hold(conn);
/* Incoming legacy (non-SSP) conns without a stored link key get the
 * longer pairing timeout. */
2294 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2295 !hci_find_link_key(hdev, &ev->bdaddr))
2296 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2298 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2300 conn->state = BT_CONNECTED;
2302 hci_debugfs_create_conn(conn);
2303 hci_conn_add_sysfs(conn);
2305 if (test_bit(HCI_AUTH, &hdev->flags))
2306 set_bit(HCI_CONN_AUTH, &conn->flags);
2308 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2309 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2311 /* Get remote features */
2312 if (conn->type == ACL_LINK) {
2313 struct hci_cp_read_remote_features cp;
2314 cp.handle = ev->handle;
2315 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2318 hci_update_page_scan(hdev);
2321 /* Set packet type for incoming connection */
2322 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2323 struct hci_cp_change_conn_ptype cp;
2324 cp.handle = ev->handle;
2325 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2326 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2330 #ifdef CONFIG_TIZEN_WIP
2331 if ((get_link_mode(conn)) & HCI_LM_MASTER)
2332 hci_conn_change_supervision_timeout(conn,
2333 LINK_SUPERVISION_TIMEOUT);
2334 } else if (ev->status == 0x0b) {
2335 BT_ERR("ACL connection already exists, this evt is ignored");
2338 conn->state = BT_CLOSED;
2339 if (conn->type == ACL_LINK)
2340 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2341 conn->dst_type, ev->status);
2344 if (conn->type == ACL_LINK)
2345 hci_sco_setup(conn, ev->status);
2347 #ifdef CONFIG_TIZEN_WIP
/* Tizen: only confirm genuine failures (0x0b is ignored above). */
2348 if (ev->status && ev->status != 0x0b) {
2352 hci_proto_connect_cfm(conn, ev->status);
2354 } else if (ev->link_type != ACL_LINK)
2355 hci_proto_connect_cfm(conn, ev->status);
2358 hci_dev_unlock(hdev);
2360 hci_conn_check_pending(hdev);
/* Reject an incoming connection request from bdaddr with reason
 * "unacceptable BD_ADDR" (HCI_ERROR_REJ_BAD_ADDR).
 */
2363 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2365 struct hci_cp_reject_conn_req cp;
2367 bacpy(&cp.bdaddr, bdaddr);
2368 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2369 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
/* Connection Request event: decide whether to accept, reject, or defer an
 * incoming BR/EDR connection.  Rejection paths: L2CAP layer veto, blacklist
 * hit, or (with mgmt) not connectable and not whitelisted.  Tizen also
 * rejects a second SCO/eSCO request while one synchronous link exists
 * (limited resources).  Otherwise a conn is looked up or allocated and the
 * request is accepted immediately or deferred to userspace.
 */
2372 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2374 struct hci_ev_conn_request *ev = (void *) skb->data;
2375 int mask = hdev->link_mode;
2376 struct inquiry_entry *ie;
2377 struct hci_conn *conn;
2380 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2383 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2386 if (!(mask & HCI_LM_ACCEPT)) {
2387 hci_reject_conn(hdev, &ev->bdaddr);
2391 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2393 hci_reject_conn(hdev, &ev->bdaddr);
2397 /* Require HCI_CONNECTABLE or a whitelist entry to accept the
2398 * connection. These features are only touched through mgmt so
2399 * only do the checks if HCI_MGMT is set.
2401 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2402 !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
2403 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2405 hci_reject_conn(hdev, &ev->bdaddr);
2409 /* Connection accepted */
/* Refresh the cached device class from the request, if we know the peer. */
2413 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2415 memcpy(ie->data.dev_class, ev->dev_class, 3);
2417 #ifdef CONFIG_TIZEN_WIP
/* Tizen: allow only one synchronous (SCO/eSCO) link at a time. */
2418 if ((ev->link_type == SCO_LINK || ev->link_type == ESCO_LINK) &&
2419 hci_conn_hash_lookup_sco(hdev)) {
2420 struct hci_cp_reject_conn_req cp;
2422 bacpy(&cp.bdaddr, &ev->bdaddr);
2423 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
2424 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ,
2426 hci_dev_unlock(hdev);
2430 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2433 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2436 BT_ERR("No memory for new connection");
2437 hci_dev_unlock(hdev);
2442 memcpy(conn->dev_class, ev->dev_class, 3);
2444 hci_dev_unlock(hdev);
2446 if (ev->link_type == ACL_LINK ||
2447 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2448 struct hci_cp_accept_conn_req cp;
2449 conn->state = BT_CONNECT;
2451 bacpy(&cp.bdaddr, &ev->bdaddr);
2453 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2454 cp.role = 0x00; /* Become master */
2456 cp.role = 0x01; /* Remain slave */
2458 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2459 } else if (!(flags & HCI_PROTO_DEFER)) {
2460 struct hci_cp_accept_sync_conn_req cp;
2461 conn->state = BT_CONNECT;
2463 bacpy(&cp.bdaddr, &ev->bdaddr);
2464 cp.pkt_type = cpu_to_le16(conn->pkt_type);
/* 8000 B/s = 64 kbit/s CVSD voice bandwidth for the sync link. */
2466 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2467 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2468 cp.max_latency = cpu_to_le16(0xffff);
2469 cp.content_format = cpu_to_le16(hdev->voice_setting);
2470 cp.retrans_effort = 0xff;
2472 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2475 conn->state = BT_CONNECT2;
2476 hci_proto_connect_cfm(conn, 0);
/* Map an HCI disconnect reason code to the coarser mgmt-layer
 * MGMT_DEV_DISCONN_* reason reported to userspace.
 */
2480 static u8 hci_to_mgmt_reason(u8 err)
2483 case HCI_ERROR_CONNECTION_TIMEOUT:
2484 return MGMT_DEV_DISCONN_TIMEOUT;
2485 case HCI_ERROR_REMOTE_USER_TERM:
2486 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2487 case HCI_ERROR_REMOTE_POWER_OFF:
2488 return MGMT_DEV_DISCONN_REMOTE;
2489 case HCI_ERROR_LOCAL_HOST_TERM:
2490 return MGMT_DEV_DISCONN_LOCAL_HOST;
2492 return MGMT_DEV_DISCONN_UNKNOWN;
/* Disconnection Complete event: close the conn, notify mgmt (translating
 * the HCI reason), maintain link keys / page scan for ACL, re-arm
 * auto-connect params for LE, notify the protocol layer, and possibly
 * re-enable advertising.  Tizen also re-enables page scan when the last ACL
 * link goes away and scanning was off.
 */
2496 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2498 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2499 u8 reason = hci_to_mgmt_reason(ev->reason);
2500 struct hci_conn_params *params;
2501 struct hci_conn *conn;
2502 bool mgmt_connected;
2505 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2509 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
/* Non-zero status means the disconnect itself failed. */
2514 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2515 conn->dst_type, ev->status);
2519 conn->state = BT_CLOSED;
2521 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2522 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2523 reason, mgmt_connected);
2525 if (conn->type == ACL_LINK) {
2526 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2527 hci_remove_link_key(hdev, &conn->dst);
2529 hci_update_page_scan(hdev);
2532 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2534 switch (params->auto_connect) {
2535 case HCI_AUTO_CONN_LINK_LOSS:
2536 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2540 case HCI_AUTO_CONN_DIRECT:
2541 case HCI_AUTO_CONN_ALWAYS:
/* Move the params back onto the pending-connect list so background
 * scanning will reconnect the device. */
2542 list_del_init(&params->action);
2543 list_add(&params->action, &hdev->pend_le_conns);
2544 hci_update_background_scan(hdev);
2554 hci_proto_disconn_cfm(conn, ev->reason);
2557 /* Re-enable advertising if necessary, since it might
2558 * have been disabled by the connection. From the
2559 * HCI_LE_Set_Advertise_Enable command description in
2560 * the core specification (v4.0):
2561 * "The Controller shall continue advertising until the Host
2562 * issues an LE_Set_Advertise_Enable command with
2563 * Advertising_Enable set to 0x00 (Advertising is disabled)
2564 * or until a connection is created or until the Advertising
2565 * is timed out due to Directed Advertising."
2567 if (type == LE_LINK)
2568 mgmt_reenable_advertising(hdev);
2570 #ifdef CONFIG_TIZEN_WIP
/* Tizen: when the last ACL drops and neither inquiry scan nor page scan
 * is active, turn page scan back on so the device stays connectable. */
2571 if (type == ACL_LINK && !hci_conn_num(hdev, ACL_LINK)) {
2575 iscan = test_bit(HCI_ISCAN, &hdev->flags);
2576 pscan = test_bit(HCI_PSCAN, &hdev->flags);
2577 if (!iscan && !pscan) {
2578 u8 scan_enable = SCAN_PAGE;
2580 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
2581 sizeof(scan_enable), &scan_enable);
2587 hci_dev_unlock(hdev);
/* Authentication Complete event.  Tizen patch: on status 0x06 (PIN or key
 * missing) drop the stale link key and immediately re-request
 * authentication.  Otherwise: record success (set HCI_CONN_AUTH, promote
 * sec_level) or report failure to mgmt, then continue connection setup —
 * for SSP links in BT_CONFIG start encryption, and service any pending
 * encryption request.
 */
2590 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2592 struct hci_ev_auth_complete *ev = (void *) skb->data;
2593 struct hci_conn *conn;
2595 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2599 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2603 #ifdef CONFIG_TIZEN_WIP /* PIN or Key Missing patch */
2604 BT_DBG("remote_auth %x, remote_cap %x, auth_type %x, io_capability %x",
2605 conn->remote_auth, conn->remote_cap,
2606 conn->auth_type, conn->io_capability);
/* 0x06 = "PIN or Key Missing": the stored key no longer matches the
 * remote; purge it and retry authentication from scratch. */
2608 if (ev->status == 0x06) {
2609 struct hci_cp_auth_requested cp;
2610 BT_DBG("Pin or key missing");
2611 hci_remove_link_key(hdev, &conn->dst);
2612 cp.handle = cpu_to_le16(conn->handle);
2613 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp),
2619 if (!hci_conn_ssp_enabled(conn) &&
2620 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2621 BT_INFO("re-auth of legacy device is not possible.");
2623 set_bit(HCI_CONN_AUTH, &conn->flags);
2624 conn->sec_level = conn->pending_sec_level;
2627 mgmt_auth_failed(conn, ev->status);
2630 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2631 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2633 if (conn->state == BT_CONFIG) {
2634 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2635 struct hci_cp_set_conn_encrypt cp;
2636 cp.handle = ev->handle;
2638 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2641 conn->state = BT_CONNECTED;
2642 hci_proto_connect_cfm(conn, ev->status);
2643 hci_conn_drop(conn);
2646 hci_auth_cfm(conn, ev->status);
2648 hci_conn_hold(conn);
2649 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2650 hci_conn_drop(conn);
2653 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2655 struct hci_cp_set_conn_encrypt cp;
2656 cp.handle = ev->handle;
2658 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2661 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2662 hci_encrypt_cfm(conn, ev->status, 0x00);
2667 hci_dev_unlock(hdev);
/* Remote Name Request Complete event: feed the (bounded) name — or NULL on
 * failure — into the discovery name-resolution machinery, then, for an
 * outgoing conn that still needs it, request authentication.
 */
2670 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2672 struct hci_ev_remote_name *ev = (void *) skb->data;
2673 struct hci_conn *conn;
2675 BT_DBG("%s", hdev->name);
2677 hci_conn_check_pending(hdev);
2681 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2683 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2686 if (ev->status == 0)
2687 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2688 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2690 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2696 if (!hci_outgoing_auth_needed(hdev, conn))
2699 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2700 struct hci_cp_auth_requested cp;
2702 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2704 cp.handle = __cpu_to_le16(conn->handle);
2705 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2709 hci_dev_unlock(hdev);
/* Encryption Change event: update HCI_CONN_AUTH/ENCRYPT/FIPS/AES_CCM flags
 * from the new encryption state, expire the RPA on LE encryption failure,
 * disconnect on failure of a live link, enforce Secure Connections Only
 * policy while configuring, and finally confirm to the upper layers.
 */
2712 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2714 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2715 struct hci_conn *conn;
2717 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2721 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2727 /* Encryption implies authentication */
2728 set_bit(HCI_CONN_AUTH, &conn->flags);
2729 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2730 conn->sec_level = conn->pending_sec_level;
2732 /* Disable Secure connection implementation now */
2733 #ifdef CONFIG_TIZEN_WIP
2734 /* P-256 authentication key implies FIPS */
2735 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2736 set_bit(HCI_CONN_FIPS, &conn->flags);
/* encrypt == 0x02 means AES-CCM on BR/EDR; LE always uses AES-CCM. */
2738 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2739 conn->type == LE_LINK)
2740 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2743 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2744 /* Disable Secure connection implementation now */
2745 #ifdef CONFIG_TIZEN_WIP
2746 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2751 /* We should disregard the current RPA and generate a new one
2752 * whenever the encryption procedure fails.
2754 if (ev->status && conn->type == LE_LINK)
2755 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2757 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2759 if (ev->status && conn->state == BT_CONNECTED) {
2760 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2761 hci_conn_drop(conn);
2765 if (conn->state == BT_CONFIG) {
2767 conn->state = BT_CONNECTED;
2769 /* Disable Secure connection implementation now */
2770 #ifdef CONFIG_TIZEN_WIP
2771 /* In Secure Connections Only mode, do not allow any
2772 * connections that are not encrypted with AES-CCM
2773 * using a P-256 authenticated combination key.
2775 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2776 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2777 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2778 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2779 hci_conn_drop(conn);
2783 hci_proto_connect_cfm(conn, ev->status);
2784 hci_conn_drop(conn);
2786 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2789 hci_dev_unlock(hdev);
/* Change Connection Link Key Complete event: on success mark the link
 * secure, clear the pending-auth flag and notify key-change listeners.
 */
2792 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2793 struct sk_buff *skb)
2795 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2796 struct hci_conn *conn;
2798 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2802 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2805 set_bit(HCI_CONN_SECURE, &conn->flags);
2807 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2809 hci_key_change_cfm(conn, ev->status);
2812 hci_dev_unlock(hdev);
/* Remote Features event: cache page-0 features on the conn; while still in
 * BT_CONFIG either continue with extended features (both sides SSP-capable)
 * or finish setup — request the remote name (or report mgmt-connected),
 * then authenticate if needed or declare the connection established.
 * The CONFIG_SPRD_2331 branch (Spreadtrum) additionally restricts the ACL
 * packet types to what the remote's feature bits actually support before
 * the name request.
 */
2815 static void hci_remote_features_evt(struct hci_dev *hdev,
2816 struct sk_buff *skb)
2818 struct hci_ev_remote_features *ev = (void *) skb->data;
2819 struct hci_conn *conn;
2821 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2825 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2830 memcpy(conn->features[0], ev->features, 8);
2832 if (conn->state != BT_CONFIG)
2835 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2836 struct hci_cp_read_remote_ext_features cp;
2837 cp.handle = ev->handle;
2839 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2844 #ifdef CONFIG_SPRD_2331
2845 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2846 struct hci_cp_change_conn_ptype cp;
2847 memset(&cp, 0, sizeof(cp));
/* Start with 1-slot basic-rate packets, then widen per feature bits. */
2848 cp.pkt_type = (HCI_DM1|HCI_DH1);
2849 if (conn->features[0][0] & LMP_3SLOT)
2850 cp.pkt_type |= (HCI_DM3|HCI_DH3);
2852 if (conn->features[0][0] & LMP_5SLOT)
2853 cp.pkt_type |= (HCI_DM5|HCI_DH5);
/* For EDR, the HCI_2DHx/HCI_3DHx bits are "do NOT use" flags, hence
 * they are set when the corresponding capability is absent. */
2855 if (!(conn->features[0][3] & LMP_EDR_ACL_2M)) {
2856 cp.pkt_type |= (HCI_2DH1|HCI_2DH3|HCI_2DH5);
2858 if (!(conn->features[0][4] & LMP_EDR_3SLOT))
2859 cp.pkt_type |= HCI_2DH3;
2861 if (!(conn->features[0][5] & LMP_EDR_5SLOT))
2862 cp.pkt_type |= HCI_2DH5;
2865 if (!(conn->features[0][3] & LMP_EDR_ACL_3M)) {
2866 cp.pkt_type |= (HCI_3DH1|HCI_3DH3|HCI_3DH5);
2868 if (!(conn->features[0][4] & LMP_EDR_3SLOT))
2869 cp.pkt_type |= HCI_3DH3;
2871 if (!(conn->features[0][5] & LMP_EDR_5SLOT))
2872 cp.pkt_type |= HCI_3DH5;
2875 cp.handle = ev->handle;
2876 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
2880 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2881 struct hci_cp_remote_name_req cp;
2882 memset(&cp, 0, sizeof(cp));
2883 bacpy(&cp.bdaddr, &conn->dst);
2884 cp.pscan_rep_mode = 0x02;
2885 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2886 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2887 mgmt_device_connected(hdev, conn, 0, NULL, 0);
2889 if (!hci_outgoing_auth_needed(hdev, conn)) {
2890 conn->state = BT_CONNECTED;
2891 hci_proto_connect_cfm(conn, ev->status);
2892 hci_conn_drop(conn);
2896 hci_dev_unlock(hdev);
/* Handle the HCI Command Complete event.
 *
 * Pulls the event header off the skb, then dispatches the remaining
 * parameter payload to the matching hci_cc_* handler keyed on the
 * completed command opcode.  Afterwards the command timer is cancelled,
 * any request waiting on this opcode is completed, and the command
 * queue is re-credited according to ev->ncmd.
 */
2899 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2901 struct hci_ev_cmd_complete *ev = (void *) skb->data;
/* First parameter byte after the header is the command's status code.
 * NOTE(review): assumes skb->len >= sizeof(*ev) + 1; no explicit length
 * check is visible here -- confirm the caller guarantees a minimum size. */
2902 u8 status = skb->data[sizeof(*ev)];
2905 skb_pull(skb, sizeof(*ev));
2907 opcode = __le16_to_cpu(ev->opcode);
/* Per-opcode dispatch: each hci_cc_* consumes the remaining skb payload. */
2910 case HCI_OP_INQUIRY_CANCEL:
2911 hci_cc_inquiry_cancel(hdev, skb);
2914 case HCI_OP_PERIODIC_INQ:
2915 hci_cc_periodic_inq(hdev, skb);
2918 case HCI_OP_EXIT_PERIODIC_INQ:
2919 hci_cc_exit_periodic_inq(hdev, skb);
2922 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2923 hci_cc_remote_name_req_cancel(hdev, skb);
2926 case HCI_OP_ROLE_DISCOVERY:
2927 hci_cc_role_discovery(hdev, skb);
2930 case HCI_OP_READ_LINK_POLICY:
2931 hci_cc_read_link_policy(hdev, skb);
2934 case HCI_OP_WRITE_LINK_POLICY:
2935 hci_cc_write_link_policy(hdev, skb);
2938 case HCI_OP_READ_DEF_LINK_POLICY:
2939 hci_cc_read_def_link_policy(hdev, skb);
2942 case HCI_OP_WRITE_DEF_LINK_POLICY:
2943 hci_cc_write_def_link_policy(hdev, skb);
2947 hci_cc_reset(hdev, skb);
2950 case HCI_OP_READ_STORED_LINK_KEY:
2951 hci_cc_read_stored_link_key(hdev, skb);
2954 case HCI_OP_DELETE_STORED_LINK_KEY:
2955 hci_cc_delete_stored_link_key(hdev, skb);
2958 case HCI_OP_WRITE_LOCAL_NAME:
2959 hci_cc_write_local_name(hdev, skb);
2962 case HCI_OP_READ_LOCAL_NAME:
2963 hci_cc_read_local_name(hdev, skb);
2966 case HCI_OP_WRITE_AUTH_ENABLE:
2967 hci_cc_write_auth_enable(hdev, skb);
2970 case HCI_OP_WRITE_ENCRYPT_MODE:
2971 hci_cc_write_encrypt_mode(hdev, skb);
2974 case HCI_OP_WRITE_SCAN_ENABLE:
2975 hci_cc_write_scan_enable(hdev, skb);
2978 case HCI_OP_READ_CLASS_OF_DEV:
2979 hci_cc_read_class_of_dev(hdev, skb);
2982 case HCI_OP_WRITE_CLASS_OF_DEV:
2983 hci_cc_write_class_of_dev(hdev, skb);
2986 case HCI_OP_READ_VOICE_SETTING:
2987 hci_cc_read_voice_setting(hdev, skb);
2990 case HCI_OP_WRITE_VOICE_SETTING:
2991 hci_cc_write_voice_setting(hdev, skb);
2994 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2995 hci_cc_read_num_supported_iac(hdev, skb);
2998 case HCI_OP_WRITE_SSP_MODE:
2999 hci_cc_write_ssp_mode(hdev, skb);
3002 case HCI_OP_WRITE_SC_SUPPORT:
3003 hci_cc_write_sc_support(hdev, skb);
3006 case HCI_OP_READ_LOCAL_VERSION:
3007 hci_cc_read_local_version(hdev, skb);
3010 case HCI_OP_READ_LOCAL_COMMANDS:
3011 hci_cc_read_local_commands(hdev, skb);
3014 case HCI_OP_READ_LOCAL_FEATURES:
3015 hci_cc_read_local_features(hdev, skb);
3018 case HCI_OP_READ_LOCAL_EXT_FEATURES:
3019 hci_cc_read_local_ext_features(hdev, skb);
3022 case HCI_OP_READ_BUFFER_SIZE:
3023 hci_cc_read_buffer_size(hdev, skb);
3026 case HCI_OP_READ_BD_ADDR:
3027 hci_cc_read_bd_addr(hdev, skb);
3030 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
3031 hci_cc_read_page_scan_activity(hdev, skb);
3034 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
3035 hci_cc_write_page_scan_activity(hdev, skb);
3038 case HCI_OP_READ_PAGE_SCAN_TYPE:
3039 hci_cc_read_page_scan_type(hdev, skb);
3042 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
3043 hci_cc_write_page_scan_type(hdev, skb);
3046 case HCI_OP_READ_DATA_BLOCK_SIZE:
3047 hci_cc_read_data_block_size(hdev, skb);
3050 case HCI_OP_READ_FLOW_CONTROL_MODE:
3051 hci_cc_read_flow_control_mode(hdev, skb);
3054 case HCI_OP_READ_LOCAL_AMP_INFO:
3055 hci_cc_read_local_amp_info(hdev, skb);
3058 case HCI_OP_READ_CLOCK:
3059 hci_cc_read_clock(hdev, skb);
3062 case HCI_OP_READ_LOCAL_AMP_ASSOC:
3063 hci_cc_read_local_amp_assoc(hdev, skb);
3066 case HCI_OP_READ_INQ_RSP_TX_POWER:
3067 hci_cc_read_inq_rsp_tx_power(hdev, skb);
3070 case HCI_OP_PIN_CODE_REPLY:
3071 hci_cc_pin_code_reply(hdev, skb);
3074 case HCI_OP_PIN_CODE_NEG_REPLY:
3075 hci_cc_pin_code_neg_reply(hdev, skb);
3078 case HCI_OP_READ_LOCAL_OOB_DATA:
3079 hci_cc_read_local_oob_data(hdev, skb);
3082 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
3083 hci_cc_read_local_oob_ext_data(hdev, skb);
3086 case HCI_OP_LE_READ_BUFFER_SIZE:
3087 hci_cc_le_read_buffer_size(hdev, skb);
3090 case HCI_OP_LE_READ_LOCAL_FEATURES:
3091 hci_cc_le_read_local_features(hdev, skb);
3094 case HCI_OP_LE_READ_ADV_TX_POWER:
3095 hci_cc_le_read_adv_tx_power(hdev, skb);
3098 case HCI_OP_USER_CONFIRM_REPLY:
3099 hci_cc_user_confirm_reply(hdev, skb);
3102 case HCI_OP_USER_CONFIRM_NEG_REPLY:
3103 hci_cc_user_confirm_neg_reply(hdev, skb);
3106 case HCI_OP_USER_PASSKEY_REPLY:
3107 hci_cc_user_passkey_reply(hdev, skb);
3110 case HCI_OP_USER_PASSKEY_NEG_REPLY:
3111 hci_cc_user_passkey_neg_reply(hdev, skb);
3114 case HCI_OP_LE_SET_RANDOM_ADDR:
3115 hci_cc_le_set_random_addr(hdev, skb);
3118 case HCI_OP_LE_SET_ADV_ENABLE:
3119 hci_cc_le_set_adv_enable(hdev, skb);
3122 case HCI_OP_LE_SET_SCAN_PARAM:
3123 hci_cc_le_set_scan_param(hdev, skb);
3126 case HCI_OP_LE_SET_SCAN_ENABLE:
3127 hci_cc_le_set_scan_enable(hdev, skb);
3130 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
3131 hci_cc_le_read_white_list_size(hdev, skb);
3134 case HCI_OP_LE_CLEAR_WHITE_LIST:
3135 hci_cc_le_clear_white_list(hdev, skb);
3138 case HCI_OP_LE_ADD_TO_WHITE_LIST:
3139 hci_cc_le_add_to_white_list(hdev, skb);
3142 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
3143 hci_cc_le_del_from_white_list(hdev, skb);
3146 case HCI_OP_LE_READ_SUPPORTED_STATES:
3147 hci_cc_le_read_supported_states(hdev, skb);
3150 case HCI_OP_LE_READ_DEF_DATA_LEN:
3151 hci_cc_le_read_def_data_len(hdev, skb);
3154 case HCI_OP_LE_WRITE_DEF_DATA_LEN:
3155 hci_cc_le_write_def_data_len(hdev, skb);
3158 case HCI_OP_LE_READ_MAX_DATA_LEN:
3159 hci_cc_le_read_max_data_len(hdev, skb);
3162 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
3163 hci_cc_write_le_host_supported(hdev, skb);
3166 case HCI_OP_LE_SET_ADV_PARAM:
3167 hci_cc_set_adv_param(hdev, skb);
3170 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
3171 hci_cc_write_remote_amp_assoc(hdev, skb);
3174 case HCI_OP_READ_RSSI:
3175 hci_cc_read_rssi(hdev, skb);
3178 case HCI_OP_READ_TX_POWER:
3179 hci_cc_read_tx_power(hdev, skb);
/* Tizen vendor-specific RSSI monitoring commands. */
3181 #ifdef CONFIG_TIZEN_WIP
3182 case HCI_OP_ENABLE_RSSI:
3183 hci_cc_enable_rssi(hdev, skb);
3186 case HCI_OP_GET_RAW_RSSI:
3187 hci_cc_get_raw_rssi(hdev, skb);
3191 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* A real command completed (not a controller NOP), so stop the
 * stalled-command watchdog. */
3195 if (opcode != HCI_OP_NOP)
3196 cancel_delayed_work(&hdev->cmd_timer);
3198 hci_req_cmd_complete(hdev, opcode, status);
/* Re-credit the command queue from the controller's ncmd field and kick
 * the command worker if more commands are waiting.  Credits are withheld
 * while HCI_RESET is pending so no new commands race a reset. */
3200 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
3201 atomic_set(&hdev->cmd_cnt, 1);
3202 if (!skb_queue_empty(&hdev->cmd_q))
3203 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Command Status event.
 *
 * Unlike Command Complete, this only carries a status for a command that
 * is still in progress.  Dispatch to the per-opcode hci_cs_* handler
 * (which receives just the status byte), cancel the command watchdog,
 * complete a waiting request when appropriate, and re-credit the command
 * queue from ev->ncmd.
 */
3207 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
3209 struct hci_ev_cmd_status *ev = (void *) skb->data;
3212 skb_pull(skb, sizeof(*ev));
3214 opcode = __le16_to_cpu(ev->opcode);
3217 case HCI_OP_INQUIRY:
3218 hci_cs_inquiry(hdev, ev->status);
3221 case HCI_OP_CREATE_CONN:
3222 hci_cs_create_conn(hdev, ev->status);
3225 case HCI_OP_DISCONNECT:
3226 hci_cs_disconnect(hdev, ev->status);
3229 case HCI_OP_ADD_SCO:
3230 hci_cs_add_sco(hdev, ev->status);
3233 case HCI_OP_AUTH_REQUESTED:
3234 hci_cs_auth_requested(hdev, ev->status);
3237 case HCI_OP_SET_CONN_ENCRYPT:
3238 hci_cs_set_conn_encrypt(hdev, ev->status);
3241 case HCI_OP_REMOTE_NAME_REQ:
3242 hci_cs_remote_name_req(hdev, ev->status);
3245 case HCI_OP_READ_REMOTE_FEATURES:
3246 hci_cs_read_remote_features(hdev, ev->status);
3249 case HCI_OP_READ_REMOTE_EXT_FEATURES:
3250 hci_cs_read_remote_ext_features(hdev, ev->status);
3253 case HCI_OP_SETUP_SYNC_CONN:
3254 hci_cs_setup_sync_conn(hdev, ev->status);
3257 case HCI_OP_CREATE_PHY_LINK:
3258 hci_cs_create_phylink(hdev, ev->status);
3261 case HCI_OP_ACCEPT_PHY_LINK:
3262 hci_cs_accept_phylink(hdev, ev->status);
3265 case HCI_OP_SNIFF_MODE:
3266 hci_cs_sniff_mode(hdev, ev->status);
3269 case HCI_OP_EXIT_SNIFF_MODE:
3270 hci_cs_exit_sniff_mode(hdev, ev->status);
3273 case HCI_OP_SWITCH_ROLE:
3274 hci_cs_switch_role(hdev, ev->status);
3277 case HCI_OP_LE_CREATE_CONN:
3278 hci_cs_le_create_conn(hdev, ev->status);
3281 case HCI_OP_LE_START_ENC:
3282 hci_cs_le_start_enc(hdev, ev->status);
3286 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3290 if (opcode != HCI_OP_NOP)
3291 cancel_delayed_work(&hdev->cmd_timer);
/* Complete the pending request now only if the sent command is not
 * itself waiting for a later event to finish it. */
3294 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
3295 hci_req_cmd_complete(hdev, opcode, ev->status);
/* Re-credit command slots and kick the command worker, unless an
 * HCI reset is in flight. */
3297 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
3298 atomic_set(&hdev->cmd_cnt, 1);
3299 if (!skb_queue_empty(&hdev->cmd_q))
3300 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Handle the HCI Hardware Error event: log the controller-reported error
 * code and, on Tizen, forward it to the management interface under the
 * device lock so user space can react (e.g. trigger a recovery).
 */
3304 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3306 struct hci_ev_hardware_error *ev = (void *) skb->data;
3308 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
3309 #ifdef CONFIG_TIZEN_WIP
3311 mgmt_hardware_error(hdev, ev->code);
3312 hci_dev_unlock(hdev);
/* Handle the HCI Role Change event.
 *
 * Look up the ACL connection by peer address, record the new role on
 * success, clear any pending role-switch flag, and notify upper layers
 * via the role-switch confirmation callback.
 */
3316 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3318 struct hci_ev_role_change *ev = (void *) skb->data;
3319 struct hci_conn *conn;
3321 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3325 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3328 conn->role = ev->role;
3330 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3332 hci_role_switch_cfm(conn, ev->status, ev->role);
/* Tizen: when we end up as master, widen the link supervision timeout
 * to the platform default. */
3334 #ifdef CONFIG_TIZEN_WIP
3335 if (!ev->status && (get_link_mode(conn)) & HCI_LM_MASTER)
3336 hci_conn_change_supervision_timeout(conn,
3337 LINK_SUPERVISION_TIMEOUT);
3341 hci_dev_unlock(hdev);
/* Handle the Number Of Completed Packets event (packet-based flow
 * control).
 *
 * For each (handle, count) pair: decrement the connection's in-flight
 * counter and return the credits to the matching per-type quota
 * (ACL, LE -- falling back to the ACL pool when the controller has no
 * dedicated LE buffers -- or SCO), clamped to the advertised maximum.
 * Finally kick the TX worker so queued traffic can use the new credits.
 */
3344 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
3346 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
/* This event is only valid in packet-based flow control mode. */
3349 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
3350 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
/* Validate that the skb really holds num_hndl handle/count entries. */
3354 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3355 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
3356 BT_DBG("%s bad parameters", hdev->name);
3360 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
3362 for (i = 0; i < ev->num_hndl; i++) {
3363 struct hci_comp_pkts_info *info = &ev->handles[i];
3364 struct hci_conn *conn;
3365 __u16 handle, count;
3367 handle = __le16_to_cpu(info->handle);
3368 count = __le16_to_cpu(info->count);
3370 conn = hci_conn_hash_lookup_handle(hdev, handle);
3374 conn->sent -= count;
3376 switch (conn->type) {
3378 hdev->acl_cnt += count;
3379 if (hdev->acl_cnt > hdev->acl_pkts)
3380 hdev->acl_cnt = hdev->acl_pkts;
/* LE links: use the LE buffer pool if the controller has one,
 * otherwise LE traffic shares the ACL pool. */
3384 if (hdev->le_pkts) {
3385 hdev->le_cnt += count;
3386 if (hdev->le_cnt > hdev->le_pkts)
3387 hdev->le_cnt = hdev->le_pkts;
3389 hdev->acl_cnt += count;
3390 if (hdev->acl_cnt > hdev->acl_pkts)
3391 hdev->acl_cnt = hdev->acl_pkts;
3396 hdev->sco_cnt += count;
3397 if (hdev->sco_cnt > hdev->sco_pkts)
3398 hdev->sco_cnt = hdev->sco_pkts;
3402 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3407 queue_work(hdev->workqueue, &hdev->tx_work);
/* Resolve a connection handle to a hci_conn according to the transport
 * type of the device: on BR/EDR controllers look up the connection hash
 * directly; on AMP controllers the handle identifies a logical channel,
 * so resolve it via the channel table instead.
 */
3410 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3413 struct hci_chan *chan;
3415 switch (hdev->dev_type) {
3417 return hci_conn_hash_lookup_handle(hdev, handle);
3419 chan = hci_chan_lookup_handle(hdev, handle);
3424 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
/* Handle the Number Of Completed Data Blocks event (block-based flow
 * control, used by AMP controllers).
 *
 * Mirrors hci_num_comp_pkts_evt but credits data blocks instead of
 * packets: validate the entry count against the skb length, then for
 * each handle return its completed blocks to hdev->block_cnt (clamped
 * to num_blocks) and kick the TX worker.
 */
3431 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3433 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3436 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3437 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3441 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3442 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3443 BT_DBG("%s bad parameters", hdev->name);
3447 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
3450 for (i = 0; i < ev->num_hndl; i++) {
3451 struct hci_comp_blocks_info *info = &ev->handles[i];
3452 struct hci_conn *conn = NULL;
3453 __u16 handle, block_count;
3455 handle = __le16_to_cpu(info->handle);
3456 block_count = __le16_to_cpu(info->blocks);
/* Handle may refer to a logical channel on AMP devices. */
3458 conn = __hci_conn_lookup_handle(hdev, handle);
3462 conn->sent -= block_count;
3464 switch (conn->type) {
3467 hdev->block_cnt += block_count;
3468 if (hdev->block_cnt > hdev->num_blocks)
3469 hdev->block_cnt = hdev->num_blocks;
3473 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3478 queue_work(hdev->workqueue, &hdev->tx_work);
/* Handle the HCI Mode Change event (active <-> sniff/hold transitions).
 *
 * Record the new mode on the connection; when the change was not
 * locally initiated, track power-save state via HCI_CONN_POWER_SAVE.
 * A pending SCO setup that was deferred until the mode settled is
 * completed here.
 */
3481 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3483 struct hci_ev_mode_change *ev = (void *) skb->data;
3484 struct hci_conn *conn;
3486 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3490 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3492 conn->mode = ev->mode;
/* Remote-initiated change: mirror the mode into the power-save flag. */
3494 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3496 if (conn->mode == HCI_CM_ACTIVE)
3497 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3499 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3502 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3503 hci_sco_setup(conn, ev->status);
3506 hci_dev_unlock(hdev);
3509 #ifdef CONFIG_TIZEN_WIP
/* Tizen-only: handle vendor-specific HCI events.
 *
 * The first byte selects a vendor sub-event; for the LE meta vendor
 * group a second sub-code selects between RSSI link alerts and
 * multi-advertising state changes, each forwarded to mgmt.
 */
3510 static void hci_vendor_specific_evt(struct hci_dev *hdev, struct sk_buff *skb)
3512 struct hci_ev_vendor_specific *ev = (void *) skb->data;
3513 __u8 event_sub_code;
3514 skb_pull(skb, sizeof(*ev));
3516 BT_DBG("hci_vendor_specific_evt");
3517 event_sub_code = ev->event_sub_code;
3519 switch (event_sub_code) {
3520 case LE_META_VENDOR_SPECIFIC_GROUP_EVENT: {
/* Inner header: shadows the outer ev with the extended layout and
 * consumes it from the skb before dispatching on the LE sub-code. */
3521 struct hci_ev_ext_vendor_specific *ev = (void *) skb->data;
3522 __u8 event_le_ext_sub_code;
3523 skb_pull(skb, sizeof(*ev));
3524 event_le_ext_sub_code = ev->event_le_ext_sub_code;
3526 BT_DBG("Func: %s RSSI event LE_META_VENDOR_SPECIFIC_GROUP_EVENT: %X",
3527 __func__, LE_META_VENDOR_SPECIFIC_GROUP_EVENT);
3529 switch (event_le_ext_sub_code) {
3530 case LE_RSSI_LINK_ALERT:
3531 BT_DBG("Func: %s RSSI event LE_RSSI_LINK_ALERT %X",
3532 __func__, LE_RSSI_LINK_ALERT);
3533 mgmt_rssi_alert_evt(hdev, skb);
3542 case LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT:
3543 BT_DBG("Func: %s LE_MULTI_ADV_STATE_CHANGE_SUB_EVENT", __func__);
3544 mgmt_multi_adv_state_change_evt(hdev, skb);
/* Handle the HCI PIN Code Request event.
 *
 * Extend the disconnect timeout while pairing is in progress.  If we
 * are not bondable and did not initiate authentication, reject with a
 * negative reply; otherwise, under mgmt control, forward the request to
 * user space (flagging whether a 16-digit secure PIN is required).
 */
3552 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3554 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3555 struct hci_conn *conn;
3557 BT_DBG("%s", hdev->name);
3561 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
/* hold+drop pattern: refresh the reference so the new, longer
 * disc_timeout takes effect for the pairing window. */
3565 if (conn->state == BT_CONNECTED) {
3566 hci_conn_hold(conn);
3567 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3568 hci_conn_drop(conn);
3571 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
3572 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3573 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3574 sizeof(ev->bdaddr), &ev->bdaddr);
3575 } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3578 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3583 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3587 hci_dev_unlock(hdev);
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type: combination keys are
 * graded by PIN length, unauthenticated SSP keys give MEDIUM,
 * authenticated P-192 gives HIGH and authenticated P-256 gives FIPS.
 * A "changed combination" key keeps the connection's previous type.
 */
3590 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3592 if (key_type == HCI_LK_CHANGED_COMBINATION)
3595 conn->pin_length = pin_len;
3596 conn->key_type = key_type;
3599 case HCI_LK_LOCAL_UNIT:
3600 case HCI_LK_REMOTE_UNIT:
3601 case HCI_LK_DEBUG_COMBINATION:
3603 case HCI_LK_COMBINATION:
/* Legacy combination key: a full 16-digit PIN counts as HIGH. */
3605 conn->pending_sec_level = BT_SECURITY_HIGH;
3607 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3609 case HCI_LK_UNAUTH_COMBINATION_P192:
3610 case HCI_LK_UNAUTH_COMBINATION_P256:
3611 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3613 case HCI_LK_AUTH_COMBINATION_P192:
3614 conn->pending_sec_level = BT_SECURITY_HIGH;
3616 case HCI_LK_AUTH_COMBINATION_P256:
3617 conn->pending_sec_level = BT_SECURITY_FIPS;
/* Handle the HCI Link Key Request event.
 *
 * Look up a stored key for the peer; reply with it unless the key is
 * too weak for the connection's requirements (unauthenticated key when
 * MITM was requested, or a short-PIN combination key when HIGH/FIPS
 * security is pending), in which case a negative reply is sent so a
 * fresh pairing takes place.
 */
3622 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3624 struct hci_ev_link_key_req *ev = (void *) skb->data;
3625 struct hci_cp_link_key_reply cp;
3626 struct hci_conn *conn;
3627 struct link_key *key;
3629 BT_DBG("%s", hdev->name);
3631 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3636 key = hci_find_link_key(hdev, &ev->bdaddr);
3638 BT_DBG("%s link key not found for %pMR", hdev->name,
3643 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3646 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3648 clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
/* Reject unauthenticated keys when this connection asked for MITM
 * protection (auth_type bit 0). */
3650 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3651 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3652 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3653 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3657 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3658 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3659 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3660 BT_DBG("%s ignoring key unauthenticated for high security",
3665 conn_set_key(conn, key->type, key->pin_len);
3668 bacpy(&cp.bdaddr, &ev->bdaddr);
3669 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3671 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3673 hci_dev_unlock(hdev);
/* No usable key: tell the controller so pairing is initiated. */
3678 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3679 hci_dev_unlock(hdev);
/* Handle the HCI Link Key Notification event (new key from pairing).
 *
 * Reset the disconnect timeout, record the key on the connection, store
 * it in the kernel key list, and notify user space via mgmt with a
 * persistence hint.  Debug keys are dropped again immediately unless
 * HCI_KEEP_DEBUG_KEYS is set; the flush-key flag mirrors whether the
 * key should survive disconnection.
 */
3682 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3684 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3685 struct hci_conn *conn;
3686 struct link_key *key;
3690 BT_DBG("%s", hdev->name);
3694 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3698 hci_conn_hold(conn);
3699 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3700 hci_conn_drop(conn);
3702 set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
3703 conn_set_key(conn, ev->key_type, conn->pin_length);
3705 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3708 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3709 ev->key_type, pin_len, &persistent);
3713 /* Update connection information since adding the key will have
3714 * fixed up the type in the case of changed combination keys.
3716 if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
3717 conn_set_key(conn, key->type, key->pin_len);
3719 mgmt_new_link_key(hdev, key, persistent);
3721 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3722 * is set. If it's not set simply remove the key from the kernel
3723 * list (we've still notified user space about it but with
3724 * store_hint being 0).
3726 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3727 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3728 list_del_rcu(&key->list);
3729 kfree_rcu(key, rcu);
/* Track whether the controller should flush this key on disconnect. */
3734 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3736 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3739 hci_dev_unlock(hdev);
/* Handle the HCI Read Clock Offset Complete event: cache the reported
 * clock offset in the inquiry cache entry for the peer (speeds up
 * future page attempts) and refresh the entry's timestamp.
 */
3742 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3744 struct hci_ev_clock_offset *ev = (void *) skb->data;
3745 struct hci_conn *conn;
3747 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3751 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3752 if (conn && !ev->status) {
3753 struct inquiry_entry *ie;
3755 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3757 ie->data.clock_offset = ev->clock_offset;
3758 ie->timestamp = jiffies;
3762 hci_dev_unlock(hdev);
/* Handle the HCI Connection Packet Type Changed event: on success,
 * record the newly negotiated packet-type bitmask on the connection.
 */
3765 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3767 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3768 struct hci_conn *conn;
3770 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3774 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3775 if (conn && !ev->status)
3776 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3778 hci_dev_unlock(hdev);
/* Handle the Page Scan Repetition Mode Change event: update the cached
 * page-scan repetition mode for the peer in the inquiry cache so later
 * connection attempts page it efficiently.
 */
3781 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3783 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3784 struct inquiry_entry *ie;
3786 BT_DBG("%s", hdev->name);
3790 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3792 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3793 ie->timestamp = jiffies;
3796 hci_dev_unlock(hdev);
/* Handle the Inquiry Result With RSSI event.
 *
 * The controller may use either of two response layouts; which one is
 * detected by dividing the payload length by the response count.  Each
 * response is copied into an inquiry_data record, merged into the
 * inquiry cache, and reported to mgmt as a found device.  Results are
 * ignored while a periodic inquiry is running.
 */
3799 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3800 struct sk_buff *skb)
3802 struct inquiry_data data;
3803 int num_rsp = *((__u8 *) skb->data);
3805 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3810 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
/* Layout probe: if the per-response size is not the plain
 * with-RSSI struct, the variant with pscan_mode is in use. */
3815 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3816 struct inquiry_info_with_rssi_and_pscan_mode *info;
3817 info = (void *) (skb->data + 1);
3819 for (; num_rsp; num_rsp--, info++) {
3822 bacpy(&data.bdaddr, &info->bdaddr);
3823 data.pscan_rep_mode = info->pscan_rep_mode;
3824 data.pscan_period_mode = info->pscan_period_mode;
3825 data.pscan_mode = info->pscan_mode;
3826 memcpy(data.dev_class, info->dev_class, 3);
3827 data.clock_offset = info->clock_offset;
3828 data.rssi = info->rssi;
3829 data.ssp_mode = 0x00;
3831 flags = hci_inquiry_cache_update(hdev, &data, false);
3833 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3834 info->dev_class, info->rssi,
3835 flags, NULL, 0, NULL, 0);
3838 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3840 for (; num_rsp; num_rsp--, info++) {
3843 bacpy(&data.bdaddr, &info->bdaddr);
3844 data.pscan_rep_mode = info->pscan_rep_mode;
3845 data.pscan_period_mode = info->pscan_period_mode;
3846 data.pscan_mode = 0x00;
3847 memcpy(data.dev_class, info->dev_class, 3);
3848 data.clock_offset = info->clock_offset;
3849 data.rssi = info->rssi;
3850 data.ssp_mode = 0x00;
3852 flags = hci_inquiry_cache_update(hdev, &data, false);
3854 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3855 info->dev_class, info->rssi,
3856 flags, NULL, 0, NULL, 0);
3860 hci_dev_unlock(hdev);
/* Handle the Read Remote Extended Features Complete event.
 *
 * Store the reported feature page on the connection; page 1 carries the
 * remote host's SSP/SC support bits, which are mirrored into connection
 * flags and the inquiry cache.  While the connection is still in
 * BT_CONFIG, continue connection setup: (SPRD variant) negotiate the
 * ACL packet types, request the remote name (or report the connection
 * to mgmt), and if no outgoing authentication is needed, finish with
 * BT_CONNECTED and the connect confirmation.
 */
3863 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3864 struct sk_buff *skb)
3866 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3867 struct hci_conn *conn;
3869 BT_DBG("%s", hdev->name);
3873 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3877 if (ev->page < HCI_MAX_PAGES)
3878 memcpy(conn->features[ev->page], ev->features, 8);
3880 if (!ev->status && ev->page == 0x01) {
3881 struct inquiry_entry *ie;
3883 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3885 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3887 if (ev->features[0] & LMP_HOST_SSP) {
3888 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3890 /* It is mandatory by the Bluetooth specification that
3891 * Extended Inquiry Results are only used when Secure
3892 * Simple Pairing is enabled, but some devices violate
3895 * To make these devices work, the internal SSP
3896 * enabled flag needs to be cleared if the remote host
3897 * features do not indicate SSP support */
3898 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3901 /* Disable Secure connection implementation now */
3902 #ifdef CONFIG_TIZEN_WIP
3903 if (ev->features[0] & LMP_HOST_SC)
3904 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3908 if (conn->state != BT_CONFIG)
/* SPRD-only: constrain the ACL packet types to what the remote's
 * LMP feature bits allow (EDR packet-type bits are "use when zero",
 * hence the inverted tests). */
3911 #ifdef CONFIG_SPRD_2331
3912 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3913 struct hci_cp_change_conn_ptype cp;
3914 memset(&cp, 0, sizeof(cp));
3915 cp.pkt_type = (HCI_DM1|HCI_DH1);
3917 if (conn->features[0][0] & LMP_3SLOT)
3918 cp.pkt_type |= (HCI_DM3|HCI_DH3);
3920 if (conn->features[0][0] & LMP_5SLOT)
3921 cp.pkt_type |= (HCI_DM5|HCI_DH5);
3923 if (!(conn->features[0][3] & LMP_EDR_ACL_2M)) {
3924 cp.pkt_type |= (HCI_2DH1|HCI_2DH3|HCI_2DH5);
3926 if (!(conn->features[0][4] & LMP_EDR_3SLOT))
3927 cp.pkt_type |= HCI_2DH3;
3929 if (!(conn->features[0][5] & LMP_EDR_5SLOT))
3930 cp.pkt_type |= HCI_2DH5;
3933 if (!(conn->features[0][3] & LMP_EDR_ACL_3M)) {
3934 cp.pkt_type |= (HCI_3DH1|HCI_3DH3|HCI_3DH5);
3936 if (!(conn->features[0][4] & LMP_EDR_3SLOT))
3937 cp.pkt_type |= HCI_3DH3;
3939 if (!(conn->features[0][5] & LMP_EDR_5SLOT))
3940 cp.pkt_type |= HCI_3DH5;
3943 cp.handle = ev->handle;
3944 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), &cp);
/* Not yet reported to mgmt: fetch the remote name first; otherwise
 * mark the connection as mgmt-connected now. */
3948 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3949 struct hci_cp_remote_name_req cp;
3950 memset(&cp, 0, sizeof(cp));
3951 bacpy(&cp.bdaddr, &conn->dst);
3952 cp.pscan_rep_mode = 0x02;
3953 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3954 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3955 mgmt_device_connected(hdev, conn, 0, NULL, 0);
3957 if (!hci_outgoing_auth_needed(hdev, conn)) {
3958 conn->state = BT_CONNECTED;
3959 hci_proto_connect_cfm(conn, ev->status);
3960 hci_conn_drop(conn);
3964 hci_dev_unlock(hdev);
/* Handle the Synchronous Connection Complete event (SCO/eSCO).
 *
 * Look up the pending SCO/eSCO connection (falling back from eSCO to
 * SCO, since a remote may downgrade the link type).  On success the
 * connection goes BT_CONNECTED and gains debugfs/sysfs entries; for a
 * set of negotiation-failure status codes an eSCO attempt is retried
 * with a reduced packet-type mask before giving up.
 */
3967 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3968 struct sk_buff *skb)
3970 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3971 struct hci_conn *conn;
3973 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3977 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3979 if (ev->link_type == ESCO_LINK)
/* The controller answered an eSCO setup with a plain SCO link:
 * find the pending eSCO connection and retype it. */
3982 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3986 conn->type = SCO_LINK;
3989 switch (ev->status) {
3991 conn->handle = __le16_to_cpu(ev->handle);
3992 conn->state = BT_CONNECTED;
3994 hci_debugfs_create_conn(conn);
3995 hci_conn_add_sysfs(conn);
3998 case 0x10: /* Connection Accept Timeout */
3999 case 0x0d: /* Connection Rejected due to Limited Resources */
4000 case 0x11: /* Unsupported Feature or Parameter Value */
4001 case 0x1c: /* SCO interval rejected */
4002 case 0x1a: /* Unsupported Remote Feature */
4003 case 0x1f: /* Unspecified error */
4004 case 0x20: /* Unsupported LMP Parameter value */
/* Retry the setup with the remaining packet types; only fall
 * through to BT_CLOSED when no retry is possible. */
4006 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
4007 (hdev->esco_type & EDR_ESCO_MASK);
4008 if (hci_setup_sync(conn, conn->link->handle))
4014 conn->state = BT_CLOSED;
4018 hci_proto_connect_cfm(conn, ev->status);
4023 hci_dev_unlock(hdev);
/* Walk the TLV-style EIR (Extended Inquiry Response) data and return the
 * number of bytes actually occupied by well-formed fields, so trailing
 * padding is not copied around.  Each field is one length byte followed
 * by field_len bytes of data.
 */
4026 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
4030 while (parsed < eir_len) {
4031 u8 field_len = eir[0];
4036 parsed += field_len + 1;
4037 eir += field_len + 1;
/* Handle the Extended Inquiry Result event.
 *
 * Like the with-RSSI inquiry result, but each response also carries EIR
 * data: merge each response into the inquiry cache (name-known state
 * derived from whether the EIR contains a device name) and forward it,
 * including the trimmed EIR payload, to mgmt.  Skipped while a periodic
 * inquiry runs.
 */
4043 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
4044 struct sk_buff *skb)
4046 struct inquiry_data data;
4047 struct extended_inquiry_info *info = (void *) (skb->data + 1);
4048 int num_rsp = *((__u8 *) skb->data);
4051 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
4056 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
4061 for (; num_rsp; num_rsp--, info++) {
4065 bacpy(&data.bdaddr, &info->bdaddr);
4066 data.pscan_rep_mode = info->pscan_rep_mode;
4067 data.pscan_period_mode = info->pscan_period_mode;
4068 data.pscan_mode = 0x00;
4069 memcpy(data.dev_class, info->dev_class, 3);
4070 data.clock_offset = info->clock_offset;
4071 data.rssi = info->rssi;
/* EIR results imply the remote has SSP enabled. */
4072 data.ssp_mode = 0x01;
4074 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4075 name_known = eir_has_data_type(info->data,
4081 flags = hci_inquiry_cache_update(hdev, &data, name_known);
/* Trim trailing zero padding before handing the EIR to mgmt. */
4083 eir_len = eir_get_length(info->data, sizeof(info->data));
4085 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
4086 info->dev_class, info->rssi,
4087 flags, info->data, eir_len, NULL, 0);
4090 hci_dev_unlock(hdev);
/* Handle the Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links (BR/EDR handles this via the auth
 * complete path).  On success the pending security level becomes
 * effective; a failure on an established link triggers a disconnect
 * with an authentication-failure reason.  A link still in BT_CONFIG is
 * promoted to BT_CONNECTED and confirmed to the upper protocol layer.
 */
4093 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
4094 struct sk_buff *skb)
4096 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
4097 struct hci_conn *conn;
4099 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
4100 __le16_to_cpu(ev->handle));
4104 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4108 /* For BR/EDR the necessary steps are taken through the
4109 * auth_complete event.
4111 if (conn->type != LE_LINK)
4115 conn->sec_level = conn->pending_sec_level;
4117 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
4119 if (ev->status && conn->state == BT_CONNECTED) {
4120 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
4121 hci_conn_drop(conn);
4125 if (conn->state == BT_CONFIG) {
4127 conn->state = BT_CONNECTED;
4129 hci_proto_connect_cfm(conn, ev->status);
4130 hci_conn_drop(conn);
4132 hci_auth_cfm(conn, ev->status);
4134 hci_conn_hold(conn);
4135 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
4136 hci_conn_drop(conn);
4140 hci_dev_unlock(hdev);
/* Compute the authentication requirements value to send in an IO
 * Capability Reply, reconciling the remote's stated requirements with
 * our own: follow a remote "no bonding" request, demand MITM whenever
 * both sides have IO capable of it, and otherwise drop the MITM bit
 * since it cannot be satisfied.  Bit 0 of the value is the MITM flag.
 */
4143 static u8 hci_get_auth_req(struct hci_conn *conn)
/* Tizen: honour a remote general-bonding-with-MITM request outright
 * when both sides have usable IO. */
4145 #ifdef CONFIG_TIZEN_WIP
4146 if (conn->remote_auth == HCI_AT_GENERAL_BONDING_MITM) {
4147 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4148 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4149 return HCI_AT_GENERAL_BONDING_MITM;
4153 /* If remote requests no-bonding follow that lead */
4154 if (conn->remote_auth == HCI_AT_NO_BONDING ||
4155 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
4156 return conn->remote_auth | (conn->auth_type & 0x01);
4158 /* If both remote and local have enough IO capabilities, require
4161 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
4162 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
4163 return conn->remote_auth | 0x01;
4165 /* No MITM protection possible so ignore remote requirement */
4166 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
/* Report whether out-of-band pairing data is available (and usable) for
 * this BR/EDR connection.  In Secure Connections Only mode the P-256
 * hash/randomizer must be non-zero; legacy (P-192-only) OOB data is not
 * accepted there.  Returns the OOB-data-present value for the IO
 * Capability Reply (non-zero when usable OOB data exists).
 */
4169 static u8 bredr_oob_data_present(struct hci_conn *conn)
4171 struct hci_dev *hdev = conn->hdev;
4172 struct oob_data *data;
4174 data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
4178 /* When Secure Connections Only mode is enabled, then the P-256
4179 * values are required. If they are not available, then do not
4180 * declare that OOB data is present.
4182 if (bredr_sc_enabled(hdev) &&
4183 test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
4184 (!memcmp(data->rand256, ZERO_KEY, 16) ||
4185 !memcmp(data->hash256, ZERO_KEY, 16)))
4188 if (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))
/* Handle the IO Capability Request event (SSP pairing start).
 *
 * If pairing is permitted (bondable, we initiated, or the remote asked
 * for no bonding) send an IO Capability Reply describing our IO, the
 * derived authentication requirements, and OOB availability; otherwise
 * send a negative reply with "pairing not allowed".
 */
4194 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4196 struct hci_ev_io_capa_request *ev = (void *) skb->data;
4197 struct hci_conn *conn;
4199 BT_DBG("%s", hdev->name);
4203 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4207 hci_conn_hold(conn);
4209 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4212 /* Allow pairing if we're pairable, the initiators of the
4213 * pairing or if the remote is not requesting bonding.
4215 if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
4216 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
4217 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
4218 struct hci_cp_io_capability_reply cp;
4220 bacpy(&cp.bdaddr, &ev->bdaddr);
4221 /* Change the IO capability from KeyboardDisplay
4222 * to DisplayYesNo as it is not supported by BT spec. */
4223 cp.capability = (conn->io_capability == 0x04) ?
4224 HCI_IO_DISPLAY_YESNO : conn->io_capability;
4226 /* If we are initiators, there is no remote information yet */
4227 if (conn->remote_auth == 0xff) {
4228 /* Request MITM protection if our IO caps allow it
4229 * except for the no-bonding case.
4231 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4232 conn->auth_type != HCI_AT_NO_BONDING)
4233 conn->auth_type |= 0x01;
4235 conn->auth_type = hci_get_auth_req(conn);
4238 /* If we're not bondable, force one of the non-bondable
4239 * authentication requirement values.
4241 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
4242 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
4244 cp.authentication = conn->auth_type;
4245 cp.oob_data = bredr_oob_data_present(conn);
4247 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
4250 struct hci_cp_io_capability_neg_reply cp;
4252 bacpy(&cp.bdaddr, &ev->bdaddr);
4253 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
4255 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
4260 hci_dev_unlock(hdev);
/* Handle the IO Capability Response event: record the remote device's
 * IO capability and authentication requirements on the connection, and
 * note the presence of remote OOB data for the later pairing exchange.
 */
4263 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4265 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4266 struct hci_conn *conn;
4268 BT_DBG("%s", hdev->name);
4272 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4276 conn->remote_cap = ev->capability;
4277 conn->remote_auth = ev->authentication;
4279 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
4282 hci_dev_unlock(hdev);
/* Handle the User Confirmation Request event (SSP numeric comparison).
 *
 * Reject outright when we need MITM but the remote cannot provide it.
 * When neither side needs MITM, either auto-accept (possibly after the
 * configured delay) or ask user space for a simple authorization
 * (confirm_hint = 1).  Otherwise forward the passkey to user space for
 * a real numeric comparison.
 */
4285 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
4286 struct sk_buff *skb)
4288 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
4289 int loc_mitm, rem_mitm, confirm_hint = 0;
4290 struct hci_conn *conn;
4292 BT_DBG("%s", hdev->name);
4296 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4299 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4303 loc_mitm = (conn->auth_type & 0x01);
4304 rem_mitm = (conn->remote_auth & 0x01);
4306 /* If we require MITM but the remote device can't provide that
4307 * (it has NoInputNoOutput) then reject the confirmation
4308 * request. We check the security level here since it doesn't
4309 * necessarily match conn->auth_type.
4311 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
4312 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
4313 BT_DBG("Rejecting request: remote device can't provide MITM");
4314 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
4315 sizeof(ev->bdaddr), &ev->bdaddr);
4319 /* If no side requires MITM protection; auto-accept */
4320 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
4321 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
4323 /* If we're not the initiators request authorization to
4324 * proceed from user space (mgmt_user_confirm with
4325 * confirm_hint set to 1). The exception is if neither
4326 * side had MITM or if the local IO capability is
4327 * NoInputNoOutput, in which case we do auto-accept
4329 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
4330 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
4331 (loc_mitm || rem_mitm)) {
4332 BT_DBG("Confirming auto-accept as acceptor");
4337 BT_DBG("Auto-accept of user confirmation with %ums delay",
4338 hdev->auto_accept_delay);
/* Delayed auto-accept runs from a workqueue so a fast remote
 * cancel can still pre-empt it. */
4340 if (hdev->auto_accept_delay > 0) {
4341 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
4342 queue_delayed_work(conn->hdev->workqueue,
4343 &conn->auto_accept_work, delay);
4347 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
4348 sizeof(ev->bdaddr), &ev->bdaddr);
4353 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
4354 le32_to_cpu(ev->passkey), confirm_hint);
4357 hci_dev_unlock(hdev);
/* Handle the HCI User Passkey Request event by forwarding it to user space
 * via mgmt_user_passkey_request() when the management interface is active.
 *
 * NOTE(review): intermediate lines are elided in this excerpt (numbering is
 * non-contiguous).
 */
4360 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4361 struct sk_buff *skb)
4363 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4365 BT_DBG("%s", hdev->name);
4367 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4368 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
/* Handle the HCI User Passkey Notification event: record the passkey on the
 * connection, reset the entered-digit counter, and notify user space via
 * mgmt_user_passkey_notify() when the management interface is active.
 *
 * NOTE(review): intermediate lines (e.g. the !conn guard) are elided in this
 * excerpt (numbering is non-contiguous).
 */
4371 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4372 struct sk_buff *skb)
4374 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4375 struct hci_conn *conn;
4377 BT_DBG("%s", hdev->name);
4379 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4383 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4384 conn->passkey_entered = 0;
4386 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4387 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4388 conn->dst_type, conn->passkey_notify,
4389 conn->passkey_entered);
4392 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4394 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4395 struct hci_conn *conn;
4397 BT_DBG("%s", hdev->name);
4399 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4404 case HCI_KEYPRESS_STARTED:
4405 conn->passkey_entered = 0;
4408 case HCI_KEYPRESS_ENTERED:
4409 conn->passkey_entered++;
4412 case HCI_KEYPRESS_ERASED:
4413 conn->passkey_entered--;
4416 case HCI_KEYPRESS_CLEARED:
4417 conn->passkey_entered = 0;
4420 case HCI_KEYPRESS_COMPLETED:
4424 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4425 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4426 conn->dst_type, conn->passkey_notify,
4427 conn->passkey_entered);
/* Handle the HCI Simple Pairing Complete event: reset the cached remote
 * auth requirement, report a failure to user space only when we were not
 * the authentication initiator (to avoid duplicate auth_failed events),
 * and drop the connection reference taken for the pairing.
 *
 * NOTE(review): intermediate lines are elided in this excerpt (numbering is
 * non-contiguous).
 */
4430 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4431 struct sk_buff *skb)
4433 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4434 struct hci_conn *conn;
4436 BT_DBG("%s", hdev->name);
4440 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4444 /* Reset the authentication requirement to unknown */
4445 conn->remote_auth = 0xff;
4447 /* To avoid duplicate auth_failed events to user space we check
4448 * the HCI_CONN_AUTH_PEND flag which will be set if we
4449 * initiated the authentication. A traditional auth_complete
4450 * event gets always produced as initiator and is also mapped to
4451 * the mgmt_auth_failed event */
4452 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4453 mgmt_auth_failed(conn, ev->status);
4455 hci_conn_drop(conn);
4458 hci_dev_unlock(hdev);
/* Handle the HCI Remote Host Supported Features Notification event: cache
 * the remote host feature page on the connection (features[1]) and, if the
 * device is in the inquiry cache, record its SSP support bit.
 *
 * NOTE(review): intermediate lines are elided in this excerpt (numbering is
 * non-contiguous).
 */
4461 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4462 struct sk_buff *skb)
4464 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4465 struct inquiry_entry *ie;
4466 struct hci_conn *conn;
4468 BT_DBG("%s", hdev->name);
4472 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4474 memcpy(conn->features[1], ev->features, 8);
4476 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4478 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4480 hci_dev_unlock(hdev);
/* Handle the HCI Remote OOB Data Request event: if stored out-of-band
 * pairing data exists for the peer, reply with it — using the extended
 * (192+256 bit) reply when BR/EDR Secure Connections is enabled, zeroing
 * the P-192 values in SC-only mode — otherwise send a negative reply.
 *
 * NOTE(review): intermediate lines (the !data branch head, else arms,
 * closing braces) are elided in this excerpt (numbering is non-contiguous).
 */
4483 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4484 struct sk_buff *skb)
4486 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4487 struct oob_data *data;
4489 BT_DBG("%s", hdev->name);
4493 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4496 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
/* No stored OOB data for this peer: negative reply. */
4498 struct hci_cp_remote_oob_data_neg_reply cp;
4500 bacpy(&cp.bdaddr, &ev->bdaddr);
4501 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4506 if (bredr_sc_enabled(hdev)) {
4507 struct hci_cp_remote_oob_ext_data_reply cp;
4509 bacpy(&cp.bdaddr, &ev->bdaddr);
/* In SC-only mode the legacy P-192 values must not be used. */
4510 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4511 memset(cp.hash192, 0, sizeof(cp.hash192));
4512 memset(cp.rand192, 0, sizeof(cp.rand192));
4514 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4515 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4517 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4518 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4520 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4523 struct hci_cp_remote_oob_data_reply cp;
4525 bacpy(&cp.bdaddr, &ev->bdaddr);
4526 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4527 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4529 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4534 hci_dev_unlock(hdev);
/* Handle the AMP Physical Link Complete event: look up the AMP hcon by its
 * physical handle, mark it connected, inherit the peer address from the
 * associated BR/EDR connection, register debugfs/sysfs entries, and confirm
 * the physical link to the AMP manager.
 *
 * NOTE(review): intermediate lines (the !hcon and status error paths) are
 * elided in this excerpt (numbering is non-contiguous).
 */
4537 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4538 struct sk_buff *skb)
4540 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4541 struct hci_conn *hcon, *bredr_hcon;
4543 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4548 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4550 hci_dev_unlock(hdev);
4556 hci_dev_unlock(hdev);
4560 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4562 hcon->state = BT_CONNECTED;
4563 bacpy(&hcon->dst, &bredr_hcon->dst);
/* hold+drop arms the disconnect timeout accounting for this hcon. */
4565 hci_conn_hold(hcon);
4566 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4567 hci_conn_drop(hcon);
4569 hci_debugfs_create_conn(hcon);
4570 hci_conn_add_sysfs(hcon);
4572 amp_physical_cfm(bredr_hcon, hcon);
4574 hci_dev_unlock(hdev);
/* Handle the AMP Logical Link Complete event: create an hci_chan on the
 * physical-link hcon, store the logical handle, and — if an AMP manager
 * with a pending BR/EDR L2CAP channel exists — set the channel MTU from
 * the controller's block MTU and confirm the logical link to L2CAP.
 *
 * NOTE(review): intermediate lines (null checks, closing braces) are elided
 * in this excerpt (numbering is non-contiguous).
 */
4577 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4579 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4580 struct hci_conn *hcon;
4581 struct hci_chan *hchan;
4582 struct amp_mgr *mgr;
4584 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4585 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4588 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4592 /* Create AMP hchan */
4593 hchan = hci_chan_create(hcon);
4597 hchan->handle = le16_to_cpu(ev->handle);
4599 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4601 mgr = hcon->amp_mgr;
4602 if (mgr && mgr->bredr_chan) {
4603 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4605 l2cap_chan_lock(bredr_chan);
4607 bredr_chan->conn->mtu = hdev->block_mtu;
4608 l2cap_logical_cfm(bredr_chan, hchan, 0);
4609 hci_conn_hold(hcon);
4611 l2cap_chan_unlock(bredr_chan);
/* Handle the AMP Disconnection Logical Link Complete event: look up the
 * hci_chan by its logical handle and destroy the logical link, passing the
 * disconnect reason to the AMP layer.
 *
 * NOTE(review): intermediate lines (status/!hchan guards) are elided in
 * this excerpt (numbering is non-contiguous).
 */
4615 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4616 struct sk_buff *skb)
4618 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4619 struct hci_chan *hchan;
4621 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4622 le16_to_cpu(ev->handle), ev->status);
4629 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4633 amp_destroy_logical_link(hchan, ev->reason);
4636 hci_dev_unlock(hdev);
/* Handle the AMP Disconnection Physical Link Complete event: mark the
 * corresponding hcon closed.
 *
 * NOTE(review): intermediate lines (status/!hcon guards and the reference
 * drop) are elided in this excerpt (numbering is non-contiguous).
 */
4639 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4640 struct sk_buff *skb)
4642 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4643 struct hci_conn *hcon;
4645 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4652 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4654 hcon->state = BT_CLOSED;
4658 hci_dev_unlock(hdev);
/* Handle the LE Connection Complete event: find or create the hci_conn,
 * fill in initiator/responder addresses depending on role and privacy
 * settings, resolve an RPA back to the identity address via the IRK store,
 * fail the connection on error status, enforce the blacklist, notify mgmt,
 * record the negotiated connection parameters, and clear any pending
 * auto-connect params entry that triggered this connection.
 *
 * NOTE(review): the line numbering in this listing is non-contiguous, so
 * some statements (role checks, braces, goto labels, TIZEN #else/#endif
 * arms) are elided from this excerpt; do not assume it compiles as shown.
 */
4661 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4663 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4664 struct hci_conn_params *params;
4665 struct hci_conn *conn;
4666 struct smp_irk *irk;
4669 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4673 /* All controllers implicitly stop advertising in the event of a
4674 * connection, so ensure that the state bit is cleared.
4676 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
4678 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4680 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4682 BT_ERR("No memory for new connection");
4686 conn->dst_type = ev->bdaddr_type;
4688 /* If we didn't have a hci_conn object previously
4689 * but we're in master role this must be something
4690 * initiated using a white list. Since white list based
4691 * connections are not "first class citizens" we don't
4692 * have full tracking of them. Therefore, we go ahead
4693 * with a "best effort" approach of determining the
4694 * initiator address based on the HCI_PRIVACY flag.
4697 conn->resp_addr_type = ev->bdaddr_type;
4698 bacpy(&conn->resp_addr, &ev->bdaddr);
4699 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
4700 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4701 bacpy(&conn->init_addr, &hdev->rpa);
4703 hci_copy_identity_address(hdev,
4705 &conn->init_addr_type);
4709 #ifdef CONFIG_TIZEN_WIP
4710 /* LE auto connect */
4711 bacpy(&conn->dst, &ev->bdaddr);
4713 cancel_delayed_work(&conn->le_conn_timeout);
4717 /* Set the responder (our side) address type based on
4718 * the advertising address type.
4720 conn->resp_addr_type = hdev->adv_addr_type;
4721 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4722 bacpy(&conn->resp_addr, &hdev->random_addr);
4724 bacpy(&conn->resp_addr, &hdev->bdaddr);
4726 conn->init_addr_type = ev->bdaddr_type;
4727 bacpy(&conn->init_addr, &ev->bdaddr);
4729 /* For incoming connections, set the default minimum
4730 * and maximum connection interval. They will be used
4731 * to check if the parameters are in range and if not
4732 * trigger the connection update procedure.
4734 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4735 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4738 /* Lookup the identity address from the stored connection
4739 * address and address type.
4741 * When establishing connections to an identity address, the
4742 * connection procedure will store the resolvable random
4743 * address first. Now if it can be converted back into the
4744 * identity address, start using the identity address from
4747 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4749 #ifdef __TIZEN_PATCH__
4750 /* Update rpa. So that, if irk is refreshed, it can be saved */
4751 bacpy(&irk->rpa, &conn->dst);
4753 bacpy(&conn->dst, &irk->bdaddr);
4754 conn->dst_type = irk->addr_type;
4758 hci_le_conn_failed(conn, ev->status);
4762 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4763 addr_type = BDADDR_LE_PUBLIC;
4765 addr_type = BDADDR_LE_RANDOM;
4767 /* Drop the connection if the device is blocked */
4768 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4769 hci_conn_drop(conn);
4773 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4774 mgmt_device_connected(hdev, conn, 0, NULL, 0);
4776 conn->sec_level = BT_SECURITY_LOW;
4777 conn->handle = __le16_to_cpu(ev->handle);
4778 conn->state = BT_CONNECTED;
4780 conn->le_conn_interval = le16_to_cpu(ev->interval);
4781 conn->le_conn_latency = le16_to_cpu(ev->latency);
4782 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4784 hci_debugfs_create_conn(conn);
4785 hci_conn_add_sysfs(conn);
4787 hci_proto_connect_cfm(conn, ev->status);
/* If this connection satisfied a pending auto-connect entry, detach it. */
4789 params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
4792 list_del_init(&params->action);
4794 hci_conn_drop(params->conn);
4795 hci_conn_put(params->conn);
4796 params->conn = NULL;
4801 hci_update_background_scan(hdev);
4802 hci_dev_unlock(hdev);
/* Handle the LE Connection Update Complete event: store the newly
 * negotiated interval, latency and supervision timeout on the connection.
 *
 * NOTE(review): intermediate lines (status/!conn guards) are elided in this
 * excerpt (numbering is non-contiguous).
 */
4805 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4806 struct sk_buff *skb)
4808 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4809 struct hci_conn *conn;
4811 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4818 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4820 conn->le_conn_interval = le16_to_cpu(ev->interval);
4821 conn->le_conn_latency = le16_to_cpu(ev->latency);
4822 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4825 hci_dev_unlock(hdev);
4828 /* This function requires the caller holds hdev->lock */
/* Decide whether an incoming advertisement should trigger an outgoing LE
 * connection.  Only connectable advertisement types are considered; blocked
 * devices are ignored, as is everything while a slave-role LE connection
 * already exists.  A pend_le_conns entry gates the attempt, with
 * HCI_AUTO_CONN_DIRECT restricted to ADV_DIRECT_IND.  On a successful
 * hci_connect_le() the resulting hci_conn is pinned into params->conn;
 * -EBUSY from an in-flight attempt is tolerated.
 *
 * NOTE(review): intermediate lines (returns, default case, braces) are
 * elided in this excerpt (numbering is non-contiguous).
 */
4829 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
4831 u8 addr_type, u8 adv_type)
4833 struct hci_conn *conn;
4834 struct hci_conn_params *params;
4836 /* If the event is not connectable don't proceed further */
4837 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4840 /* Ignore if the device is blocked */
4841 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4844 /* Most controller will fail if we try to create new connections
4845 * while we have an existing one in slave role.
4847 if (hdev->conn_hash.le_num_slave > 0)
4850 /* If we're not connectable only connect devices that we have in
4851 * our pend_le_conns list.
4853 params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
4858 switch (params->auto_connect) {
4859 case HCI_AUTO_CONN_DIRECT:
4860 /* Only devices advertising with ADV_DIRECT_IND are
4861 * triggering a connection attempt. This is allowing
4862 * incoming connections from slave devices.
4864 if (adv_type != LE_ADV_DIRECT_IND)
4867 case HCI_AUTO_CONN_ALWAYS:
4868 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4869 * are triggering a connection attempt. This means
4870 * that incoming connectioms from slave device are
4871 * accepted and also outgoing connections to slave
4872 * devices are established when found.
4879 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4880 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4881 if (!IS_ERR(conn)) {
4882 /* Store the pointer since we don't really have any
4883 * other owner of the object besides the params that
4884 * triggered it. This way we can abort the connection if
4885 * the parameters get removed and keep the reference
4886 * count consistent once the connection is established.
4888 params->conn = hci_conn_get(conn);
4892 switch (PTR_ERR(conn)) {
4894 /* If hci_connect() returns -EBUSY it means there is already
4895 * an LE connection attempt going on. Since controllers don't
4896 * support more than one connection attempt at the time, we
4897 * don't consider this an error case.
4901 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
/* Process one LE advertising (or direct advertising) report: validate any
 * direct address against our own RPA/IRK, resolve the advertiser's RPA to
 * its identity address, possibly trigger an auto-connection via
 * check_pending_le_conn(), and deliver device-found events to mgmt.  The
 * Tizen build (CONFIG_TIZEN_WIP) forwards every report straight to
 * mgmt_le_device_found(); the upstream path merges ADV_IND/ADV_SCAN_IND
 * with their SCAN_RSP using the pending-report cache in hdev->discovery.
 *
 * NOTE(review): the line numbering in this listing is non-contiguous, so
 * some statements (returns, braces, #else/#endif arms, the `match`/`flags`
 * declarations) are elided from this excerpt; do not assume it compiles as
 * shown.
 */
4908 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4909 u8 bdaddr_type, bdaddr_t *direct_addr,
4910 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4912 #ifndef CONFIG_TIZEN_WIP
4913 struct discovery_state *d = &hdev->discovery;
4915 struct smp_irk *irk;
4916 struct hci_conn *conn;
4917 #ifndef CONFIG_TIZEN_WIP /* TIZEN_Bluetooth :: Disable adv ind and scan rsp merging */
4922 /* If the direct address is present, then this report is from
4923 * a LE Direct Advertising Report event. In that case it is
4924 * important to see if the address is matching the local
4925 * controller address.
4928 /* Only resolvable random addresses are valid for these
4929 * kind of reports and others can be ignored.
4931 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4934 /* If the controller is not using resolvable random
4935 * addresses, then this report can be ignored.
4937 if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
4940 /* If the local IRK of the controller does not match
4941 * with the resolvable random address provided, then
4942 * this report can be ignored.
4944 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4948 /* Check if we need to convert to identity address */
4949 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4951 bdaddr = &irk->bdaddr;
4952 bdaddr_type = irk->addr_type;
4955 /* Check if we have been requested to connect to this device */
4956 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4957 if (conn && type == LE_ADV_IND) {
4958 /* Store report for later inclusion by
4959 * mgmt_device_connected
4961 memcpy(conn->le_adv_data, data, len);
4962 conn->le_adv_data_len = len;
4965 /* Passive scanning shouldn't trigger any device found events,
4966 * except for devices marked as CONN_REPORT for which we do send
4967 * device found events.
4969 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4970 if (type == LE_ADV_DIRECT_IND)
4973 #ifndef CONFIG_TIZEN_WIP /* TIZEN_Bluetooth :: Handle all adv packet in platform */
4974 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4975 bdaddr, bdaddr_type))
4979 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4980 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4984 #ifdef CONFIG_TIZEN_WIP
4985 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4986 rssi, flags, data, len, NULL, 0, type);
4988 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4989 rssi, flags, data, len, NULL, 0);
4994 /* When receiving non-connectable or scannable undirected
4995 * advertising reports, this means that the remote device is
4996 * not connectable and then clearly indicate this in the
4997 * device found event.
4999 * When receiving a scan response, then there is no way to
5000 * know if the remote device is connectable or not. However
5001 * since scan responses are merged with a previously seen
5002 * advertising report, the flags field from that report
5005 * In the really unlikely case that a controller get confused
5006 * and just sends a scan response event, then it is marked as
5007 * not connectable as well.
5009 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
5010 type == LE_ADV_SCAN_RSP)
5011 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
5015 #ifdef CONFIG_TIZEN_WIP /* TIZEN_Bluetooth :: Disable adv ind and scan rsp merging */
5016 mgmt_le_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5017 rssi, flags, data, len, NULL, 0, type);
5019 /* If there's nothing pending either store the data from this
5020 * event or send an immediate device found event if the data
5021 * should not be stored for later.
5023 if (!has_pending_adv_report(hdev)) {
5024 /* If the report will trigger a SCAN_REQ store it for
5027 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5028 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5029 rssi, flags, data, len);
5033 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5034 rssi, flags, data, len, NULL, 0);
5038 /* Check if the pending report is for the same device as the new one */
5039 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
5040 bdaddr_type == d->last_adv_addr_type);
5042 /* If the pending data doesn't match this report or this isn't a
5043 * scan response (e.g. we got a duplicate ADV_IND) then force
5044 * sending of the pending data.
5046 if (type != LE_ADV_SCAN_RSP || !match) {
5047 /* Send out whatever is in the cache, but skip duplicates */
5049 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5050 d->last_adv_addr_type, NULL,
5051 d->last_adv_rssi, d->last_adv_flags,
5053 d->last_adv_data_len, NULL, 0);
5055 /* If the new report will trigger a SCAN_REQ store it for
5058 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
5059 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
5060 rssi, flags, data, len);
5064 /* The advertising reports cannot be merged, so clear
5065 * the pending report and send out a device found event.
5067 clear_pending_adv_report(hdev);
5068 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
5069 rssi, flags, data, len, NULL, 0);
5073 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
5074 * the new event is a SCAN_RSP. We can therefore proceed with
5075 * sending a merged device found event.
5077 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
5078 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
5079 d->last_adv_data, d->last_adv_data_len, data, len);
5080 clear_pending_adv_report(hdev);
/* Handle the LE Advertising Report event: iterate over the packed reports
 * in the event payload (the RSSI byte trails each report's data) and feed
 * each one to process_adv_report().
 *
 * NOTE(review): intermediate lines are elided in this excerpt (numbering is
 * non-contiguous).
 */
5084 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
5086 u8 num_reports = skb->data[0];
5087 void *ptr = &skb->data[1];
5091 while (num_reports--) {
5092 struct hci_ev_le_advertising_info *ev = ptr;
/* RSSI is the single byte immediately after the variable-length data. */
5095 rssi = ev->data[ev->length];
5096 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5097 ev->bdaddr_type, NULL, 0, rssi,
5098 ev->data, ev->length);
5100 ptr += sizeof(*ev) + ev->length + 1;
5103 hci_dev_unlock(hdev);
/* Handle the LE Long Term Key Request event: look up the stored LTK for
 * the connection, validate EDiv/Rand (both must be zero for SC keys,
 * matching for legacy keys), reply with the key, raise the pending
 * security level, and discard one-time STKs after use.  Falls through to a
 * negative reply when no usable key is found.
 *
 * NOTE(review): intermediate lines (goto not_found, the not_found label,
 * braces) are elided in this excerpt (numbering is non-contiguous).
 */
5106 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
5108 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
5109 struct hci_cp_le_ltk_reply cp;
5110 struct hci_cp_le_ltk_neg_reply neg;
5111 struct hci_conn *conn;
5112 struct smp_ltk *ltk;
5114 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
5118 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
5122 ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
5126 if (smp_ltk_is_sc(ltk)) {
5127 /* With SC both EDiv and Rand are set to zero */
5128 if (ev->ediv || ev->rand)
5131 /* For non-SC keys check that EDiv and Rand match */
5132 if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
5136 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
5137 cp.handle = cpu_to_le16(conn->handle);
5139 conn->pending_sec_level = smp_ltk_sec_level(ltk);
5141 conn->enc_key_size = ltk->enc_size;
5143 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
5145 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
5146 * temporary key used to encrypt a connection following
5147 * pairing. It is used during the Encrypted Session Setup to
5148 * distribute the keys. Later, security can be re-established
5149 * using a distributed LTK.
5151 if (ltk->type == SMP_STK) {
5152 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
/* STKs are single-use: remove from the key list after replying. */
5153 list_del_rcu(&ltk->list);
5154 kfree_rcu(ltk, rcu);
5156 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
5159 hci_dev_unlock(hdev);
/* not-found path: reject the LTK request. */
5164 neg.handle = ev->handle;
5165 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
5166 hci_dev_unlock(hdev);
/* Send an LE Remote Connection Parameter Request Negative Reply for the
 * given connection handle (the rejection reason parameter and its
 * assignment are elided from this excerpt — numbering is non-contiguous).
 */
5169 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
5172 struct hci_cp_le_conn_param_req_neg_reply cp;
5174 cp.handle = cpu_to_le16(handle);
5177 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
/* Handle the LE Remote Connection Parameter Request event: negatively
 * reply for unknown/disconnected handles or out-of-range parameters; when
 * we are master, update the stored per-device connection parameters and
 * inform mgmt via mgmt_new_conn_param(); finally accept the request by
 * echoing the proposed parameters back in a positive reply.
 *
 * NOTE(review): intermediate lines (store_hint handling, ce_len fields,
 * braces) are elided in this excerpt (numbering is non-contiguous).
 */
5181 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
5182 struct sk_buff *skb)
5184 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5185 struct hci_cp_le_conn_param_req_reply cp;
5186 struct hci_conn *hcon;
5187 u16 handle, min, max, latency, timeout;
5189 handle = le16_to_cpu(ev->handle);
5190 min = le16_to_cpu(ev->interval_min);
5191 max = le16_to_cpu(ev->interval_max);
5192 latency = le16_to_cpu(ev->latency);
5193 timeout = le16_to_cpu(ev->timeout);
5195 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5196 if (!hcon || hcon->state != BT_CONNECTED)
5197 return send_conn_param_neg_reply(hdev, handle,
5198 HCI_ERROR_UNKNOWN_CONN_ID);
5200 if (hci_check_conn_params(min, max, latency, timeout))
5201 return send_conn_param_neg_reply(hdev, handle,
5202 HCI_ERROR_INVALID_LL_PARAMS);
5204 if (hcon->role == HCI_ROLE_MASTER) {
5205 struct hci_conn_params *params;
5210 params = hci_conn_params_lookup(hdev, &hcon->dst,
5213 params->conn_min_interval = min;
5214 params->conn_max_interval = max;
5215 params->conn_latency = latency;
5216 params->supervision_timeout = timeout;
5222 hci_dev_unlock(hdev);
5224 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5225 store_hint, min, max, latency, timeout);
/* Accept: echo the remote's proposed parameters back verbatim. */
5228 cp.handle = ev->handle;
5229 cp.interval_min = ev->interval_min;
5230 cp.interval_max = ev->interval_max;
5231 cp.latency = ev->latency;
5232 cp.timeout = ev->timeout;
5236 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
/* Handle the LE Direct Advertising Report event: iterate over the packed
 * reports and feed each to process_adv_report(), passing the direct
 * (initiator) address so it can be validated against our own RPA.
 *
 * NOTE(review): the loop's pointer advance is elided in this excerpt
 * (numbering is non-contiguous).
 */
5239 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5240 struct sk_buff *skb)
5242 u8 num_reports = skb->data[0];
5243 void *ptr = &skb->data[1];
5247 while (num_reports--) {
5248 struct hci_ev_le_direct_adv_info *ev = ptr;
5250 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5251 ev->bdaddr_type, &ev->direct_addr,
5252 ev->direct_addr_type, ev->rssi, NULL, 0);
5257 hci_dev_unlock(hdev);
/* Dispatch an LE Meta event to its sub-event handler after stripping the
 * meta header from the skb.
 *
 * NOTE(review): break statements and the default case are elided in this
 * excerpt (numbering is non-contiguous).
 */
5260 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5262 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5264 skb_pull(skb, sizeof(*le_ev));
5266 switch (le_ev->subevent) {
5267 case HCI_EV_LE_CONN_COMPLETE:
5268 hci_le_conn_complete_evt(hdev, skb);
5271 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5272 hci_le_conn_update_complete_evt(hdev, skb);
5275 case HCI_EV_LE_ADVERTISING_REPORT:
5276 hci_le_adv_report_evt(hdev, skb);
5279 case HCI_EV_LE_LTK_REQ:
5280 hci_le_ltk_request_evt(hdev, skb);
5283 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5284 hci_le_remote_conn_param_req_evt(hdev, skb);
5287 case HCI_EV_LE_DIRECT_ADV_REPORT:
5288 hci_le_direct_adv_report_evt(hdev, skb);
/* Handle the AMP Channel Selected event: look up the hcon by its physical
 * handle and kick off reading the final local AMP assoc data.
 *
 * NOTE(review): the !hcon guard is elided in this excerpt (numbering is
 * non-contiguous).
 */
5296 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
5298 struct hci_ev_channel_selected *ev = (void *) skb->data;
5299 struct hci_conn *hcon;
5301 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
5303 skb_pull(skb, sizeof(*ev));
5305 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
5309 amp_read_loc_assoc_final_data(hdev, hcon);
5312 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
5314 struct hci_event_hdr *hdr = (void *) skb->data;
5315 __u8 event = hdr->evt;
5319 /* Received events are (currently) only needed when a request is
5320 * ongoing so avoid unnecessary memory allocation.
5322 if (hci_req_pending(hdev)) {
5323 kfree_skb(hdev->recv_evt);
5324 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
5327 hci_dev_unlock(hdev);
5329 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5331 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
5332 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
5333 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
5335 hci_req_cmd_complete(hdev, opcode, 0);
5339 case HCI_EV_INQUIRY_COMPLETE:
5340 hci_inquiry_complete_evt(hdev, skb);
5343 case HCI_EV_INQUIRY_RESULT:
5344 hci_inquiry_result_evt(hdev, skb);
5347 case HCI_EV_CONN_COMPLETE:
5348 hci_conn_complete_evt(hdev, skb);
5351 case HCI_EV_CONN_REQUEST:
5352 hci_conn_request_evt(hdev, skb);
5355 case HCI_EV_DISCONN_COMPLETE:
5356 hci_disconn_complete_evt(hdev, skb);
5359 case HCI_EV_AUTH_COMPLETE:
5360 hci_auth_complete_evt(hdev, skb);
5363 case HCI_EV_REMOTE_NAME:
5364 hci_remote_name_evt(hdev, skb);
5367 case HCI_EV_ENCRYPT_CHANGE:
5368 hci_encrypt_change_evt(hdev, skb);
5371 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
5372 hci_change_link_key_complete_evt(hdev, skb);
5375 case HCI_EV_REMOTE_FEATURES:
5376 hci_remote_features_evt(hdev, skb);
5379 case HCI_EV_CMD_COMPLETE:
5380 hci_cmd_complete_evt(hdev, skb);
5383 case HCI_EV_CMD_STATUS:
5384 hci_cmd_status_evt(hdev, skb);
5387 case HCI_EV_HARDWARE_ERROR:
5388 hci_hardware_error_evt(hdev, skb);
5391 case HCI_EV_ROLE_CHANGE:
5392 hci_role_change_evt(hdev, skb);
5395 case HCI_EV_NUM_COMP_PKTS:
5396 hci_num_comp_pkts_evt(hdev, skb);
5399 case HCI_EV_MODE_CHANGE:
5400 hci_mode_change_evt(hdev, skb);
5403 case HCI_EV_PIN_CODE_REQ:
5404 hci_pin_code_request_evt(hdev, skb);
5407 case HCI_EV_LINK_KEY_REQ:
5408 hci_link_key_request_evt(hdev, skb);
5411 case HCI_EV_LINK_KEY_NOTIFY:
5412 hci_link_key_notify_evt(hdev, skb);
5415 case HCI_EV_CLOCK_OFFSET:
5416 hci_clock_offset_evt(hdev, skb);
5419 case HCI_EV_PKT_TYPE_CHANGE:
5420 hci_pkt_type_change_evt(hdev, skb);
5423 case HCI_EV_PSCAN_REP_MODE:
5424 hci_pscan_rep_mode_evt(hdev, skb);
5427 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
5428 hci_inquiry_result_with_rssi_evt(hdev, skb);
5431 case HCI_EV_REMOTE_EXT_FEATURES:
5432 hci_remote_ext_features_evt(hdev, skb);
5435 case HCI_EV_SYNC_CONN_COMPLETE:
5436 hci_sync_conn_complete_evt(hdev, skb);
5439 case HCI_EV_EXTENDED_INQUIRY_RESULT:
5440 hci_extended_inquiry_result_evt(hdev, skb);
5443 case HCI_EV_KEY_REFRESH_COMPLETE:
5444 hci_key_refresh_complete_evt(hdev, skb);
5447 case HCI_EV_IO_CAPA_REQUEST:
5448 hci_io_capa_request_evt(hdev, skb);
5451 case HCI_EV_IO_CAPA_REPLY:
5452 hci_io_capa_reply_evt(hdev, skb);
5455 case HCI_EV_USER_CONFIRM_REQUEST:
5456 hci_user_confirm_request_evt(hdev, skb);
5459 case HCI_EV_USER_PASSKEY_REQUEST:
5460 hci_user_passkey_request_evt(hdev, skb);
5463 case HCI_EV_USER_PASSKEY_NOTIFY:
5464 hci_user_passkey_notify_evt(hdev, skb);
5467 case HCI_EV_KEYPRESS_NOTIFY:
5468 hci_keypress_notify_evt(hdev, skb);
5471 case HCI_EV_SIMPLE_PAIR_COMPLETE:
5472 hci_simple_pair_complete_evt(hdev, skb);
5475 case HCI_EV_REMOTE_HOST_FEATURES:
5476 hci_remote_host_features_evt(hdev, skb);
5479 case HCI_EV_LE_META:
5480 hci_le_meta_evt(hdev, skb);
5483 case HCI_EV_CHANNEL_SELECTED:
5484 hci_chan_selected_evt(hdev, skb);
5487 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
5488 hci_remote_oob_data_request_evt(hdev, skb);
5491 case HCI_EV_PHY_LINK_COMPLETE:
5492 hci_phy_link_complete_evt(hdev, skb);
5495 case HCI_EV_LOGICAL_LINK_COMPLETE:
5496 hci_loglink_complete_evt(hdev, skb);
5499 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
5500 hci_disconn_loglink_complete_evt(hdev, skb);
5503 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
5504 hci_disconn_phylink_complete_evt(hdev, skb);
5507 case HCI_EV_NUM_COMP_BLOCKS:
5508 hci_num_comp_blocks_evt(hdev, skb);
5511 #ifdef CONFIG_TIZEN_WIP
5512 case HCI_EV_VENDOR_SPECIFIC:
5513 hci_vendor_specific_evt(hdev, skb);
5518 BT_DBG("%s event 0x%2.2x", hdev->name, event);
5523 hdev->stat.evt_rx++;