2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
35 #ifdef CONFIG_TIZEN_WIP
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
39 #include "hci_request.h"
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 8
45 #ifdef CONFIG_TIZEN_WIP
47 #define U16_MAX ((u16)~0U)
51 static const u16 mgmt_commands[] = {
52 MGMT_OP_READ_INDEX_LIST,
55 MGMT_OP_SET_DISCOVERABLE,
56 MGMT_OP_SET_CONNECTABLE,
57 MGMT_OP_SET_FAST_CONNECTABLE,
59 MGMT_OP_SET_LINK_SECURITY,
63 MGMT_OP_SET_DEV_CLASS,
64 MGMT_OP_SET_LOCAL_NAME,
67 MGMT_OP_LOAD_LINK_KEYS,
68 MGMT_OP_LOAD_LONG_TERM_KEYS,
70 MGMT_OP_GET_CONNECTIONS,
71 MGMT_OP_PIN_CODE_REPLY,
72 MGMT_OP_PIN_CODE_NEG_REPLY,
73 MGMT_OP_SET_IO_CAPABILITY,
75 MGMT_OP_CANCEL_PAIR_DEVICE,
76 MGMT_OP_UNPAIR_DEVICE,
77 MGMT_OP_USER_CONFIRM_REPLY,
78 MGMT_OP_USER_CONFIRM_NEG_REPLY,
79 MGMT_OP_USER_PASSKEY_REPLY,
80 MGMT_OP_USER_PASSKEY_NEG_REPLY,
81 MGMT_OP_READ_LOCAL_OOB_DATA,
82 MGMT_OP_ADD_REMOTE_OOB_DATA,
83 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
84 MGMT_OP_START_DISCOVERY,
85 MGMT_OP_STOP_DISCOVERY,
88 MGMT_OP_UNBLOCK_DEVICE,
89 MGMT_OP_SET_DEVICE_ID,
90 MGMT_OP_SET_ADVERTISING,
92 MGMT_OP_SET_STATIC_ADDRESS,
93 MGMT_OP_SET_SCAN_PARAMS,
94 MGMT_OP_SET_SECURE_CONN,
95 MGMT_OP_SET_DEBUG_KEYS,
98 MGMT_OP_GET_CONN_INFO,
99 MGMT_OP_GET_CLOCK_INFO,
101 MGMT_OP_REMOVE_DEVICE,
102 MGMT_OP_LOAD_CONN_PARAM,
103 MGMT_OP_READ_UNCONF_INDEX_LIST,
104 MGMT_OP_READ_CONFIG_INFO,
105 MGMT_OP_SET_EXTERNAL_CONFIG,
106 MGMT_OP_SET_PUBLIC_ADDRESS,
107 MGMT_OP_START_SERVICE_DISCOVERY,
110 static const u16 mgmt_events[] = {
111 MGMT_EV_CONTROLLER_ERROR,
113 MGMT_EV_INDEX_REMOVED,
114 MGMT_EV_NEW_SETTINGS,
115 MGMT_EV_CLASS_OF_DEV_CHANGED,
116 MGMT_EV_LOCAL_NAME_CHANGED,
117 MGMT_EV_NEW_LINK_KEY,
118 MGMT_EV_NEW_LONG_TERM_KEY,
119 MGMT_EV_DEVICE_CONNECTED,
120 MGMT_EV_DEVICE_DISCONNECTED,
121 MGMT_EV_CONNECT_FAILED,
122 MGMT_EV_PIN_CODE_REQUEST,
123 MGMT_EV_USER_CONFIRM_REQUEST,
124 MGMT_EV_USER_PASSKEY_REQUEST,
126 MGMT_EV_DEVICE_FOUND,
128 MGMT_EV_DEVICE_BLOCKED,
129 MGMT_EV_DEVICE_UNBLOCKED,
130 MGMT_EV_DEVICE_UNPAIRED,
131 MGMT_EV_PASSKEY_NOTIFY,
134 MGMT_EV_DEVICE_ADDED,
135 MGMT_EV_DEVICE_REMOVED,
136 MGMT_EV_NEW_CONN_PARAM,
137 MGMT_EV_UNCONF_INDEX_ADDED,
138 MGMT_EV_UNCONF_INDEX_REMOVED,
139 MGMT_EV_NEW_CONFIG_OPTIONS,
142 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
144 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
145 "\x00\x00\x00\x00\x00\x00\x00\x00"
148 struct list_head list;
155 int (*cmd_complete)(struct pending_cmd *cmd, u8 status);
158 /* HCI to MGMT error code conversion table */
159 static u8 mgmt_status_table[] = {
161 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
162 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
163 MGMT_STATUS_FAILED, /* Hardware Failure */
164 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
165 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
166 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
167 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
168 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
169 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
170 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
171 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
172 MGMT_STATUS_BUSY, /* Command Disallowed */
173 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
174 MGMT_STATUS_REJECTED, /* Rejected Security */
175 MGMT_STATUS_REJECTED, /* Rejected Personal */
176 MGMT_STATUS_TIMEOUT, /* Host Timeout */
177 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
178 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
179 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
180 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
181 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
182 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
183 MGMT_STATUS_BUSY, /* Repeated Attempts */
184 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
185 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
186 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
187 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
188 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
189 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
190 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
191 MGMT_STATUS_FAILED, /* Unspecified Error */
192 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
193 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
194 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
195 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
196 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
197 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
198 MGMT_STATUS_FAILED, /* Unit Link Key Used */
199 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
200 MGMT_STATUS_TIMEOUT, /* Instant Passed */
201 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
202 MGMT_STATUS_FAILED, /* Transaction Collision */
203 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
204 MGMT_STATUS_REJECTED, /* QoS Rejected */
205 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
206 MGMT_STATUS_REJECTED, /* Insufficient Security */
207 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
208 MGMT_STATUS_BUSY, /* Role Switch Pending */
209 MGMT_STATUS_FAILED, /* Slot Violation */
210 MGMT_STATUS_FAILED, /* Role Switch Failed */
211 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
212 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
213 MGMT_STATUS_BUSY, /* Host Busy Pairing */
214 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
215 MGMT_STATUS_BUSY, /* Controller Busy */
216 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
217 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
218 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
219 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
220 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
223 static u8 mgmt_status(u8 hci_status)
225 if (hci_status < ARRAY_SIZE(mgmt_status_table))
226 return mgmt_status_table[hci_status];
228 return MGMT_STATUS_FAILED;
231 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
232 struct sock *skip_sk)
235 struct mgmt_hdr *hdr;
237 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
241 hdr = (void *) skb_put(skb, sizeof(*hdr));
242 hdr->opcode = cpu_to_le16(event);
244 hdr->index = cpu_to_le16(hdev->id);
246 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
247 hdr->len = cpu_to_le16(data_len);
250 memcpy(skb_put(skb, data_len), data, data_len);
253 __net_timestamp(skb);
255 hci_send_to_control(skb, skip_sk);
261 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
264 struct mgmt_hdr *hdr;
265 struct mgmt_ev_cmd_status *ev;
268 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
270 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
274 hdr = (void *) skb_put(skb, sizeof(*hdr));
276 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
277 hdr->index = cpu_to_le16(index);
278 hdr->len = cpu_to_le16(sizeof(*ev));
280 ev = (void *) skb_put(skb, sizeof(*ev));
282 ev->opcode = cpu_to_le16(cmd);
284 err = sock_queue_rcv_skb(sk, skb);
291 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
292 void *rp, size_t rp_len)
295 struct mgmt_hdr *hdr;
296 struct mgmt_ev_cmd_complete *ev;
299 BT_DBG("sock %p", sk);
301 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
305 hdr = (void *) skb_put(skb, sizeof(*hdr));
307 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
308 hdr->index = cpu_to_le16(index);
309 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
311 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
312 ev->opcode = cpu_to_le16(cmd);
316 memcpy(ev->data, rp, rp_len);
318 err = sock_queue_rcv_skb(sk, skb);
325 #ifdef CONFIG_TIZEN_WIP
326 /* BEGIN TIZEN_Bluetooth :: check for le connection update params */
327 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
332 if (min > max || min < 6 || max > 3200)
335 if (to_multiplier < 10 || to_multiplier > 3200)
338 if (max >= to_multiplier * 8)
341 max_latency = (to_multiplier * 8 / max) - 1;
343 if (latency > 499 || latency > max_latency)
348 /* END TIZEN_Bluetooth */
351 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
354 struct mgmt_rp_read_version rp;
356 BT_DBG("sock %p", sk);
358 rp.version = MGMT_VERSION;
359 rp.revision = cpu_to_le16(MGMT_REVISION);
361 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
365 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
368 struct mgmt_rp_read_commands *rp;
369 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
370 const u16 num_events = ARRAY_SIZE(mgmt_events);
375 BT_DBG("sock %p", sk);
377 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
379 rp = kmalloc(rp_size, GFP_KERNEL);
383 rp->num_commands = cpu_to_le16(num_commands);
384 rp->num_events = cpu_to_le16(num_events);
386 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
387 put_unaligned_le16(mgmt_commands[i], opcode);
389 for (i = 0; i < num_events; i++, opcode++)
390 put_unaligned_le16(mgmt_events[i], opcode);
392 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
399 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
402 struct mgmt_rp_read_index_list *rp;
408 BT_DBG("sock %p", sk);
410 read_lock(&hci_dev_list_lock);
413 list_for_each_entry(d, &hci_dev_list, list) {
414 if (d->dev_type == HCI_BREDR &&
415 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
419 rp_len = sizeof(*rp) + (2 * count);
420 rp = kmalloc(rp_len, GFP_ATOMIC);
422 read_unlock(&hci_dev_list_lock);
427 list_for_each_entry(d, &hci_dev_list, list) {
428 if (test_bit(HCI_SETUP, &d->dev_flags) ||
429 test_bit(HCI_CONFIG, &d->dev_flags) ||
430 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
433 /* Devices marked as raw-only are neither configured
434 * nor unconfigured controllers.
436 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
439 if (d->dev_type == HCI_BREDR &&
440 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
441 rp->index[count++] = cpu_to_le16(d->id);
442 BT_DBG("Added hci%u", d->id);
446 rp->num_controllers = cpu_to_le16(count);
447 rp_len = sizeof(*rp) + (2 * count);
449 read_unlock(&hci_dev_list_lock);
451 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
459 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
460 void *data, u16 data_len)
462 struct mgmt_rp_read_unconf_index_list *rp;
468 BT_DBG("sock %p", sk);
470 read_lock(&hci_dev_list_lock);
473 list_for_each_entry(d, &hci_dev_list, list) {
474 if (d->dev_type == HCI_BREDR &&
475 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
479 rp_len = sizeof(*rp) + (2 * count);
480 rp = kmalloc(rp_len, GFP_ATOMIC);
482 read_unlock(&hci_dev_list_lock);
487 list_for_each_entry(d, &hci_dev_list, list) {
488 if (test_bit(HCI_SETUP, &d->dev_flags) ||
489 test_bit(HCI_CONFIG, &d->dev_flags) ||
490 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
493 /* Devices marked as raw-only are neither configured
494 * nor unconfigured controllers.
496 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
499 if (d->dev_type == HCI_BREDR &&
500 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
501 rp->index[count++] = cpu_to_le16(d->id);
502 BT_DBG("Added hci%u", d->id);
506 rp->num_controllers = cpu_to_le16(count);
507 rp_len = sizeof(*rp) + (2 * count);
509 read_unlock(&hci_dev_list_lock);
511 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
519 static bool is_configured(struct hci_dev *hdev)
521 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
522 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
525 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
526 !bacmp(&hdev->public_addr, BDADDR_ANY))
532 static __le32 get_missing_options(struct hci_dev *hdev)
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
537 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
538 options |= MGMT_OPTION_EXTERNAL_CONFIG;
540 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
541 !bacmp(&hdev->public_addr, BDADDR_ANY))
542 options |= MGMT_OPTION_PUBLIC_ADDRESS;
544 return cpu_to_le32(options);
547 static int new_options(struct hci_dev *hdev, struct sock *skip)
549 __le32 options = get_missing_options(hdev);
551 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
552 sizeof(options), skip);
555 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
557 __le32 options = get_missing_options(hdev);
559 return cmd_complete(sk, hdev->id, opcode, 0, &options,
563 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
564 void *data, u16 data_len)
566 struct mgmt_rp_read_config_info rp;
569 BT_DBG("sock %p %s", sk, hdev->name);
573 memset(&rp, 0, sizeof(rp));
574 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
576 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
577 options |= MGMT_OPTION_EXTERNAL_CONFIG;
579 if (hdev->set_bdaddr)
580 options |= MGMT_OPTION_PUBLIC_ADDRESS;
582 rp.supported_options = cpu_to_le32(options);
583 rp.missing_options = get_missing_options(hdev);
585 hci_dev_unlock(hdev);
587 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
591 static u32 get_supported_settings(struct hci_dev *hdev)
595 settings |= MGMT_SETTING_POWERED;
596 settings |= MGMT_SETTING_BONDABLE;
597 /* Disable Debug keys implementation now */
598 #ifdef CONFIG_TIZEN_WIP
599 settings |= MGMT_SETTING_DEBUG_KEYS;
601 settings |= MGMT_SETTING_CONNECTABLE;
602 settings |= MGMT_SETTING_DISCOVERABLE;
604 if (lmp_bredr_capable(hdev)) {
605 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
606 settings |= MGMT_SETTING_FAST_CONNECTABLE;
607 settings |= MGMT_SETTING_BREDR;
608 settings |= MGMT_SETTING_LINK_SECURITY;
610 if (lmp_ssp_capable(hdev)) {
611 settings |= MGMT_SETTING_SSP;
612 settings |= MGMT_SETTING_HS;
615 /* Disable Secure connection implementation now */
616 #ifdef CONFIG_TIZEN_WIP
617 if (lmp_sc_capable(hdev) ||
618 test_bit(HCI_FORCE_BREDR_SMP, &hdev->dbg_flags))
619 settings |= MGMT_SETTING_SECURE_CONN;
623 if (lmp_le_capable(hdev)) {
624 settings |= MGMT_SETTING_LE;
625 settings |= MGMT_SETTING_ADVERTISING;
626 settings |= MGMT_SETTING_SECURE_CONN;
627 settings |= MGMT_SETTING_PRIVACY;
630 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
632 settings |= MGMT_SETTING_CONFIGURATION;
637 static u32 get_current_settings(struct hci_dev *hdev)
641 if (hdev_is_powered(hdev))
642 settings |= MGMT_SETTING_POWERED;
644 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
645 settings |= MGMT_SETTING_CONNECTABLE;
647 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
648 settings |= MGMT_SETTING_FAST_CONNECTABLE;
650 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
651 settings |= MGMT_SETTING_DISCOVERABLE;
653 if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
654 settings |= MGMT_SETTING_BONDABLE;
656 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
657 settings |= MGMT_SETTING_BREDR;
659 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
660 settings |= MGMT_SETTING_LE;
662 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
663 settings |= MGMT_SETTING_LINK_SECURITY;
665 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
666 settings |= MGMT_SETTING_SSP;
668 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
669 settings |= MGMT_SETTING_HS;
671 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
672 settings |= MGMT_SETTING_ADVERTISING;
674 /* Disable Secure connection and debug keys implementation now */
675 #ifdef CONFIG_TIZEN_WIP
676 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
677 settings |= MGMT_SETTING_SECURE_CONN;
679 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
680 settings |= MGMT_SETTING_DEBUG_KEYS;
683 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
684 settings |= MGMT_SETTING_PRIVACY;
689 #define PNP_INFO_SVCLASS_ID 0x1200
691 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
693 u8 *ptr = data, *uuids_start = NULL;
694 struct bt_uuid *uuid;
699 list_for_each_entry(uuid, &hdev->uuids, list) {
702 if (uuid->size != 16)
705 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
709 if (uuid16 == PNP_INFO_SVCLASS_ID)
715 uuids_start[1] = EIR_UUID16_ALL;
719 /* Stop if not enough space to put next UUID */
720 if ((ptr - data) + sizeof(u16) > len) {
721 uuids_start[1] = EIR_UUID16_SOME;
725 *ptr++ = (uuid16 & 0x00ff);
726 *ptr++ = (uuid16 & 0xff00) >> 8;
727 uuids_start[0] += sizeof(uuid16);
733 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
735 u8 *ptr = data, *uuids_start = NULL;
736 struct bt_uuid *uuid;
741 list_for_each_entry(uuid, &hdev->uuids, list) {
742 if (uuid->size != 32)
748 uuids_start[1] = EIR_UUID32_ALL;
752 /* Stop if not enough space to put next UUID */
753 if ((ptr - data) + sizeof(u32) > len) {
754 uuids_start[1] = EIR_UUID32_SOME;
758 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
760 uuids_start[0] += sizeof(u32);
766 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
768 u8 *ptr = data, *uuids_start = NULL;
769 struct bt_uuid *uuid;
774 list_for_each_entry(uuid, &hdev->uuids, list) {
775 if (uuid->size != 128)
781 uuids_start[1] = EIR_UUID128_ALL;
785 /* Stop if not enough space to put next UUID */
786 if ((ptr - data) + 16 > len) {
787 uuids_start[1] = EIR_UUID128_SOME;
791 memcpy(ptr, uuid->uuid, 16);
793 uuids_start[0] += 16;
799 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
801 struct pending_cmd *cmd;
803 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
804 if (cmd->opcode == opcode)
811 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
812 struct hci_dev *hdev,
815 struct pending_cmd *cmd;
817 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
818 if (cmd->user_data != data)
820 if (cmd->opcode == opcode)
827 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
832 name_len = strlen(hdev->dev_name);
834 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
836 if (name_len > max_len) {
838 ptr[1] = EIR_NAME_SHORT;
840 ptr[1] = EIR_NAME_COMPLETE;
842 ptr[0] = name_len + 1;
844 memcpy(ptr + 2, hdev->dev_name, name_len);
846 ad_len += (name_len + 2);
847 ptr += (name_len + 2);
/* Queue an HCI command refreshing the controller's LE scan response
 * data when it changed and LE is enabled.
 * NOTE(review): this block retains stray line-number prefixes from a
 * mangled import of net/bluetooth/mgmt.c; braces and the closing
 * #else/#endif of the CONFIG_TIZEN_WIP region (under Tizen, scan
 * response data appears to be managed by bluez in user space) are
 * missing and must be restored from the Tizen kernel tree.
 */
853 static void update_scan_rsp_data(struct hci_request *req)
855 struct hci_dev *hdev = req->hdev;
856 struct hci_cp_le_set_scan_rsp_data cp;
/* Nothing to do while LE is disabled. */
859 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
862 memset(&cp, 0, sizeof(cp));
864 len = create_scan_rsp_data(hdev, cp.data);
866 #ifdef CONFIG_TIZEN_WIP
867 /* Advertising scan response data is handled in bluez.
868 This value will be updated only when application request the update */
/* Skip the HCI write when the cached copy already matches. */
871 if (hdev->scan_rsp_data_len == len &&
872 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
875 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
876 hdev->scan_rsp_data_len = len;
880 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
884 static u8 get_adv_discov_flags(struct hci_dev *hdev)
886 struct pending_cmd *cmd;
888 /* If there's a pending mgmt command the flags will not yet have
889 * their final values, so check for this first.
891 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
893 struct mgmt_mode *cp = cmd->param;
895 return LE_AD_GENERAL;
896 else if (cp->val == 0x02)
897 return LE_AD_LIMITED;
899 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
900 return LE_AD_LIMITED;
901 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
902 return LE_AD_GENERAL;
908 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
910 u8 ad_len = 0, flags = 0;
912 flags |= get_adv_discov_flags(hdev);
914 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
915 flags |= LE_AD_NO_BREDR;
918 BT_DBG("adv flags 0x%02x", flags);
928 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
930 ptr[1] = EIR_TX_POWER;
931 ptr[2] = (u8) hdev->adv_tx_power;
/* Queue an HCI command refreshing the controller's LE advertising data
 * when it changed and LE is enabled.
 * NOTE(review): this block retains stray line-number prefixes from a
 * mangled import of net/bluetooth/mgmt.c; braces and the closing
 * #else/#endif of the CONFIG_TIZEN_WIP region (under Tizen, advertising
 * data appears to be managed by bluez in user space) are missing and
 * must be restored from the Tizen kernel tree.
 */
940 static void update_adv_data(struct hci_request *req)
942 struct hci_dev *hdev = req->hdev;
943 struct hci_cp_le_set_adv_data cp;
/* Nothing to do while LE is disabled. */
946 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
949 memset(&cp, 0, sizeof(cp));
951 len = create_adv_data(hdev, cp.data);
953 #ifdef CONFIG_TIZEN_WIP
954 /* Bluez will handle the advertising data, including the flag and tx power.
955 This value will be updated only when application request the update */
/* Skip the HCI write when the cached copy already matches. */
958 if (hdev->adv_data_len == len &&
959 memcmp(cp.data, hdev->adv_data, len) == 0)
962 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
963 hdev->adv_data_len = len;
967 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
971 int mgmt_update_adv_data(struct hci_dev *hdev)
973 struct hci_request req;
975 hci_req_init(&req, hdev);
976 update_adv_data(&req);
978 return hci_req_run(&req, NULL);
981 static void create_eir(struct hci_dev *hdev, u8 *data)
986 name_len = strlen(hdev->dev_name);
992 ptr[1] = EIR_NAME_SHORT;
994 ptr[1] = EIR_NAME_COMPLETE;
996 /* EIR Data length */
997 ptr[0] = name_len + 1;
999 memcpy(ptr + 2, hdev->dev_name, name_len);
1001 ptr += (name_len + 2);
1004 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
1006 ptr[1] = EIR_TX_POWER;
1007 ptr[2] = (u8) hdev->inq_tx_power;
1012 if (hdev->devid_source > 0) {
1014 ptr[1] = EIR_DEVICE_ID;
1016 put_unaligned_le16(hdev->devid_source, ptr + 2);
1017 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
1018 put_unaligned_le16(hdev->devid_product, ptr + 6);
1019 put_unaligned_le16(hdev->devid_version, ptr + 8);
1024 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1025 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1026 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
1027 #ifdef CONFIG_TIZEN_WIP
1028 if (hdev->manufacturer_len > 0 &&
1029 ptr - data + hdev->manufacturer_len + 2 <= HCI_MAX_EIR_LENGTH) {
1030 ptr[0] = hdev->manufacturer_len + 1;
1031 ptr[1] = EIR_MANUFACTURER_DATA;
1032 memcpy(ptr + 2, hdev->manufacturer_data, hdev->manufacturer_len);
1033 ptr += hdev->manufacturer_len + 2;
1038 static void update_eir(struct hci_request *req)
1040 struct hci_dev *hdev = req->hdev;
1041 struct hci_cp_write_eir cp;
1043 if (!hdev_is_powered(hdev))
1046 if (!lmp_ext_inq_capable(hdev))
1049 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1052 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1055 memset(&cp, 0, sizeof(cp));
1057 create_eir(hdev, cp.data);
1059 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
1062 memcpy(hdev->eir, cp.data, sizeof(cp.data));
1064 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1067 static u8 get_service_classes(struct hci_dev *hdev)
1069 struct bt_uuid *uuid;
1072 list_for_each_entry(uuid, &hdev->uuids, list)
1073 val |= uuid->svc_hint;
1078 static void update_class(struct hci_request *req)
1080 struct hci_dev *hdev = req->hdev;
1083 BT_DBG("%s", hdev->name);
1085 if (!hdev_is_powered(hdev))
1088 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1091 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1094 cod[0] = hdev->minor_class;
1095 cod[1] = hdev->major_class;
1096 cod[2] = get_service_classes(hdev);
1098 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1101 if (memcmp(cod, hdev->dev_class, 3) == 0)
1104 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1107 #ifndef CONFIG_TIZEN_WIP
1108 static bool get_connectable(struct hci_dev *hdev)
1110 struct pending_cmd *cmd;
1112 /* If there's a pending mgmt command the flag will not yet have
1113 * it's final value, so check for this first.
1115 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1117 struct mgmt_mode *cp = cmd->param;
1121 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1125 static void disable_advertising(struct hci_request *req)
1129 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
/* Queue the HCI commands that (re)program advertising parameters and
 * turn LE advertising on.
 * NOTE(review): this block retains stray line-number prefixes from a
 * mangled import of net/bluetooth/mgmt.c; several original lines are
 * missing, including the #else/#endif pairs of three interleaved
 * CONFIG_TIZEN_WIP regions (the Tizen build appears to use a public own
 * address plus hdev-supplied adv type/filter policy, while the upstream
 * build derives them from get_connectable() and
 * hci_update_random_address()). Restore from the Tizen kernel tree
 * before compiling.
 */
1132 static void enable_advertising(struct hci_request *req)
1134 struct hci_dev *hdev = req->hdev;
1135 struct hci_cp_le_set_adv_param cp;
1136 #ifndef CONFIG_TIZEN_WIP
1137 u8 own_addr_type, enable = 0x01;
1140 u8 own_addr_type = ADDR_LE_DEV_PUBLIC;
/* Do not touch advertising while LE connections exist. */
1144 if (hci_conn_num(hdev, LE_LINK) > 0)
1147 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1148 disable_advertising(req);
1150 /* Clear the HCI_LE_ADV bit temporarily so that the
1151 * hci_update_random_address knows that it's safe to go ahead
1152 * and write a new random address. The flag will be set back on
1153 * as soon as the SET_ADV_ENABLE HCI command completes.
1155 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1157 #ifndef CONFIG_TIZEN_WIP
1158 connectable = get_connectable(hdev);
1160 /* Set require_privacy to true only when non-connectable
1161 * advertising is used. In that case it is fine to use a
1162 * non-resolvable private address.
1164 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1168 memset(&cp, 0, sizeof(cp));
1169 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
1170 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
1171 #ifdef CONFIG_TIZEN_WIP
1172 cp.filter_policy = hdev->adv_filter_policy;
1173 cp.type = hdev->adv_type;
1175 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1177 cp.own_address_type = own_addr_type;
1178 cp.channel_map = hdev->le_adv_channel_map;
1180 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1182 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1185 static void service_cache_off(struct work_struct *work)
1187 struct hci_dev *hdev = container_of(work, struct hci_dev,
1188 service_cache.work);
1189 struct hci_request req;
1191 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1194 hci_req_init(&req, hdev);
1201 hci_dev_unlock(hdev);
1203 hci_req_run(&req, NULL);
1206 static void rpa_expired(struct work_struct *work)
1208 struct hci_dev *hdev = container_of(work, struct hci_dev,
1210 struct hci_request req;
1214 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1216 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1219 /* The generation of a new RPA and programming it into the
1220 * controller happens in the enable_advertising() function.
1222 hci_req_init(&req, hdev);
1223 enable_advertising(&req);
1224 hci_req_run(&req, NULL);
1227 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1229 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1232 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1233 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1235 /* Non-mgmt controlled devices get this bit set
1236 * implicitly so that pairing works for them, however
1237 * for mgmt we require user-space to explicitly enable
1240 clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1243 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1244 void *data, u16 data_len)
1246 struct mgmt_rp_read_info rp;
1248 BT_DBG("sock %p %s", sk, hdev->name);
1252 memset(&rp, 0, sizeof(rp));
1254 bacpy(&rp.bdaddr, &hdev->bdaddr);
1256 rp.version = hdev->hci_ver;
1257 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1259 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1260 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1262 memcpy(rp.dev_class, hdev->dev_class, 3);
1264 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1265 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1267 hci_dev_unlock(hdev);
1269 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1273 static void mgmt_pending_free(struct pending_cmd *cmd)
1280 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1281 struct hci_dev *hdev, void *data,
1284 struct pending_cmd *cmd;
1286 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1290 cmd->opcode = opcode;
1291 cmd->index = hdev->id;
1293 cmd->param = kmemdup(data, len, GFP_KERNEL);
1299 cmd->param_len = len;
1304 list_add(&cmd->list, &hdev->mgmt_pending);
1309 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1310 void (*cb)(struct pending_cmd *cmd,
1314 struct pending_cmd *cmd, *tmp;
1316 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1317 if (opcode > 0 && cmd->opcode != opcode)
1324 static void mgmt_pending_remove(struct pending_cmd *cmd)
1326 list_del(&cmd->list);
1327 mgmt_pending_free(cmd);
1330 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1332 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1334 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1338 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1340 BT_DBG("%s status 0x%02x", hdev->name, status);
1342 if (hci_conn_count(hdev) == 0) {
1343 cancel_delayed_work(&hdev->power_off);
1344 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1348 static bool hci_stop_discovery(struct hci_request *req)
1350 struct hci_dev *hdev = req->hdev;
1351 struct hci_cp_remote_name_req_cancel cp;
1352 struct inquiry_entry *e;
1354 switch (hdev->discovery.state) {
1355 case DISCOVERY_FINDING:
1356 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1357 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1359 cancel_delayed_work(&hdev->le_scan_disable);
1360 hci_req_add_le_scan_disable(req);
1365 case DISCOVERY_RESOLVING:
1366 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1371 bacpy(&cp.bdaddr, &e->data.bdaddr);
1372 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1378 /* Passive scanning */
1379 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1380 hci_req_add_le_scan_disable(req);
1390 static int clean_up_hci_state(struct hci_dev *hdev)
1392 struct hci_request req;
1393 struct hci_conn *conn;
1394 bool discov_stopped;
1397 hci_req_init(&req, hdev);
1399 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1400 test_bit(HCI_PSCAN, &hdev->flags)) {
1402 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1405 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1406 disable_advertising(&req);
1408 discov_stopped = hci_stop_discovery(&req);
1410 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1411 struct hci_cp_disconnect dc;
1412 struct hci_cp_reject_conn_req rej;
1414 switch (conn->state) {
1417 dc.handle = cpu_to_le16(conn->handle);
1418 dc.reason = 0x15; /* Terminated due to Power Off */
1419 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1422 if (conn->type == LE_LINK)
1423 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1425 else if (conn->type == ACL_LINK)
1426 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1430 bacpy(&rej.bdaddr, &conn->dst);
1431 rej.reason = 0x15; /* Terminated due to Power Off */
1432 if (conn->type == ACL_LINK)
1433 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1435 else if (conn->type == SCO_LINK)
1436 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1442 err = hci_req_run(&req, clean_up_hci_complete);
1443 if (!err && discov_stopped)
1444 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1446 #ifdef CONFIG_TIZEN_WIP
1447 if (!err && discov_stopped)
1448 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
1454 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1457 struct mgmt_mode *cp = data;
1458 struct pending_cmd *cmd;
1461 BT_DBG("request for %s", hdev->name);
1463 if (cp->val != 0x00 && cp->val != 0x01)
1464 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1465 MGMT_STATUS_INVALID_PARAMS);
1469 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1470 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1475 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1476 cancel_delayed_work(&hdev->power_off);
1479 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1481 err = mgmt_powered(hdev, 1);
1486 if (!!cp->val == hdev_is_powered(hdev)) {
1487 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1491 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1498 queue_work(hdev->req_workqueue, &hdev->power_on);
1501 /* Disconnect connections, stop scans, etc */
1502 err = clean_up_hci_state(hdev);
1504 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1505 HCI_POWER_OFF_TIMEOUT);
1507 /* ENODATA means there were no HCI commands queued */
1508 if (err == -ENODATA) {
1509 cancel_delayed_work(&hdev->power_off);
1510 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1516 hci_dev_unlock(hdev);
1520 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1524 ev = cpu_to_le32(get_current_settings(hdev));
1526 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1529 int mgmt_new_settings(struct hci_dev *hdev)
1531 return new_settings(hdev, NULL);
1536 struct hci_dev *hdev;
1540 static void settings_rsp(struct pending_cmd *cmd, void *data)
1542 struct cmd_lookup *match = data;
1544 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1546 list_del(&cmd->list);
1548 if (match->sk == NULL) {
1549 match->sk = cmd->sk;
1550 sock_hold(match->sk);
1553 mgmt_pending_free(cmd);
1556 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1560 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1561 mgmt_pending_remove(cmd);
1564 static void cmd_complete_rsp(struct pending_cmd *cmd, void *data)
1566 if (cmd->cmd_complete) {
1569 cmd->cmd_complete(cmd, *status);
1570 mgmt_pending_remove(cmd);
1575 cmd_status_rsp(cmd, data);
1578 static int generic_cmd_complete(struct pending_cmd *cmd, u8 status)
1580 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1581 cmd->param, cmd->param_len);
1584 static int addr_cmd_complete(struct pending_cmd *cmd, u8 status)
1586 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, cmd->param,
1587 sizeof(struct mgmt_addr_info));
1590 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1592 if (!lmp_bredr_capable(hdev))
1593 return MGMT_STATUS_NOT_SUPPORTED;
1594 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1595 return MGMT_STATUS_REJECTED;
1597 return MGMT_STATUS_SUCCESS;
1600 static u8 mgmt_le_support(struct hci_dev *hdev)
1602 if (!lmp_le_capable(hdev))
1603 return MGMT_STATUS_NOT_SUPPORTED;
1604 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1605 return MGMT_STATUS_REJECTED;
1607 return MGMT_STATUS_SUCCESS;
1610 static void set_discoverable_complete(struct hci_dev *hdev, u8 status,
1613 struct pending_cmd *cmd;
1614 struct mgmt_mode *cp;
1615 struct hci_request req;
1618 BT_DBG("status 0x%02x", status);
1622 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1627 u8 mgmt_err = mgmt_status(status);
1628 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1629 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1635 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1638 if (hdev->discov_timeout > 0) {
1639 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1640 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1644 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1648 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1651 new_settings(hdev, cmd->sk);
1653 /* When the discoverable mode gets changed, make sure
1654 * that class of device has the limited discoverable
1655 * bit correctly set. Also update page scan based on whitelist
1658 hci_req_init(&req, hdev);
1659 __hci_update_page_scan(&req);
1661 hci_req_run(&req, NULL);
1664 mgmt_pending_remove(cmd);
1667 hci_dev_unlock(hdev);
1670 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1673 struct mgmt_cp_set_discoverable *cp = data;
1674 struct pending_cmd *cmd;
1675 struct hci_request req;
1680 BT_DBG("request for %s", hdev->name);
1682 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1683 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1684 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1685 MGMT_STATUS_REJECTED);
1687 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1688 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1689 MGMT_STATUS_INVALID_PARAMS);
1691 timeout = __le16_to_cpu(cp->timeout);
1693 /* Disabling discoverable requires that no timeout is set,
1694 * and enabling limited discoverable requires a timeout.
1696 if ((cp->val == 0x00 && timeout > 0) ||
1697 (cp->val == 0x02 && timeout == 0))
1698 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1699 MGMT_STATUS_INVALID_PARAMS);
1703 if (!hdev_is_powered(hdev) && timeout > 0) {
1704 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1705 MGMT_STATUS_NOT_POWERED);
1709 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1710 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1711 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1716 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1717 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1718 MGMT_STATUS_REJECTED);
1722 if (!hdev_is_powered(hdev)) {
1723 bool changed = false;
1725 /* Setting limited discoverable when powered off is
1726 * not a valid operation since it requires a timeout
1727 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1729 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1730 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1734 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1739 err = new_settings(hdev, sk);
1744 /* If the current mode is the same, then just update the timeout
1745 * value with the new value. And if only the timeout gets updated,
1746 * then no need for any HCI transactions.
1748 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1749 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1750 &hdev->dev_flags)) {
1751 cancel_delayed_work(&hdev->discov_off);
1752 hdev->discov_timeout = timeout;
1754 if (cp->val && hdev->discov_timeout > 0) {
1755 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1756 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1760 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1764 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1770 /* Cancel any potential discoverable timeout that might be
1771 * still active and store new timeout value. The arming of
1772 * the timeout happens in the complete handler.
1774 cancel_delayed_work(&hdev->discov_off);
1775 hdev->discov_timeout = timeout;
1777 /* Limited discoverable mode */
1778 if (cp->val == 0x02)
1779 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1781 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1783 hci_req_init(&req, hdev);
1785 /* The procedure for LE-only controllers is much simpler - just
1786 * update the advertising data.
1788 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1794 struct hci_cp_write_current_iac_lap hci_cp;
1796 if (cp->val == 0x02) {
1797 /* Limited discoverable mode */
1798 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1799 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1800 hci_cp.iac_lap[1] = 0x8b;
1801 hci_cp.iac_lap[2] = 0x9e;
1802 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1803 hci_cp.iac_lap[4] = 0x8b;
1804 hci_cp.iac_lap[5] = 0x9e;
1806 /* General discoverable mode */
1808 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1809 hci_cp.iac_lap[1] = 0x8b;
1810 hci_cp.iac_lap[2] = 0x9e;
1813 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1814 (hci_cp.num_iac * 3) + 1, &hci_cp);
1816 scan |= SCAN_INQUIRY;
1818 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1821 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1824 update_adv_data(&req);
1826 err = hci_req_run(&req, set_discoverable_complete);
1828 mgmt_pending_remove(cmd);
1831 hci_dev_unlock(hdev);
1835 static void write_fast_connectable(struct hci_request *req, bool enable)
1837 struct hci_dev *hdev = req->hdev;
1838 #ifndef CONFIG_TIZEN_WIP
1839 struct hci_cp_write_page_scan_activity acp;
1843 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1846 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1849 #ifdef CONFIG_TIZEN_WIP
1851 type = PAGE_SCAN_TYPE_INTERLACED;
1853 type = PAGE_SCAN_TYPE_STANDARD;
1857 type = PAGE_SCAN_TYPE_INTERLACED;
1859 /* 160 msec page scan interval */
1860 acp.interval = cpu_to_le16(0x0100);
1862 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1864 /* default 1.28 sec page scan */
1865 acp.interval = cpu_to_le16(0x0800);
1868 acp.window = cpu_to_le16(0x0012);
1870 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1871 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1872 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1876 if (hdev->page_scan_type != type)
1877 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1880 static void set_connectable_complete(struct hci_dev *hdev, u8 status,
1883 struct pending_cmd *cmd;
1884 struct mgmt_mode *cp;
1885 bool conn_changed, discov_changed;
1887 BT_DBG("status 0x%02x", status);
1891 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1896 u8 mgmt_err = mgmt_status(status);
1897 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1903 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
1905 discov_changed = false;
1907 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
1909 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
1913 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1915 if (conn_changed || discov_changed) {
1916 new_settings(hdev, cmd->sk);
1917 hci_update_page_scan(hdev);
1919 mgmt_update_adv_data(hdev);
1920 hci_update_background_scan(hdev);
1924 mgmt_pending_remove(cmd);
1927 hci_dev_unlock(hdev);
1930 static int set_connectable_update_settings(struct hci_dev *hdev,
1931 struct sock *sk, u8 val)
1933 bool changed = false;
1936 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1940 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1942 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1943 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1946 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1951 hci_update_page_scan(hdev);
1952 hci_update_background_scan(hdev);
1953 return new_settings(hdev, sk);
1959 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1962 struct mgmt_mode *cp = data;
1963 struct pending_cmd *cmd;
1964 struct hci_request req;
1968 BT_DBG("request for %s", hdev->name);
1970 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1971 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1972 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1973 MGMT_STATUS_REJECTED);
1975 if (cp->val != 0x00 && cp->val != 0x01)
1976 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1977 MGMT_STATUS_INVALID_PARAMS);
1981 if (!hdev_is_powered(hdev)) {
1982 err = set_connectable_update_settings(hdev, sk, cp->val);
1986 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1987 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1988 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1993 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1999 hci_req_init(&req, hdev);
2001 /* If BR/EDR is not enabled and we disable advertising as a
2002 * by-product of disabling connectable, we need to update the
2003 * advertising flags.
2005 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2007 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2008 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2010 update_adv_data(&req);
2011 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
2015 /* If we don't have any whitelist entries just
2016 * disable all scanning. If there are entries
2017 * and we had both page and inquiry scanning
2018 * enabled then fall back to only page scanning.
2019 * Otherwise no changes are needed.
2021 if (list_empty(&hdev->whitelist))
2022 scan = SCAN_DISABLED;
2023 else if (test_bit(HCI_ISCAN, &hdev->flags))
2026 goto no_scan_update;
2028 if (test_bit(HCI_ISCAN, &hdev->flags) &&
2029 hdev->discov_timeout > 0)
2030 cancel_delayed_work(&hdev->discov_off);
2033 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
2037 /* If we're going from non-connectable to connectable or
2038 * vice-versa when fast connectable is enabled ensure that fast
2039 * connectable gets disabled. write_fast_connectable won't do
2040 * anything if the page scan parameters are already what they
2043 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
2044 write_fast_connectable(&req, false);
2046 /* Update the advertising parameters if necessary */
2047 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2048 enable_advertising(&req);
2050 err = hci_req_run(&req, set_connectable_complete);
2052 mgmt_pending_remove(cmd);
2053 if (err == -ENODATA)
2054 err = set_connectable_update_settings(hdev, sk,
2060 hci_dev_unlock(hdev);
2064 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
2067 struct mgmt_mode *cp = data;
2071 BT_DBG("request for %s", hdev->name);
2073 if (cp->val != 0x00 && cp->val != 0x01)
2074 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
2075 MGMT_STATUS_INVALID_PARAMS);
2080 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
2082 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
2084 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
2089 err = new_settings(hdev, sk);
2092 hci_dev_unlock(hdev);
2096 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
2099 struct mgmt_mode *cp = data;
2100 struct pending_cmd *cmd;
2104 BT_DBG("request for %s", hdev->name);
2106 status = mgmt_bredr_support(hdev);
2108 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2111 if (cp->val != 0x00 && cp->val != 0x01)
2112 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2113 MGMT_STATUS_INVALID_PARAMS);
2117 if (!hdev_is_powered(hdev)) {
2118 bool changed = false;
2120 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
2121 &hdev->dev_flags)) {
2122 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
2126 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2131 err = new_settings(hdev, sk);
2136 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
2137 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
2144 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
2145 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
2149 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
2155 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
2157 mgmt_pending_remove(cmd);
2162 hci_dev_unlock(hdev);
2166 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2168 struct mgmt_mode *cp = data;
2169 struct pending_cmd *cmd;
2173 BT_DBG("request for %s", hdev->name);
2175 status = mgmt_bredr_support(hdev);
2177 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2179 if (!lmp_ssp_capable(hdev))
2180 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2181 MGMT_STATUS_NOT_SUPPORTED);
2183 if (cp->val != 0x00 && cp->val != 0x01)
2184 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2185 MGMT_STATUS_INVALID_PARAMS);
2189 if (!hdev_is_powered(hdev)) {
2193 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2196 changed = test_and_clear_bit(HCI_SSP_ENABLED,
2199 changed = test_and_clear_bit(HCI_HS_ENABLED,
2202 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2205 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2210 err = new_settings(hdev, sk);
2215 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2216 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2217 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2222 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2223 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2227 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2233 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2234 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2235 sizeof(cp->val), &cp->val);
2237 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2239 mgmt_pending_remove(cmd);
2244 hci_dev_unlock(hdev);
2248 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2250 struct mgmt_mode *cp = data;
2255 BT_DBG("request for %s", hdev->name);
2257 status = mgmt_bredr_support(hdev);
2259 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2261 if (!lmp_ssp_capable(hdev))
2262 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2263 MGMT_STATUS_NOT_SUPPORTED);
2265 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2266 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2267 MGMT_STATUS_REJECTED);
2269 if (cp->val != 0x00 && cp->val != 0x01)
2270 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2271 MGMT_STATUS_INVALID_PARAMS);
2276 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2278 if (hdev_is_powered(hdev)) {
2279 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2280 MGMT_STATUS_REJECTED);
2284 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2287 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2292 err = new_settings(hdev, sk);
2295 hci_dev_unlock(hdev);
2299 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2301 struct cmd_lookup match = { NULL, hdev };
2303 #ifdef CONFIG_TIZEN_WIP
2308 u8 mgmt_err = mgmt_status(status);
2310 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2312 #ifdef CONFIG_TIZEN_WIP
2319 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2321 new_settings(hdev, match.sk);
2326 /* Make sure the controller has a good default for
2327 * advertising data. Restrict the update to when LE
2328 * has actually been enabled. During power on, the
2329 * update in powered_update_hci will take care of it.
2331 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2332 struct hci_request req;
2334 #ifndef CONFIG_TIZEN_WIP
2338 hci_req_init(&req, hdev);
2339 update_adv_data(&req);
2340 update_scan_rsp_data(&req);
2341 __hci_update_background_scan(&req);
2342 hci_req_run(&req, NULL);
2344 hci_update_background_scan(hdev);
2345 #ifdef CONFIG_TIZEN_WIP
2347 hci_dev_unlock(hdev);
2351 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2353 struct mgmt_mode *cp = data;
2354 struct hci_cp_write_le_host_supported hci_cp;
2355 struct pending_cmd *cmd;
2356 struct hci_request req;
2360 BT_DBG("request for %s", hdev->name);
2362 if (!lmp_le_capable(hdev))
2363 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2364 MGMT_STATUS_NOT_SUPPORTED);
2366 if (cp->val != 0x00 && cp->val != 0x01)
2367 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2368 MGMT_STATUS_INVALID_PARAMS);
2370 /* LE-only devices do not allow toggling LE on/off */
2371 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2372 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2373 MGMT_STATUS_REJECTED);
2378 enabled = lmp_host_le_capable(hdev);
2380 if (!hdev_is_powered(hdev) || val == enabled) {
2381 bool changed = false;
2383 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2384 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2388 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2389 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2393 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2398 err = new_settings(hdev, sk);
2403 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2404 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2405 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2410 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2416 hci_req_init(&req, hdev);
2418 memset(&hci_cp, 0, sizeof(hci_cp));
2422 hci_cp.simul = 0x00;
2424 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2425 disable_advertising(&req);
2428 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2431 err = hci_req_run(&req, le_enable_complete);
2433 mgmt_pending_remove(cmd);
2436 hci_dev_unlock(hdev);
2440 /* This is a helper function to test for pending mgmt commands that can
2441 * cause CoD or EIR HCI commands. We can only allow one such pending
2442 * mgmt command at a time since otherwise we cannot easily track what
2443 * the current values are, will be, and based on that calculate if a new
2444 * HCI command needs to be sent and if yes with what value.
2446 static bool pending_eir_or_class(struct hci_dev *hdev)
2448 struct pending_cmd *cmd;
2450 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2451 switch (cmd->opcode) {
2452 case MGMT_OP_ADD_UUID:
2453 case MGMT_OP_REMOVE_UUID:
2454 case MGMT_OP_SET_DEV_CLASS:
2455 case MGMT_OP_SET_POWERED:
2463 static const u8 bluetooth_base_uuid[] = {
2464 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2465 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2468 static u8 get_uuid_size(const u8 *uuid)
2472 if (memcmp(uuid, bluetooth_base_uuid, 12))
2475 val = get_unaligned_le32(&uuid[12]);
2482 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2484 struct pending_cmd *cmd;
2488 cmd = mgmt_pending_find(mgmt_op, hdev);
2492 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2493 hdev->dev_class, 3);
2495 mgmt_pending_remove(cmd);
2498 hci_dev_unlock(hdev);
2501 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2503 BT_DBG("status 0x%02x", status);
2505 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
2508 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2510 struct mgmt_cp_add_uuid *cp = data;
2511 struct pending_cmd *cmd;
2512 struct hci_request req;
2513 struct bt_uuid *uuid;
2516 BT_DBG("request for %s", hdev->name);
2520 if (pending_eir_or_class(hdev)) {
2521 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2526 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2532 memcpy(uuid->uuid, cp->uuid, 16);
2533 uuid->svc_hint = cp->svc_hint;
2534 uuid->size = get_uuid_size(cp->uuid);
2536 list_add_tail(&uuid->list, &hdev->uuids);
2538 hci_req_init(&req, hdev);
2543 err = hci_req_run(&req, add_uuid_complete);
2545 if (err != -ENODATA)
2548 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2549 hdev->dev_class, 3);
2553 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2562 hci_dev_unlock(hdev);
2566 static bool enable_service_cache(struct hci_dev *hdev)
2568 if (!hdev_is_powered(hdev))
2571 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2572 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2580 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2582 BT_DBG("status 0x%02x", status);
2584 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2587 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2590 struct mgmt_cp_remove_uuid *cp = data;
2591 struct pending_cmd *cmd;
2592 struct bt_uuid *match, *tmp;
2593 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2594 struct hci_request req;
2597 BT_DBG("request for %s", hdev->name);
2601 if (pending_eir_or_class(hdev)) {
2602 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2607 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2608 hci_uuids_clear(hdev);
2610 if (enable_service_cache(hdev)) {
2611 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2612 0, hdev->dev_class, 3);
2621 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2622 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2625 list_del(&match->list);
2631 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2632 MGMT_STATUS_INVALID_PARAMS);
2637 hci_req_init(&req, hdev);
2642 err = hci_req_run(&req, remove_uuid_complete);
2644 if (err != -ENODATA)
2647 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2648 hdev->dev_class, 3);
2652 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2661 hci_dev_unlock(hdev);
2665 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2667 BT_DBG("status 0x%02x", status);
2669 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2672 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2675 struct mgmt_cp_set_dev_class *cp = data;
2676 struct pending_cmd *cmd;
2677 struct hci_request req;
2680 BT_DBG("request for %s", hdev->name);
2682 if (!lmp_bredr_capable(hdev))
2683 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2684 MGMT_STATUS_NOT_SUPPORTED);
2688 if (pending_eir_or_class(hdev)) {
2689 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2694 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2695 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2696 MGMT_STATUS_INVALID_PARAMS);
2700 hdev->major_class = cp->major;
2701 hdev->minor_class = cp->minor;
2703 if (!hdev_is_powered(hdev)) {
2704 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2705 hdev->dev_class, 3);
2709 hci_req_init(&req, hdev);
2711 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2712 hci_dev_unlock(hdev);
2713 cancel_delayed_work_sync(&hdev->service_cache);
2720 err = hci_req_run(&req, set_class_complete);
2722 if (err != -ENODATA)
2725 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2726 hdev->dev_class, 3);
2730 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2739 hci_dev_unlock(hdev);
2743 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2746 struct mgmt_cp_load_link_keys *cp = data;
2747 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2748 sizeof(struct mgmt_link_key_info));
2749 u16 key_count, expected_len;
2753 BT_DBG("request for %s", hdev->name);
2755 if (!lmp_bredr_capable(hdev))
2756 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2757 MGMT_STATUS_NOT_SUPPORTED);
2759 key_count = __le16_to_cpu(cp->key_count);
2760 if (key_count > max_key_count) {
2761 BT_ERR("load_link_keys: too big key_count value %u",
2763 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2764 MGMT_STATUS_INVALID_PARAMS);
2767 expected_len = sizeof(*cp) + key_count *
2768 sizeof(struct mgmt_link_key_info);
2769 if (expected_len != len) {
2770 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2772 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2773 MGMT_STATUS_INVALID_PARAMS);
2776 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2777 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2778 MGMT_STATUS_INVALID_PARAMS);
2780 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
2783 for (i = 0; i < key_count; i++) {
2784 struct mgmt_link_key_info *key = &cp->keys[i];
2786 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2787 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2788 MGMT_STATUS_INVALID_PARAMS);
2793 hci_link_keys_clear(hdev);
2796 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2799 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2803 new_settings(hdev, NULL);
2805 for (i = 0; i < key_count; i++) {
2806 struct mgmt_link_key_info *key = &cp->keys[i];
2808 /* Always ignore debug keys and require a new pairing if
2809 * the user wants to use them.
2811 if (key->type == HCI_LK_DEBUG_COMBINATION)
2814 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2815 key->type, key->pin_len, NULL);
2818 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2820 hci_dev_unlock(hdev);
2825 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2826 u8 addr_type, struct sock *skip_sk)
2828 struct mgmt_ev_device_unpaired ev;
2830 bacpy(&ev.addr.bdaddr, bdaddr);
2831 ev.addr.type = addr_type;
2833 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2837 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2840 struct mgmt_cp_unpair_device *cp = data;
2841 struct mgmt_rp_unpair_device rp;
2842 struct hci_cp_disconnect dc;
2843 struct pending_cmd *cmd;
2844 struct hci_conn *conn;
2847 memset(&rp, 0, sizeof(rp));
2848 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2849 rp.addr.type = cp->addr.type;
2851 if (!bdaddr_type_is_valid(cp->addr.type))
2852 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2853 MGMT_STATUS_INVALID_PARAMS,
2856 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2857 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2858 MGMT_STATUS_INVALID_PARAMS,
2863 if (!hdev_is_powered(hdev)) {
2864 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2865 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2869 if (cp->addr.type == BDADDR_BREDR) {
2870 /* If disconnection is requested, then look up the
2871 * connection. If the remote device is connected, it
2872 * will be later used to terminate the link.
2874 * Setting it to NULL explicitly will cause no
2875 * termination of the link.
2878 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2883 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2887 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2890 /* Defer clearing up the connection parameters
2891 * until closing to give a chance of keeping
2892 * them if a repairing happens.
2894 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2896 /* If disconnection is not requested, then
2897 * clear the connection variable so that the
2898 * link is not terminated.
2900 if (!cp->disconnect)
2904 if (cp->addr.type == BDADDR_LE_PUBLIC)
2905 addr_type = ADDR_LE_DEV_PUBLIC;
2907 addr_type = ADDR_LE_DEV_RANDOM;
2909 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2911 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2915 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2916 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2920 /* If the connection variable is set, then termination of the
2921 * link is requested.
2924 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2926 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2930 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2937 cmd->cmd_complete = addr_cmd_complete;
2939 dc.handle = cpu_to_le16(conn->handle);
2940 dc.reason = 0x13; /* Remote User Terminated Connection */
2941 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2943 mgmt_pending_remove(cmd);
2946 hci_dev_unlock(hdev);
2950 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2953 struct mgmt_cp_disconnect *cp = data;
2954 struct mgmt_rp_disconnect rp;
2955 struct pending_cmd *cmd;
2956 struct hci_conn *conn;
2961 memset(&rp, 0, sizeof(rp));
2962 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2963 rp.addr.type = cp->addr.type;
2965 if (!bdaddr_type_is_valid(cp->addr.type))
2966 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2967 MGMT_STATUS_INVALID_PARAMS,
2972 if (!test_bit(HCI_UP, &hdev->flags)) {
2973 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2974 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2978 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2979 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2980 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2984 if (cp->addr.type == BDADDR_BREDR)
2985 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2988 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2990 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2991 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2992 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2996 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3002 cmd->cmd_complete = generic_cmd_complete;
3004 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3006 mgmt_pending_remove(cmd);
3009 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type to the corresponding mgmt
 * BDADDR_* address-type constant used in management messages.
 */
3013 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3015 switch (link_type) {
3017 switch (addr_type) {
3018 case ADDR_LE_DEV_PUBLIC:
3019 return BDADDR_LE_PUBLIC;
3022 /* Fallback to LE Random address type */
3023 return BDADDR_LE_RANDOM;
3027 /* Fallback to BR/EDR type */
3028 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: count connections flagged
 * HCI_CONN_MGMT_CONNECTED, allocate a response sized for that count,
 * then fill in one mgmt_addr_info per connection, skipping SCO/ESCO
 * links (they are transport channels, not mgmt-visible devices).
 * NOTE(review): listing is line-sampled; kfree()/unlock paths elided.
 */
3032 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3035 struct mgmt_rp_get_connections *rp;
3045 if (!hdev_is_powered(hdev)) {
3046 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3047 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply. */
3052 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3053 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3057 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3058 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: emit the address entries. */
3065 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3066 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3068 bacpy(&rp->addr[i].bdaddr, &c->dst);
3069 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3070 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3075 rp->conn_count = cpu_to_le16(i);
3077 /* Recalculate length in case of filtered SCO connections, etc */
3078 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
3080 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3086 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY mgmt command and send the
 * corresponding HCI PIN Code Negative Reply for the given address.
 * The pending entry is removed again if the HCI send fails.
 */
3090 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3091 struct mgmt_cp_pin_code_neg_reply *cp)
3093 struct pending_cmd *cmd;
3096 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3101 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3102 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3104 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: require a powered adapter and an
 * existing ACL connection; for high-security pairing reject any PIN
 * shorter than 16 bytes (sending a negative reply instead), otherwise
 * forward the PIN to the controller via HCI_OP_PIN_CODE_REPLY.
 */
3109 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3112 struct hci_conn *conn;
3113 struct mgmt_cp_pin_code_reply *cp = data;
3114 struct hci_cp_pin_code_reply reply;
3115 struct pending_cmd *cmd;
3122 if (!hdev_is_powered(hdev)) {
3123 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3124 MGMT_STATUS_NOT_POWERED);
3128 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3130 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3131 MGMT_STATUS_NOT_CONNECTED);
/* BT_SECURITY_HIGH demands a full 16-byte PIN; anything shorter is
 * answered with a negative reply on the user's behalf. */
3135 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3136 struct mgmt_cp_pin_code_neg_reply ncp;
3138 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3140 BT_ERR("PIN code is not 16 bytes long");
3142 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3144 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3145 MGMT_STATUS_INVALID_PARAMS);
3150 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3156 cmd->cmd_complete = addr_cmd_complete;
3158 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3159 reply.pin_len = cp->pin_len;
3160 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3162 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3164 mgmt_pending_remove(cmd);
3167 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validate the value against the
 * SMP maximum (KeyboardDisplay) and store it in hdev->io_capability
 * under the dev lock. Always replies with a direct command complete.
 */
3171 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3174 struct mgmt_cp_set_io_capability *cp = data;
3178 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3179 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3180 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3184 hdev->io_capability = cp->io_capability;
3186 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3187 hdev->io_capability);
3189 hci_dev_unlock(hdev);
3191 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE mgmt command whose user_data is this
 * connection; returns NULL semantics are implied by the elided tail.
 */
3195 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3197 struct hci_dev *hdev = conn->hdev;
3198 struct pending_cmd *cmd;
3200 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3201 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3204 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: send the mgmt reply with the peer
 * address and status, detach all pairing callbacks from the
 * connection, drop the reference taken at pair time, and keep the
 * stored connection parameters for the now-paired device.
 */
3213 static int pairing_complete(struct pending_cmd *cmd, u8 status)
3215 struct mgmt_rp_pair_device rp;
3216 struct hci_conn *conn = cmd->user_data;
3219 bacpy(&rp.addr.bdaddr, &conn->dst);
3220 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3222 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3225 /* So we don't get further callbacks for this connection */
3226 conn->connect_cfm_cb = NULL;
3227 conn->security_cfm_cb = NULL;
3228 conn->disconn_cfm_cb = NULL;
3230 hci_conn_drop(conn);
3232 /* The device is paired so there is no need to remove
3233 * its connection parameters anymore.
3235 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* SMP layer notification: translate success/failure into a mgmt
 * status and complete + remove the pending pairing command, if any.
 */
3242 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3244 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3245 struct pending_cmd *cmd;
3247 cmd = find_pairing(conn);
3249 cmd->cmd_complete(cmd, status);
3250 mgmt_pending_remove(cmd);
/* BR/EDR connection callback: complete the pending pairing command
 * with the HCI status translated to a mgmt status.
 */
3254 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3256 struct pending_cmd *cmd;
3258 BT_DBG("status %u", status);
3260 cmd = find_pairing(conn);
3262 BT_DBG("Unable to find a pending command");
3266 cmd->cmd_complete(cmd, mgmt_status(status));
3267 mgmt_pending_remove(cmd);
/* LE connection callback counterpart of pairing_complete_cb().
 * NOTE(review): listing is line-sampled; the guard between the two
 * BT_DBG calls (likely a status/SMP check) is elided here.
 */
3270 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3272 struct pending_cmd *cmd;
3274 BT_DBG("status %u", status);
3279 cmd = find_pairing(conn);
3281 BT_DBG("Unable to find a pending command");
3285 cmd->cmd_complete(cmd, mgmt_status(status));
3286 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate address and IO capability,
 * create the BR/EDR (hci_connect_acl) or LE (hci_connect_le)
 * connection, register pairing callbacks, and either wait for the
 * security procedure or complete immediately if the link is already
 * connected and secure.
 * NOTE(review): listing is line-sampled; several branches/labels are
 * elided between the visible statements.
 */
3289 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3292 struct mgmt_cp_pair_device *cp = data;
3293 struct mgmt_rp_pair_device rp;
3294 struct pending_cmd *cmd;
3295 u8 sec_level, auth_type;
3296 struct hci_conn *conn;
/* Pre-fill the reply with the target address for all exit paths. */
3301 memset(&rp, 0, sizeof(rp));
3302 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3303 rp.addr.type = cp->addr.type;
3305 if (!bdaddr_type_is_valid(cp->addr.type))
3306 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3307 MGMT_STATUS_INVALID_PARAMS,
3310 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3311 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3312 MGMT_STATUS_INVALID_PARAMS,
3317 if (!hdev_is_powered(hdev)) {
3318 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3319 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Pairing always requests at least medium security with dedicated
 * bonding for the BR/EDR case. */
3323 sec_level = BT_SECURITY_MEDIUM;
3324 auth_type = HCI_AT_DEDICATED_BONDING;
3326 if (cp->addr.type == BDADDR_BREDR) {
3327 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3332 /* Convert from L2CAP channel address type to HCI address type
3334 if (cp->addr.type == BDADDR_LE_PUBLIC)
3335 addr_type = ADDR_LE_DEV_PUBLIC;
3337 addr_type = ADDR_LE_DEV_RANDOM;
3339 /* When pairing a new device, it is expected to remember
3340 * this device for future connections. Adding the connection
3341 * parameter information ahead of time allows tracking
3342 * of the slave preferred values and will speed up any
3343 * further connection establishment.
3345 * If connection parameters already exist, then they
3346 * will be kept and this function does nothing.
3348 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3350 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3351 sec_level, HCI_LE_CONN_TIMEOUT,
/* Connection attempt failed: map -EBUSY to BUSY, anything else to
 * CONNECT_FAILED. */
3358 if (PTR_ERR(conn) == -EBUSY)
3359 status = MGMT_STATUS_BUSY;
3361 status = MGMT_STATUS_CONNECT_FAILED;
3363 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connect callback already installed means another pairing owns
 * this connection. */
3369 if (conn->connect_cfm_cb) {
3370 hci_conn_drop(conn);
3371 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3372 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3376 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3379 hci_conn_drop(conn);
3383 cmd->cmd_complete = pairing_complete;
3385 /* For LE, just connecting isn't a proof that the pairing finished */
3386 if (cp->addr.type == BDADDR_BREDR) {
3387 conn->connect_cfm_cb = pairing_complete_cb;
3388 conn->security_cfm_cb = pairing_complete_cb;
3389 conn->disconn_cfm_cb = pairing_complete_cb;
3391 conn->connect_cfm_cb = le_pairing_complete_cb;
3392 conn->security_cfm_cb = le_pairing_complete_cb;
3393 conn->disconn_cfm_cb = le_pairing_complete_cb;
3396 conn->io_capability = cp->io_cap;
/* Hold a reference for the pending command; released in
 * pairing_complete(). */
3397 cmd->user_data = hci_conn_get(conn);
3399 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3400 hci_conn_security(conn, sec_level, auth_type, true)) {
3401 cmd->cmd_complete(cmd, 0);
3402 mgmt_pending_remove(cmd);
3408 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: locate the pending PAIR_DEVICE
 * command, verify the supplied address matches the connection being
 * paired, then complete it with MGMT_STATUS_CANCELLED and ack the
 * cancel request.
 */
3412 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3415 struct mgmt_addr_info *addr = data;
3416 struct pending_cmd *cmd;
3417 struct hci_conn *conn;
3424 if (!hdev_is_powered(hdev)) {
3425 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3426 MGMT_STATUS_NOT_POWERED);
3430 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3432 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3433 MGMT_STATUS_INVALID_PARAMS);
3437 conn = cmd->user_data;
/* The address to cancel must be the one actually being paired. */
3439 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3440 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3441 MGMT_STATUS_INVALID_PARAMS);
3445 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3446 mgmt_pending_remove(cmd);
3448 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3449 addr, sizeof(*addr));
3451 hci_dev_unlock(hdev);
/* Common helper for user confirmation / passkey (neg-)replies.
 * For LE links the answer is routed to SMP via
 * smp_user_confirm_reply(); for BR/EDR it is forwarded to the
 * controller as the given hci_op (with the passkey payload only for
 * HCI_OP_USER_PASSKEY_REPLY, otherwise just the bdaddr).
 */
3455 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3456 struct mgmt_addr_info *addr, u16 mgmt_op,
3457 u16 hci_op, __le32 passkey)
3459 struct pending_cmd *cmd;
3460 struct hci_conn *conn;
3465 if (!hdev_is_powered(hdev)) {
3466 err = cmd_complete(sk, hdev->id, mgmt_op,
3467 MGMT_STATUS_NOT_POWERED, addr,
3472 if (addr->type == BDADDR_BREDR)
3473 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3475 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3478 err = cmd_complete(sk, hdev->id, mgmt_op,
3479 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP. */
3484 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3485 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3487 err = cmd_complete(sk, hdev->id, mgmt_op,
3488 MGMT_STATUS_SUCCESS, addr,
3491 err = cmd_complete(sk, hdev->id, mgmt_op,
3492 MGMT_STATUS_FAILED, addr,
3498 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3504 cmd->cmd_complete = addr_cmd_complete;
3506 /* Continue with pairing via HCI */
3507 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3508 struct hci_cp_user_passkey_reply cp;
3510 bacpy(&cp.bdaddr, &addr->bdaddr);
3511 cp.passkey = passkey;
3512 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3514 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3518 mgmt_pending_remove(cmd);
3521 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper delegating to
 * user_pairing_resp() with the HCI PIN-code negative reply opcode.
 */
3525 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3526 void *data, u16 len)
3528 struct mgmt_cp_pin_code_neg_reply *cp = data;
3532 return user_pairing_resp(sk, hdev, &cp->addr,
3533 MGMT_OP_PIN_CODE_NEG_REPLY,
3534 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: enforce an exact request size,
 * then delegate to user_pairing_resp() (passkey unused for confirm).
 */
3537 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3540 struct mgmt_cp_user_confirm_reply *cp = data;
3544 if (len != sizeof(*cp))
3545 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3546 MGMT_STATUS_INVALID_PARAMS);
3548 return user_pairing_resp(sk, hdev, &cp->addr,
3549 MGMT_OP_USER_CONFIRM_REPLY,
3550 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the negative confirm HCI opcode.
 */
3553 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3554 void *data, u16 len)
3556 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3560 return user_pairing_resp(sk, hdev, &cp->addr,
3561 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3562 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: delegate to user_pairing_resp()
 * forwarding the user-entered passkey.
 */
3565 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3568 struct mgmt_cp_user_passkey_reply *cp = data;
3572 return user_pairing_resp(sk, hdev, &cp->addr,
3573 MGMT_OP_USER_PASSKEY_REPLY,
3574 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: delegate to
 * user_pairing_resp() with the negative passkey HCI opcode.
 */
3577 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3578 void *data, u16 len)
3580 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3584 return user_pairing_resp(sk, hdev, &cp->addr,
3585 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3586 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Append an HCI Write Local Name command (taking the name from
 * hdev->dev_name) to the given request.
 */
3589 static void update_name(struct hci_request *req)
3591 struct hci_dev *hdev = req->hdev;
3592 struct hci_cp_write_local_name cp;
3594 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3596 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion for SET_LOCAL_NAME: report the translated
 * HCI status (or success) to the pending mgmt command and remove it.
 */
3599 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3601 struct mgmt_cp_set_local_name *cp;
3602 struct pending_cmd *cmd;
3604 BT_DBG("status 0x%02x", status);
3608 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3615 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3616 mgmt_status(status));
3618 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3621 mgmt_pending_remove(cmd);
3624 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: short-circuit when nothing changed;
 * when the adapter is powered off just store the names and emit a
 * LOCAL_NAME_CHANGED event; otherwise queue an HCI request that
 * writes the name (and, for LE, refreshes the scan response data).
 */
3627 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3630 struct mgmt_cp_set_local_name *cp = data;
3631 struct pending_cmd *cmd;
3632 struct hci_request req;
3639 /* If the old values are the same as the new ones just return a
3640 * direct command complete event.
3642 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3643 !memcmp(hdev->short_name, cp->short_name,
3644 sizeof(hdev->short_name))) {
3645 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3650 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3652 if (!hdev_is_powered(hdev)) {
3653 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3655 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3660 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3666 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3672 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3674 hci_req_init(&req, hdev);
3676 if (lmp_bredr_capable(hdev)) {
3681 /* The name is stored in the scan response data and so
3682 * no need to update the advertising data here.
3684 if (lmp_le_capable(hdev))
3685 update_scan_rsp_data(&req);
3687 err = hci_req_run(&req, set_name_complete);
3689 mgmt_pending_remove(cmd);
3692 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: require power and SSP support,
 * disallow concurrent requests, then issue the extended (Secure
 * Connections) or legacy HCI Read Local OOB Data command.
 */
3696 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3697 void *data, u16 data_len)
3699 struct pending_cmd *cmd;
3702 BT_DBG("%s", hdev->name);
3706 if (!hdev_is_powered(hdev)) {
3707 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3708 MGMT_STATUS_NOT_POWERED);
3712 if (!lmp_ssp_capable(hdev)) {
3713 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3714 MGMT_STATUS_NOT_SUPPORTED);
3718 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3719 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
/* Secure Connections capable controllers return both P-192 and P-256
 * data via the extended variant. */
3724 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3730 if (bredr_sc_enabled(hdev))
3731 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3734 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3737 mgmt_pending_remove(cmd);
3740 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: accepts either the legacy
 * (192-bit hash/rand, BR/EDR only) or the extended (192+256-bit)
 * request, distinguished purely by the request length; stores the
 * data via hci_add_remote_oob_data(). For LE with the extended form,
 * the 192-bit values must be all-zero (legacy SMP OOB unsupported).
 */
3744 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3745 void *data, u16 len)
3747 struct mgmt_addr_info *addr = data;
3750 BT_DBG("%s ", hdev->name);
3752 if (!bdaddr_type_is_valid(addr->type))
3753 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3754 MGMT_STATUS_INVALID_PARAMS, addr,
/* Legacy fixed-size request: 192-bit hash/rand only. */
3759 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3760 struct mgmt_cp_add_remote_oob_data *cp = data;
3763 if (cp->addr.type != BDADDR_BREDR) {
3764 err = cmd_complete(sk, hdev->id,
3765 MGMT_OP_ADD_REMOTE_OOB_DATA,
3766 MGMT_STATUS_INVALID_PARAMS,
3767 &cp->addr, sizeof(cp->addr));
3771 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3772 cp->addr.type, cp->hash,
3773 cp->rand, NULL, NULL);
3775 status = MGMT_STATUS_FAILED;
3777 status = MGMT_STATUS_SUCCESS;
3779 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3780 status, &cp->addr, sizeof(cp->addr));
3781 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3782 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3783 u8 *rand192, *hash192;
3786 if (bdaddr_type_is_le(cp->addr.type)) {
3787 /* Enforce zero-valued 192-bit parameters as
3788 * long as legacy SMP OOB isn't implemented.
3790 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3791 memcmp(cp->hash192, ZERO_KEY, 16)) {
3792 err = cmd_complete(sk, hdev->id,
3793 MGMT_OP_ADD_REMOTE_OOB_DATA,
3794 MGMT_STATUS_INVALID_PARAMS,
3795 addr, sizeof(*addr));
3802 rand192 = cp->rand192;
3803 hash192 = cp->hash192;
3806 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3807 cp->addr.type, hash192, rand192,
3808 cp->hash256, cp->rand256);
3810 status = MGMT_STATUS_FAILED;
3812 status = MGMT_STATUS_SUCCESS;
3814 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3815 status, &cp->addr, sizeof(cp->addr));
/* Any other length is a malformed request. */
3817 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3818 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3819 MGMT_STATUS_INVALID_PARAMS);
3823 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR only; BDADDR_ANY
 * clears all stored remote OOB data, otherwise remove the single
 * entry for the given address.
 */
3827 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3828 void *data, u16 len)
3830 struct mgmt_cp_remove_remote_oob_data *cp = data;
3834 BT_DBG("%s", hdev->name);
3836 if (cp->addr.type != BDADDR_BREDR)
3837 return cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3838 MGMT_STATUS_INVALID_PARAMS,
3839 &cp->addr, sizeof(cp->addr));
/* Wildcard address clears the whole OOB store. */
3843 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3844 hci_remote_oob_data_clear(hdev);
3845 status = MGMT_STATUS_SUCCESS;
3849 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3851 status = MGMT_STATUS_INVALID_PARAMS;
3853 status = MGMT_STATUS_SUCCESS;
3856 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3857 status, &cp->addr, sizeof(cp->addr));
3859 hci_dev_unlock(hdev);
/* Build the HCI command sequence that starts discovery for the
 * current hdev->discovery.type: a GIAC inquiry for BR/EDR, or (for
 * LE/interleaved) stop advertising and background scanning as needed,
 * then program and enable an active LE scan. Returns false with
 * *status set when discovery cannot be triggered.
 */
3863 static bool trigger_discovery(struct hci_request *req, u8 *status)
3865 struct hci_dev *hdev = req->hdev;
3866 struct hci_cp_le_set_scan_param param_cp;
3867 struct hci_cp_le_set_scan_enable enable_cp;
3868 struct hci_cp_inquiry inq_cp;
3869 /* General inquiry access code (GIAC) */
3870 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3874 switch (hdev->discovery.type) {
3875 case DISCOV_TYPE_BREDR:
3876 *status = mgmt_bredr_support(hdev);
3880 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3881 *status = MGMT_STATUS_BUSY;
/* Start from an empty inquiry cache so stale results are dropped. */
3885 hci_inquiry_cache_flush(hdev);
3887 memset(&inq_cp, 0, sizeof(inq_cp));
3888 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3889 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3890 hci_req_add(req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3893 case DISCOV_TYPE_LE:
3894 case DISCOV_TYPE_INTERLEAVED:
3895 *status = mgmt_le_support(hdev);
/* Interleaved discovery needs BR/EDR enabled as well. */
3899 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3900 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3901 *status = MGMT_STATUS_NOT_SUPPORTED;
3905 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3906 /* Don't let discovery abort an outgoing
3907 * connection attempt that's using directed
3910 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3912 *status = MGMT_STATUS_REJECTED;
3916 disable_advertising(req);
3919 /* If controller is scanning, it means the background scanning
3920 * is running. Thus, we should temporarily stop it in order to
3921 * set the discovery scanning parameters.
3923 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3924 hci_req_add_le_scan_disable(req);
3926 memset(&param_cp, 0, sizeof(param_cp));
3928 /* All active scans will be done with either a resolvable
3929 * private address (when privacy feature has been enabled)
3930 * or non-resolvable private address.
3932 err = hci_update_random_address(req, true, &own_addr_type);
3934 *status = MGMT_STATUS_FAILED;
3938 param_cp.type = LE_SCAN_ACTIVE;
3939 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3940 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3941 param_cp.own_address_type = own_addr_type;
3942 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3945 memset(&enable_cp, 0, sizeof(enable_cp));
3946 enable_cp.enable = LE_SCAN_ENABLE;
3947 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3948 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type. */
3953 *status = MGMT_STATUS_INVALID_PARAMS;
/* HCI request completion for (service) discovery start: finish the
 * pending START_DISCOVERY or START_SERVICE_DISCOVERY command; on
 * failure go back to DISCOVERY_STOPPED, on success enter FINDING and
 * arm the LE-scan-disable timer according to the discovery type.
 */
3960 static void start_discovery_complete(struct hci_dev *hdev, u8 status,
3963 struct pending_cmd *cmd;
3964 unsigned long timeout;
3966 BT_DBG("status %d", status);
3970 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3972 cmd = mgmt_pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3975 cmd->cmd_complete(cmd, mgmt_status(status));
3976 mgmt_pending_remove(cmd);
3980 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3984 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3986 switch (hdev->discovery.type) {
3987 case DISCOV_TYPE_LE:
3988 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3990 case DISCOV_TYPE_INTERLEAVED:
3991 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
/* BR/EDR inquiry ends by itself; no LE scan timer needed. */
3993 case DISCOV_TYPE_BREDR:
3997 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
4003 queue_delayed_work(hdev->workqueue,
4004 &hdev->le_scan_disable, timeout);
4007 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: require power and an idle
 * discovery state, clear any previous discovery filter, record the
 * requested type, and run the trigger_discovery() HCI sequence.
 */
4010 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
4011 void *data, u16 len)
4013 struct mgmt_cp_start_discovery *cp = data;
4014 struct pending_cmd *cmd;
4015 struct hci_request req;
4019 BT_DBG("%s", hdev->name);
4023 if (!hdev_is_powered(hdev)) {
4024 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4025 MGMT_STATUS_NOT_POWERED,
4026 &cp->type, sizeof(cp->type));
/* Busy if discovery already runs or periodic inquiry is active. */
4030 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4031 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
4032 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4033 MGMT_STATUS_BUSY, &cp->type,
4038 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, data, len);
4044 cmd->cmd_complete = generic_cmd_complete;
4046 /* Clear the discovery filter first to free any previously
4047 * allocated memory for the UUID list.
4049 hci_discovery_filter_clear(hdev);
4051 hdev->discovery.type = cp->type;
4052 hdev->discovery.report_invalid_rssi = false;
4054 hci_req_init(&req, hdev);
4056 if (!trigger_discovery(&req, &status)) {
4057 err = cmd_complete(sk, hdev->id, MGMT_OP_START_DISCOVERY,
4058 status, &cp->type, sizeof(cp->type));
4059 mgmt_pending_remove(cmd);
4063 err = hci_req_run(&req, start_discovery_complete);
4065 mgmt_pending_remove(cmd);
4069 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4072 hci_dev_unlock(hdev);
/* cmd_complete callback for START_SERVICE_DISCOVERY pending commands:
 * replies with the stored command parameters as the response payload.
 */
4076 static int service_discovery_cmd_complete(struct pending_cmd *cmd, u8 status)
4078 return cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery()
 * but with an RSSI threshold and a variable-length UUID filter list.
 * Validates uuid_count against the maximum that fits in a u16-sized
 * request and against the actual request length before copying the
 * UUIDs into hdev->discovery.
 */
4082 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
4083 void *data, u16 len)
4085 struct mgmt_cp_start_service_discovery *cp = data;
4086 struct pending_cmd *cmd;
4087 struct hci_request req;
/* Cap derived from the largest request length expressible in u16;
 * each UUID entry is 16 bytes. */
4088 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
4089 u16 uuid_count, expected_len;
4093 BT_DBG("%s", hdev->name);
4097 if (!hdev_is_powered(hdev)) {
4098 err = cmd_complete(sk, hdev->id,
4099 MGMT_OP_START_SERVICE_DISCOVERY,
4100 MGMT_STATUS_NOT_POWERED,
4101 &cp->type, sizeof(cp->type));
4105 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4106 test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
4107 err = cmd_complete(sk, hdev->id,
4108 MGMT_OP_START_SERVICE_DISCOVERY,
4109 MGMT_STATUS_BUSY, &cp->type,
4114 uuid_count = __le16_to_cpu(cp->uuid_count);
4115 if (uuid_count > max_uuid_count) {
4116 BT_ERR("service_discovery: too big uuid_count value %u",
4118 err = cmd_complete(sk, hdev->id,
4119 MGMT_OP_START_SERVICE_DISCOVERY,
4120 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* The declared UUID count must match the actual request length. */
4125 expected_len = sizeof(*cp) + uuid_count * 16;
4126 if (expected_len != len) {
4127 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4129 err = cmd_complete(sk, hdev->id,
4130 MGMT_OP_START_SERVICE_DISCOVERY,
4131 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4136 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4143 cmd->cmd_complete = service_discovery_cmd_complete;
4145 /* Clear the discovery filter first to free any previously
4146 * allocated memory for the UUID list.
4148 hci_discovery_filter_clear(hdev);
4150 hdev->discovery.type = cp->type;
4151 hdev->discovery.rssi = cp->rssi;
4152 hdev->discovery.uuid_count = uuid_count;
4154 if (uuid_count > 0) {
4155 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4157 if (!hdev->discovery.uuids) {
4158 err = cmd_complete(sk, hdev->id,
4159 MGMT_OP_START_SERVICE_DISCOVERY,
4161 &cp->type, sizeof(cp->type));
4162 mgmt_pending_remove(cmd);
4167 hci_req_init(&req, hdev);
4169 if (!trigger_discovery(&req, &status)) {
4170 err = cmd_complete(sk, hdev->id,
4171 MGMT_OP_START_SERVICE_DISCOVERY,
4172 status, &cp->type, sizeof(cp->type));
4173 mgmt_pending_remove(cmd);
4177 err = hci_req_run(&req, start_discovery_complete);
4179 mgmt_pending_remove(cmd);
4183 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4186 hci_dev_unlock(hdev);
/* HCI request completion for STOP_DISCOVERY: finish the pending
 * command with the translated status and mark discovery stopped.
 */
4190 static void stop_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4192 struct pending_cmd *cmd;
4194 BT_DBG("status %d", status);
4198 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4200 cmd->cmd_complete(cmd, mgmt_status(status));
4201 mgmt_pending_remove(cmd);
4205 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4207 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: reject when no discovery is active
 * or when the requested type differs from the running one, then build
 * and run the stop sequence via hci_stop_discovery(); -ENODATA from
 * hci_req_run() means nothing needed stopping, so complete directly.
 */
4210 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4213 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4214 struct pending_cmd *cmd;
4215 struct hci_request req;
4218 BT_DBG("%s", hdev->name);
4222 if (!hci_discovery_active(hdev)) {
4223 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4224 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4225 sizeof(mgmt_cp->type));
4229 if (hdev->discovery.type != mgmt_cp->type) {
4230 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4231 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
4232 sizeof(mgmt_cp->type));
4236 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4242 cmd->cmd_complete = generic_cmd_complete;
4244 hci_req_init(&req, hdev);
4246 hci_stop_discovery(&req);
4248 err = hci_req_run(&req, stop_discovery_complete);
4250 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4254 mgmt_pending_remove(cmd);
4256 /* If no HCI commands were sent we're done */
4257 if (err == -ENODATA) {
4258 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
4259 &mgmt_cp->type, sizeof(mgmt_cp->type));
4260 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
4264 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: only valid during active discovery;
 * look up the inquiry-cache entry with unknown name state and mark it
 * NAME_KNOWN or NAME_NEEDED (the latter re-queues name resolution).
 */
4268 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4271 struct mgmt_cp_confirm_name *cp = data;
4272 struct inquiry_entry *e;
4275 BT_DBG("%s", hdev->name);
4279 if (!hci_discovery_active(hdev)) {
4280 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4281 MGMT_STATUS_FAILED, &cp->addr,
4286 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4288 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4289 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4294 if (cp->name_known) {
4295 e->name_state = NAME_KNOWN;
4298 e->name_state = NAME_NEEDED;
4299 hci_inquiry_cache_update_resolve(hdev, e);
4302 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
4306 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the adapter
 * blacklist and broadcast MGMT_EV_DEVICE_BLOCKED on success.
 */
4310 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4313 struct mgmt_cp_block_device *cp = data;
4317 BT_DBG("%s", hdev->name);
4319 if (!bdaddr_type_is_valid(cp->addr.type))
4320 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4321 MGMT_STATUS_INVALID_PARAMS,
4322 &cp->addr, sizeof(cp->addr));
4326 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4329 status = MGMT_STATUS_FAILED;
4333 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4335 status = MGMT_STATUS_SUCCESS;
4338 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4339 &cp->addr, sizeof(cp->addr));
4341 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the
 * blacklist and broadcast MGMT_EV_DEVICE_UNBLOCKED on success;
 * a miss is reported as INVALID_PARAMS.
 */
4346 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4349 struct mgmt_cp_unblock_device *cp = data;
4353 BT_DBG("%s", hdev->name);
4355 if (!bdaddr_type_is_valid(cp->addr.type))
4356 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4357 MGMT_STATUS_INVALID_PARAMS,
4358 &cp->addr, sizeof(cp->addr));
4362 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4365 status = MGMT_STATUS_INVALID_PARAMS;
4369 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4371 status = MGMT_STATUS_SUCCESS;
4374 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4375 &cp->addr, sizeof(cp->addr));
4377 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: validate the DI source (0x0000 -
 * 0x0002), store the vendor/product/version identifiers, reply, and
 * run an (elided here) HCI request - presumably refreshing the EIR -
 * NOTE(review): listing is line-sampled; the request body is not
 * visible.
 */
4382 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4385 struct mgmt_cp_set_device_id *cp = data;
4386 struct hci_request req;
4390 BT_DBG("%s", hdev->name);
4392 source = __le16_to_cpu(cp->source);
4394 if (source > 0x0002)
4395 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4396 MGMT_STATUS_INVALID_PARAMS);
4400 hdev->devid_source = source;
4401 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4402 hdev->devid_product = __le16_to_cpu(cp->product);
4403 hdev->devid_version = __le16_to_cpu(cp->version);
4405 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4407 hci_req_init(&req, hdev);
4409 hci_req_run(&req, NULL);
4411 hci_dev_unlock(hdev);
/* HCI request completion for SET_ADVERTISING: on error, answer every
 * pending SET_ADVERTISING command with the translated status; on
 * success, sync HCI_ADVERTISING with the controller state (HCI_LE_ADV),
 * answer the pending commands with the new settings and emit
 * new_settings() to other sockets.
 */
4416 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4419 struct cmd_lookup match = { NULL, hdev };
4424 u8 mgmt_err = mgmt_status(status);
4426 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4427 cmd_status_rsp, &mgmt_err);
4431 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4432 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4434 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4436 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4439 new_settings(hdev, match.sk);
4445 hci_dev_unlock(hdev);
/* MGMT_OP_SET_ADVERTISING handler: validate LE support and the
 * boolean value; when no HCI traffic is needed (powered off, value
 * unchanged, LE links up, or an active LE scan in progress) just
 * toggle the flag and reply, otherwise queue enable/disable
 * advertising and run it with set_advertising_complete().
 */
4448 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4451 struct mgmt_mode *cp = data;
4452 struct pending_cmd *cmd;
4453 struct hci_request req;
4454 u8 val, enabled, status;
4457 BT_DBG("request for %s", hdev->name);
4459 status = mgmt_le_support(hdev);
4461 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4464 if (cp->val != 0x00 && cp->val != 0x01)
4465 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4466 MGMT_STATUS_INVALID_PARAMS);
4471 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4473 /* The following conditions are ones which mean that we should
4474 * not do any HCI communication but directly send a mgmt
4475 * response to user space (after toggling the flag if
4478 if (!hdev_is_powered(hdev) || val == enabled ||
4479 hci_conn_num(hdev, LE_LINK) > 0 ||
4480 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4481 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4482 bool changed = false;
4484 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4485 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4489 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4494 err = new_settings(hdev, sk);
/* Only one SET_ADVERTISING / SET_LE operation at a time. */
4499 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4500 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4501 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4506 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4512 hci_req_init(&req, hdev);
4515 enable_advertising(&req);
4517 disable_advertising(&req);
4519 err = hci_req_run(&req, set_advertising_complete);
4521 mgmt_pending_remove(cmd);
4524 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only while powered off on an
 * LE-capable adapter. A non-ANY address must not be BDADDR_NONE and,
 * per the static-address format, must have its two most significant
 * bits set; the validated value is stored in hdev->static_addr.
 */
4528 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4529 void *data, u16 len)
4531 struct mgmt_cp_set_static_address *cp = data;
4534 BT_DBG("%s", hdev->name);
4536 if (!lmp_le_capable(hdev))
4537 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4538 MGMT_STATUS_NOT_SUPPORTED);
4540 if (hdev_is_powered(hdev))
4541 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4542 MGMT_STATUS_REJECTED);
4544 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4545 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4546 return cmd_status(sk, hdev->id,
4547 MGMT_OP_SET_STATIC_ADDRESS,
4548 MGMT_STATUS_INVALID_PARAMS);
4550 /* Two most significant bits shall be set */
4551 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4552 return cmd_status(sk, hdev->id,
4553 MGMT_OP_SET_STATIC_ADDRESS,
4554 MGMT_STATUS_INVALID_PARAMS);
4559 bacpy(&hdev->static_addr, &cp->bdaddr);
4561 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4563 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: store LE scan interval/window after
 * range-checking both (0x0004..0x4000, window <= interval), then restart
 * background passive scanning so the new values take effect immediately.
 */
4568 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4569 void *data, u16 len)
4571 struct mgmt_cp_set_scan_params *cp = data;
4572 __u16 interval, window;
4575 BT_DBG("%s", hdev->name);
4577 if (!lmp_le_capable(hdev))
4578 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4579 MGMT_STATUS_NOT_SUPPORTED);
4581 interval = __le16_to_cpu(cp->interval);
4583 if (interval < 0x0004 || interval > 0x4000)
4584 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4585 MGMT_STATUS_INVALID_PARAMS);
4587 window = __le16_to_cpu(cp->window);
4589 if (window < 0x0004 || window > 0x4000)
4590 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4591 MGMT_STATUS_INVALID_PARAMS);
4593 if (window > interval)
4594 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4595 MGMT_STATUS_INVALID_PARAMS);
4599 hdev->le_scan_interval = interval;
4600 hdev->le_scan_window = window;
4602 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4604 /* If background scan is running, restart it so new parameters are
/* Only restart when no discovery is in progress (DISCOVERY_STOPPED),
 * i.e. the active scan belongs to background scanning.
 */
4607 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4608 hdev->discovery.state == DISCOVERY_STOPPED) {
4609 struct hci_request req;
4611 hci_req_init(&req, hdev);
4613 hci_req_add_le_scan_disable(&req);
4614 hci_req_add_le_passive_scan(&req);
4616 hci_req_run(&req, NULL);
4619 hci_dev_unlock(hdev);
4624 #ifdef CONFIG_TIZEN_WIP
/* Tizen MGMT_OP_LE_SET_SCAN_PARAMS handler: same validation and restart
 * logic as set_scan_params(), but additionally stores the scan type
 * (active/passive) from the command parameters.
 */
4625 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
4626 void *data, u16 len)
4628 struct mgmt_cp_le_set_scan_params *cp = data;
4629 __u16 interval, window;
4632 BT_DBG("%s", hdev->name);
4634 if (!lmp_le_capable(hdev))
4635 return cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
4636 MGMT_STATUS_NOT_SUPPORTED);
4638 interval = __le16_to_cpu(cp->interval);
4640 if (interval < 0x0004 || interval > 0x4000)
4641 return cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
4642 MGMT_STATUS_INVALID_PARAMS);
4644 window = __le16_to_cpu(cp->window);
4646 if (window < 0x0004 || window > 0x4000)
4647 return cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
4648 MGMT_STATUS_INVALID_PARAMS);
4650 if (window > interval)
4651 return cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
4652 MGMT_STATUS_INVALID_PARAMS);
/* Unlike set_scan_params(), the scan type is caller-controlled here. */
4656 hdev->le_scan_type = cp->type;
4657 hdev->le_scan_interval = interval;
4658 hdev->le_scan_window = window;
4660 err = cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0, NULL, 0);
4662 /* If background scan is running, restart it so new parameters are
4665 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4666 hdev->discovery.state == DISCOVERY_STOPPED) {
4667 struct hci_request req;
4669 hci_req_init(&req, hdev);
4671 hci_req_add_le_scan_disable(&req);
4672 hci_req_add_le_passive_scan(&req);
4674 hci_req_run(&req, NULL);
4677 hci_dev_unlock(hdev);
4683 #ifdef CONFIG_TIZEN_WIP /* Adv White List feature */
/* HCI request completion callback for MGMT_OP_ADD_DEV_WHITE_LIST: look up
 * the pending mgmt command and answer it with status or success.
 */
4684 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4686 struct mgmt_cp_add_dev_white_list *cp;
4687 struct pending_cmd *cmd;
4689 BT_DBG("status 0x%02x", status);
4693 cmd = mgmt_pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
4700 cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
4701 mgmt_status(status));
4703 cmd_complete(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST, 0,
4706 mgmt_pending_remove(cmd);
4709 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_ADD_DEV_WHITE_LIST handler: forward the request as an
 * HCI_OP_LE_ADD_DEV_WHITE_LIST command. Requires LE support and a powered
 * controller; only one such command may be pending at a time.
 */
4712 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
4713 void *data, u16 len)
4715 struct pending_cmd *cmd;
4716 struct mgmt_cp_add_dev_white_list *cp = data;
4717 struct hci_request req;
4720 BT_DBG("%s", hdev->name);
4722 if (!lmp_le_capable(hdev))
4723 return cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
4724 MGMT_STATUS_NOT_SUPPORTED);
4726 if (!hdev_is_powered(hdev))
4727 return cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
4728 MGMT_STATUS_REJECTED);
4732 if (mgmt_pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
4733 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
4738 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
/* The mgmt payload is passed through to the controller unchanged. */
4744 hci_req_init(&req, hdev);
4746 hci_req_add(&req, HCI_OP_LE_ADD_DEV_WHITE_LIST, sizeof(*cp), cp);
4748 err = hci_req_run(&req, add_white_list_complete);
4750 mgmt_pending_remove(cmd);
4755 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST:
 * resolve the pending mgmt command with the controller status.
 */
4760 static void remove_from_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4762 struct mgmt_cp_remove_dev_from_white_list *cp;
4763 struct pending_cmd *cmd;
4765 BT_DBG("status 0x%02x", status);
4769 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
4776 cmd_status(cmd->sk, hdev->id, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
4777 mgmt_status(status));
4779 cmd_complete(cmd->sk, hdev->id, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
4782 mgmt_pending_remove(cmd);
4785 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST handler: mirror of
 * add_white_list() using HCI_OP_LE_REMOVE_FROM_DEV_WHITE_LIST.
 */
4788 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
4789 void *data, u16 len)
4791 struct pending_cmd *cmd;
4792 struct mgmt_cp_remove_dev_from_white_list *cp = data;
4793 struct hci_request req;
4796 BT_DBG("%s", hdev->name);
4798 if (!lmp_le_capable(hdev))
4799 return cmd_status(sk, hdev->id, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
4800 MGMT_STATUS_NOT_SUPPORTED);
4802 if (!hdev_is_powered(hdev))
4803 return cmd_status(sk, hdev->id, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
4804 MGMT_STATUS_REJECTED);
4808 if (mgmt_pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
4809 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
4814 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev, data, len);
4820 hci_req_init(&req, hdev);
4822 hci_req_add(&req, HCI_OP_LE_REMOVE_FROM_DEV_WHITE_LIST, sizeof(*cp), cp);
4824 err = hci_req_run(&req, remove_from_white_list_complete);
4826 mgmt_pending_remove(cmd);
4831 hci_dev_unlock(hdev);
/* HCI request completion callback for MGMT_OP_CLEAR_DEV_WHITE_LIST:
 * resolve the pending mgmt command with the controller status.
 */
4836 static void clear_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4838 struct pending_cmd *cmd;
4840 BT_DBG("status 0x%02x", status);
4844 cmd = mgmt_pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
4849 cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
4850 mgmt_status(status));
4852 cmd_complete(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST, 0,
4855 mgmt_pending_remove(cmd);
4858 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_CLEAR_DEV_WHITE_LIST handler: issue
 * HCI_OP_LE_CLEAR_DEV_WHITE_LIST (no parameters) to empty the controller's
 * LE white list. Same precondition checks as the add/remove handlers.
 */
4861 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
4862 void *data, u16 len)
4864 struct pending_cmd *cmd;
4865 struct hci_request req;
4868 BT_DBG("%s", hdev->name);
4870 if (!lmp_le_capable(hdev))
4871 return cmd_status(sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
4872 MGMT_STATUS_NOT_SUPPORTED);
4874 if (!hdev_is_powered(hdev))
4875 return cmd_status(sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
4876 MGMT_STATUS_REJECTED);
4880 if (mgmt_pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
4881 err = cmd_status(sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
/* Clear carries no payload, hence NULL/0 for the pending data. */
4886 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev, NULL, 0);
4892 hci_req_init(&req, hdev);
4894 hci_req_add(&req, HCI_OP_LE_CLEAR_DEV_WHITE_LIST, 0, NULL);
4896 err = hci_req_run(&req, clear_white_list_complete);
4898 mgmt_pending_remove(cmd);
4903 hci_dev_unlock(hdev);
/* Report a failed LE discovery start: reset the LE discovery state to
 * STOPPED and complete the pending MGMT_OP_START_LE_DISCOVERY command
 * with the translated status and the discovery type that was requested.
 */
4908 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
4910 struct pending_cmd *cmd;
4914 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
4916 cmd = mgmt_pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
4920 type = hdev->le_discovery.type;
4922 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
4923 &type, sizeof(type));
4924 mgmt_pending_remove(cmd);
/* HCI completion callback for starting LE discovery: on error report the
 * failure, otherwise enter DISCOVERY_FINDING and arm the delayed work that
 * disables scanning after the discovery timeout.
 */
4929 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4931 unsigned long timeout = 0;
4933 BT_DBG("status %d", status);
4937 mgmt_start_le_discovery_failed(hdev, status);
4938 hci_dev_unlock(hdev);
4943 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
4944 hci_dev_unlock(hdev);
4946 switch (hdev->le_discovery.type) {
4947 case DISCOV_TYPE_LE:
4948 /* BEGIN TIZEN_Bluetooth :: Keep going on LE Scan */
4950 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
4952 /* END TIZEN_Bluetooth */
4956 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
/* timeout was computed above from the discovery type. */
4962 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* Tizen MGMT_OP_START_LE_DISCOVERY handler: validate preconditions
 * (powered, no discovery in progress, LE supported), then build and run
 * an HCI request that stops any background scan, programs the scan
 * parameters (with a freshly updated random/own address) and enables
 * active scanning without duplicate filtering.
 *
 * Fix: original line 5019 contained the mojibake "memset(¶m_cp, ...)"
 * — a corrupted encoding of "&param_cp" — which is not valid C.
 */
4965 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
4966 void *data, u16 len)
4968 struct mgmt_cp_start_le_discovery *cp = data;
4969 struct pending_cmd *cmd;
4970 struct hci_cp_le_set_scan_param param_cp;
4971 struct hci_cp_le_set_scan_enable enable_cp;
4972 struct hci_request req;
4973 u8 status, own_addr_type;
4976 BT_DBG("%s", hdev->name);
4980 if (!hdev_is_powered(hdev)) {
4981 err = cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
4982 MGMT_STATUS_NOT_POWERED);
4986 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
4987 err = cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
4992 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
4998 hdev->le_discovery.type = cp->type;
5000 hci_req_init(&req, hdev);
5002 switch (hdev->le_discovery.type) {
5003 case DISCOV_TYPE_LE:
5004 status = mgmt_le_support(hdev);
5006 err = cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
5008 mgmt_pending_remove(cmd);
5012 /* If controller is scanning, it means the background scanning
5013 * is running. Thus, we should temporarily stop it in order to
5014 * set the discovery scanning parameters.
5016 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5017 hci_req_add_le_scan_disable(&req);
5019 memset(&param_cp, 0, sizeof(param_cp));
5021 /* All active scans will be done with either a resolvable
5022 * private address (when privacy feature has been enabled)
5023 * or unresolvable private address.
5025 err = hci_update_random_address(&req, true, &own_addr_type);
5027 err = cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
5028 MGMT_STATUS_FAILED);
5029 mgmt_pending_remove(cmd);
5033 param_cp.type = hdev->le_scan_type;
5034 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5035 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5036 param_cp.own_address_type = own_addr_type;
5037 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
/* Enable scanning with duplicate filtering off so every advertising
 * report reaches discovery.
 */
5040 memset(&enable_cp, 0, sizeof(enable_cp));
5041 enable_cp.enable = LE_SCAN_ENABLE;
5042 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5044 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5049 err = cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
5050 MGMT_STATUS_INVALID_PARAMS);
5051 mgmt_pending_remove(cmd);
5055 err = hci_req_run(&req, start_le_discovery_complete);
5057 mgmt_pending_remove(cmd);
5059 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
5062 hci_dev_unlock(hdev);
/* Report a failed LE discovery stop: complete the pending
 * MGMT_OP_STOP_LE_DISCOVERY command with the translated status and the
 * current discovery type.
 */
5066 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
5068 struct pending_cmd *cmd;
5071 cmd = mgmt_pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
5075 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
5076 &hdev->le_discovery.type, sizeof(hdev->le_discovery.type));
5077 mgmt_pending_remove(cmd);
/* HCI completion callback for stopping LE discovery: report failure to
 * the pending command on error, otherwise move to DISCOVERY_STOPPED.
 */
5082 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5084 BT_DBG("status %d", status);
5089 mgmt_stop_le_discovery_failed(hdev, status);
5093 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
5096 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_STOP_LE_DISCOVERY handler: reject when no LE discovery is
 * active or the type does not match the one in progress; otherwise cancel
 * the scan-disable timer, queue an HCI scan-disable and move to
 * DISCOVERY_STOPPING pending the completion callback.
 */
5099 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5102 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
5103 struct pending_cmd *cmd;
5104 struct hci_request req;
5107 BT_DBG("%s", hdev->name);
5111 if (!hci_le_discovery_active(hdev)) {
5112 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
5113 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5114 sizeof(mgmt_cp->type));
5118 if (hdev->le_discovery.type != mgmt_cp->type) {
5119 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
5120 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
5121 sizeof(mgmt_cp->type));
5125 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
5131 hci_req_init(&req, hdev);
5133 switch (hdev->le_discovery.state) {
5134 case DISCOVERY_FINDING:
5135 cancel_delayed_work(&hdev->le_scan_disable);
5136 hci_req_add_le_scan_disable(&req);
/* Any state other than FINDING is unexpected here and is answered
 * with MGMT_STATUS_FAILED.
 */
5140 BT_DBG("unknown le discovery state %u", hdev->le_discovery.state);
5142 mgmt_pending_remove(cmd);
5143 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
5144 MGMT_STATUS_FAILED, &mgmt_cp->type,
5145 sizeof(mgmt_cp->type));
5149 err = hci_req_run(&req, stop_le_discovery_complete);
5151 mgmt_pending_remove(cmd);
5153 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
5156 hci_dev_unlock(hdev);
5160 /* BEGIN TIZEN_Bluetooth :: LE auto connect */
/* Tizen LE auto-connect disable: send HCI_OP_LE_CREATE_CONN_CANCEL to
 * abort a pending LE connection attempt; failure is only logged.
 */
5161 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
5162 void *data, u16 len)
5166 BT_DBG("%s", hdev->name);
5170 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
5172 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
5175 hci_dev_unlock(hdev);
5179 /* END TIZEN_Bluetooth */
5181 /* BEGIN TIZEN_Bluetooth :: LE connection Update */
/* Tizen MGMT_OP_LE_CONN_UPDATE handler: validate the requested connection
 * parameters, look up the LE connection by address and request a
 * connection parameter update from the controller.
 */
5182 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
5185 struct mgmt_cp_le_conn_update *cp = data;
5187 struct hci_conn *conn;
5188 u16 min, max, latency, supervision_timeout;
5191 if (!hdev_is_powered(hdev))
5192 return cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
5193 MGMT_STATUS_NOT_POWERED);
5195 min = __le16_to_cpu(cp->conn_interval_min);
5196 max = __le16_to_cpu(cp->conn_interval_max);
5197 latency = __le16_to_cpu(cp->conn_latency);
5198 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
5200 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
5201 min, max, latency, supervision_timeout);
/* Range/consistency checking is delegated to a shared helper. */
5203 err = check_le_conn_update_param(min, max, latency, supervision_timeout);
5206 err = cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
5207 MGMT_STATUS_INVALID_PARAMS);
5213 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
5216 cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
5217 MGMT_STATUS_NOT_CONNECTED);
5221 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
5223 err = cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
5226 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_DEV_RPA_RES_SUPPORT handler: record whether a given
 * remote device supports RPA resolution. Fails with NOT_PAIRED when
 * hci_set_rpa_res_support() cannot apply the setting.
 */
5230 static int set_dev_rpa_res_support(struct sock *sk, struct hci_dev *hdev,
5231 void *data, u16 len)
5233 struct mgmt_cp_set_dev_rpa_res_support *cp = data;
5236 BT_DBG("Set resolve RPA as %u for %s", cp->res_support, hdev->name);
5240 if (!lmp_le_capable(hdev)) {
5241 err = cmd_status(sk, hdev->id,
5242 MGMT_OP_SET_DEV_RPA_RES_SUPPORT,
5243 MGMT_STATUS_NOT_SUPPORTED);
5247 if (!hdev_is_powered(hdev)) {
5248 err = cmd_status(sk, hdev->id,
5249 MGMT_OP_SET_DEV_RPA_RES_SUPPORT,
5250 MGMT_STATUS_REJECTED);
5254 if (hci_set_rpa_res_support(hdev, &cp->addr.bdaddr, cp->addr.type,
5256 err = cmd_complete(sk, hdev->id,
5257 MGMT_OP_SET_DEV_RPA_RES_SUPPORT,
5258 MGMT_STATUS_NOT_PAIRED, NULL, 0);
5262 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_RPA_RES_SUPPORT,
5263 MGMT_STATUS_SUCCESS, NULL, 0);
5266 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_IRK handler: copy the supplied Identity Resolving Key
 * into hdev->irk. Requires LE support; no other validation is performed.
 */
5270 static int set_irk(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5273 struct mgmt_cp_set_irk *cp = cp_data;
5276 BT_DBG("request for %s", hdev->name);
5280 if (!lmp_le_capable(hdev)) {
5281 err = cmd_status(sk, hdev->id, MGMT_OP_SET_IRK,
5282 MGMT_STATUS_NOT_SUPPORTED);
5286 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5288 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_IRK, 0, NULL, 0);
5291 hci_dev_unlock(hdev);
5294 /* END TIZEN_Bluetooth */
/* HCI completion callback for MGMT_OP_SET_FAST_CONNECTABLE: on success
 * toggle HCI_FAST_CONNECTABLE per the requested mode and notify user
 * space via settings response and New Settings event.
 */
5297 static void fast_connectable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5299 struct pending_cmd *cmd;
5301 BT_DBG("status 0x%02x", status);
5305 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5310 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5311 mgmt_status(status));
5313 struct mgmt_mode *cp = cmd->param;
5316 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
5318 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
5320 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
5321 new_settings(hdev, cmd->sk);
5324 mgmt_pending_remove(cmd);
5327 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page scan
 * parameters. Requires BR/EDR enabled on a >= 1.2 controller, a powered
 * device and HCI_CONNECTABLE set; no-ops when the flag already matches.
 */
5330 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
5331 void *data, u16 len)
5333 struct mgmt_mode *cp = data;
5334 struct pending_cmd *cmd;
5335 struct hci_request req;
5338 BT_DBG("%s", hdev->name);
5340 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
5341 hdev->hci_ver < BLUETOOTH_VER_1_2)
5342 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5343 MGMT_STATUS_NOT_SUPPORTED);
5345 if (cp->val != 0x00 && cp->val != 0x01)
5346 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5347 MGMT_STATUS_INVALID_PARAMS);
5349 if (!hdev_is_powered(hdev))
5350 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5351 MGMT_STATUS_NOT_POWERED);
5353 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
5354 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5355 MGMT_STATUS_REJECTED);
5359 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
5360 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested value already in effect: just echo current settings. */
5365 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
5366 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
5371 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
5378 hci_req_init(&req, hdev);
5380 write_fast_connectable(&req, cp->val);
5382 err = hci_req_run(&req, fast_connectable_complete);
5384 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
5385 MGMT_STATUS_FAILED);
5386 mgmt_pending_remove(cmd);
5390 hci_dev_unlock(hdev);
/* HCI completion callback for MGMT_OP_SET_BREDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and report the error,
 * otherwise confirm the new settings to user space.
 */
5395 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5397 struct pending_cmd *cmd;
5399 BT_DBG("status 0x%02x", status);
5403 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
5408 u8 mgmt_err = mgmt_status(status);
5410 /* We need to restore the flag if related HCI commands
5413 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
5415 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
5417 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
5418 new_settings(hdev, cmd->sk);
5421 mgmt_pending_remove(cmd);
5424 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. Powered-off toggles only flip flags; powered-on enabling
 * flips HCI_BREDR_ENABLED up front and runs HCI commands to update page
 * scan and advertising data. Disabling while powered is rejected, as is
 * re-enabling when a static address or secure connections is in use.
 */
5427 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
5429 struct mgmt_mode *cp = data;
5430 struct pending_cmd *cmd;
5431 struct hci_request req;
5434 BT_DBG("request for %s", hdev->name);
5436 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
5437 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5438 MGMT_STATUS_NOT_SUPPORTED);
5440 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5441 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5442 MGMT_STATUS_REJECTED);
5444 if (cp->val != 0x00 && cp->val != 0x01)
5445 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5446 MGMT_STATUS_INVALID_PARAMS);
5450 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
5451 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
/* Powered off: only flag changes, no HCI traffic. Disabling BR/EDR
 * also clears the BR/EDR-dependent settings flags below.
 */
5455 if (!hdev_is_powered(hdev)) {
5457 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5458 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5459 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5460 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
5461 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5464 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
5466 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
5470 err = new_settings(hdev, sk);
5474 /* Reject disabling when powered on */
5476 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5477 MGMT_STATUS_REJECTED);
5480 /* When configuring a dual-mode controller to operate
5481 * with LE only and using a static address, then switching
5482 * BR/EDR back on is not allowed.
5484 * Dual-mode controllers shall operate with the public
5485 * address as its identity address for BR/EDR and LE. So
5486 * reject the attempt to create an invalid configuration.
5488 * The same restrictions applies when secure connections
5489 * has been enabled. For BR/EDR this is a controller feature
5490 * while for LE it is a host stack feature. This means that
5491 * switching BR/EDR back on when secure connections has been
5492 * enabled is not a supported transaction.
5494 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
5495 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
5496 test_bit(HCI_SC_ENABLED, &hdev->dev_flags))) {
5497 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5498 MGMT_STATUS_REJECTED);
5503 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
5504 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
5509 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
5515 /* We need to flip the bit already here so that update_adv_data
5516 * generates the correct flags.
5518 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
5520 hci_req_init(&req, hdev);
5522 write_fast_connectable(&req, false);
5523 __hci_update_page_scan(&req);
5525 /* Since only the advertising data flags will change, there
5526 * is no need to update the scan response data.
5528 update_adv_data(&req);
5530 err = hci_req_run(&req, set_bredr_complete);
5532 mgmt_pending_remove(cmd);
5535 hci_dev_unlock(hdev);
/* HCI completion callback for MGMT_OP_SET_SECURE_CONN: translate the
 * requested mode (0x00 off, 0x01 SC enabled, 0x02 SC-only) into the
 * HCI_SC_ENABLED/HCI_SC_ONLY flags and notify user space.
 */
5539 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5541 struct pending_cmd *cmd;
5542 struct mgmt_mode *cp;
5544 BT_DBG("%s status %u", hdev->name, status);
5548 cmd = mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
5553 cmd_status(cmd->sk, cmd->index, cmd->opcode,
5554 mgmt_status(status));
5562 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5563 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5566 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5567 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5570 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5571 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
5575 send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
5576 new_settings(hdev, cmd->sk);
5579 mgmt_pending_remove(cmd);
5581 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SECURE_CONN handler: configure Secure Connections support
 * (0x00 off, 0x01 on, 0x02 SC-only). When the controller is unpowered,
 * lacks SC, or BR/EDR is off, only host flags are changed; otherwise an
 * HCI Write SC Support command is issued and completed asynchronously.
 */
5584 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
5585 void *data, u16 len)
5587 struct mgmt_mode *cp = data;
5588 struct pending_cmd *cmd;
5589 struct hci_request req;
5593 BT_DBG("request for %s", hdev->name);
5595 if (!lmp_sc_capable(hdev) &&
5596 !test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5597 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5598 MGMT_STATUS_NOT_SUPPORTED);
5600 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) &&
5601 !test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5602 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5603 MGMT_STATUS_REJECTED);
5605 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5606 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5607 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no HCI command can be or needs to be sent. */
5611 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
5612 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
5616 changed = !test_and_set_bit(HCI_SC_ENABLED,
5618 if (cp->val == 0x02)
5619 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
5621 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5623 changed = test_and_clear_bit(HCI_SC_ENABLED,
5625 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5628 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5633 err = new_settings(hdev, sk);
5638 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
5639 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
5646 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5647 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
5648 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
5652 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
5658 hci_req_init(&req, hdev);
5659 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
5660 err = hci_req_run(&req, sc_enable_complete);
5662 mgmt_pending_remove(cmd);
5667 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 keep and also use SSP debug mode. When debug-key usage
 * changes on a powered, SSP-enabled controller, push the new SSP debug
 * mode to the controller.
 */
5671 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
5672 void *data, u16 len)
5674 struct mgmt_mode *cp = data;
5675 bool changed, use_changed;
5678 BT_DBG("request for %s", hdev->name);
5680 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5681 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
5682 MGMT_STATUS_INVALID_PARAMS);
5687 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
5690 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
5693 if (cp->val == 0x02)
5694 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
5697 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
5700 if (hdev_is_powered(hdev) && use_changed &&
5701 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
5702 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
5703 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
5704 sizeof(mode), &mode);
5707 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
5712 err = new_settings(hdev, sk);
5715 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy. Enabling stores
 * the supplied IRK and marks the RPA expired so a fresh one is generated;
 * disabling wipes the IRK. The upstream powered-on rejection is compiled
 * out under CONFIG_TIZEN_WIP (see comment below).
 */
5719 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5722 struct mgmt_cp_set_privacy *cp = cp_data;
5726 BT_DBG("request for %s", hdev->name);
5728 if (!lmp_le_capable(hdev))
5729 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5730 MGMT_STATUS_NOT_SUPPORTED);
5732 if (cp->privacy != 0x00 && cp->privacy != 0x01)
5733 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5734 MGMT_STATUS_INVALID_PARAMS);
5736 /* Not sure why below condition is required; commenting for now,
5737 * since set privacy command is always rejected if this condition is enabled.
5739 #ifndef CONFIG_TIZEN_WIP
5740 if (hdev_is_powered(hdev))
5741 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5742 MGMT_STATUS_REJECTED);
5747 /* If user space supports this command it is also expected to
5748 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5750 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5753 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
5754 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5755 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5757 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
5758 memset(hdev->irk, 0, sizeof(hdev->irk));
5759 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
5762 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5767 err = new_settings(hdev, sk);
5770 hci_dev_unlock(hdev);
/* Validate one mgmt_irk_info entry: public LE addresses are always valid;
 * random LE addresses must be static (top two MSB bits set).
 */
5774 static bool irk_is_valid(struct mgmt_irk_info *irk)
5776 switch (irk->addr.type) {
5777 case BDADDR_LE_PUBLIC:
5780 case BDADDR_LE_RANDOM:
5781 /* Two most significant bits shall be set */
5782 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the SMP IRK store with the supplied
 * list. The count is bounded so irk_count * sizeof(entry) cannot exceed
 * U16_MAX, and the payload length must match the count exactly before any
 * entry is validated or stored.
 */
5790 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5793 struct mgmt_cp_load_irks *cp = cp_data;
5794 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5795 sizeof(struct mgmt_irk_info));
5796 u16 irk_count, expected_len;
5799 BT_DBG("request for %s", hdev->name);
5801 if (!lmp_le_capable(hdev))
5802 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5803 MGMT_STATUS_NOT_SUPPORTED);
5805 irk_count = __le16_to_cpu(cp->irk_count);
5806 if (irk_count > max_irk_count) {
5807 BT_ERR("load_irks: too big irk_count value %u", irk_count);
5808 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5809 MGMT_STATUS_INVALID_PARAMS);
5812 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
5813 if (expected_len != len) {
5814 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5816 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5817 MGMT_STATUS_INVALID_PARAMS);
5820 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate every entry before touching the existing IRK store. */
5822 for (i = 0; i < irk_count; i++) {
5823 struct mgmt_irk_info *key = &cp->irks[i];
5825 if (!irk_is_valid(key))
5826 return cmd_status(sk, hdev->id,
5828 MGMT_STATUS_INVALID_PARAMS);
5833 hci_smp_irks_clear(hdev);
5835 for (i = 0; i < irk_count; i++) {
5836 struct mgmt_irk_info *irk = &cp->irks[i];
5839 if (irk->addr.type == BDADDR_LE_PUBLIC)
5840 addr_type = ADDR_LE_DEV_PUBLIC;
5842 addr_type = ADDR_LE_DEV_RANDOM;
5844 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
5848 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
5850 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5852 hci_dev_unlock(hdev);
5857 #ifdef CONFIG_TIZEN_WIP
/* Tizen MGMT_OP_SET_ADVERTISING_PARAMS handler: store advertising
 * interval bounds, filter policy and advertising type. Rejected while
 * advertising is active; intervals must satisfy min <= max within
 * 0x0020..0x4000.
 */
5858 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
5859 void *data, u16 len)
5861 struct mgmt_cp_set_advertising_params *cp = data;
5866 BT_DBG("%s", hdev->name);
5868 if (!lmp_le_capable(hdev))
5869 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING_PARAMS,
5870 MGMT_STATUS_NOT_SUPPORTED);
5872 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5873 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING_PARAMS,
5876 min_interval = __le16_to_cpu(cp->interval_min);
5877 max_interval = __le16_to_cpu(cp->interval_max);
5879 if (min_interval > max_interval ||
5880 min_interval < 0x0020 || max_interval > 0x4000)
5881 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING_PARAMS,
5882 MGMT_STATUS_INVALID_PARAMS);
5886 hdev->le_adv_min_interval = min_interval;
5887 hdev->le_adv_max_interval = max_interval;
5888 hdev->adv_filter_policy = cp->filter_policy;
5889 hdev->adv_type = cp->type;
5891 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
5893 hci_dev_unlock(hdev);
/* HCI completion callback for MGMT_OP_SET_ADVERTISING_DATA: resolve the
 * pending mgmt command with the controller status.
 */
5898 static void set_advertising_data_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5900 struct mgmt_cp_set_advertising_data *cp;
5901 struct pending_cmd *cmd;
5903 BT_DBG("status 0x%02x", status);
5907 cmd = mgmt_pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
5914 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_ADVERTISING_DATA,
5915 mgmt_status(status));
5917 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_ADVERTISING_DATA, 0,
5920 mgmt_pending_remove(cmd);
5923 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_ADVERTISING_DATA handler: copy raw advertising data
 * (bounded by HCI_MAX_AD_LENGTH) into an HCI_OP_LE_SET_ADV_DATA command
 * and run it, completing the mgmt command from the callback.
 */
5926 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
5927 void *data, u16 len)
5929 struct pending_cmd *cmd;
5930 struct hci_request req;
5931 struct mgmt_cp_set_advertising_data *cp = data;
5932 struct hci_cp_le_set_adv_data adv;
5935 BT_DBG("%s", hdev->name);
5937 if (!lmp_le_capable(hdev)) {
5938 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING_DATA,
5939 MGMT_STATUS_NOT_SUPPORTED);
5944 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
5945 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING_DATA,
5950 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
/* NOTE(review): length is validated only after the pending command is
 * added — presumably the error path removes it; confirm upstream.
 */
5957 if (len > HCI_MAX_AD_LENGTH) {
5958 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING_DATA,
5959 MGMT_STATUS_INVALID_PARAMS);
5963 hci_req_init(&req, hdev);
5965 memset(&adv, 0, sizeof(adv));
5966 memcpy(adv.data, cp->data, len);
5969 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
5971 err = hci_req_run(&req, set_advertising_data_complete);
5973 mgmt_pending_remove(cmd);
5977 hci_dev_unlock(hdev);
/* HCI completion callback for MGMT_OP_SET_SCAN_RSP_DATA: resolve the
 * pending mgmt command with the controller status.
 */
5982 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5984 struct mgmt_cp_set_scan_rsp_data *cp;
5985 struct pending_cmd *cmd;
5987 BT_DBG("status 0x%02x", status);
5991 cmd = mgmt_pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
5998 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
5999 mgmt_status(status));
6001 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA, 0,
6004 mgmt_pending_remove(cmd);
6007 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_SCAN_RSP_DATA handler: copy raw scan response data
 * (bounded by HCI_MAX_AD_LENGTH) into an HCI_OP_LE_SET_SCAN_RSP_DATA
 * command; mirrors set_advertising_data().
 */
6010 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
6013 struct pending_cmd *cmd;
6014 struct hci_request req;
6015 struct mgmt_cp_set_scan_rsp_data *cp = data;
6016 struct hci_cp_le_set_scan_rsp_data rsp;
6019 BT_DBG("%s", hdev->name);
6021 if (!lmp_le_capable(hdev))
6022 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6023 MGMT_STATUS_NOT_SUPPORTED);
6027 if (mgmt_pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
6028 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6033 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
6038 if (len > HCI_MAX_AD_LENGTH) {
6039 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
6040 MGMT_STATUS_INVALID_PARAMS);
6044 hci_req_init(&req, hdev);
6046 memset(&rsp, 0, sizeof(rsp));
6047 memcpy(rsp.data, cp->data, len);
6050 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
6052 err = hci_req_run(&req, set_scan_rsp_data_complete);
6054 mgmt_pending_remove(cmd);
6058 hci_dev_unlock(hdev);
/* HCI completion callback for MGMT_OP_SET_MANUFACTURER_DATA: resolve the
 * pending mgmt command with the controller status.
 */
6063 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6065 struct mgmt_cp_set_manufacturer_data *cp;
6066 struct pending_cmd *cmd;
6068 BT_DBG("status 0x%02x", status);
6072 cmd = mgmt_pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
6079 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
6080 mgmt_status(status));
6082 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA, 0,
6085 mgmt_pending_remove(cmd);
6088 hci_dev_unlock(hdev);
/* MGMT_OP_SET_MANUFACTURER_DATA handler (BR/EDR only).
 * cp->data layout as used here: data[0] = length byte (must be non-zero and
 * fit hdev->manufacturer_data), data[1] = EIR type (only 0xFF, Manufacturer
 * Specific Data, is accepted), data[2..] = payload of data[0]-1 bytes.
 * If the new payload equals the currently stored one the call completes
 * immediately; otherwise the device copy is updated and an HCI request is
 * run (the hci_req_add for the EIR update is elided from this listing).
 * The tail restores the saved old_data — presumably an error-rollback
 * label; TODO confirm against full source.
 */
6091 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
6092 void *data, u16 len)
6094 struct pending_cmd *cmd;
6095 struct hci_request req;
6096 struct mgmt_cp_set_manufacturer_data *cp = data;
6097 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
6102 BT_DBG("%s", hdev->name);
6104 if (!lmp_bredr_capable(hdev)) {
6105 return cmd_status(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
6106 MGMT_STATUS_NOT_SUPPORTED);
/* Reject zero length and payloads too large for the stored buffer. */
6109 if (cp->data[0] == 0 ||
6110 cp->data[0] - 1 > sizeof(hdev->manufacturer_data)) {
6111 return cmd_status(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
6112 MGMT_STATUS_INVALID_PARAMS);
/* Only EIR type 0xFF (manufacturer specific) is supported. */
6115 if (cp->data[1] != 0xFF) {
6116 return cmd_status(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
6117 MGMT_STATUS_NOT_SUPPORTED);
6122 if (mgmt_pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
6123 err = cmd_status(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
6128 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data, len);
6134 hci_req_init(&req, hdev);
6136 /* if new data is same as previous data then return command complete event*/
6137 if (hdev->manufacturer_len == cp->data[0] - 1 &&
6138 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
6139 mgmt_pending_remove(cmd);
6140 cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA, 0,
6141 hci_dev_unlock(hdev);
/* Save previous data so it can be restored on failure below. */
6146 old_len = hdev->manufacturer_len;
6148 memcpy(old_data, hdev->manufacturer_data, old_len);
6151 hdev->manufacturer_len = cp->data[0] - 1;
6152 if (hdev->manufacturer_len > 0) {
6153 memcpy(hdev->manufacturer_data, cp->data + 2, hdev->manufacturer_len);
6158 err = hci_req_run(&req, set_manufacturer_data_complete);
6160 mgmt_pending_remove(cmd);
6165 hci_dev_unlock(hdev);
/* Rollback path: restore the previously saved manufacturer data. */
6170 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
6171 hdev->manufacturer_len = old_len;
6172 if (hdev->manufacturer_len > 0) {
6173 memcpy(hdev->manufacturer_data, old_data,
6174 hdev->manufacturer_len);
6176 hci_dev_unlock(hdev);
6180 #ifdef CONFIG_TIZEN_WIP
/* MGMT_OP_SET_VOICE_SETTING handler (Tizen): records voice_setting and
 * sco_role on the ACL connection to the given address, then configures the
 * SCO layer for wideband (voice_setting == 0x0063, presumably mSBC/WBS) or
 * narrowband codec, in handsfree or gateway role. Refuses to reconfigure
 * if a SCO connection to a different peer already exists.
 */
6181 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
6182 void *data, u16 len)
6184 struct mgmt_cp_set_voice_setting *cp = data;
6185 struct hci_conn *conn;
6186 struct hci_conn *sco_conn;
6190 BT_DBG("%s", hdev->name);
6192 if (!lmp_bredr_capable(hdev)) {
6193 return cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
6194 MGMT_STATUS_NOT_SUPPORTED);
6199 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
/* Elided branch: presumably completes early when no ACL link exists. */
6201 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
6206 conn->voice_setting = cp->voice_setting;
6207 conn->sco_role = cp->sco_role;
6209 sco_conn = hci_conn_hash_lookup_sco(hdev);
6210 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
6211 BT_ERR("There is other SCO connection.");
6215 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
6216 if (conn->voice_setting == 0x0063)
6217 sco_connect_set_wbc(hdev);
6219 sco_connect_set_nbc(hdev);
6221 if (conn->voice_setting == 0x0063)
6222 sco_connect_set_gw_wbc(hdev);
6224 sco_connect_set_gw_nbc(hdev);
6228 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
6232 hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_TX_POWER handler: allocates a reply, fills it with the
 * cached hdev->adv_tx_power and completes the command.
 * NOTE(review): the kmalloc NULL check and the kfree of rp are elided from
 * this listing — confirm both exist in the full source.
 */
6237 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
6238 void *data, u16 len)
6240 struct mgmt_rp_get_adv_tx_power *rp;
6244 BT_DBG("%s", hdev->name);
6248 rp_len = sizeof(*rp);
6249 rp = kmalloc(rp_len, GFP_KERNEL);
6255 rp->adv_tx_power= hdev->adv_tx_power;
6257 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
6263 hci_dev_unlock(hdev);
6268 /* BEGIN TIZEN_Bluetooth :: Apply RSSI changes */
/* HCI completion callback for the RSSI-threshold vendor request; only logs
 * the status (the mgmt reply is produced elsewhere, via mgmt_enable_rssi_cc).
 */
6269 static void set_rssi_threshold_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6271 BT_DBG("status 0x%02x", status);
/* HCI completion callback for the RSSI-disable vendor request; log-only. */
6274 static void set_rssi_disable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6276 BT_DBG("status 0x%02x", status);
/* Programs vendor-specific RSSI alert thresholds (low / in-range / high,
 * from the MGMT_OP_SET_RSSI_ENABLE request) for the LE or ACL link to
 * cp->bdaddr, using vendor HCI opcode HCI_OP_ENABLE_RSSI with LE extended
 * sub-opcode 0x0B. Expects an MGMT_OP_SET_RSSI_ENABLE pending cmd to
 * already exist (added by set_enable_rssi); fails otherwise.
 */
6279 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
6280 void *data, u16 len)
6283 struct hci_cp_set_rssi_threshold th = { 0, };
6284 struct mgmt_cp_set_enable_rssi *cp = data;
6285 struct hci_conn *conn;
6286 struct pending_cmd *cmd;
6287 struct hci_request req;
6292 cmd = mgmt_pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
/* Elided: presumably this errors when cmd is NOT found — TODO confirm. */
6294 err = cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6295 MGMT_STATUS_FAILED);
6299 if (!lmp_le_capable(hdev)) {
6300 mgmt_pending_remove(cmd);
6301 return cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6302 MGMT_STATUS_NOT_SUPPORTED);
6305 if (!hdev_is_powered(hdev)) {
6306 BT_DBG("%s", hdev->name);
6307 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6308 MGMT_STATUS_NOT_POWERED, data, len);
6309 mgmt_pending_remove(cmd);
/* link_type 0x01 selects LE, anything else BR/EDR ACL. */
6313 if (cp->link_type == 0x01)
6314 dest_type = LE_LINK;
6316 dest_type = ACL_LINK;
6318 /* Get LE/ACL link handle info*/
6319 conn = hci_conn_hash_lookup_ba(hdev,
6320 dest_type, &cp->bdaddr);
/* No connection: complete with status 1 and no payload. */
6323 err = cmd_complete(sk, hdev->id,
6324 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
6325 mgmt_pending_remove(cmd);
6329 hci_req_init(&req, hdev);
/* 0x0B = vendor LE extended sub-opcode for RSSI threshold set. */
6331 th.hci_le_ext_opcode = 0x0B;
6333 th.conn_handle = conn->handle;
6334 th.alert_mask = 0x07;
6335 th.low_th = cp->low_th;
6336 th.in_range_th = cp->in_range_th;
6337 th.high_th = cp->high_th;
6339 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6340 err = hci_req_run(&req, set_rssi_threshold_complete);
6343 mgmt_pending_remove(cmd);
6344 BT_ERR("Error in requesting hci_req_run");
6349 hci_dev_unlock(hdev);
/* Reports a successful RSSI enable back to userspace: sends the
 * MGMT_OP_SET_RSSI_ENABLE cmd_complete and the MGMT_EV_RSSI_ENABLED event,
 * resets the per-link-type RSSI tracking state and marks the given address
 * as monitored, then drops the pending command.
 */
6353 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
6354 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6356 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
6357 struct mgmt_cp_set_enable_rssi *cp = data;
6358 struct pending_cmd *cmd;
6360 if (cp == NULL || rp == NULL)
6363 mgmt_rp.status = rp->status;
6364 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6365 mgmt_rp.bt_address = cp->bdaddr;
6366 mgmt_rp.link_type = cp->link_type;
6368 cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, MGMT_STATUS_SUCCESS,
6369 &mgmt_rp, sizeof(struct mgmt_cc_rsp_enable_rssi))
6371 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
6372 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Only one link per type is monitored: unset all, then set this one. */
6374 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
6375 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6376 &mgmt_rp.bt_address, true);
6380 cmd = mgmt_pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6382 mgmt_pending_remove(cmd);
6384 hci_dev_unlock(hdev);
/* Reports a successful RSSI disable: cmd_complete for
 * MGMT_OP_SET_RSSI_DISABLE plus the MGMT_EV_RSSI_DISABLED event, clears the
 * monitored flag for the address and removes the pending command.
 * NOTE(review): the reply/event sizes use struct mgmt_cc_rsp_enable_rssi
 * while mgmt_rp is a struct mgmt_cc_rp_disable_rssi — confirm the two
 * structs are layout-compatible in the full source.
 */
6387 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
6388 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
6390 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
6391 struct mgmt_cp_disable_rssi *cp = data;
6392 struct pending_cmd *cmd;
6394 if (cp == NULL || rp == NULL)
6397 mgmt_rp.status = rp->status;
6398 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
6399 mgmt_rp.bt_address = cp->bdaddr;
6400 mgmt_rp.link_type = cp->link_type;
6402 cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE, MGMT_STATUS_SUCCESS,
6403 &mgmt_rp, sizeof(struct mgmt_cc_rsp_enable_rssi));
6405 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
6406 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
6408 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
6409 &mgmt_rp.bt_address, false);
6414 cmd = mgmt_pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6416 mgmt_pending_remove(cmd);
6418 hci_dev_unlock(hdev);
/* Sends the vendor RSSI-disable command: HCI_OP_ENABLE_RSSI with LE
 * extended sub-opcode 0x01 and cs_Features 0x00 (feature bits cleared).
 * Expects an MGMT_OP_SET_RSSI_DISABLE pending cmd to already exist
 * (added by set_disable_threshold).
 */
6421 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
6422 void *data, u16 len)
6424 struct pending_cmd *cmd;
6425 struct hci_request req;
6426 struct hci_cp_set_enable_rssi cp_en = { 0, };
6429 BT_DBG("Set Disable RSSI.");
/* Sub-opcode 0x01 = enable/disable; 0x00 features = disable. */
6431 cp_en.hci_le_ext_opcode = 0x01;
6432 cp_en.le_enable_cs_Features = 0x00;
6433 cp_en.data[0] = 0x00;
6434 cp_en.data[1] = 0x00;
6435 cp_en.data[2] = 0x00;
6439 cmd = mgmt_pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
/* Elided: presumably errors when cmd is NOT found — TODO confirm. */
6441 err = cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6442 MGMT_STATUS_FAILED);
6446 if (!lmp_le_capable(hdev)) {
6447 mgmt_pending_remove(cmd);
6448 return cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6449 MGMT_STATUS_NOT_SUPPORTED);
6452 if (!hdev_is_powered(hdev)) {
6453 BT_DBG("%s", hdev->name);
6454 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6455 MGMT_STATUS_NOT_POWERED, data, len);
6456 mgmt_pending_remove(cmd);
6460 hci_req_init(&req, hdev);
6462 BT_DBG("Enable Len: %d [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6463 sizeof(struct hci_cp_set_enable_rssi),
6464 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6465 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6467 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6468 err = hci_req_run(&req, set_rssi_disable_complete);
6471 mgmt_pending_remove(cmd);
6472 BT_ERR("Error in requesting hci_req_run");
6477 hci_dev_unlock(hdev);
/* Dispatches the command-complete of the vendor RSSI HCI command to the
 * matching pending mgmt request. For a pending ENABLE: the first stage
 * (enable) chains into mgmt_set_rssi_threshold(), and the threshold stage
 * finishes via mgmt_rssi_enable_success(). For a pending DISABLE: the
 * threshold-unset stage either finishes directly (other links still
 * monitored) or chains into mgmt_set_disable_rssi() to fully disable.
 * The switch cases on rp->le_ext_opcode are elided from this listing.
 */
6483 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
6485 struct hci_cc_rsp_enable_rssi *rp = response;
6486 struct pending_cmd *cmd_enable = NULL;
6487 struct pending_cmd *cmd_disable = NULL;
6488 struct mgmt_cp_set_enable_rssi *cp_en;
6489 struct mgmt_cp_disable_rssi *cp_dis;
6492 cmd_enable = mgmt_pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
6493 cmd_disable = mgmt_pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6494 hci_dev_unlock(hdev);
6497 BT_DBG("Enable Request");
6500 BT_DBG("Disable Request");
6503 cp_en = cmd_enable->param;
6508 switch (rp->le_ext_opcode) {
6510 BT_DBG("RSSI enabled.. Setting Threshold...");
6511 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
6512 cp_en, sizeof(*cp_en));
6516 BT_DBG("Sending RSSI enable success");
6517 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
6518 cp_en, rp, rp->status);
6522 } else if (cmd_disable) {
6523 cp_dis = cmd_disable->param;
6528 switch (rp->le_ext_opcode) {
6530 BT_DBG("Sending RSSI disable success");
6531 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6532 cp_dis, rp, rp->status);
6536 /* Only unset RSSI Threshold values for the Link if
6537 RSSI is monitored for other BREDR or LE Links*/
6538 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
6539 BT_DBG("Unset Threshold. Other links being monitored");
6540 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
6541 cp_dis, rp, rp->status);
6543 BT_DBG("Unset Threshold. Disabling...");
6544 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
6545 cp_dis, sizeof(*cp_dis));
/* HCI completion callback for the RSSI-enable vendor request; log-only. */
6552 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6554 BT_DBG("status 0x%02x", status);
/* MGMT_OP_SET_RSSI_ENABLE handler: enables vendor RSSI monitoring via
 * HCI_OP_ENABLE_RSSI (sub-opcode 0x01, cs_Features 0x04). If the feature is
 * already active on some link, skips the enable stage and directly programs
 * thresholds through mgmt_set_rssi_threshold(); otherwise the second stage
 * is driven by mgmt_enable_rssi_cc() on command complete.
 */
6557 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
6558 void *data, u16 len)
6560 struct pending_cmd *cmd;
6561 struct hci_request req;
6562 struct mgmt_cp_set_enable_rssi *cp = data;
6563 struct hci_cp_set_enable_rssi cp_en = { 0, };
6565 BT_DBG("Set Enable RSSI.");
/* Sub-opcode 0x01 = enable/disable; feature bits 0x04 = enable RSSI
 * monitoring (vendor-defined — confirm against vendor HCI spec). */
6567 cp_en.hci_le_ext_opcode = 0x01;
6568 cp_en.le_enable_cs_Features = 0x04;
6569 cp_en.data[0] = 0x00;
6570 cp_en.data[1] = 0x00;
6571 cp_en.data[2] = 0x00;
6573 if (!lmp_le_capable(hdev))
6574 return cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6575 MGMT_STATUS_NOT_SUPPORTED);
6578 if (!hdev_is_powered(hdev)) {
6579 BT_DBG("%s", hdev->name);
6580 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
6586 if (mgmt_pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
6587 BT_DBG("%s", hdev->name);
6588 err = cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
6593 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp, sizeof(*cp));
6595 BT_DBG("%s", hdev->name);
6600 /* If RSSI is already enabled directly set Threshold values*/
6601 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
6602 hci_dev_unlock(hdev);
6603 BT_DBG("RSSI Enabled. Directly set Threshold");
6604 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
6608 hci_req_init(&req, hdev);
6610 BT_DBG("Enable Len: %d [%2.2X %2.2X %2.2X %2.2X %2.2X]",
6611 sizeof(struct hci_cp_set_enable_rssi),
6612 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
6613 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
6615 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
6616 err = hci_req_run(&req, set_rssi_enable_complete);
6619 mgmt_pending_remove(cmd);
6620 BT_ERR("Error in requesting hci_req_run");
6625 hci_dev_unlock(hdev);
/* HCI completion callback for MGMT_OP_GET_RAW_RSSI: completes and removes
 * the pending mgmt command (reply payload is elided from this listing).
 */
6630 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
6632 struct pending_cmd *cmd;
6634 BT_DBG("status 0x%02x", status);
6638 cmd = mgmt_pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
6642 cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI, MGMT_STATUS_SUCCESS,
6645 mgmt_pending_remove(cmd);
6648 hci_dev_unlock(hdev);
/* MGMT_OP_GET_RAW_RSSI handler: resolves the LE (link_type 0x01) or ACL
 * connection for cp->bt_address and issues the vendor HCI_OP_GET_RAW_RSSI
 * command with that connection handle; the raw value is returned later via
 * get_raw_rssi_complete()/mgmt_raw_rssi_response().
 */
6651 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
6654 struct pending_cmd *cmd;
6655 struct hci_request req;
6656 struct mgmt_cp_get_raw_rssi *cp = data;
6657 struct hci_cp_get_raw_rssi hci_cp;
6659 struct hci_conn *conn;
6663 BT_DBG("Get Raw RSSI.");
6664 if (!lmp_le_capable(hdev))
6665 return cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6666 MGMT_STATUS_NOT_SUPPORTED);
6670 if (cp->link_type == 0x01) {
6671 dest_type = LE_LINK;
6673 dest_type = ACL_LINK;
6676 /* Get LE/BREDR link handle info*/
6677 conn = hci_conn_hash_lookup_ba(hdev,
6678 dest_type, &cp->bt_address);
/* No connection: complete with status 1 (elided condition above). */
6680 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_RAW_RSSI, 1,
6684 hci_cp.conn_handle = conn->handle;
6686 if (!hdev_is_powered(hdev)) {
6687 BT_DBG("%s", hdev->name);
6688 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_RAW_RSSI, 0,
6694 if (mgmt_pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
6695 BT_DBG("%s", hdev->name);
6696 err = cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6701 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
6703 BT_DBG("%s", hdev->name);
6708 hci_req_init(&req, hdev);
6710 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
6711 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
6712 err = hci_req_run(&req, get_raw_rssi_complete);
6715 mgmt_pending_remove(cmd);
6716 BT_ERR("Error in requesting hci_req_run");
6720 hci_dev_unlock(hdev);
/* Translates a vendor raw-RSSI command-complete into the MGMT_EV_RAW_RSSI
 * event: maps the connection handle back to a bdaddr and link type
 * (0x01 = LE, 0x00 = BR/EDR) and broadcasts status + RSSI in dBm.
 */
6725 void mgmt_raw_rssi_response(struct hci_dev *hdev,
6726 struct hci_cc_rp_get_raw_rssi *rp, int success)
6728 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
6729 struct hci_conn *conn;
6731 mgmt_rp.status = rp->status;
6732 mgmt_rp.rssi_dbm = rp->rssi_dbm;
6734 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
6738 bacpy(&mgmt_rp.bt_address, &conn->dst);
6739 if (conn->type == LE_LINK) {
6740 mgmt_rp.link_type = 0x01;
6742 mgmt_rp.link_type = 0x00;
6745 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
6746 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* HCI completion callback for the RSSI threshold-unset request; log-only. */
6749 static void set_disable_threshold_complete(struct hci_dev *hdev,
6750 u8 status, u16 opcode)
6752 BT_DBG("status 0x%02x", status);
/* Removes RSSI monitoring for a single link: clears the vendor threshold
 * settings (sub-opcode 0x0B with alert_mask 0x00 and zeroed thresholds) for
 * the LE or ACL connection to cp->bdaddr. Full disable of the feature, when
 * no other link is monitored, is chained later via mgmt_enable_rssi_cc() ->
 * mgmt_set_disable_rssi().
 */
6756 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
6757 void *data, u16 len)
6760 struct hci_cp_set_rssi_threshold th = { 0, };
6761 struct mgmt_cp_disable_rssi *cp = data;
6762 struct hci_conn *conn;
6763 struct pending_cmd *cmd;
6764 struct hci_request req;
6766 BT_DBG("Set Disable RSSI.");
6768 if (!lmp_le_capable(hdev)) {
6769 return cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6770 MGMT_STATUS_NOT_SUPPORTED);
6775 /* Get LE/ACL link handle info*/
6776 if (cp->link_type == 0x01)
6777 dest_type = LE_LINK;
6779 dest_type = ACL_LINK;
6781 conn = hci_conn_hash_lookup_ba(hdev,
6782 dest_type, &cp->bdaddr);
/* No connection: complete with status 1 and no payload. */
6784 err = cmd_complete(sk, hdev->id,
6785 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* alert_mask 0x00 + zero thresholds = stop alerts for this handle. */
6789 th.hci_le_ext_opcode = 0x0B;
6791 th.conn_handle = conn->handle;
6792 th.alert_mask = 0x00;
6794 th.in_range_th = 0x00;
6797 if (!hdev_is_powered(hdev)) {
6798 BT_DBG("%s", hdev->name);
6799 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE, 0,
6804 if (mgmt_pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
6805 BT_DBG("%s", hdev->name);
6806 err = cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6811 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp, sizeof(*cp));
6813 BT_DBG("%s", hdev->name);
6818 hci_req_init(&req, hdev);
6820 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6821 err = hci_req_run(&req, set_disable_threshold_complete);
6823 mgmt_pending_remove(cmd);
6824 BT_ERR("Error in requesting hci_req_run");
6829 hci_dev_unlock(hdev);
/* Forwards a vendor-specific RSSI alert HCI event to userspace as
 * MGMT_EV_RSSI_ALERT, translating the connection handle to bdaddr and
 * link type (0x01 = LE, 0x00 = BR/EDR). Drops the event if the handle no
 * longer maps to a connection.
 */
6834 void mgmt_rssi_alert_evt(struct hci_dev *hdev, struct sk_buff *skb)
6836 struct hci_ev_vendor_specific_rssi_alert *ev = (void *) skb->data;
6837 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
6838 struct hci_conn *conn;
6840 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
6841 ev->conn_handle, ev->alert_type, ev->rssi_dbm);
6843 conn = hci_conn_hash_lookup_handle(hdev, ev->conn_handle);
6846 BT_ERR("RSSI alert Error: Device not found for handle");
6849 bacpy(&mgmt_ev.bdaddr, &conn->dst);
6851 if (conn->type == LE_LINK)
6852 mgmt_ev.link_type = 0x01;
6854 mgmt_ev.link_type = 0x00;
6856 mgmt_ev.alert_type = ev->alert_type;
6857 mgmt_ev.rssi_dbm = ev->rssi_dbm;
6859 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
6860 sizeof(struct mgmt_ev_vendor_specific_rssi_alert), NULL);
/* Forwards a vendor multi-advertising state-change HCI event to userspace
 * as MGMT_EV_MULTI_ADV_STATE_CHANGED, copying instance id, change reason
 * and connection handle verbatim.
 */
6863 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
6865 struct hci_ev_vendor_specific_multi_adv_state *ev = (void *) skb->data;
6866 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
6868 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
6869 ev->adv_instance, ev->state_change_reason, ev->connection_handle);
6871 mgmt_ev.adv_instance = ev->adv_instance;
6872 mgmt_ev.state_change_reason = ev->state_change_reason;
6873 mgmt_ev.connection_handle = ev->connection_handle;
6875 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
6876 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed), NULL);
/* MGMT_OP_ENABLE_6LOWPAN handler: toggles the BT 6LoWPAN subsystem on or
 * off per cp->enable_6lowpan. Requires a powered, LE-capable controller.
 */
6879 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
6880 void *data, u16 len)
6883 struct mgmt_cp_enable_6lowpan *cp = data;
6885 BT_DBG("%s", hdev->name);
6889 if (!hdev_is_powered(hdev)) {
6890 err = cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
6891 MGMT_STATUS_NOT_POWERED);
6895 if (!lmp_le_capable(hdev)) {
6896 err = cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
6897 MGMT_STATUS_NOT_SUPPORTED);
6901 if (cp->enable_6lowpan)
6902 bt_6lowpan_enable();
6904 bt_6lowpan_disable();
6906 err = cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
6907 MGMT_STATUS_SUCCESS, NULL, 0);
6909 hci_dev_unlock(hdev);
/* MGMT_OP_CONNECT_6LOWPAN handler: validates that cp->addr is an LE address
 * (public or random), then initiates a 6LoWPAN connection outside the hdev
 * lock via _bt_6lowpan_connect(). Replies REJECTED if that call fails.
 */
6913 static int connect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
6914 void *data, u16 len)
6916 struct mgmt_cp_connect_6lowpan *cp = data;
6917 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
6920 BT_DBG("%s", hdev->name);
6924 if (!lmp_le_capable(hdev)) {
6925 err = cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
6926 MGMT_STATUS_NOT_SUPPORTED);
6930 if (!hdev_is_powered(hdev)) {
6931 err = cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
6932 MGMT_STATUS_REJECTED);
6936 if (bdaddr_type_is_le(cp->addr.type)) {
6937 if (cp->addr.type == BDADDR_LE_PUBLIC)
6938 addr_type = ADDR_LE_DEV_PUBLIC;
6940 addr_type = ADDR_LE_DEV_RANDOM;
/* Non-LE address type: reject with INVALID_PARAMS. */
6942 err = cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
6943 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
/* Unlock before calling into the 6LoWPAN layer. */
6947 hci_dev_unlock(hdev);
6949 /* 6lowpan Connect */
6950 err = _bt_6lowpan_connect(&cp->addr.bdaddr, cp->addr.type);
6955 err = cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
6956 MGMT_STATUS_REJECTED, NULL, 0);
6961 err = cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
6964 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT_6LOWPAN handler: validates the LE address, finds the
 * matching connected LE link (checking both dst_type and BT_CONNECTED
 * state) and tears down its 6LoWPAN channel via _bt_6lowpan_disconnect().
 * NOTE(review): the final success reply uses MGMT_OP_CONNECT_6LOWPAN, not
 * MGMT_OP_DISCONNECT_6LOWPAN — looks like a copy-paste defect; confirm
 * against upstream before relying on the reply opcode.
 */
6968 static int disconnect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
6969 void *data, u16 len)
6971 struct mgmt_cp_disconnect_6lowpan *cp = data;
6972 struct hci_conn *conn = NULL;
6973 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
6976 BT_DBG("%s", hdev->name);
6980 if (!lmp_le_capable(hdev)) {
6981 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
6982 MGMT_STATUS_NOT_SUPPORTED);
6986 if (!hdev_is_powered(hdev)) {
6987 err = cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
6988 MGMT_STATUS_REJECTED);
6992 if (bdaddr_type_is_le(cp->addr.type)) {
6993 if (cp->addr.type == BDADDR_LE_PUBLIC)
6994 addr_type = ADDR_LE_DEV_PUBLIC;
6996 addr_type = ADDR_LE_DEV_RANDOM;
6998 err = cmd_complete(sk, hdev->id,
6999 MGMT_OP_DISCONNECT_6LOWPAN,
7000 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
7004 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7006 err = cmd_complete(sk, hdev->id,
7007 MGMT_OP_DISCONNECT_6LOWPAN,
7008 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
7012 if (conn->dst_type != addr_type) {
7013 err = cmd_complete(sk, hdev->id,
7014 MGMT_OP_DISCONNECT_6LOWPAN,
7015 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
7019 if (conn->state != BT_CONNECTED) {
7020 err = cmd_complete(sk, hdev->id,
7021 MGMT_OP_DISCONNECT_6LOWPAN,
7022 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
7026 /* 6lowpan Disconnect */
7027 err = _bt_6lowpan_disconnect(conn->l2cap_data, cp->addr.type);
7029 err = cmd_complete(sk, hdev->id,
7030 MGMT_OP_DISCONNECT_6LOWPAN,
7031 MGMT_STATUS_REJECTED, NULL, 0);
7035 err = cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
7039 hci_dev_unlock(hdev);
/* Broadcasts MGMT_EV_6LOWPAN_CONN_STATE_CHANGED with peer address, address
 * type, connected flag and the 16-byte network interface name.
 */
7043 void mgmt_6lowpan_conn_changed(struct hci_dev *hdev, char if_name[16],
7044 bdaddr_t *bdaddr, u8 addr_type, bool connected)
7047 struct mgmt_ev_6lowpan_conn_state_changed *ev = (void *)buf;
7050 memset(buf, 0, sizeof(buf));
7051 bacpy(&ev->addr.bdaddr, bdaddr);
7052 ev->addr.type = addr_type;
7053 ev->connected = connected;
7054 memcpy(ev->ifname, (__u8 *)if_name, 16);
7056 ev_size = sizeof(*ev);
7058 mgmt_event(MGMT_EV_6LOWPAN_CONN_STATE_CHANGED, hdev, ev, ev_size, NULL);
/* Completion of HCI LE Read Maximum Data Length: replies to the pending
 * MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH command with the cached max TX/RX
 * octets and times (little-endian), or cmd_status on HCI failure.
 */
7061 void mgmt_le_read_maximum_data_length_complete(struct hci_dev *hdev, u8 status)
7063 struct pending_cmd *cmd;
7064 struct mgmt_rp_le_read_maximum_data_length rp;
7066 BT_DBG("%s status %u", hdev->name, status);
7068 cmd = mgmt_pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev);
7073 cmd_status(cmd->sk, hdev->id,
7074 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
7075 mgmt_status(status));
7077 memset(&rp, 0, sizeof(rp));
7079 rp.max_tx_octets = cpu_to_le16(hdev->le_max_tx_len);
7080 rp.max_tx_time = cpu_to_le16(hdev->le_max_tx_time);
7081 rp.max_rx_octets = cpu_to_le16(hdev->le_max_rx_len);
7082 rp.max_rx_time = cpu_to_le16(hdev->le_max_rx_time);
7084 cmd_complete(cmd->sk, hdev->id,
7085 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, 0,
7088 mgmt_pending_remove(cmd);
/* MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH handler: queues the command as
 * pending and sends HCI_OP_LE_READ_MAX_DATA_LEN (no parameters); the reply
 * is delivered by mgmt_le_read_maximum_data_length_complete().
 */
7091 static int read_maximum_le_data_length(struct sock *sk,
7092 struct hci_dev *hdev, void *data, u16 len)
7094 struct pending_cmd *cmd;
7097 BT_DBG("read_maximum_le_data_length %s", hdev->name);
7101 if (!hdev_is_powered(hdev)) {
7102 err = cmd_status(sk, hdev->id,
7103 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
7104 MGMT_STATUS_NOT_POWERED);
7108 if (!lmp_le_capable(hdev)) {
7109 err = cmd_status(sk, hdev->id,
7110 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
7111 MGMT_STATUS_NOT_SUPPORTED);
7115 if (mgmt_pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev)) {
7116 err = cmd_status(sk, hdev->id,
7117 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
7122 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
7129 err = hci_send_cmd(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
7131 mgmt_pending_remove(cmd);
7134 hci_dev_unlock(hdev);
/* Completion of HCI LE Write Suggested Default Data Length: resolves the
 * pending MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH command with
 * cmd_status on failure or cmd_complete on success.
 */
7138 void mgmt_le_write_host_suggested_data_length_complete(struct hci_dev *hdev,
7141 struct pending_cmd *cmd;
7143 BT_DBG("status 0x%02x", status);
7147 cmd = mgmt_pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev);
7149 BT_ERR("cmd not found in the pending list");
7154 cmd_status(cmd->sk, hdev->id,
7155 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
7156 mgmt_status(status));
7158 cmd_complete(cmd->sk, hdev->id,
7159 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
7162 mgmt_pending_remove(cmd);
7165 hci_dev_unlock(hdev);
/* MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH handler: forwards the
 * caller's default TX octets/time into HCI_OP_LE_WRITE_DEF_DATA_LEN.
 * Reply is produced by the completion handler above.
 */
7168 static int write_host_suggested_le_data_length(struct sock *sk,
7169 struct hci_dev *hdev, void *data, u16 len)
7171 struct pending_cmd *cmd;
7172 struct mgmt_cp_le_write_host_suggested_data_length *cp = data;
7173 struct hci_cp_le_write_def_data_len hci_data;
7176 BT_DBG("Write host suggested data length request for %s", hdev->name);
7180 if (!hdev_is_powered(hdev)) {
7181 err = cmd_status(sk, hdev->id,
7182 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
7183 MGMT_STATUS_NOT_POWERED);
7187 if (!lmp_le_capable(hdev)) {
7188 err = cmd_status(sk, hdev->id,
7189 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
7190 MGMT_STATUS_NOT_SUPPORTED);
7194 if (mgmt_pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
7195 err = cmd_status(sk, hdev->id,
7196 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
7201 cmd = mgmt_pending_add(sk, MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
/* Fields copied as-is; mgmt wire format is already little-endian. */
7208 hci_data.tx_len = cp->def_tx_octets;
7209 hci_data.tx_time = cp->def_tx_time;
7211 err = hci_send_cmd(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
7212 sizeof(hci_data), &hci_data);
7214 mgmt_pending_remove(cmd);
7217 hci_dev_unlock(hdev);
/* Completion of HCI LE Read Suggested Default Data Length: replies to the
 * pending MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH command with the
 * cached default TX octets/time (little-endian), or cmd_status on failure.
 */
7222 void mgmt_le_read_host_suggested_data_length_complete(struct hci_dev *hdev,
7225 struct pending_cmd *cmd;
7226 struct mgmt_rp_le_read_host_suggested_data_length rp;
7228 BT_DBG("%s status %u", hdev->name, status);
7230 cmd = mgmt_pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev);
7232 BT_ERR("cmd not found in the pending list");
7237 cmd_status(cmd->sk, hdev->id,
7238 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
7239 mgmt_status(status));
7241 memset(&rp, 0, sizeof(rp));
7243 rp.def_tx_octets = cpu_to_le16(hdev->le_def_tx_len);
7244 rp.def_tx_time = cpu_to_le16(hdev->le_def_tx_time);
7246 cmd_complete(cmd->sk, hdev->id,
7247 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, 0,
7250 mgmt_pending_remove(cmd);
/* MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH handler: queues the command
 * as pending and sends HCI_OP_LE_READ_DEF_DATA_LEN (no parameters); the
 * reply is delivered by the completion handler above.
 */
7253 static int read_host_suggested_data_length(struct sock *sk,
7254 struct hci_dev *hdev, void *data, u16 len)
7256 struct pending_cmd *cmd;
7259 BT_DBG("read_host_suggested_data_length %s", hdev->name);
7263 if (!hdev_is_powered(hdev)) {
7264 err = cmd_status(sk, hdev->id,
7265 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
7266 MGMT_STATUS_NOT_POWERED);
7270 if (!lmp_le_capable(hdev)) {
7271 err = cmd_status(sk, hdev->id,
7272 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
7273 MGMT_STATUS_NOT_SUPPORTED);
7277 if (mgmt_pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
7278 err = cmd_status(sk, hdev->id,
7279 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
7284 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
7291 err = hci_send_cmd(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
7293 mgmt_pending_remove(cmd);
7296 hci_dev_unlock(hdev);
/* MGMT_OP_LE_SET_DATA_LENGTH handler: looks up the LE connection for
 * cp->bdaddr and requests new max TX octets/time via
 * hci_le_set_data_length(). Replies immediately with the connection handle;
 * the actual change is reported later through
 * mgmt_le_data_length_change_complete().
 * NOTE(review): rp is kmalloc'd but no kfree is visible in this elided
 * listing — confirm the full source releases it on all paths.
 */
7301 static int set_le_data_length_params(struct sock *sk, struct hci_dev *hdev,
7302 void *data, u16 len)
7304 struct mgmt_cp_le_set_data_length *cp = data;
7305 struct mgmt_rp_le_set_data_length *rp;
7306 struct pending_cmd *cmd;
7307 struct hci_conn *conn;
7309 u16 max_tx_octets, max_tx_time;
7312 BT_INFO("Set Data length for the device %s", hdev->name);
7316 rp_len = sizeof(*rp);
7317 rp = kmalloc(rp_len, GFP_KERNEL);
7323 if (!hdev_is_powered(hdev)) {
7324 err = cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
7325 MGMT_STATUS_NOT_POWERED);
7329 if (!lmp_le_capable(hdev)) {
7330 err = cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
7331 MGMT_STATUS_NOT_SUPPORTED);
7335 if (mgmt_pending_find(MGMT_OP_LE_SET_DATA_LENGTH, hdev)) {
7336 err = cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
7341 cmd = mgmt_pending_add(sk, MGMT_OP_LE_SET_DATA_LENGTH, hdev, data, len);
7347 max_tx_octets = __le16_to_cpu(cp->max_tx_octets);
7348 max_tx_time = __le16_to_cpu(cp->max_tx_time);
7350 BT_DBG("max_tx_octets 0x%4.4x max_tx_time 0x%4.4x latency",
7351 max_tx_octets, max_tx_time);
7353 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
7355 cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
7356 MGMT_STATUS_NOT_CONNECTED);
7360 hci_dev_unlock(hdev);
7362 err = hci_le_set_data_length(conn, max_tx_octets, max_tx_time);
7364 mgmt_pending_remove(cmd);
7366 rp->handle = conn->handle;
7370 err = cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH, 0,
7374 hci_dev_unlock(hdev);
/* Broadcasts MGMT_EV_LE_DATA_LENGTH_CHANGED with the peer address and the
 * newly negotiated max TX/RX octets and times.
 */
7379 void mgmt_le_data_length_change_complete(struct hci_dev *hdev,
7380 bdaddr_t *bdaddr, u16 tx_octets, u16 tx_time,
7381 u16 rx_octets, u16 rx_time)
7383 struct mgmt_ev_le_data_length_changed ev;
7385 bacpy(&ev.addr.bdaddr, bdaddr);
7386 ev.max_tx_octets = tx_octets;
7387 ev.max_tx_time = tx_time;
7388 ev.max_rx_octets = rx_octets;
7389 ev.max_rx_time = rx_time;
7391 mgmt_event(MGMT_EV_LE_DATA_LENGTH_CHANGED, hdev, &ev, sizeof(ev), NULL);
7393 /* END TIZEN_Bluetooth */
/* Validates one mgmt_ltk_info entry: master must be 0 or 1; address must be
 * LE public, or LE random with the two most significant address bits set
 * (i.e. a static random address per the BT core spec).
 */
7396 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7398 if (key->master != 0x00 && key->master != 0x01)
7401 switch (key->addr.type) {
7402 case BDADDR_LE_PUBLIC:
7405 case BDADDR_LE_RANDOM:
7406 /* Two most significant bits shall be set */
7407 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validates key_count against the
 * maximum that fits in a u16-sized payload and against the actual packet
 * length, checks every entry with ltk_is_valid(), then clears the existing
 * SMP LTK store and re-adds each key, mapping mgmt key types to SMP types
 * (legacy master/slave, P-256 unauth/auth/debug).
 */
7415 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7416 void *cp_data, u16 len)
7418 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7419 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7420 sizeof(struct mgmt_ltk_info));
7421 u16 key_count, expected_len;
7424 BT_DBG("request for %s", hdev->name);
7426 if (!lmp_le_capable(hdev))
7427 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7428 MGMT_STATUS_NOT_SUPPORTED);
7430 key_count = __le16_to_cpu(cp->key_count);
7431 if (key_count > max_key_count) {
7432 BT_ERR("load_ltks: too big key_count value %u", key_count);
7433 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7434 MGMT_STATUS_INVALID_PARAMS);
/* Packet length must exactly match the declared key count. */
7437 expected_len = sizeof(*cp) + key_count *
7438 sizeof(struct mgmt_ltk_info);
7439 if (expected_len != len) {
7440 BT_ERR("load_keys: expected %u bytes, got %u bytes",
7442 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7443 MGMT_STATUS_INVALID_PARAMS);
7446 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate every key before mutating any state. */
7448 for (i = 0; i < key_count; i++) {
7449 struct mgmt_ltk_info *key = &cp->keys[i];
7451 if (!ltk_is_valid(key))
7452 return cmd_status(sk, hdev->id,
7453 MGMT_OP_LOAD_LONG_TERM_KEYS,
7454 MGMT_STATUS_INVALID_PARAMS);
/* Replace, don't merge: drop all previously stored LTKs first. */
7459 hci_smp_ltks_clear(hdev);
7461 for (i = 0; i < key_count; i++) {
7462 struct mgmt_ltk_info *key = &cp->keys[i];
7463 u8 type, addr_type, authenticated;
7465 if (key->addr.type == BDADDR_LE_PUBLIC)
7466 addr_type = ADDR_LE_DEV_PUBLIC;
7468 addr_type = ADDR_LE_DEV_RANDOM;
7470 switch (key->type) {
7471 case MGMT_LTK_UNAUTHENTICATED:
7472 authenticated = 0x00;
7473 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
7475 case MGMT_LTK_AUTHENTICATED:
7476 authenticated = 0x01;
7477 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
7479 case MGMT_LTK_P256_UNAUTH:
7480 authenticated = 0x00;
7481 type = SMP_LTK_P256;
7483 case MGMT_LTK_P256_AUTH:
7484 authenticated = 0x01;
7485 type = SMP_LTK_P256;
7487 case MGMT_LTK_P256_DEBUG:
7488 authenticated = 0x00;
7489 type = SMP_LTK_P256_DEBUG;
7494 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
7495 authenticated, key->val, key->enc_size, key->ediv,
7499 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7502 hci_dev_unlock(hdev);
/* cmd_complete helper for MGMT_OP_GET_CONN_INFO: on success reports the
 * connection's cached rssi/tx_power/max_tx_power; on failure reports the
 * HCI_*_INVALID sentinels. Drops the conn reference taken when the command
 * was queued.
 */
7507 static int conn_info_cmd_complete(struct pending_cmd *cmd, u8 status)
7509 struct hci_conn *conn = cmd->user_data;
7510 struct mgmt_rp_get_conn_info rp;
7513 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7515 if (status == MGMT_STATUS_SUCCESS) {
7516 rp.rssi = conn->rssi;
7517 rp.tx_power = conn->tx_power;
7518 rp.max_tx_power = conn->max_tx_power;
7520 rp.rssi = HCI_RSSI_INVALID;
7521 rp.tx_power = HCI_TX_POWER_INVALID;
7522 rp.max_tx_power = HCI_TX_POWER_INVALID;
7525 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7528 hci_conn_drop(conn);
/* Completion of the Read RSSI / Read TX Power request pair issued for
 * MGMT_OP_GET_CONN_INFO. Recovers the connection handle from whichever
 * command was sent last (both start with the handle field), maps it back to
 * the hci_conn, and resolves the matching pending command via its
 * cmd_complete callback (conn_info_cmd_complete).
 */
7534 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
7537 struct hci_cp_read_rssi *cp;
7538 struct pending_cmd *cmd;
7539 struct hci_conn *conn;
7543 BT_DBG("status 0x%02x", hci_status);
7547 /* Commands sent in request are either Read RSSI or Read Transmit Power
7548 * Level so we check which one was last sent to retrieve connection
7549 * handle. Both commands have handle as first parameter so it's safe to
7550 * cast data on the same command struct.
7552 * First command sent is always Read RSSI and we fail only if it fails.
7553 * In other case we simply override error to indicate success as we
7554 * already remembered if TX power value is actually valid.
7556 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
7558 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
7559 status = MGMT_STATUS_SUCCESS;
7561 status = mgmt_status(hci_status);
7565 BT_ERR("invalid sent_cmd in conn_info response");
7569 handle = __le16_to_cpu(cp->handle);
7570 conn = hci_conn_hash_lookup_handle(hdev, handle);
7572 BT_ERR("unknown handle (%d) in conn_info response", handle);
7576 cmd = mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
7580 cmd->cmd_complete(cmd, status);
7581 mgmt_pending_remove(cmd);
7584 hci_dev_unlock(hdev);
7588 #ifdef CONFIG_TIZEN_WIP
7589 /* defination of "prandom_u32_max" is imported from latest kernel,
7590 * so if the kernel is migrated to latest, below defination should be removed
7594 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
7595 * @ep_ro: right open interval endpoint
7597 * Returns a pseudo-random number that is in interval [0, ep_ro). Note
7598 * that the result depends on PRNG being well distributed in [0, ~0U]
7599 * u32 space. Here we use maximally equidistributed combined Tausworthe
7600 * generator, that is, prandom_u32(). This is useful when requesting a
7601 * random index of an array containing ep_ro elements, for example.
7603 * Returns: pseudo-random number in interval [0, ep_ro)
7605 static inline u32 prandom_u32_max(u32 ep_ro)
7607 return (u32)(((u64) prandom_u32() * ep_ro) >> 32);
7611 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7614 struct mgmt_cp_get_conn_info *cp = data;
7615 struct mgmt_rp_get_conn_info rp;
7616 struct hci_conn *conn;
7617 unsigned long conn_info_age;
7620 BT_DBG("%s", hdev->name);
7622 memset(&rp, 0, sizeof(rp));
7623 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7624 rp.addr.type = cp->addr.type;
7626 if (!bdaddr_type_is_valid(cp->addr.type))
7627 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7628 MGMT_STATUS_INVALID_PARAMS,
7633 if (!hdev_is_powered(hdev)) {
7634 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7635 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
7639 if (cp->addr.type == BDADDR_BREDR)
7640 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7643 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7645 if (!conn || conn->state != BT_CONNECTED) {
7646 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7647 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
7651 if (mgmt_pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
7652 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7653 MGMT_STATUS_BUSY, &rp, sizeof(rp));
7657 /* To avoid client trying to guess when to poll again for information we
7658 * calculate conn info age as random value between min/max set in hdev.
7660 conn_info_age = hdev->conn_info_min_age +
7661 prandom_u32_max(hdev->conn_info_max_age -
7662 hdev->conn_info_min_age);
7664 /* Query controller to refresh cached values if they are too old or were
7667 if (time_after(jiffies, conn->conn_info_timestamp +
7668 msecs_to_jiffies(conn_info_age)) ||
7669 !conn->conn_info_timestamp) {
7670 struct hci_request req;
7671 struct hci_cp_read_tx_power req_txp_cp;
7672 struct hci_cp_read_rssi req_rssi_cp;
7673 struct pending_cmd *cmd;
7675 hci_req_init(&req, hdev);
7676 req_rssi_cp.handle = cpu_to_le16(conn->handle);
7677 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
7680 /* For LE links TX power does not change thus we don't need to
7681 * query for it once value is known.
7683 if (!bdaddr_type_is_le(cp->addr.type) ||
7684 conn->tx_power == HCI_TX_POWER_INVALID) {
7685 req_txp_cp.handle = cpu_to_le16(conn->handle);
7686 req_txp_cp.type = 0x00;
7687 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7688 sizeof(req_txp_cp), &req_txp_cp);
7691 /* Max TX power needs to be read only once per connection */
7692 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
7693 req_txp_cp.handle = cpu_to_le16(conn->handle);
7694 req_txp_cp.type = 0x01;
7695 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7696 sizeof(req_txp_cp), &req_txp_cp);
7699 err = hci_req_run(&req, conn_info_refresh_complete);
7703 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
7710 hci_conn_hold(conn);
7711 cmd->user_data = hci_conn_get(conn);
7712 cmd->cmd_complete = conn_info_cmd_complete;
7714 conn->conn_info_timestamp = jiffies;
7716 /* Cache is valid, just reply with values cached in hci_conn */
7717 rp.rssi = conn->rssi;
7718 rp.tx_power = conn->tx_power;
7719 rp.max_tx_power = conn->max_tx_power;
7721 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7722 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7726 hci_dev_unlock(hdev);
7730 static int clock_info_cmd_complete(struct pending_cmd *cmd, u8 status)
7732 struct hci_conn *conn = cmd->user_data;
7733 struct mgmt_rp_get_clock_info rp;
7734 struct hci_dev *hdev;
7737 memset(&rp, 0, sizeof(rp));
7738 memcpy(&rp.addr, &cmd->param, sizeof(rp.addr));
7743 hdev = hci_dev_get(cmd->index);
7745 rp.local_clock = cpu_to_le32(hdev->clock);
7750 rp.piconet_clock = cpu_to_le32(conn->clock);
7751 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7755 err = cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7759 hci_conn_drop(conn);
7766 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7768 struct hci_cp_read_clock *hci_cp;
7769 struct pending_cmd *cmd;
7770 struct hci_conn *conn;
7772 BT_DBG("%s status %u", hdev->name, status);
7776 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
7780 if (hci_cp->which) {
7781 u16 handle = __le16_to_cpu(hci_cp->handle);
7782 conn = hci_conn_hash_lookup_handle(hdev, handle);
7787 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
7791 cmd->cmd_complete(cmd, mgmt_status(status));
7792 mgmt_pending_remove(cmd);
7795 hci_dev_unlock(hdev);
7798 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7801 struct mgmt_cp_get_clock_info *cp = data;
7802 struct mgmt_rp_get_clock_info rp;
7803 struct hci_cp_read_clock hci_cp;
7804 struct pending_cmd *cmd;
7805 struct hci_request req;
7806 struct hci_conn *conn;
7809 BT_DBG("%s", hdev->name);
7811 memset(&rp, 0, sizeof(rp));
7812 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7813 rp.addr.type = cp->addr.type;
7815 if (cp->addr.type != BDADDR_BREDR)
7816 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7817 MGMT_STATUS_INVALID_PARAMS,
7822 if (!hdev_is_powered(hdev)) {
7823 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7824 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
7828 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7829 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7831 if (!conn || conn->state != BT_CONNECTED) {
7832 err = cmd_complete(sk, hdev->id,
7833 MGMT_OP_GET_CLOCK_INFO,
7834 MGMT_STATUS_NOT_CONNECTED,
7842 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7848 cmd->cmd_complete = clock_info_cmd_complete;
7850 hci_req_init(&req, hdev);
7852 memset(&hci_cp, 0, sizeof(hci_cp));
7853 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
7856 hci_conn_hold(conn);
7857 cmd->user_data = hci_conn_get(conn);
7859 hci_cp.handle = cpu_to_le16(conn->handle);
7860 hci_cp.which = 0x01; /* Piconet clock */
7861 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
7864 err = hci_req_run(&req, get_clock_info_complete);
7866 mgmt_pending_remove(cmd);
7869 hci_dev_unlock(hdev);
7873 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7875 struct hci_conn *conn;
7877 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7881 if (conn->dst_type != type)
7884 if (conn->state != BT_CONNECTED)
7890 /* This function requires the caller holds hdev->lock */
7891 static int hci_conn_params_set(struct hci_request *req, bdaddr_t *addr,
7892 u8 addr_type, u8 auto_connect)
7894 struct hci_dev *hdev = req->hdev;
7895 struct hci_conn_params *params;
7897 params = hci_conn_params_add(hdev, addr, addr_type);
7901 if (params->auto_connect == auto_connect)
7904 list_del_init(¶ms->action);
7906 switch (auto_connect) {
7907 case HCI_AUTO_CONN_DISABLED:
7908 case HCI_AUTO_CONN_LINK_LOSS:
7909 __hci_update_background_scan(req);
7911 case HCI_AUTO_CONN_REPORT:
7912 list_add(¶ms->action, &hdev->pend_le_reports);
7913 __hci_update_background_scan(req);
7915 case HCI_AUTO_CONN_DIRECT:
7916 case HCI_AUTO_CONN_ALWAYS:
7917 if (!is_connected(hdev, addr, addr_type)) {
7918 list_add(¶ms->action, &hdev->pend_le_conns);
7919 __hci_update_background_scan(req);
7924 params->auto_connect = auto_connect;
7926 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
7932 static void device_added(struct sock *sk, struct hci_dev *hdev,
7933 bdaddr_t *bdaddr, u8 type, u8 action)
7935 struct mgmt_ev_device_added ev;
7937 bacpy(&ev.addr.bdaddr, bdaddr);
7938 ev.addr.type = type;
7941 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7944 static void add_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7946 struct pending_cmd *cmd;
7948 BT_DBG("status 0x%02x", status);
7952 cmd = mgmt_pending_find(MGMT_OP_ADD_DEVICE, hdev);
7956 cmd->cmd_complete(cmd, mgmt_status(status));
7957 mgmt_pending_remove(cmd);
7960 hci_dev_unlock(hdev);
7963 static int add_device(struct sock *sk, struct hci_dev *hdev,
7964 void *data, u16 len)
7966 struct mgmt_cp_add_device *cp = data;
7967 struct pending_cmd *cmd;
7968 struct hci_request req;
7969 u8 auto_conn, addr_type;
7972 BT_DBG("%s", hdev->name);
7974 if (!bdaddr_type_is_valid(cp->addr.type) ||
7975 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7976 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7977 MGMT_STATUS_INVALID_PARAMS,
7978 &cp->addr, sizeof(cp->addr));
7980 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7981 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7982 MGMT_STATUS_INVALID_PARAMS,
7983 &cp->addr, sizeof(cp->addr));
7985 hci_req_init(&req, hdev);
7989 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEVICE, hdev, data, len);
7995 cmd->cmd_complete = addr_cmd_complete;
7997 if (cp->addr.type == BDADDR_BREDR) {
7998 /* Only incoming connections action is supported for now */
7999 if (cp->action != 0x01) {
8000 err = cmd->cmd_complete(cmd,
8001 MGMT_STATUS_INVALID_PARAMS);
8002 mgmt_pending_remove(cmd);
8006 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
8011 __hci_update_page_scan(&req);
8016 if (cp->addr.type == BDADDR_LE_PUBLIC)
8017 addr_type = ADDR_LE_DEV_PUBLIC;
8019 addr_type = ADDR_LE_DEV_RANDOM;
8021 if (cp->action == 0x02)
8022 auto_conn = HCI_AUTO_CONN_ALWAYS;
8023 else if (cp->action == 0x01)
8024 auto_conn = HCI_AUTO_CONN_DIRECT;
8026 auto_conn = HCI_AUTO_CONN_REPORT;
8028 /* If the connection parameters don't exist for this device,
8029 * they will be created and configured with defaults.
8031 if (hci_conn_params_set(&req, &cp->addr.bdaddr, addr_type,
8033 err = cmd->cmd_complete(cmd, MGMT_STATUS_FAILED);
8034 mgmt_pending_remove(cmd);
8039 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
8041 err = hci_req_run(&req, add_device_complete);
8043 /* ENODATA means no HCI commands were needed (e.g. if
8044 * the adapter is powered off).
8046 if (err == -ENODATA)
8047 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
8048 mgmt_pending_remove(cmd);
8052 hci_dev_unlock(hdev);
8056 static void device_removed(struct sock *sk, struct hci_dev *hdev,
8057 bdaddr_t *bdaddr, u8 type)
8059 struct mgmt_ev_device_removed ev;
8061 bacpy(&ev.addr.bdaddr, bdaddr);
8062 ev.addr.type = type;
8064 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
8067 static void remove_device_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8069 struct pending_cmd *cmd;
8071 BT_DBG("status 0x%02x", status);
8075 cmd = mgmt_pending_find(MGMT_OP_REMOVE_DEVICE, hdev);
8079 cmd->cmd_complete(cmd, mgmt_status(status));
8080 mgmt_pending_remove(cmd);
8083 hci_dev_unlock(hdev);
8086 static int remove_device(struct sock *sk, struct hci_dev *hdev,
8087 void *data, u16 len)
8089 struct mgmt_cp_remove_device *cp = data;
8090 struct pending_cmd *cmd;
8091 struct hci_request req;
8094 BT_DBG("%s", hdev->name);
8096 hci_req_init(&req, hdev);
8100 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEVICE, hdev, data, len);
8106 cmd->cmd_complete = addr_cmd_complete;
8108 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
8109 struct hci_conn_params *params;
8112 if (!bdaddr_type_is_valid(cp->addr.type)) {
8113 err = cmd->cmd_complete(cmd,
8114 MGMT_STATUS_INVALID_PARAMS);
8115 mgmt_pending_remove(cmd);
8119 if (cp->addr.type == BDADDR_BREDR) {
8120 err = hci_bdaddr_list_del(&hdev->whitelist,
8124 err = cmd->cmd_complete(cmd,
8125 MGMT_STATUS_INVALID_PARAMS);
8126 mgmt_pending_remove(cmd);
8130 __hci_update_page_scan(&req);
8132 device_removed(sk, hdev, &cp->addr.bdaddr,
8137 if (cp->addr.type == BDADDR_LE_PUBLIC)
8138 addr_type = ADDR_LE_DEV_PUBLIC;
8140 addr_type = ADDR_LE_DEV_RANDOM;
8142 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
8145 err = cmd->cmd_complete(cmd,
8146 MGMT_STATUS_INVALID_PARAMS);
8147 mgmt_pending_remove(cmd);
8151 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
8152 err = cmd->cmd_complete(cmd,
8153 MGMT_STATUS_INVALID_PARAMS);
8154 mgmt_pending_remove(cmd);
8158 list_del(¶ms->action);
8159 list_del(¶ms->list);
8161 __hci_update_background_scan(&req);
8163 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
8165 struct hci_conn_params *p, *tmp;
8166 struct bdaddr_list *b, *btmp;
8168 if (cp->addr.type) {
8169 err = cmd->cmd_complete(cmd,
8170 MGMT_STATUS_INVALID_PARAMS);
8171 mgmt_pending_remove(cmd);
8175 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
8176 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
8181 __hci_update_page_scan(&req);
8183 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
8184 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
8186 device_removed(sk, hdev, &p->addr, p->addr_type);
8187 list_del(&p->action);
8192 BT_DBG("All LE connection parameters were removed");
8194 __hci_update_background_scan(&req);
8198 err = hci_req_run(&req, remove_device_complete);
8200 /* ENODATA means no HCI commands were needed (e.g. if
8201 * the adapter is powered off).
8203 if (err == -ENODATA)
8204 err = cmd->cmd_complete(cmd, MGMT_STATUS_SUCCESS);
8205 mgmt_pending_remove(cmd);
8209 hci_dev_unlock(hdev);
8213 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
8216 struct mgmt_cp_load_conn_param *cp = data;
8217 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
8218 sizeof(struct mgmt_conn_param));
8219 u16 param_count, expected_len;
8222 if (!lmp_le_capable(hdev))
8223 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8224 MGMT_STATUS_NOT_SUPPORTED);
8226 param_count = __le16_to_cpu(cp->param_count);
8227 if (param_count > max_param_count) {
8228 BT_ERR("load_conn_param: too big param_count value %u",
8230 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8231 MGMT_STATUS_INVALID_PARAMS);
8234 expected_len = sizeof(*cp) + param_count *
8235 sizeof(struct mgmt_conn_param);
8236 if (expected_len != len) {
8237 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
8239 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
8240 MGMT_STATUS_INVALID_PARAMS);
8243 BT_DBG("%s param_count %u", hdev->name, param_count);
8247 hci_conn_params_clear_disabled(hdev);
8249 for (i = 0; i < param_count; i++) {
8250 struct mgmt_conn_param *param = &cp->params[i];
8251 struct hci_conn_params *hci_param;
8252 u16 min, max, latency, timeout;
8255 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
8258 if (param->addr.type == BDADDR_LE_PUBLIC) {
8259 addr_type = ADDR_LE_DEV_PUBLIC;
8260 } else if (param->addr.type == BDADDR_LE_RANDOM) {
8261 addr_type = ADDR_LE_DEV_RANDOM;
8263 BT_ERR("Ignoring invalid connection parameters");
8267 min = le16_to_cpu(param->min_interval);
8268 max = le16_to_cpu(param->max_interval);
8269 latency = le16_to_cpu(param->latency);
8270 timeout = le16_to_cpu(param->timeout);
8272 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
8273 min, max, latency, timeout);
8275 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
8276 BT_ERR("Ignoring invalid connection parameters");
8280 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
8283 BT_ERR("Failed to add connection parameters");
8287 hci_param->conn_min_interval = min;
8288 hci_param->conn_max_interval = max;
8289 hci_param->conn_latency = latency;
8290 hci_param->supervision_timeout = timeout;
8293 hci_dev_unlock(hdev);
8295 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
8298 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
8299 void *data, u16 len)
8301 struct mgmt_cp_set_external_config *cp = data;
8305 BT_DBG("%s", hdev->name);
8307 if (hdev_is_powered(hdev))
8308 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8309 MGMT_STATUS_REJECTED);
8311 if (cp->config != 0x00 && cp->config != 0x01)
8312 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8313 MGMT_STATUS_INVALID_PARAMS);
8315 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
8316 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
8317 MGMT_STATUS_NOT_SUPPORTED);
8322 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
8325 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
8328 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
8335 err = new_options(hdev, sk);
8337 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
8338 mgmt_index_removed(hdev);
8340 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
8341 set_bit(HCI_CONFIG, &hdev->dev_flags);
8342 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
8344 queue_work(hdev->req_workqueue, &hdev->power_on);
8346 set_bit(HCI_RAW, &hdev->flags);
8347 mgmt_index_added(hdev);
8352 hci_dev_unlock(hdev);
8356 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
8357 void *data, u16 len)
8359 struct mgmt_cp_set_public_address *cp = data;
8363 BT_DBG("%s", hdev->name);
8365 if (hdev_is_powered(hdev))
8366 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8367 MGMT_STATUS_REJECTED);
8369 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
8370 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8371 MGMT_STATUS_INVALID_PARAMS);
8373 if (!hdev->set_bdaddr)
8374 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
8375 MGMT_STATUS_NOT_SUPPORTED);
8379 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
8380 bacpy(&hdev->public_addr, &cp->bdaddr);
8382 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
8389 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
8390 err = new_options(hdev, sk);
8392 if (is_configured(hdev)) {
8393 mgmt_index_removed(hdev);
8395 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
8397 set_bit(HCI_CONFIG, &hdev->dev_flags);
8398 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
8400 queue_work(hdev->req_workqueue, &hdev->power_on);
8404 hci_dev_unlock(hdev);
8408 static const struct mgmt_handler {
8409 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
8413 } mgmt_handlers[] = {
8414 { NULL }, /* 0x0000 (no command) */
8415 { read_version, false, MGMT_READ_VERSION_SIZE },
8416 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
8417 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
8418 { read_controller_info, false, MGMT_READ_INFO_SIZE },
8419 { set_powered, false, MGMT_SETTING_SIZE },
8420 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
8421 { set_connectable, false, MGMT_SETTING_SIZE },
8422 { set_fast_connectable, false, MGMT_SETTING_SIZE },
8423 { set_bondable, false, MGMT_SETTING_SIZE },
8424 { set_link_security, false, MGMT_SETTING_SIZE },
8425 { set_ssp, false, MGMT_SETTING_SIZE },
8426 { set_hs, false, MGMT_SETTING_SIZE },
8427 { set_le, false, MGMT_SETTING_SIZE },
8428 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
8429 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
8430 { add_uuid, false, MGMT_ADD_UUID_SIZE },
8431 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
8432 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
8433 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
8434 { disconnect, false, MGMT_DISCONNECT_SIZE },
8435 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
8436 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
8437 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8438 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
8439 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
8440 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8441 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
8442 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
8443 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8444 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
8445 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8446 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8447 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
8448 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8449 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
8450 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
8451 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
8452 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
8453 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
8454 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
8455 { set_advertising, false, MGMT_SETTING_SIZE },
8456 { set_bredr, false, MGMT_SETTING_SIZE },
8457 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
8458 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
8459 { set_secure_conn, false, MGMT_SETTING_SIZE },
8460 { set_debug_keys, false, MGMT_SETTING_SIZE },
8461 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
8462 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
8463 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
8464 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
8465 { add_device, false, MGMT_ADD_DEVICE_SIZE },
8466 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
8467 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
8468 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
8469 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
8470 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
8471 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
8472 { start_service_discovery,true, MGMT_START_SERVICE_DISCOVERY_SIZE },
8475 #ifdef CONFIG_TIZEN_WIP
8476 static const struct mgmt_handler tizen_mgmt_handlers[] = {
8477 { NULL }, /* 0x0000 (no command) */
8478 { set_advertising_params, false, MGMT_SET_ADVERTISING_PARAMS_SIZE },
8479 { set_advertising_data, true, MGMT_SET_ADV_MIN_APP_DATA_SIZE },
8480 { set_scan_rsp_data, true, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE },
8481 { add_white_list, false, MGMT_ADD_DEV_WHITE_LIST_SIZE },
8482 { remove_from_white_list, false, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
8483 { clear_white_list, false, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
8484 { set_enable_rssi, false, MGMT_SET_RSSI_ENABLE_SIZE },
8485 { get_raw_rssi, false, MGMT_GET_RAW_RSSI_SIZE },
8486 { set_disable_threshold, false, MGMT_SET_RSSI_DISABLE_SIZE },
8487 { start_le_discovery, false, MGMT_START_LE_DISCOVERY_SIZE },
8488 { stop_le_discovery, false, MGMT_STOP_LE_DISCOVERY_SIZE },
8489 { disable_le_auto_connect, false, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
8490 { le_conn_update, false, MGMT_LE_CONN_UPDATE_SIZE},
8491 { set_manufacturer_data, false, MGMT_SET_MANUFACTURER_DATA_SIZE},
8492 { le_set_scan_params, false, MGMT_LE_SET_SCAN_PARAMS_SIZE },
8493 { set_voice_setting, false, MGMT_SET_VOICE_SETTING_SIZE},
8494 { get_adv_tx_power, false, MGMT_GET_ADV_TX_POWER_SIZE},
8495 { enable_bt_6lowpan, false, MGMT_ENABLE_BT_6LOWPAN_SIZE },
8496 { connect_bt_6lowpan, false, MGMT_CONNECT_6LOWPAN_SIZE },
8497 { disconnect_bt_6lowpan, false, MGMT_DISCONNECT_6LOWPAN_SIZE },
8498 { read_maximum_le_data_length, false, MGMT_LE_READ_MAXIMUM_DATA_LENGTH_SIZE },
8499 { write_host_suggested_le_data_length, false,
8500 MGMT_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH_SIZE },
8501 { read_host_suggested_data_length, false,
8502 MGMT_LE_READ_HOST_SUGGESTED_DATA_LENGTH_SIZE },
8503 { set_le_data_length_params, false,
8504 MGMT_LE_SET_DATA_LENGTH_SIZE },
8505 { set_dev_rpa_res_support, false, MGMT_OP_SET_DEV_RPA_RES_SUPPORT_SIZE },
8506 { set_irk, false, MGMT_SET_IRK_SIZE },
8510 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
8514 struct mgmt_hdr *hdr;
8515 u16 opcode, index, len;
8516 struct hci_dev *hdev = NULL;
8517 const struct mgmt_handler *handler;
8520 BT_DBG("got %zu bytes", msglen);
8522 if (msglen < sizeof(*hdr))
8525 buf = kmalloc(msglen, GFP_KERNEL);
8529 #ifdef CONFIG_TIZEN_WIP
8530 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
8532 if (memcpy_from_msg(buf, msg, msglen)) {
8539 opcode = __le16_to_cpu(hdr->opcode);
8540 index = __le16_to_cpu(hdr->index);
8541 len = __le16_to_cpu(hdr->len);
8543 if (len != msglen - sizeof(*hdr)) {
8548 if (index != MGMT_INDEX_NONE) {
8549 hdev = hci_dev_get(index);
8551 err = cmd_status(sk, index, opcode,
8552 MGMT_STATUS_INVALID_INDEX);
8556 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
8557 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
8558 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
8559 err = cmd_status(sk, index, opcode,
8560 MGMT_STATUS_INVALID_INDEX);
8564 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
8565 opcode != MGMT_OP_READ_CONFIG_INFO &&
8566 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
8567 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
8568 err = cmd_status(sk, index, opcode,
8569 MGMT_STATUS_INVALID_INDEX);
8574 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
8575 mgmt_handlers[opcode].func == NULL) {
8576 #ifdef CONFIG_TIZEN_WIP
8577 u16 tizen_opcode = opcode - TIZEN_OP_CODE_BASE;
8579 if (tizen_opcode > 0 &&
8580 tizen_opcode < ARRAY_SIZE(tizen_mgmt_handlers) &&
8581 tizen_mgmt_handlers[tizen_opcode].func) {
8583 handler = &tizen_mgmt_handlers[tizen_opcode];
8587 BT_DBG("Unknown op %u", opcode);
8588 err = cmd_status(sk, index, opcode,
8589 MGMT_STATUS_UNKNOWN_COMMAND);
8593 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
8594 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
8595 err = cmd_status(sk, index, opcode,
8596 MGMT_STATUS_INVALID_INDEX);
8600 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
8601 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
8602 err = cmd_status(sk, index, opcode,
8603 MGMT_STATUS_INVALID_INDEX);
8607 handler = &mgmt_handlers[opcode];
8609 #ifdef CONFIG_TIZEN_WIP
8612 if ((handler->var_len && len < handler->data_len) ||
8613 (!handler->var_len && len != handler->data_len)) {
8614 err = cmd_status(sk, index, opcode,
8615 MGMT_STATUS_INVALID_PARAMS);
8620 mgmt_init_hdev(sk, hdev);
8622 cp = buf + sizeof(*hdr);
8624 err = handler->func(sk, hdev, cp, len);
8638 void mgmt_index_added(struct hci_dev *hdev)
8640 if (hdev->dev_type != HCI_BREDR)
8643 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8646 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
8647 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
8649 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
8652 void mgmt_index_removed(struct hci_dev *hdev)
8654 u8 status = MGMT_STATUS_INVALID_INDEX;
8656 if (hdev->dev_type != HCI_BREDR)
8659 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8662 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8664 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
8665 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
8667 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
8670 /* This function requires the caller holds hdev->lock */
8671 static void restart_le_actions(struct hci_request *req)
8673 struct hci_dev *hdev = req->hdev;
8674 struct hci_conn_params *p;
8676 list_for_each_entry(p, &hdev->le_conn_params, list) {
8677 /* Needed for AUTO_OFF case where might not "really"
8678 * have been powered off.
8680 list_del_init(&p->action);
8682 switch (p->auto_connect) {
8683 case HCI_AUTO_CONN_DIRECT:
8684 case HCI_AUTO_CONN_ALWAYS:
8685 list_add(&p->action, &hdev->pend_le_conns);
8687 case HCI_AUTO_CONN_REPORT:
8688 list_add(&p->action, &hdev->pend_le_reports);
8695 __hci_update_background_scan(req);
8698 static void powered_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8700 struct cmd_lookup match = { NULL, hdev };
8702 BT_DBG("status 0x%02x", status);
8705 /* Register the available SMP channels (BR/EDR and LE) only
8706 * when successfully powering on the controller. This late
8707 * registration is required so that LE SMP can clearly
8708 * decide if the public address or static address is used.
8715 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8717 new_settings(hdev, match.sk);
8719 hci_dev_unlock(hdev);
8725 static int powered_update_hci(struct hci_dev *hdev)
8727 struct hci_request req;
8730 hci_req_init(&req, hdev);
8732 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
8733 !lmp_host_ssp_capable(hdev)) {
8736 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
8738 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
8741 hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT,
8742 sizeof(support), &support);
8746 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
8747 lmp_bredr_capable(hdev)) {
8748 struct hci_cp_write_le_host_supported cp;
8753 /* Check first if we already have the right
8754 * host state (host features set)
8756 if (cp.le != lmp_host_le_capable(hdev) ||
8757 cp.simul != lmp_host_le_br_capable(hdev))
8758 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
8762 if (lmp_le_capable(hdev)) {
8763 /* Make sure the controller has a good default for
8764 * advertising data. This also applies to the case
8765 * where BR/EDR was toggled during the AUTO_OFF phase.
8767 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
8768 update_adv_data(&req);
8769 update_scan_rsp_data(&req);
8772 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
8773 enable_advertising(&req);
8775 restart_le_actions(&req);
8778 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
8779 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
8780 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
8781 sizeof(link_sec), &link_sec);
8783 if (lmp_bredr_capable(hdev)) {
8784 write_fast_connectable(&req, false);
8785 __hci_update_page_scan(&req);
8791 return hci_req_run(&req, powered_complete);
8794 int mgmt_powered(struct hci_dev *hdev, u8 powered)
8796 struct cmd_lookup match = { NULL, hdev };
8797 u8 status, zero_cod[] = { 0, 0, 0 };
8800 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
8804 if (powered_update_hci(hdev) == 0)
8807 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
8812 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8814 /* If the power off is because of hdev unregistration let
8815 * use the appropriate INVALID_INDEX status. Otherwise use
8816 * NOT_POWERED. We cover both scenarios here since later in
8817 * mgmt_index_removed() any hci_conn callbacks will have already
8818 * been triggered, potentially causing misleading DISCONNECTED
8821 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
8822 status = MGMT_STATUS_INVALID_INDEX;
8824 status = MGMT_STATUS_NOT_POWERED;
8826 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8828 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
8829 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8830 zero_cod, sizeof(zero_cod), NULL);
8833 err = new_settings(hdev, match.sk);
8841 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8843 struct pending_cmd *cmd;
8846 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
8850 if (err == -ERFKILL)
8851 status = MGMT_STATUS_RFKILLED;
8853 status = MGMT_STATUS_FAILED;
8855 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8857 mgmt_pending_remove(cmd);
/* Handle expiry of the discoverable timeout: clear both discoverable
 * flags, restore page-scan-only on BR/EDR, refresh LE advertising data,
 * zero the stored timeout and emit a New Settings event.
 * NOTE(review): lines are elided here (e.g. the lock acquisition that
 * pairs with the visible hci_dev_unlock()); comments cover visible code.
 */
8860 void mgmt_discoverable_timeout(struct hci_dev *hdev)
8862 struct hci_request req;
8866 /* When discoverable timeout triggers, then just make sure
8867 * the limited discoverable flag is cleared. Even in the case
8868 * of a timeout triggered from general discoverable, it is
8869 * safe to unconditionally clear the flag.
8871 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
8872 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
8874 hci_req_init(&req, hdev);
8875 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Leave page scan on but drop inquiry scan (no longer discoverable). */
8876 u8 scan = SCAN_PAGE;
8877 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
8878 sizeof(scan), &scan);
8881 update_adv_data(&req);
8882 hci_req_run(&req, NULL);
8884 hdev->discov_timeout = 0;
8886 new_settings(hdev, NULL);
8888 hci_dev_unlock(hdev);
/* Emit a New Link Key event for a BR/EDR link key, letting userspace
 * decide whether to persist it (store_hint = @persistent).
 */
8891 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8894 struct mgmt_ev_new_link_key ev;
8896 memset(&ev, 0, sizeof(ev));
8898 ev.store_hint = persistent;
8899 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8900 ev.key.addr.type = BDADDR_BREDR;
8901 ev.key.type = key->type;
8902 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8903 ev.key.pin_len = key->pin_len;
8905 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated flag to the
 * corresponding MGMT_LTK_* constant exposed over the mgmt interface.
 * NOTE(review): the case labels preceding the first two if-blocks are
 * elided in this chunk; the visible fall-back returns UNAUTHENTICATED.
 */
8908 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8910 switch (ltk->type) {
8913 if (ltk->authenticated)
8914 return MGMT_LTK_AUTHENTICATED;
8915 return MGMT_LTK_UNAUTHENTICATED;
8917 if (ltk->authenticated)
8918 return MGMT_LTK_P256_AUTH;
8919 return MGMT_LTK_P256_UNAUTH;
8920 case SMP_LTK_P256_DEBUG:
8921 return MGMT_LTK_P256_DEBUG;
8924 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. The store hint is suppressed (0x00)
 * for non-identity LE random addresses, since such keys cannot be
 * re-associated with the device after its address changes.
 */
8927 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8929 struct mgmt_ev_new_long_term_key ev;
8931 memset(&ev, 0, sizeof(ev));
8933 /* Devices using resolvable or non-resolvable random addresses
8934 * without providing an indentity resolving key don't require
8935 * to store long term keys. Their addresses will change the
8938 * Only when a remote device provides an identity address
8939 * make sure the long term key is stored. If the remote
8940 * identity is known, the long term keys are internally
8941 * mapped to the identity address. So allow static random
8942 * and public addresses here.
/* Top two bits != 11 means the random address is not static (i.e. it is
 * resolvable/non-resolvable private), so do not ask userspace to store. */
8944 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
8945 (key->bdaddr.b[5] & 0xc0) != 0xc0)
8946 ev.store_hint = 0x00;
8948 ev.store_hint = persistent;
8950 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8951 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
8952 ev.key.type = mgmt_ltk_type(key);
8953 ev.key.enc_size = key->enc_size;
8954 ev.key.ediv = key->ediv;
8955 ev.key.rand = key->rand;
8957 if (key->type == SMP_LTK)
8960 memcpy(ev.key.val, key->val, sizeof(key->val));
8962 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event for an identity resolving key. The store hint is
 * set only when the device actually uses an RPA (irk->rpa != BDADDR_ANY);
 * keys for devices on public/static addresses need not be stored.
 */
8965 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
8967 struct mgmt_ev_new_irk ev;
8969 memset(&ev, 0, sizeof(ev));
8971 /* For identity resolving keys from devices that are already
8972 * using a public address or static random address, do not
8973 * ask for storing this key. The identity resolving key really
8974 * is only mandatory for devices using resovlable random
8977 * Storing all identity resolving keys has the downside that
8978 * they will be also loaded on next boot of they system. More
8979 * identity resolving keys, means more time during scanning is
8980 * needed to actually resolve these addresses.
8982 if (bacmp(&irk->rpa, BDADDR_ANY))
8983 ev.store_hint = 0x01;
8985 ev.store_hint = 0x00;
8987 bacpy(&ev.rpa, &irk->rpa);
8988 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
8989 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
8990 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
8992 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event. As with LTKs, the
 * store hint is forced to 0x00 for non-identity LE random addresses.
 */
8995 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
8998 struct mgmt_ev_new_csrk ev;
9000 memset(&ev, 0, sizeof(ev));
9002 /* Devices using resolvable or non-resolvable random addresses
9003 * without providing an indentity resolving key don't require
9004 * to store signature resolving keys. Their addresses will change
9005 * the next time around.
9007 * Only when a remote device provides an identity address
9008 * make sure the signature resolving key is stored. So allow
9009 * static random and public addresses here.
9011 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9012 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9013 ev.store_hint = 0x00;
9015 ev.store_hint = persistent;
9017 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9018 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9019 ev.key.master = csrk->master;
9020 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9022 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an LE device, converting all
 * 16-bit parameters to little endian. Silently ignored for non-identity
 * addresses (the parameters could not be re-applied later).
 */
9025 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9026 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9027 u16 max_interval, u16 latency, u16 timeout)
9029 struct mgmt_ev_new_conn_param ev;
9031 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9034 memset(&ev, 0, sizeof(ev));
9035 bacpy(&ev.addr.bdaddr, bdaddr);
9036 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9037 ev.store_hint = store_hint;
9038 ev.min_interval = cpu_to_le16(min_interval);
9039 ev.max_interval = cpu_to_le16(max_interval);
9040 ev.latency = cpu_to_le16(latency);
9041 ev.timeout = cpu_to_le16(timeout);
9043 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, then @data) at offset
 * @eir_len in @eir and return the new length. Callers must ensure the
 * buffer has room for data_len + 2 more bytes; no bounds check here.
 */
9046 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
9049 eir[eir_len++] = sizeof(type) + data_len;
9050 eir[eir_len++] = type;
9051 memcpy(&eir[eir_len], data, data_len);
9052 eir_len += data_len;
/* Emit a Device Connected event. For LE connections that carried
 * advertising data, that data is forwarded verbatim as the EIR payload;
 * otherwise a BR/EDR-style EIR is built from @name and the remote class
 * of device (when non-zero).
 */
9057 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9058 u32 flags, u8 *name, u8 name_len)
9061 struct mgmt_ev_device_connected *ev = (void *) buf;
9064 bacpy(&ev->addr.bdaddr, &conn->dst);
9065 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9067 ev->flags = __cpu_to_le32(flags);
9069 /* We must ensure that the EIR Data fields are ordered and
9070 * unique. Keep it simple for now and avoid the problem by not
9071 * adding any BR/EDR data to the LE adv.
9073 if (conn->le_adv_data_len > 0) {
9074 memcpy(&ev->eir[eir_len],
9075 conn->le_adv_data, conn->le_adv_data_len);
9076 eir_len = conn->le_adv_data_len;
9079 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include the class of device when it is not all-zero. */
9082 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
9083 eir_len = eir_append_data(ev->eir, eir_len,
9085 conn->dev_class, 3);
9088 ev->eir_len = cpu_to_le16(eir_len);
9090 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
9091 sizeof(*ev) + eir_len, NULL);
9094 #ifdef CONFIG_TIZEN_WIP
9095 /* BEGIN TIZEN_Bluetooth :: name update changes */
/* Tizen-specific: emit a Device Name Update event carrying the remote
 * BR/EDR device's complete name as a single EIR field.
 */
9096 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
9100 struct mgmt_ev_device_name_update *ev = (void *) buf;
9106 bacpy(&ev->addr.bdaddr, bdaddr);
9107 ev->addr.type = BDADDR_BREDR;
9109 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9112 ev->eir_len = cpu_to_le16(eir_len);
9114 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
9115 sizeof(*ev) + eir_len, NULL);
9117 /* END TIZEN_Bluetooth :: name update changes */
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT command
 * with status 0 and remove it. @data is a struct sock ** used to hand
 * the command's socket back to the caller.
 */
9120 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
9122 struct sock **sk = data;
9124 cmd->cmd_complete(cmd, 0);
9129 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: emit Device Unpaired for the address
 * in the pending UNPAIR_DEVICE command, then complete and remove it.
 */
9132 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
9134 struct hci_dev *hdev = data;
9135 struct mgmt_cp_unpair_device *cp = cmd->param;
9137 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9139 cmd->cmd_complete(cmd, 0);
9140 mgmt_pending_remove(cmd);
/* Return whether a SET_POWERED(off) command is currently pending, i.e.
 * the controller is in the process of powering down.
 * NOTE(review): the lines checking cp->val and the returns are elided
 * in this chunk; only the lookup is visible.
 */
9143 bool mgmt_powering_down(struct hci_dev *hdev)
9145 struct pending_cmd *cmd;
9146 struct mgmt_mode *cp;
9148 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit a Device Disconnected event (ACL/LE links only) and complete any
 * pending DISCONNECT / UNPAIR_DEVICE commands. Also accelerates a
 * pending power-off once this is the last remaining connection.
 */
9159 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9160 u8 link_type, u8 addr_type, u8 reason,
9161 bool mgmt_connected)
9163 struct mgmt_ev_device_disconnected ev;
9164 struct sock *sk = NULL;
9166 /* The connection is still in hci_conn_hash so test for 1
9167 * instead of 0 to know if this is the last one.
9169 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9170 cancel_delayed_work(&hdev->power_off);
9171 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9174 if (!mgmt_connected)
/* Only ACL and LE links are reported over the mgmt interface. */
9177 if (link_type != ACL_LINK && link_type != LE_LINK)
9180 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9182 bacpy(&ev.addr.bdaddr, bdaddr);
9183 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9186 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9191 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Report that an HCI disconnect attempt failed. Completes pending
 * UNPAIR_DEVICE commands, then — if a pending DISCONNECT command
 * matches @bdaddr/@bdaddr_type — completes it with the mapped status.
 */
9195 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9196 u8 link_type, u8 addr_type, u8 status)
9198 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9199 struct mgmt_cp_disconnect *cp;
9200 struct pending_cmd *cmd;
9202 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9205 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Bail out unless the pending command targets this exact address. */
9211 if (bacmp(bdaddr, &cp->addr.bdaddr))
9214 if (cp->addr.type != bdaddr_type)
9217 cmd->cmd_complete(cmd, mgmt_status(status));
9218 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event with the mapped mgmt status. Like the
 * disconnect path, accelerates a pending power-off when this was the
 * last tracked connection.
 */
9221 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9222 u8 addr_type, u8 status)
9224 struct mgmt_ev_connect_failed ev;
9226 /* The connection is still in hci_conn_hash so test for 1
9227 * instead of 0 to know if this is the last one.
9229 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9230 cancel_delayed_work(&hdev->power_off);
9231 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9234 bacpy(&ev.addr.bdaddr, bdaddr);
9235 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9236 ev.status = mgmt_status(status);
9238 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9240 #ifdef CONFIG_TIZEN_WIP
/* Tizen-specific: forward a controller hardware error code to
 * userspace as a Hardware Error event.
 */
9241 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
9243 struct mgmt_ev_hardware_error ev;
9245 ev.error_code = err_code;
9246 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
/* Tizen-specific: signal an HCI TX timeout with a payload-less event. */
9249 void mgmt_tx_timeout_error(struct hci_dev *hdev)
9251 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
/* Emit a PIN Code Request event for a BR/EDR peer. The @secure flag's
 * assignment into the event is elided in this chunk.
 */
9255 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9257 struct mgmt_ev_pin_code_request ev;
9259 bacpy(&ev.addr.bdaddr, bdaddr);
9260 ev.addr.type = BDADDR_BREDR;
9263 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the mapped status. */
9266 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9269 struct pending_cmd *cmd;
9271 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9275 cmd->cmd_complete(cmd, mgmt_status(status));
9276 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the mapped status. */
9279 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9282 struct pending_cmd *cmd;
9284 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9288 cmd->cmd_complete(cmd, mgmt_status(status));
9289 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a pairing value (numeric comparison):
 * emits a User Confirm Request event with @value in little endian.
 */
9292 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9293 u8 link_type, u8 addr_type, u32 value,
9296 struct mgmt_ev_user_confirm_request ev;
9298 BT_DBG("%s", hdev->name);
9300 bacpy(&ev.addr.bdaddr, bdaddr);
9301 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9302 ev.confirm_hint = confirm_hint;
9303 ev.value = cpu_to_le32(value);
9305 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to supply a passkey: emits User Passkey Request. */
9309 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9310 u8 link_type, u8 addr_type)
9312 struct mgmt_ev_user_passkey_request ev;
9314 BT_DBG("%s", hdev->name);
9316 bacpy(&ev.addr.bdaddr, bdaddr);
9317 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9319 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared helper for the four user-pairing reply paths below: find the
 * pending command for @opcode, complete it with the mapped status and
 * remove it.
 */
9323 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9324 u8 link_type, u8 addr_type, u8 status,
9327 struct pending_cmd *cmd;
9329 cmd = mgmt_pending_find(opcode, hdev);
9333 cmd->cmd_complete(cmd, mgmt_status(status));
9334 mgmt_pending_remove(cmd);
/* Thin wrappers: complete the corresponding pending user-pairing reply
 * command via user_pairing_resp_complete(), one per mgmt opcode.
 */
9339 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9340 u8 link_type, u8 addr_type, u8 status)
9342 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9343 status, MGMT_OP_USER_CONFIRM_REPLY);
9346 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9347 u8 link_type, u8 addr_type, u8 status)
9349 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9351 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9354 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9355 u8 link_type, u8 addr_type, u8 status)
9357 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9358 status, MGMT_OP_USER_PASSKEY_REPLY);
9361 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9362 u8 link_type, u8 addr_type, u8 status)
9364 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9366 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event so userspace can display @passkey;
 * @entered reports how many digits the remote side has typed so far.
 */
9369 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9370 u8 link_type, u8 addr_type, u32 passkey,
9373 struct mgmt_ev_passkey_notify ev;
9375 BT_DBG("%s", hdev->name);
9377 bacpy(&ev.addr.bdaddr, bdaddr);
9378 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9379 ev.passkey = __cpu_to_le32(passkey);
9380 ev.entered = entered;
9382 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event for @conn. If a pairing command
 * is pending for this connection, its socket is used for the event and
 * the command is completed with the mapped status and removed.
 */
9385 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9387 struct mgmt_ev_auth_failed ev;
9388 struct pending_cmd *cmd;
9389 u8 status = mgmt_status(hci_status);
9391 bacpy(&ev.addr.bdaddr, &conn->dst);
9392 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9395 cmd = find_pairing(conn);
9397 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9398 cmd ? cmd->sk : NULL);
9401 cmd->cmd_complete(cmd, status);
9402 mgmt_pending_remove(cmd);
/* Handle completion of Write Auth Enable. On failure, fail all pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY with
 * the controller's HCI_AUTH flag, answer pending commands and emit New
 * Settings when the flag actually changed.
 */
9406 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9408 struct cmd_lookup match = { NULL, hdev };
9412 u8 mgmt_err = mgmt_status(status);
9413 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9414 cmd_status_rsp, &mgmt_err);
9418 if (test_bit(HCI_AUTH, &hdev->flags))
9419 changed = !test_and_set_bit(HCI_LINK_SECURITY,
9422 changed = test_and_clear_bit(HCI_LINK_SECURITY,
9425 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9429 new_settings(hdev, match.sk);
/* Queue a Write EIR command that clears both the cached hdev->eir and
 * the controller's extended inquiry response. No-op on controllers
 * without extended inquiry support.
 */
9435 static void clear_eir(struct hci_request *req)
9437 struct hci_dev *hdev = req->hdev;
9438 struct hci_cp_write_eir cp;
9440 if (!lmp_ext_inq_capable(hdev))
9443 memset(hdev->eir, 0, sizeof(hdev->eir));
9445 memset(&cp, 0, sizeof(cp));
9447 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Handle completion of Write SSP Mode. On failure, roll back the
 * HCI_SSP_ENABLED/HCI_HS_ENABLED flags and fail pending SET_SSP
 * commands. On success, sync the flags with @enable, answer pending
 * commands, emit New Settings on change, and re-sync SSP debug mode.
 */
9450 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
9452 struct cmd_lookup match = { NULL, hdev };
9453 struct hci_request req;
9454 bool changed = false;
9457 u8 mgmt_err = mgmt_status(status);
/* Failed enable: undo the optimistic flag set; HS depends on SSP. */
9459 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
9460 &hdev->dev_flags)) {
9461 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
9462 new_settings(hdev, NULL);
9465 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
9471 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
9473 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
9475 changed = test_and_clear_bit(HCI_HS_ENABLED,
9478 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
9481 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
9484 new_settings(hdev, match.sk);
9489 hci_req_init(&req, hdev);
9491 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
9492 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
9493 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
9494 sizeof(enable), &enable);
9500 hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup match, taking a reference on it.
 */
9503 static void sk_lookup(struct pending_cmd *cmd, void *data)
9505 struct cmd_lookup *match = data;
9507 if (match->sk == NULL) {
9508 match->sk = cmd->sk;
9509 sock_hold(match->sk);
/* Handle completion of a class-of-device update: find the socket of any
 * pending SET_DEV_CLASS/ADD_UUID/REMOVE_UUID command (so it can be
 * skipped for the event) and emit Class Of Device Changed.
 */
9513 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9516 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9518 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9519 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9520 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9523 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Handle completion of a local-name change: cache the new name, and emit
 * Local Name Changed — unless the write happened as part of powering on,
 * in which case no event is sent.
 */
9530 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9532 struct mgmt_cp_set_local_name ev;
9533 struct pending_cmd *cmd;
9538 memset(&ev, 0, sizeof(ev));
9539 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9540 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9542 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9544 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9546 /* If this is a HCI command related to powering on the
9547 * HCI dev don't send any mgmt signals.
9549 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
9553 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9554 cmd ? cmd->sk : NULL);
/* Handle completion of Read Local OOB Data. On error, returns a status
 * to the pending command. On success, replies with either the extended
 * response (P-192 + P-256 hashes, when BR/EDR secure connections is
 * enabled and 256-bit data is present) or the legacy P-192-only one.
 */
9557 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
9558 u8 *rand192, u8 *hash256, u8 *rand256,
9561 struct pending_cmd *cmd;
9563 BT_DBG("%s status %u", hdev->name, status);
9565 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
9570 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
9571 mgmt_status(status));
9573 if (bredr_sc_enabled(hdev) && hash256 && rand256) {
9574 struct mgmt_rp_read_local_oob_ext_data rp;
9576 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
9577 memcpy(rp.rand192, rand192, sizeof(rp.rand192));
9579 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
9580 memcpy(rp.rand256, rand256, sizeof(rp.rand256));
9582 cmd_complete(cmd->sk, hdev->id,
9583 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
9586 struct mgmt_rp_read_local_oob_data rp;
9588 memcpy(rp.hash, hash192, sizeof(rp.hash));
9589 memcpy(rp.rand, rand192, sizeof(rp.rand));
9591 cmd_complete(cmd->sk, hdev->id,
9592 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
9597 mgmt_pending_remove(cmd);
/* Return true if the 128-bit @uuid appears in the @uuids filter list
 * (linear scan over @uuid_count entries).
 */
9600 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9604 for (i = 0; i < uuid_count; i++) {
9605 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data in @eir and return true when any UUID
 * field (16-, 32- or 128-bit, complete or incomplete list) contains a
 * UUID from the @uuids filter. 16/32-bit UUIDs are expanded to 128 bits
 * using the Bluetooth base UUID before comparison.
 */
9612 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9616 while (parsed < eir_len) {
9617 u8 field_len = eir[0];
/* Stop on a truncated field rather than read past the buffer. */
9624 if (eir_len - parsed < field_len + 1)
9628 case EIR_UUID16_ALL:
9629 case EIR_UUID16_SOME:
/* 2 bytes per UUID; bytes land in positions 12-13 of the base UUID. */
9630 for (i = 0; i + 3 <= field_len; i += 2) {
9631 memcpy(uuid, bluetooth_base_uuid, 16);
9632 uuid[13] = eir[i + 3];
9633 uuid[12] = eir[i + 2];
9634 if (has_uuid(uuid, uuid_count, uuids))
9638 case EIR_UUID32_ALL:
9639 case EIR_UUID32_SOME:
/* 4 bytes per UUID; bytes land in positions 12-15 of the base UUID. */
9640 for (i = 0; i + 5 <= field_len; i += 4) {
9641 memcpy(uuid, bluetooth_base_uuid, 16);
9642 uuid[15] = eir[i + 5];
9643 uuid[14] = eir[i + 4];
9644 uuid[13] = eir[i + 3];
9645 uuid[12] = eir[i + 2];
9646 if (has_uuid(uuid, uuid_count, uuids))
9650 case EIR_UUID128_ALL:
9651 case EIR_UUID128_SOME:
/* Full 128-bit UUIDs are compared directly. */
9652 for (i = 0; i + 17 <= field_len; i += 16) {
9653 memcpy(uuid, eir + i + 2, 16);
9654 if (has_uuid(uuid, uuid_count, uuids))
9660 parsed += field_len + 1;
9661 eir += field_len + 1;
/* Emit a Device Found event for a discovery result, applying the active
 * service-discovery filters: RSSI threshold, and — when a UUID list is
 * configured — dropping results whose EIR and scan response contain no
 * matching UUID. The class of device is appended as an extra EIR field
 * when not already present.
 */
9667 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9668 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9669 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9672 struct mgmt_ev_device_found *ev = (void *) buf;
9676 /* Don't send events for a non-kernel initiated discovery. With
9677 * LE one exception is if we have pend_le_reports > 0 in which
9678 * case we're doing passive scanning and want these events.
9680 if (!hci_discovery_active(hdev)) {
9681 if (link_type == ACL_LINK)
9683 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
9687 /* When using service discovery with a RSSI threshold, then check
9688 * if such a RSSI threshold is specified. If a RSSI threshold has
9689 * been specified, then all results with a RSSI smaller than the
9690 * RSSI threshold will be dropped.
9692 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9693 * the results are also dropped.
9695 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9696 (rssi < hdev->discovery.rssi || rssi == HCI_RSSI_INVALID))
9699 /* Make sure that the buffer is big enough. The 5 extra bytes
9700 * are for the potential CoD field.
9702 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9705 memset(buf, 0, sizeof(buf));
9707 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9708 * RSSI value was reported as 0 when not available. This behavior
9709 * is kept when using device discovery. This is required for full
9710 * backwards compatibility with the API.
9712 * However when using service discovery, the value 127 will be
9713 * returned when the RSSI is not available.
9715 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9716 link_type == ACL_LINK)
9719 bacpy(&ev->addr.bdaddr, bdaddr);
9720 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9722 ev->flags = cpu_to_le32(flags);
9725 /* When using service discovery and a list of UUID is
9726 * provided, results with no matching UUID should be
9727 * dropped. In case there is a match the result is
9728 * kept and checking possible scan response data
9731 if (hdev->discovery.uuid_count > 0)
9732 match = eir_has_uuids(eir, eir_len,
9733 hdev->discovery.uuid_count,
9734 hdev->discovery.uuids);
/* No EIR match and no scan response to check later: drop the result. */
9738 if (!match && !scan_rsp_len)
9741 /* Copy EIR or advertising data into event */
9742 memcpy(ev->eir, eir, eir_len);
9744 /* When using service discovery and a list of UUID is
9745 * provided, results with empty EIR or advertising data
9746 * should be dropped since they do not match any UUID.
9748 if (hdev->discovery.uuid_count > 0 && !scan_rsp_len)
/* Append class of device unless the EIR already carries one. */
9754 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
9755 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9758 if (scan_rsp_len > 0) {
9759 /* When using service discovery and a list of UUID is
9760 * provided, results with no matching UUID should be
9761 * dropped if there is no previous match from the
9764 if (hdev->discovery.uuid_count > 0) {
9765 if (!match && !eir_has_uuids(scan_rsp, scan_rsp_len,
9766 hdev->discovery.uuid_count,
9767 hdev->discovery.uuids))
9771 /* Append scan response data to event */
9772 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9774 /* When using service discovery and a list of UUID is
9775 * provided, results with empty scan response and no
9776 * previous matched advertising data should be dropped.
9778 if (hdev->discovery.uuid_count > 0 && !match)
9782 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9783 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9785 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9788 #ifdef CONFIG_TIZEN_WIP /* TIZEN_Bluetooth :: Pass adv type */
/* Tizen-specific variant of mgmt_device_found() for LE results: also
 * carries the raw advertising PDU type (@adv_type), reports during
 * either classic or LE discovery, and applies no UUID/RSSI filtering.
 */
9789 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9790 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9791 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
9794 struct mgmt_ev_le_device_found *ev = (void *) buf;
9797 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
9800 /* Make sure that the buffer is big enough. The 5 extra bytes
9801 * are for the potential CoD field.
9803 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9806 memset(buf, 0, sizeof(buf));
9808 bacpy(&ev->addr.bdaddr, bdaddr);
9809 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9811 ev->flags = cpu_to_le32(flags);
9812 ev->adv_type = adv_type;
9815 memcpy(ev->eir, eir, eir_len);
/* Append class of device unless the EIR already carries one. */
9817 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
9818 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9821 if (scan_rsp_len > 0)
9822 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9824 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9825 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9827 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as a Device Found event whose EIR
 * contains a single complete-name field.
 */
9831 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9832 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9834 struct mgmt_ev_device_found *ev;
9835 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9838 ev = (struct mgmt_ev_device_found *) buf;
9840 memset(buf, 0, sizeof(buf));
9842 bacpy(&ev->addr.bdaddr, bdaddr);
9843 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9846 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9849 ev->eir_len = cpu_to_le16(eir_len);
9851 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit a Discovering event announcing start/stop of device discovery,
 * carrying the current discovery type.
 */
9854 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9856 struct mgmt_ev_discovering ev;
9858 BT_DBG("%s discovering %u", hdev->name, discovering);
9860 memset(&ev, 0, sizeof(ev));
9861 ev.type = hdev->discovery.type;
9862 ev.discovering = discovering;
9864 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9867 #ifdef CONFIG_TIZEN_WIP
9868 /* BEGIN TIZEN_Bluetooth :: Seperate LE discovery */
/* Tizen-specific: announce LE discovery state changes. Completes any
 * pending START/STOP_LE_DISCOVERY command with the LE discovery type,
 * then emits a Discovering event with the LE discovery type.
 */
9869 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
9871 struct mgmt_ev_discovering ev;
9872 struct pending_cmd *cmd;
9874 BT_DBG("%s le discovering %u", hdev->name, discovering);
9877 cmd = mgmt_pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
9879 cmd = mgmt_pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
9882 u8 type = hdev->le_discovery.type;
9884 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
9886 mgmt_pending_remove(cmd);
9889 memset(&ev, 0, sizeof(ev));
9890 ev.type = hdev->le_discovery.type;
9891 ev.discovering = discovering;
9893 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9895 /* END TIZEN_Bluetooth */
/* hci_req_run() completion callback for re-enabling advertising:
 * debug-log only, the status is not acted upon.
 */
9898 static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
9900 BT_DBG("%s status %u", hdev->name, status);
/* Re-enable LE advertising (e.g. after it was implicitly stopped) if
 * the HCI_ADVERTISING setting is still on; otherwise do nothing.
 */
9903 void mgmt_reenable_advertising(struct hci_dev *hdev)
9905 struct hci_request req;
9907 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
9910 hci_req_init(&req, hdev);
9911 enable_advertising(&req);
9912 hci_req_run(&req, adv_enable_complete);