2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/mgmt_tizen.h>
#include <net/bluetooth/sco.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 14
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
115 static const u16 mgmt_events[] = {
116 MGMT_EV_CONTROLLER_ERROR,
118 MGMT_EV_INDEX_REMOVED,
119 MGMT_EV_NEW_SETTINGS,
120 MGMT_EV_CLASS_OF_DEV_CHANGED,
121 MGMT_EV_LOCAL_NAME_CHANGED,
122 MGMT_EV_NEW_LINK_KEY,
123 MGMT_EV_NEW_LONG_TERM_KEY,
124 MGMT_EV_DEVICE_CONNECTED,
125 MGMT_EV_DEVICE_DISCONNECTED,
126 MGMT_EV_CONNECT_FAILED,
127 MGMT_EV_PIN_CODE_REQUEST,
128 MGMT_EV_USER_CONFIRM_REQUEST,
129 MGMT_EV_USER_PASSKEY_REQUEST,
131 MGMT_EV_DEVICE_FOUND,
133 MGMT_EV_DEVICE_BLOCKED,
134 MGMT_EV_DEVICE_UNBLOCKED,
135 MGMT_EV_DEVICE_UNPAIRED,
136 MGMT_EV_PASSKEY_NOTIFY,
139 MGMT_EV_DEVICE_ADDED,
140 MGMT_EV_DEVICE_REMOVED,
141 MGMT_EV_NEW_CONN_PARAM,
142 MGMT_EV_UNCONF_INDEX_ADDED,
143 MGMT_EV_UNCONF_INDEX_REMOVED,
144 MGMT_EV_NEW_CONFIG_OPTIONS,
145 MGMT_EV_EXT_INDEX_ADDED,
146 MGMT_EV_EXT_INDEX_REMOVED,
147 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
148 MGMT_EV_ADVERTISING_ADDED,
149 MGMT_EV_ADVERTISING_REMOVED,
150 MGMT_EV_EXT_INFO_CHANGED,
153 static const u16 mgmt_untrusted_commands[] = {
154 MGMT_OP_READ_INDEX_LIST,
156 MGMT_OP_READ_UNCONF_INDEX_LIST,
157 MGMT_OP_READ_CONFIG_INFO,
158 MGMT_OP_READ_EXT_INDEX_LIST,
159 MGMT_OP_READ_EXT_INFO,
162 static const u16 mgmt_untrusted_events[] = {
164 MGMT_EV_INDEX_REMOVED,
165 MGMT_EV_NEW_SETTINGS,
166 MGMT_EV_CLASS_OF_DEV_CHANGED,
167 MGMT_EV_LOCAL_NAME_CHANGED,
168 MGMT_EV_UNCONF_INDEX_ADDED,
169 MGMT_EV_UNCONF_INDEX_REMOVED,
170 MGMT_EV_NEW_CONFIG_OPTIONS,
171 MGMT_EV_EXT_INDEX_ADDED,
172 MGMT_EV_EXT_INDEX_REMOVED,
173 MGMT_EV_EXT_INFO_CHANGED,
176 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
178 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
179 "\x00\x00\x00\x00\x00\x00\x00\x00"
181 /* HCI to MGMT error code conversion table */
182 static u8 mgmt_status_table[] = {
184 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
185 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
186 MGMT_STATUS_FAILED, /* Hardware Failure */
187 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
188 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
189 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
190 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
191 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
192 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
193 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
194 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
195 MGMT_STATUS_BUSY, /* Command Disallowed */
196 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
197 MGMT_STATUS_REJECTED, /* Rejected Security */
198 MGMT_STATUS_REJECTED, /* Rejected Personal */
199 MGMT_STATUS_TIMEOUT, /* Host Timeout */
200 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
201 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
202 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
203 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
204 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
205 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
206 MGMT_STATUS_BUSY, /* Repeated Attempts */
207 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
208 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
209 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
210 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
211 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
212 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
213 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
214 MGMT_STATUS_FAILED, /* Unspecified Error */
215 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
216 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
217 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
218 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
219 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
220 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
221 MGMT_STATUS_FAILED, /* Unit Link Key Used */
222 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
223 MGMT_STATUS_TIMEOUT, /* Instant Passed */
224 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
225 MGMT_STATUS_FAILED, /* Transaction Collision */
226 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
227 MGMT_STATUS_REJECTED, /* QoS Rejected */
228 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
229 MGMT_STATUS_REJECTED, /* Insufficient Security */
230 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
231 MGMT_STATUS_BUSY, /* Role Switch Pending */
232 MGMT_STATUS_FAILED, /* Slot Violation */
233 MGMT_STATUS_FAILED, /* Role Switch Failed */
234 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
235 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
236 MGMT_STATUS_BUSY, /* Host Busy Pairing */
237 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
238 MGMT_STATUS_BUSY, /* Controller Busy */
239 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
240 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
241 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
242 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
243 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
246 static u8 mgmt_status(u8 hci_status)
248 if (hci_status < ARRAY_SIZE(mgmt_status_table))
249 return mgmt_status_table[hci_status];
251 return MGMT_STATUS_FAILED;
254 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
257 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
261 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
262 u16 len, int flag, struct sock *skip_sk)
264 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
268 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
269 struct sock *skip_sk)
271 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
272 HCI_SOCK_TRUSTED, skip_sk);
275 static u8 le_addr_type(u8 mgmt_addr_type)
277 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
278 return ADDR_LE_DEV_PUBLIC;
280 return ADDR_LE_DEV_RANDOM;
283 void mgmt_fill_version_info(void *ver)
285 struct mgmt_rp_read_version *rp = ver;
287 rp->version = MGMT_VERSION;
288 rp->revision = cpu_to_le16(MGMT_REVISION);
291 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
294 struct mgmt_rp_read_version rp;
296 BT_DBG("sock %p", sk);
298 mgmt_fill_version_info(&rp);
300 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
304 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
307 struct mgmt_rp_read_commands *rp;
308 u16 num_commands, num_events;
312 BT_DBG("sock %p", sk);
314 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
315 num_commands = ARRAY_SIZE(mgmt_commands);
316 num_events = ARRAY_SIZE(mgmt_events);
318 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
319 num_events = ARRAY_SIZE(mgmt_untrusted_events);
322 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
324 rp = kmalloc(rp_size, GFP_KERNEL);
328 rp->num_commands = cpu_to_le16(num_commands);
329 rp->num_events = cpu_to_le16(num_events);
331 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
332 __le16 *opcode = rp->opcodes;
334 for (i = 0; i < num_commands; i++, opcode++)
335 put_unaligned_le16(mgmt_commands[i], opcode);
337 for (i = 0; i < num_events; i++, opcode++)
338 put_unaligned_le16(mgmt_events[i], opcode);
340 __le16 *opcode = rp->opcodes;
342 for (i = 0; i < num_commands; i++, opcode++)
343 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
345 for (i = 0; i < num_events; i++, opcode++)
346 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
349 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
356 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
359 struct mgmt_rp_read_index_list *rp;
365 BT_DBG("sock %p", sk);
367 read_lock(&hci_dev_list_lock);
370 list_for_each_entry(d, &hci_dev_list, list) {
371 if (d->dev_type == HCI_PRIMARY &&
372 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
376 rp_len = sizeof(*rp) + (2 * count);
377 rp = kmalloc(rp_len, GFP_ATOMIC);
379 read_unlock(&hci_dev_list_lock);
384 list_for_each_entry(d, &hci_dev_list, list) {
385 if (hci_dev_test_flag(d, HCI_SETUP) ||
386 hci_dev_test_flag(d, HCI_CONFIG) ||
387 hci_dev_test_flag(d, HCI_USER_CHANNEL))
390 /* Devices marked as raw-only are neither configured
391 * nor unconfigured controllers.
393 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
396 if (d->dev_type == HCI_PRIMARY &&
397 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
398 rp->index[count++] = cpu_to_le16(d->id);
399 BT_DBG("Added hci%u", d->id);
403 rp->num_controllers = cpu_to_le16(count);
404 rp_len = sizeof(*rp) + (2 * count);
406 read_unlock(&hci_dev_list_lock);
408 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
416 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
417 void *data, u16 data_len)
419 struct mgmt_rp_read_unconf_index_list *rp;
425 BT_DBG("sock %p", sk);
427 read_lock(&hci_dev_list_lock);
430 list_for_each_entry(d, &hci_dev_list, list) {
431 if (d->dev_type == HCI_PRIMARY &&
432 hci_dev_test_flag(d, HCI_UNCONFIGURED))
436 rp_len = sizeof(*rp) + (2 * count);
437 rp = kmalloc(rp_len, GFP_ATOMIC);
439 read_unlock(&hci_dev_list_lock);
444 list_for_each_entry(d, &hci_dev_list, list) {
445 if (hci_dev_test_flag(d, HCI_SETUP) ||
446 hci_dev_test_flag(d, HCI_CONFIG) ||
447 hci_dev_test_flag(d, HCI_USER_CHANNEL))
450 /* Devices marked as raw-only are neither configured
451 * nor unconfigured controllers.
453 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
456 if (d->dev_type == HCI_PRIMARY &&
457 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
458 rp->index[count++] = cpu_to_le16(d->id);
459 BT_DBG("Added hci%u", d->id);
463 rp->num_controllers = cpu_to_le16(count);
464 rp_len = sizeof(*rp) + (2 * count);
466 read_unlock(&hci_dev_list_lock);
468 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
469 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
476 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
477 void *data, u16 data_len)
479 struct mgmt_rp_read_ext_index_list *rp;
484 BT_DBG("sock %p", sk);
486 read_lock(&hci_dev_list_lock);
489 list_for_each_entry(d, &hci_dev_list, list) {
490 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
494 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
496 read_unlock(&hci_dev_list_lock);
501 list_for_each_entry(d, &hci_dev_list, list) {
502 if (hci_dev_test_flag(d, HCI_SETUP) ||
503 hci_dev_test_flag(d, HCI_CONFIG) ||
504 hci_dev_test_flag(d, HCI_USER_CHANNEL))
507 /* Devices marked as raw-only are neither configured
508 * nor unconfigured controllers.
510 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
513 if (d->dev_type == HCI_PRIMARY) {
514 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
515 rp->entry[count].type = 0x01;
517 rp->entry[count].type = 0x00;
518 } else if (d->dev_type == HCI_AMP) {
519 rp->entry[count].type = 0x02;
524 rp->entry[count].bus = d->bus;
525 rp->entry[count++].index = cpu_to_le16(d->id);
526 BT_DBG("Added hci%u", d->id);
529 rp->num_controllers = cpu_to_le16(count);
531 read_unlock(&hci_dev_list_lock);
533 /* If this command is called at least once, then all the
534 * default index and unconfigured index events are disabled
535 * and from now on only extended index events are used.
537 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
538 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
539 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
541 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
542 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
543 struct_size(rp, entry, count));
550 static bool is_configured(struct hci_dev *hdev)
552 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
553 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
556 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
557 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
558 !bacmp(&hdev->public_addr, BDADDR_ANY))
564 static __le32 get_missing_options(struct hci_dev *hdev)
568 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
569 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
570 options |= MGMT_OPTION_EXTERNAL_CONFIG;
572 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
573 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
574 !bacmp(&hdev->public_addr, BDADDR_ANY))
575 options |= MGMT_OPTION_PUBLIC_ADDRESS;
577 return cpu_to_le32(options);
580 static int new_options(struct hci_dev *hdev, struct sock *skip)
582 __le32 options = get_missing_options(hdev);
584 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
585 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
588 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
590 __le32 options = get_missing_options(hdev);
592 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
596 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
597 void *data, u16 data_len)
599 struct mgmt_rp_read_config_info rp;
602 BT_DBG("sock %p %s", sk, hdev->name);
606 memset(&rp, 0, sizeof(rp));
607 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
609 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
610 options |= MGMT_OPTION_EXTERNAL_CONFIG;
612 if (hdev->set_bdaddr)
613 options |= MGMT_OPTION_PUBLIC_ADDRESS;
615 rp.supported_options = cpu_to_le32(options);
616 rp.missing_options = get_missing_options(hdev);
618 hci_dev_unlock(hdev);
620 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
624 static u32 get_supported_phys(struct hci_dev *hdev)
626 u32 supported_phys = 0;
628 if (lmp_bredr_capable(hdev)) {
629 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
631 if (hdev->features[0][0] & LMP_3SLOT)
632 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
634 if (hdev->features[0][0] & LMP_5SLOT)
635 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
637 if (lmp_edr_2m_capable(hdev)) {
638 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
640 if (lmp_edr_3slot_capable(hdev))
641 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
643 if (lmp_edr_5slot_capable(hdev))
644 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
646 if (lmp_edr_3m_capable(hdev)) {
647 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
649 if (lmp_edr_3slot_capable(hdev))
650 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
652 if (lmp_edr_5slot_capable(hdev))
653 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
658 if (lmp_le_capable(hdev)) {
659 supported_phys |= MGMT_PHY_LE_1M_TX;
660 supported_phys |= MGMT_PHY_LE_1M_RX;
662 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
663 supported_phys |= MGMT_PHY_LE_2M_TX;
664 supported_phys |= MGMT_PHY_LE_2M_RX;
667 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
668 supported_phys |= MGMT_PHY_LE_CODED_TX;
669 supported_phys |= MGMT_PHY_LE_CODED_RX;
673 return supported_phys;
676 static u32 get_selected_phys(struct hci_dev *hdev)
678 u32 selected_phys = 0;
680 if (lmp_bredr_capable(hdev)) {
681 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
683 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
684 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
686 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
687 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
689 if (lmp_edr_2m_capable(hdev)) {
690 if (!(hdev->pkt_type & HCI_2DH1))
691 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
693 if (lmp_edr_3slot_capable(hdev) &&
694 !(hdev->pkt_type & HCI_2DH3))
695 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
697 if (lmp_edr_5slot_capable(hdev) &&
698 !(hdev->pkt_type & HCI_2DH5))
699 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
701 if (lmp_edr_3m_capable(hdev)) {
702 if (!(hdev->pkt_type & HCI_3DH1))
703 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
705 if (lmp_edr_3slot_capable(hdev) &&
706 !(hdev->pkt_type & HCI_3DH3))
707 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
709 if (lmp_edr_5slot_capable(hdev) &&
710 !(hdev->pkt_type & HCI_3DH5))
711 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
716 if (lmp_le_capable(hdev)) {
717 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
718 selected_phys |= MGMT_PHY_LE_1M_TX;
720 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
721 selected_phys |= MGMT_PHY_LE_1M_RX;
723 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
724 selected_phys |= MGMT_PHY_LE_2M_TX;
726 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
727 selected_phys |= MGMT_PHY_LE_2M_RX;
729 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
730 selected_phys |= MGMT_PHY_LE_CODED_TX;
732 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
733 selected_phys |= MGMT_PHY_LE_CODED_RX;
736 return selected_phys;
739 static u32 get_configurable_phys(struct hci_dev *hdev)
741 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
742 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
745 static u32 get_supported_settings(struct hci_dev *hdev)
749 settings |= MGMT_SETTING_POWERED;
750 settings |= MGMT_SETTING_BONDABLE;
751 settings |= MGMT_SETTING_DEBUG_KEYS;
752 settings |= MGMT_SETTING_CONNECTABLE;
753 settings |= MGMT_SETTING_DISCOVERABLE;
755 if (lmp_bredr_capable(hdev)) {
756 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
757 settings |= MGMT_SETTING_FAST_CONNECTABLE;
758 settings |= MGMT_SETTING_BREDR;
759 settings |= MGMT_SETTING_LINK_SECURITY;
761 if (lmp_ssp_capable(hdev)) {
762 settings |= MGMT_SETTING_SSP;
763 settings |= MGMT_SETTING_HS;
766 if (lmp_sc_capable(hdev))
767 settings |= MGMT_SETTING_SECURE_CONN;
770 if (lmp_le_capable(hdev)) {
771 settings |= MGMT_SETTING_LE;
772 settings |= MGMT_SETTING_ADVERTISING;
773 settings |= MGMT_SETTING_SECURE_CONN;
774 settings |= MGMT_SETTING_PRIVACY;
775 settings |= MGMT_SETTING_STATIC_ADDRESS;
778 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
780 settings |= MGMT_SETTING_CONFIGURATION;
782 settings |= MGMT_SETTING_PHY_CONFIGURATION;
787 static u32 get_current_settings(struct hci_dev *hdev)
791 if (hdev_is_powered(hdev))
792 settings |= MGMT_SETTING_POWERED;
794 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
795 settings |= MGMT_SETTING_CONNECTABLE;
797 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
798 settings |= MGMT_SETTING_FAST_CONNECTABLE;
800 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
801 settings |= MGMT_SETTING_DISCOVERABLE;
803 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
804 settings |= MGMT_SETTING_BONDABLE;
806 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
807 settings |= MGMT_SETTING_BREDR;
809 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
810 settings |= MGMT_SETTING_LE;
812 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
813 settings |= MGMT_SETTING_LINK_SECURITY;
815 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
816 settings |= MGMT_SETTING_SSP;
818 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
819 settings |= MGMT_SETTING_HS;
821 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
822 settings |= MGMT_SETTING_ADVERTISING;
824 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
825 settings |= MGMT_SETTING_SECURE_CONN;
827 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
828 settings |= MGMT_SETTING_DEBUG_KEYS;
830 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
831 settings |= MGMT_SETTING_PRIVACY;
833 /* The current setting for static address has two purposes. The
834 * first is to indicate if the static address will be used and
835 * the second is to indicate if it is actually set.
837 * This means if the static address is not configured, this flag
838 * will never be set. If the address is configured, then if the
839 * address is actually used decides if the flag is set or not.
841 * For single mode LE only controllers and dual-mode controllers
842 * with BR/EDR disabled, the existence of the static address will
845 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
846 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
847 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
848 if (bacmp(&hdev->static_addr, BDADDR_ANY))
849 settings |= MGMT_SETTING_STATIC_ADDRESS;
855 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
857 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
860 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
861 struct hci_dev *hdev,
864 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
867 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
869 struct mgmt_pending_cmd *cmd;
871 /* If there's a pending mgmt command the flags will not yet have
872 * their final values, so check for this first.
874 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
876 struct mgmt_mode *cp = cmd->param;
878 return LE_AD_GENERAL;
879 else if (cp->val == 0x02)
880 return LE_AD_LIMITED;
882 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
883 return LE_AD_LIMITED;
884 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
885 return LE_AD_GENERAL;
891 bool mgmt_get_connectable(struct hci_dev *hdev)
893 struct mgmt_pending_cmd *cmd;
895 /* If there's a pending mgmt command the flag will not yet have
896 * it's final value, so check for this first.
898 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
900 struct mgmt_mode *cp = cmd->param;
905 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
908 static void service_cache_off(struct work_struct *work)
910 struct hci_dev *hdev = container_of(work, struct hci_dev,
912 struct hci_request req;
914 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
917 hci_req_init(&req, hdev);
921 __hci_req_update_eir(&req);
922 __hci_req_update_class(&req);
924 hci_dev_unlock(hdev);
926 hci_req_run(&req, NULL);
929 static void rpa_expired(struct work_struct *work)
931 struct hci_dev *hdev = container_of(work, struct hci_dev,
933 struct hci_request req;
937 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
939 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
942 /* The generation of a new RPA and programming it into the
943 * controller happens in the hci_req_enable_advertising()
946 hci_req_init(&req, hdev);
947 if (ext_adv_capable(hdev))
948 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
950 __hci_req_enable_advertising(&req);
951 hci_req_run(&req, NULL);
954 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
956 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
959 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
960 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
962 /* Non-mgmt controlled devices get this bit set
963 * implicitly so that pairing works for them, however
964 * for mgmt we require user-space to explicitly enable
967 hci_dev_clear_flag(hdev, HCI_BONDABLE);
970 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
971 void *data, u16 data_len)
973 struct mgmt_rp_read_info rp;
975 BT_DBG("sock %p %s", sk, hdev->name);
979 memset(&rp, 0, sizeof(rp));
981 bacpy(&rp.bdaddr, &hdev->bdaddr);
983 rp.version = hdev->hci_ver;
984 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
986 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
987 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
989 memcpy(rp.dev_class, hdev->dev_class, 3);
991 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
992 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
994 hci_dev_unlock(hdev);
996 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1000 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1005 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1006 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1007 hdev->dev_class, 3);
1009 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1010 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1013 name_len = strlen(hdev->dev_name);
1014 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1015 hdev->dev_name, name_len);
1017 name_len = strlen(hdev->short_name);
1018 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1019 hdev->short_name, name_len);
1024 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1025 void *data, u16 data_len)
1028 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1031 BT_DBG("sock %p %s", sk, hdev->name);
1033 memset(&buf, 0, sizeof(buf));
1037 bacpy(&rp->bdaddr, &hdev->bdaddr);
1039 rp->version = hdev->hci_ver;
1040 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1042 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1043 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1046 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1047 rp->eir_len = cpu_to_le16(eir_len);
1049 hci_dev_unlock(hdev);
1051 /* If this command is called at least once, then the events
1052 * for class of device and local name changes are disabled
1053 * and only the new extended controller information event
1056 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1057 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1058 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1060 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1061 sizeof(*rp) + eir_len);
1064 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1067 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1070 memset(buf, 0, sizeof(buf));
1072 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1073 ev->eir_len = cpu_to_le16(eir_len);
1075 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1076 sizeof(*ev) + eir_len,
1077 HCI_MGMT_EXT_INFO_EVENTS, skip);
1080 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1082 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1084 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1088 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1090 BT_DBG("%s status 0x%02x", hdev->name, status);
1092 if (hci_conn_count(hdev) == 0) {
1093 cancel_delayed_work(&hdev->power_off);
1094 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1098 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1100 struct mgmt_ev_advertising_added ev;
1102 ev.instance = instance;
1104 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1107 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1110 struct mgmt_ev_advertising_removed ev;
1112 ev.instance = instance;
1114 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1117 static void cancel_adv_timeout(struct hci_dev *hdev)
1119 if (hdev->adv_instance_timeout) {
1120 hdev->adv_instance_timeout = 0;
1121 cancel_delayed_work(&hdev->adv_instance_expire);
1125 static int clean_up_hci_state(struct hci_dev *hdev)
1127 struct hci_request req;
1128 struct hci_conn *conn;
1129 bool discov_stopped;
1132 hci_req_init(&req, hdev);
1134 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1135 test_bit(HCI_PSCAN, &hdev->flags)) {
1137 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1140 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1142 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1143 __hci_req_disable_advertising(&req);
1145 discov_stopped = hci_req_stop_discovery(&req);
1147 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1148 /* 0x15 == Terminated due to Power Off */
1149 __hci_abort_conn(&req, conn, 0x15);
1152 err = hci_req_run(&req, clean_up_hci_complete);
1153 if (!err && discov_stopped)
1154 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1159 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1162 struct mgmt_mode *cp = data;
1163 struct mgmt_pending_cmd *cmd;
1166 BT_DBG("request for %s", hdev->name);
1168 if (cp->val != 0x00 && cp->val != 0x01)
1169 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1170 MGMT_STATUS_INVALID_PARAMS);
1174 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1175 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1180 if (!!cp->val == hdev_is_powered(hdev)) {
1181 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1185 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1192 queue_work(hdev->req_workqueue, &hdev->power_on);
1195 /* Disconnect connections, stop scans, etc */
1196 err = clean_up_hci_state(hdev);
1198 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1199 HCI_POWER_OFF_TIMEOUT);
1201 /* ENODATA means there were no HCI commands queued */
1202 if (err == -ENODATA) {
1203 cancel_delayed_work(&hdev->power_off);
1204 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1210 hci_dev_unlock(hdev);
1214 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1216 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1218 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1219 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1222 int mgmt_new_settings(struct hci_dev *hdev)
1224 return new_settings(hdev, NULL);
1229 struct hci_dev *hdev;
1233 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1235 struct cmd_lookup *match = data;
1237 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1239 list_del(&cmd->list);
1241 if (match->sk == NULL) {
1242 match->sk = cmd->sk;
1243 sock_hold(match->sk);
1246 mgmt_pending_free(cmd);
1249 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1253 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1254 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own
 * cmd_complete handler if one is set; otherwise fall back to a plain
 * status response via cmd_status_rsp(). */
1257 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1259 if (cmd->cmd_complete) {
1262 cmd->cmd_complete(cmd, *status);
1263 mgmt_pending_remove(cmd);
1268 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo back the command's full stored
 * parameter buffer as the response payload. */
1271 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1273 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1274 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: respond with only
 * the leading mgmt_addr_info portion of the stored parameters. */
1277 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1279 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1280 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is disabled, SUCCESS
 * otherwise. */
1283 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1285 if (!lmp_bredr_capable(hdev))
1286 return MGMT_STATUS_NOT_SUPPORTED;
1287 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1288 return MGMT_STATUS_REJECTED;
1290 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is disabled, SUCCESS otherwise. */
1293 static u8 mgmt_le_support(struct hci_dev *hdev)
1295 if (!lmp_le_capable(hdev))
1296 return MGMT_STATUS_NOT_SUPPORTED;
1297 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1298 return MGMT_STATUS_REJECTED;
1300 return MGMT_STATUS_SUCCESS;
/* Completion hook for a SET_DISCOVERABLE request: on HCI error, report
 * the mapped status and clear HCI_LIMITED_DISCOVERABLE; on success,
 * (re)arm the discoverable timeout if one is configured, then confirm
 * settings to the requester and broadcast NEW_SETTINGS. NOTE(review):
 * excerpt is non-contiguous; kept byte-identical. */
1303 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1305 struct mgmt_pending_cmd *cmd;
1307 BT_DBG("status 0x%02x", status);
1311 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1316 u8 mgmt_err = mgmt_status(status);
1317 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1318 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Arm the discoverable-off timer only now that the mode change has
 * actually completed. */
1322 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1323 hdev->discov_timeout > 0) {
1324 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1325 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1328 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1329 new_settings(hdev, cmd->sk);
1332 mgmt_pending_remove(cmd);
1335 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates the mode (0x00 off,
 * 0x01 general, 0x02 limited) and its timeout constraints, handles the
 * powered-off and no-op cases directly, and otherwise records the new
 * flags/timeout and defers the HCI work to discoverable_update. The
 * actual timeout is armed in mgmt_set_discoverable_complete().
 * NOTE(review): excerpt is non-contiguous; kept byte-identical. */
1338 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1341 struct mgmt_cp_set_discoverable *cp = data;
1342 struct mgmt_pending_cmd *cmd;
1346 BT_DBG("request for %s", hdev->name);
/* Discoverable requires at least one of BR/EDR or LE to be enabled. */
1348 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1349 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1351 MGMT_STATUS_REJECTED);
1353 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1355 MGMT_STATUS_INVALID_PARAMS);
1357 timeout = __le16_to_cpu(cp->timeout);
1359 /* Disabling discoverable requires that no timeout is set,
1360 * and enabling limited discoverable requires a timeout.
1362 if ((cp->val == 0x00 && timeout > 0) ||
1363 (cp->val == 0x02 && timeout == 0))
1364 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1365 MGMT_STATUS_INVALID_PARAMS);
/* A timeout cannot be honoured while powered off. */
1369 if (!hdev_is_powered(hdev) && timeout > 0) {
1370 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1371 MGMT_STATUS_NOT_POWERED);
/* Discoverable and connectable changes must not race each other. */
1375 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1376 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1377 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable. */
1382 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1383 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1384 MGMT_STATUS_REJECTED);
/* Powered off: only flip the flag, no HCI traffic needed. */
1388 if (!hdev_is_powered(hdev)) {
1389 bool changed = false;
1391 /* Setting limited discoverable when powered off is
1392 * not a valid operation since it requires a timeout
1393 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1395 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1396 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1400 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1405 err = new_settings(hdev, sk);
1410 /* If the current mode is the same, then just update the timeout
1411 * value with the new value. And if only the timeout gets updated,
1412 * then no need for any HCI transactions.
1414 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1415 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1416 HCI_LIMITED_DISCOVERABLE)) {
1417 cancel_delayed_work(&hdev->discov_off);
1418 hdev->discov_timeout = timeout;
1420 if (cp->val && hdev->discov_timeout > 0) {
1421 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1422 queue_delayed_work(hdev->req_workqueue,
1423 &hdev->discov_off, to);
1426 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1430 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1436 /* Cancel any potential discoverable timeout that might be
1437 * still active and store new timeout value. The arming of
1438 * the timeout happens in the complete handler.
1440 cancel_delayed_work(&hdev->discov_off);
1441 hdev->discov_timeout = timeout;
1444 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1446 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1448 /* Limited discoverable mode */
1449 if (cp->val == 0x02)
1450 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1452 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* Actual HCI commands are issued asynchronously by this work item. */
1454 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1458 hci_dev_unlock(hdev);
/* Completion hook for a SET_CONNECTABLE request: report failure status
 * or, on success, confirm the new settings to the requester and
 * broadcast NEW_SETTINGS to everyone else. NOTE(review): excerpt is
 * non-contiguous; kept byte-identical. */
1462 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1464 struct mgmt_pending_cmd *cmd;
1466 BT_DBG("status 0x%02x", status);
1470 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1475 u8 mgmt_err = mgmt_status(status);
1476 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1480 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1481 new_settings(hdev, cmd->sk);
1484 mgmt_pending_remove(cmd);
1487 hci_dev_unlock(hdev);
/* Flag-only path for SET_CONNECTABLE (used when no HCI traffic is
 * required): toggle HCI_CONNECTABLE, clear HCI_DISCOVERABLE when
 * turning connectable off, respond with the settings, and refresh scan
 * state. NOTE(review): excerpt is non-contiguous; kept byte-identical. */
1490 static int set_connectable_update_settings(struct hci_dev *hdev,
1491 struct sock *sk, u8 val)
1493 bool changed = false;
1496 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1500 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Not connectable implies not discoverable. */
1502 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1503 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1506 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1511 hci_req_update_scan(hdev);
1512 hci_update_background_scan(hdev);
1513 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler: validates cp->val (0/1), takes the
 * flag-only path when powered off, rejects racing discoverable /
 * connectable commands, updates the flags, and defers the HCI work to
 * connectable_update. NOTE(review): excerpt is non-contiguous; kept
 * byte-identical. */
1519 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1522 struct mgmt_mode *cp = data;
1523 struct mgmt_pending_cmd *cmd;
1526 BT_DBG("request for %s", hdev->name);
1528 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1529 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1530 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1531 MGMT_STATUS_REJECTED);
1533 if (cp->val != 0x00 && cp->val != 0x01)
1534 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1535 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flags only, no HCI commands needed. */
1539 if (!hdev_is_powered(hdev)) {
1540 err = set_connectable_update_settings(hdev, sk, cp->val);
1544 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1545 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1546 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1551 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1558 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also cancels any running discoverable
 * timeout and clears both discoverable flags. */
1560 if (hdev->discov_timeout > 0)
1561 cancel_delayed_work(&hdev->discov_off);
1563 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1564 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1565 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1568 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1572 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggles the HCI_BONDABLE flag (no HCI
 * traffic), answers with the settings, and — in limited privacy with
 * active advertising — schedules discoverable_update because the
 * bondable state can affect the advertising address. NOTE(review):
 * excerpt is non-contiguous; kept byte-identical. */
1576 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1579 struct mgmt_mode *cp = data;
1583 BT_DBG("request for %s", hdev->name);
1585 if (cp->val != 0x00 && cp->val != 0x01)
1586 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1587 MGMT_STATUS_INVALID_PARAMS);
/* test-and-set/clear returns the previous state, so "changed" is true
 * only when the flag actually flipped. */
1592 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1594 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1596 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1601 /* In limited privacy mode the change of bondable mode
1602 * may affect the local advertising address.
1604 if (hdev_is_powered(hdev) &&
1605 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1606 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1607 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1608 queue_work(hdev->req_workqueue,
1609 &hdev->discoverable_update);
1611 err = new_settings(hdev, sk);
1615 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR support, validates
 * cp->val, handles the powered-off (flag-only) and no-op cases, and
 * otherwise issues HCI_OP_WRITE_AUTH_ENABLE with a pending command to
 * complete later. NOTE(review): excerpt is non-contiguous; kept
 * byte-identical. */
1619 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1622 struct mgmt_mode *cp = data;
1623 struct mgmt_pending_cmd *cmd;
1627 BT_DBG("request for %s", hdev->name);
1629 status = mgmt_bredr_support(hdev);
1631 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1634 if (cp->val != 0x00 && cp->val != 0x01)
1635 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1636 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: just toggle the flag and report. */
1640 if (!hdev_is_powered(hdev)) {
1641 bool changed = false;
1643 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1644 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1648 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1653 err = new_settings(hdev, sk);
1658 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1659 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: no HCI needed. */
1666 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1667 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1671 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1677 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
/* On send failure, drop the pending command we just added. */
1679 mgmt_pending_remove(cmd);
1684 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP capability,
 * validates cp->val, handles the powered-off and no-op cases by flag
 * manipulation, and otherwise sends HCI_OP_WRITE_SSP_MODE (plus
 * disabling SSP debug mode first when turning SSP off with debug keys
 * in use). NOTE(review): excerpt is non-contiguous; kept byte-identical. */
1688 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1690 struct mgmt_mode *cp = data;
1691 struct mgmt_pending_cmd *cmd;
1695 BT_DBG("request for %s", hdev->name);
1697 status = mgmt_bredr_support(hdev);
1699 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1701 if (!lmp_ssp_capable(hdev))
1702 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1703 MGMT_STATUS_NOT_SUPPORTED);
1705 if (cp->val != 0x00 && cp->val != 0x01)
1706 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1707 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: adjust flags only; disabling SSP also disables HS. */
1711 if (!hdev_is_powered(hdev)) {
1715 changed = !hci_dev_test_and_set_flag(hdev,
1718 changed = hci_dev_test_and_clear_flag(hdev,
1721 changed = hci_dev_test_and_clear_flag(hdev,
1724 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1727 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1732 err = new_settings(hdev, sk);
1737 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1738 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1743 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1744 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1748 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* When disabling SSP while debug keys are active, also turn off SSP
 * debug mode (cp->val is 0x00 here, the desired mode value). */
1754 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1755 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1756 sizeof(cp->val), &cp->val);
1758 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1760 mgmt_pending_remove(cmd);
1765 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: requires BR/EDR, SSP capability
 * and SSP enabled; validates cp->val; pure flag toggle (no HCI
 * traffic), except that disabling HS while powered is rejected.
 * NOTE(review): excerpt is non-contiguous; kept byte-identical. */
1769 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1771 struct mgmt_mode *cp = data;
1776 BT_DBG("request for %s", hdev->name);
1778 status = mgmt_bredr_support(hdev);
1780 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1782 if (!lmp_ssp_capable(hdev))
1783 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1784 MGMT_STATUS_NOT_SUPPORTED);
/* HS builds on SSP; reject if SSP itself is disabled. */
1786 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1787 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1788 MGMT_STATUS_REJECTED);
1790 if (cp->val != 0x00 && cp->val != 0x01)
1791 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1792 MGMT_STATUS_INVALID_PARAMS);
/* Don't race with an in-flight SET_SSP. */
1796 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1797 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1803 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
/* Disabling HS is only allowed while powered off. */
1805 if (hdev_is_powered(hdev)) {
1806 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1807 MGMT_STATUS_REJECTED);
1811 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1814 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1819 err = new_settings(hdev, sk);
1822 hci_dev_unlock(hdev);
/* HCI request completion callback for set_le(): on error, fail all
 * pending SET_LE commands; on success, answer them with the settings,
 * broadcast NEW_SETTINGS, and — if LE ended up enabled — refresh the
 * default advertising/scan-response data and background scan.
 * NOTE(review): excerpt is non-contiguous; kept byte-identical. */
1826 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1828 struct cmd_lookup match = { NULL, hdev };
1833 u8 mgmt_err = mgmt_status(status);
1835 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1840 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1842 new_settings(hdev, match.sk);
1847 /* Make sure the controller has a good default for
1848 * advertising data. Restrict the update to when LE
1849 * has actually been enabled. During power on, the
1850 * update in powered_update_hci will take care of it.
1852 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1853 struct hci_request req;
1854 hci_req_init(&req, hdev);
/* Extended-advertising controllers use instance 0x00 setup; legacy
 * controllers update adv + scan-response data directly. */
1855 if (ext_adv_capable(hdev)) {
1858 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1860 __hci_req_update_scan_rsp_data(&req, 0x00);
1862 __hci_req_update_adv_data(&req, 0x00);
1863 __hci_req_update_scan_rsp_data(&req, 0x00);
1865 hci_req_run(&req, NULL);
1866 hci_update_background_scan(hdev);
1870 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: validates cp->val, refuses to switch LE off
 * on LE-only configurations, handles the powered-off / already-in-state
 * cases via flags, and otherwise builds an HCI request writing LE host
 * support (disabling advertising first when turning LE off), completed
 * by le_enable_complete(). NOTE(review): excerpt is non-contiguous;
 * kept byte-identical. */
1873 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1875 struct mgmt_mode *cp = data;
1876 struct hci_cp_write_le_host_supported hci_cp;
1877 struct mgmt_pending_cmd *cmd;
1878 struct hci_request req;
1882 BT_DBG("request for %s", hdev->name);
1884 if (!lmp_le_capable(hdev))
1885 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1886 MGMT_STATUS_NOT_SUPPORTED);
1888 if (cp->val != 0x00 && cp->val != 0x01)
1889 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1890 MGMT_STATUS_INVALID_PARAMS);
1892 /* Bluetooth single mode LE only controllers or dual-mode
1893 * controllers configured as LE only devices, do not allow
1894 * switching LE off. These have either LE enabled explicitly
1895 * or BR/EDR has been previously switched off.
1897 * When trying to enable an already enabled LE, then gracefully
1898 * send a positive response. Trying to disable it however will
1899 * result into rejection.
1901 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1902 if (cp->val == 0x01)
1903 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1906 MGMT_STATUS_REJECTED);
1912 enabled = lmp_host_le_capable(hdev);
/* Turning LE off also removes all advertising instances. */
1915 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1917 if (!hdev_is_powered(hdev) || val == enabled) {
1918 bool changed = false;
1920 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1921 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1925 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1926 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1930 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1935 err = new_settings(hdev, sk);
/* LE and advertising changes must not race each other. */
1940 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1941 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1942 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1947 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1953 hci_req_init(&req, hdev);
1955 memset(&hci_cp, 0, sizeof(hci_cp));
1959 hci_cp.simul = 0x00;
1961 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1962 __hci_req_disable_advertising(&req);
1964 if (ext_adv_capable(hdev))
1965 __hci_req_clear_ext_adv_sets(&req);
1968 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1971 err = hci_req_run(&req, le_enable_complete);
1973 mgmt_pending_remove(cmd);
1976 hci_dev_unlock(hdev);
1980 /* This is a helper function to test for pending mgmt commands that can
1981 * cause CoD or EIR HCI commands. We can only allow one such pending
1982 * mgmt command at a time since otherwise we cannot easily track what
1983 * the current values are, will be, and based on that calculate if a new
1984 * HCI command needs to be sent and if yes with what value.
/* Return true when any pending mgmt command could trigger CoD or EIR
 * HCI commands (see the rationale comment above this function).
 * NOTE(review): excerpt is non-contiguous (return paths elided); kept
 * byte-identical. */
1986 static bool pending_eir_or_class(struct hci_dev *hdev)
1988 struct mgmt_pending_cmd *cmd;
1990 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1991 switch (cmd->opcode) {
1992 case MGMT_OP_ADD_UUID:
1993 case MGMT_OP_REMOVE_UUID:
1994 case MGMT_OP_SET_DEV_CLASS:
1995 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect 16/32-bit short UUIDs. */
2003 static const u8 bluetooth_base_uuid[] = {
2004 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2005 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit little-endian UUID: if the low 12 bytes don't
 * match the Bluetooth Base UUID it is a full 128-bit UUID; otherwise
 * the top 32 bits decide between a 16- and 32-bit short form.
 * NOTE(review): the final size returns are elided in this excerpt. */
2008 static u8 get_uuid_size(const u8 *uuid)
2012 if (memcmp(uuid, bluetooth_base_uuid, 12))
2015 val = get_unaligned_le32(&uuid[12]);
/* Common completion for class/EIR-affecting commands: respond to the
 * pending command of @mgmt_op with the (possibly updated) 3-byte
 * device class and remove it. */
2022 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2024 struct mgmt_pending_cmd *cmd;
2028 cmd = pending_find(mgmt_op, hdev);
2032 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2033 mgmt_status(status), hdev->dev_class, 3);
2035 mgmt_pending_remove(cmd);
2038 hci_dev_unlock(hdev);
/* HCI request completion for add_uuid(): finish the pending ADD_UUID. */
2041 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2043 BT_DBG("status 0x%02x", status);
2045 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: rejects when a class/EIR command is
 * pending, records the UUID in hdev->uuids, and runs an HCI request
 * updating class and EIR; -ENODATA from the request means nothing
 * needed sending and the command completes immediately. NOTE(review):
 * excerpt is non-contiguous; kept byte-identical. */
2048 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2050 struct mgmt_cp_add_uuid *cp = data;
2051 struct mgmt_pending_cmd *cmd;
2052 struct hci_request req;
2053 struct bt_uuid *uuid;
2056 BT_DBG("request for %s", hdev->name);
2060 if (pending_eir_or_class(hdev)) {
2061 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2066 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2072 memcpy(uuid->uuid, cp->uuid, 16);
2073 uuid->svc_hint = cp->svc_hint;
2074 uuid->size = get_uuid_size(cp->uuid);
2076 list_add_tail(&uuid->list, &hdev->uuids);
2078 hci_req_init(&req, hdev);
2080 __hci_req_update_class(&req);
2081 __hci_req_update_eir(&req);
2083 err = hci_req_run(&req, add_uuid_complete);
/* -ENODATA: no HCI commands were queued, so complete right away. */
2085 if (err != -ENODATA)
2088 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2089 hdev->dev_class, 3);
2093 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2102 hci_dev_unlock(hdev);
/* Arm the service-cache: when powered and the HCI_SERVICE_CACHE flag
 * was not already set, schedule the delayed service_cache work.
 * NOTE(review): return values are elided in this excerpt. */
2106 static bool enable_service_cache(struct hci_dev *hdev)
2108 if (!hdev_is_powered(hdev))
2111 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2112 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for remove_uuid(): finish the pending REMOVE_UUID. */
2120 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2122 BT_DBG("status 0x%02x", status);
2124 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler: an all-zero UUID clears the whole list
 * (optionally deferring the EIR update via the service cache); a
 * specific UUID removes matching entries or fails with INVALID_PARAMS
 * when none matched; then class/EIR are refreshed via an HCI request.
 * NOTE(review): excerpt is non-contiguous; kept byte-identical. */
2127 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2130 struct mgmt_cp_remove_uuid *cp = data;
2131 struct mgmt_pending_cmd *cmd;
2132 struct bt_uuid *match, *tmp;
2133 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2134 struct hci_request req;
2137 BT_DBG("request for %s", hdev->name);
2141 if (pending_eir_or_class(hdev)) {
2142 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* All-zero UUID is a wildcard meaning "remove everything". */
2147 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2148 hci_uuids_clear(hdev);
2150 if (enable_service_cache(hdev)) {
2151 err = mgmt_cmd_complete(sk, hdev->id,
2152 MGMT_OP_REMOVE_UUID,
2153 0, hdev->dev_class, 3);
2162 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2163 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2166 list_del(&match->list);
/* No entry matched the requested UUID. */
2172 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2173 MGMT_STATUS_INVALID_PARAMS);
2178 hci_req_init(&req, hdev);
2180 __hci_req_update_class(&req);
2181 __hci_req_update_eir(&req);
2183 err = hci_req_run(&req, remove_uuid_complete);
/* -ENODATA: nothing to send, complete immediately. */
2185 if (err != -ENODATA)
2188 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2189 hdev->dev_class, 3);
2193 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2202 hci_dev_unlock(hdev);
/* HCI request completion for set_dev_class(): finish the pending
 * SET_DEV_CLASS. */
2206 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2208 BT_DBG("status 0x%02x", status);
2210 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: requires BR/EDR, validates the
 * reserved bits of minor/major, stores the new class, and (when
 * powered) flushes any service cache and runs an HCI request updating
 * class/EIR. NOTE(review): excerpt is non-contiguous; kept
 * byte-identical. */
2213 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2216 struct mgmt_cp_set_dev_class *cp = data;
2217 struct mgmt_pending_cmd *cmd;
2218 struct hci_request req;
2221 BT_DBG("request for %s", hdev->name);
2223 if (!lmp_bredr_capable(hdev))
2224 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2225 MGMT_STATUS_NOT_SUPPORTED);
2229 if (pending_eir_or_class(hdev)) {
2230 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2235 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2236 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2237 MGMT_STATUS_INVALID_PARAMS);
2241 hdev->major_class = cp->major;
2242 hdev->minor_class = cp->minor;
2244 if (!hdev_is_powered(hdev)) {
2245 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2246 hdev->dev_class, 3);
2250 hci_req_init(&req, hdev);
/* Drop the lock while synchronously cancelling the service-cache
 * work, then force an EIR refresh since the cache was active. */
2252 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2253 hci_dev_unlock(hdev);
2254 cancel_delayed_work_sync(&hdev->service_cache);
2256 __hci_req_update_eir(&req);
2259 __hci_req_update_class(&req);
2261 err = hci_req_run(&req, set_class_complete);
/* -ENODATA: no HCI commands queued, complete immediately. */
2263 if (err != -ENODATA)
2266 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2267 hdev->dev_class, 3);
2271 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2280 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validates key_count against both the
 * u16 overflow bound and the actual payload length, validates each
 * key's address type and key type, then replaces the stored link keys
 * (skipping debug-combination keys) and updates the KEEP_DEBUG_KEYS
 * flag. NOTE(review): excerpt is non-contiguous; kept byte-identical. */
2284 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2287 struct mgmt_cp_load_link_keys *cp = data;
/* Largest key_count whose total payload still fits in a u16 length. */
2288 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2289 sizeof(struct mgmt_link_key_info));
2290 u16 key_count, expected_len;
2294 BT_DBG("request for %s", hdev->name);
2296 if (!lmp_bredr_capable(hdev))
2297 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2298 MGMT_STATUS_NOT_SUPPORTED);
2300 key_count = __le16_to_cpu(cp->key_count);
2301 if (key_count > max_key_count) {
2302 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2304 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2305 MGMT_STATUS_INVALID_PARAMS);
/* The declared key_count must exactly match the received length. */
2308 expected_len = struct_size(cp, keys, key_count);
2309 if (expected_len != len) {
2310 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2312 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2313 MGMT_STATUS_INVALID_PARAMS);
2316 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2317 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2318 MGMT_STATUS_INVALID_PARAMS);
2320 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before mutating any state. */
2323 for (i = 0; i < key_count; i++) {
2324 struct mgmt_link_key_info *key = &cp->keys[i];
2326 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2327 return mgmt_cmd_status(sk, hdev->id,
2328 MGMT_OP_LOAD_LINK_KEYS,
2329 MGMT_STATUS_INVALID_PARAMS);
2334 hci_link_keys_clear(hdev);
2337 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2339 changed = hci_dev_test_and_clear_flag(hdev,
2340 HCI_KEEP_DEBUG_KEYS);
2343 new_settings(hdev, NULL);
2345 for (i = 0; i < key_count; i++) {
2346 struct mgmt_link_key_info *key = &cp->keys[i];
2348 /* Always ignore debug keys and require a new pairing if
2349 * the user wants to use them.
2351 if (key->type == HCI_LK_DEBUG_COMBINATION)
2354 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2355 key->type, key->pin_len, NULL);
2358 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2360 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk (typically the requester, which already got
 * a command response). */
2365 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2366 u8 addr_type, struct sock *skip_sk)
2368 struct mgmt_ev_device_unpaired ev;
2370 bacpy(&ev.addr.bdaddr, bdaddr);
2371 ev.addr.type = addr_type;
2373 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: removes BR/EDR link keys or LE
 * SMP pairing data (LTK/IRK) for the address, disables auto-connect
 * parameters, and — when cp->disconnect is set and a connection exists
 * — aborts the link via a pending command completed with the address.
 * NOTE(review): excerpt is non-contiguous; kept byte-identical. */
2377 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2380 struct mgmt_cp_unpair_device *cp = data;
2381 struct mgmt_rp_unpair_device rp;
2382 struct hci_conn_params *params;
2383 struct mgmt_pending_cmd *cmd;
2384 struct hci_conn *conn;
2388 memset(&rp, 0, sizeof(rp));
2389 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2390 rp.addr.type = cp->addr.type;
2392 if (!bdaddr_type_is_valid(cp->addr.type))
2393 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2394 MGMT_STATUS_INVALID_PARAMS,
2397 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2398 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2399 MGMT_STATUS_INVALID_PARAMS,
2404 if (!hdev_is_powered(hdev)) {
2405 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2406 MGMT_STATUS_NOT_POWERED, &rp,
2411 if (cp->addr.type == BDADDR_BREDR) {
2412 /* If disconnection is requested, then look up the
2413 * connection. If the remote device is connected, it
2414 * will be later used to terminate the link.
2416 * Setting it to NULL explicitly will cause no
2417 * termination of the link.
2420 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2425 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2427 err = mgmt_cmd_complete(sk, hdev->id,
2428 MGMT_OP_UNPAIR_DEVICE,
2429 MGMT_STATUS_NOT_PAIRED, &rp,
2437 /* LE address type */
2438 addr_type = le_addr_type(cp->addr.type);
2440 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2441 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2443 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2444 MGMT_STATUS_NOT_PAIRED, &rp,
2449 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2451 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2456 /* Defer clearing up the connection parameters until closing to
2457 * give a chance of keeping them if a repairing happens.
2459 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2461 /* Disable auto-connection parameters if present */
2462 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2464 if (params->explicit_connect)
2465 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2467 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2470 /* If disconnection is not requested, then clear the connection
2471 * variable so that the link is not terminated.
2473 if (!cp->disconnect)
2477 /* If the connection variable is set, then termination of the
2478 * link is requested.
2481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2483 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2487 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2494 cmd->cmd_complete = addr_cmd_complete;
2496 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2498 mgmt_pending_remove(cmd);
2501 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validates the address type, requires the
 * adapter to be up and no DISCONNECT pending, looks up the BR/EDR or
 * LE connection, and issues the disconnect; the pending command is
 * completed (with the original parameters echoed) when the link goes
 * down. NOTE(review): excerpt is non-contiguous; kept byte-identical. */
2505 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2508 struct mgmt_cp_disconnect *cp = data;
2509 struct mgmt_rp_disconnect rp;
2510 struct mgmt_pending_cmd *cmd;
2511 struct hci_conn *conn;
2516 memset(&rp, 0, sizeof(rp));
2517 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2518 rp.addr.type = cp->addr.type;
2520 if (!bdaddr_type_is_valid(cp->addr.type))
2521 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2522 MGMT_STATUS_INVALID_PARAMS,
2527 if (!test_bit(HCI_UP, &hdev->flags)) {
2528 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2529 MGMT_STATUS_NOT_POWERED, &rp,
2534 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2535 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2536 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2540 if (cp->addr.type == BDADDR_BREDR)
2541 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2544 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2545 le_addr_type(cp->addr.type));
/* BT_OPEN/BT_CLOSED mean no usable link exists for this peer. */
2547 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2549 MGMT_STATUS_NOT_CONNECTED, &rp,
2554 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2560 cmd->cmd_complete = generic_cmd_complete;
2562 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2564 mgmt_pending_remove(cmd);
2567 hci_dev_unlock(hdev);
/* Translate an HCI link type + LE address type into the corresponding
 * MGMT BDADDR_* address type; defaults to LE random for unknown LE
 * address types and to BR/EDR for non-LE links. NOTE(review): case
 * labels for the outer switch are elided in this excerpt. */
2571 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2573 switch (link_type) {
2575 switch (addr_type) {
2576 case ADDR_LE_DEV_PUBLIC:
2577 return BDADDR_LE_PUBLIC;
2580 /* Fallback to LE Random address type */
2581 return BDADDR_LE_RANDOM;
2585 /* Fallback to BR/EDR type */
2586 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: counts mgmt-visible connections,
 * allocates the response, fills in address/type for each non-SCO link,
 * and returns the list (length recomputed in case SCO links were
 * filtered out). NOTE(review): excerpt is non-contiguous; kept
 * byte-identical. */
2590 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2593 struct mgmt_rp_get_connections *rp;
2602 if (!hdev_is_powered(hdev)) {
2603 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2604 MGMT_STATUS_NOT_POWERED);
/* First pass: count candidate connections to size the reply. */
2609 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2610 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2614 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
2621 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2622 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2624 bacpy(&rp->addr[i].bdaddr, &c->dst);
2625 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2626 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2631 rp->conn_count = cpu_to_le16(i);
2633 /* Recalculate length in case of filtered SCO connections, etc */
2634 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2635 struct_size(rp, addr, i));
2640 hci_dev_unlock(hdev);
/* Queue a PIN_CODE_NEG_REPLY: add a pending command (completed with the
 * address via addr_cmd_complete) and send HCI_OP_PIN_CODE_NEG_REPLY;
 * on send failure, drop the pending command again. */
2644 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2645 struct mgmt_cp_pin_code_neg_reply *cp)
2647 struct mgmt_pending_cmd *cmd;
2650 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2655 cmd->cmd_complete = addr_cmd_complete;
2657 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2658 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2660 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: requires power and a BR/EDR
 * connection to the address; when high security is pending but the PIN
 * is shorter than 16 bytes, converts the reply into a negative reply;
 * otherwise forwards the PIN via HCI_OP_PIN_CODE_REPLY with a pending
 * command. NOTE(review): excerpt is non-contiguous; kept byte-identical. */
2665 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2668 struct hci_conn *conn;
2669 struct mgmt_cp_pin_code_reply *cp = data;
2670 struct hci_cp_pin_code_reply reply;
2671 struct mgmt_pending_cmd *cmd;
2678 if (!hdev_is_powered(hdev)) {
2679 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2680 MGMT_STATUS_NOT_POWERED);
2684 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2686 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2687 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise reject the
 * pairing with a negative reply instead. */
2691 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2692 struct mgmt_cp_pin_code_neg_reply ncp;
2694 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2696 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2698 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2700 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2701 MGMT_STATUS_INVALID_PARAMS);
2706 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2712 cmd->cmd_complete = addr_cmd_complete;
2714 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2715 reply.pin_len = cp->pin_len;
2716 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2718 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2720 mgmt_pending_remove(cmd);
2723 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validates the capability value
 * against the SMP maximum and stores it in hdev->io_capability; purely
 * a host-side setting, no HCI traffic. */
2727 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2730 struct mgmt_cp_set_io_capability *cp = data;
2734 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2735 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2736 MGMT_STATUS_INVALID_PARAMS);
2740 hdev->io_capability = cp->io_capability;
2742 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2743 hdev->io_capability);
2745 hci_dev_unlock(hdev);
2747 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending PAIR_DEVICE command whose user_data is @conn.
 * NOTE(review): the return statements are elided in this excerpt; kept
 * byte-identical. */
2751 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2753 struct hci_dev *hdev = conn->hdev;
2754 struct mgmt_pending_cmd *cmd;
2756 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2757 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2760 if (cmd->user_data != conn)
/* cmd_complete handler for PAIR_DEVICE: respond with the peer address,
 * detach all pairing callbacks from the connection, drop the
 * connection reference, and keep the stored connection parameters
 * since the device is now paired. NOTE(review): excerpt is
 * non-contiguous; kept byte-identical. */
2769 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2771 struct mgmt_rp_pair_device rp;
2772 struct hci_conn *conn = cmd->user_data;
2775 bacpy(&rp.addr.bdaddr, &conn->dst);
2776 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2778 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2779 status, &rp, sizeof(rp));
2781 /* So we don't get further callbacks for this connection */
2782 conn->connect_cfm_cb = NULL;
2783 conn->security_cfm_cb = NULL;
2784 conn->disconn_cfm_cb = NULL;
2786 hci_conn_drop(conn);
2788 /* The device is paired so there is no need to remove
2789 * its connection parameters anymore.
2791 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* SMP completion entry point: complete the pending PAIR_DEVICE command
 * for this connection (if any) with SUCCESS or FAILED. */
2798 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2800 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2801 struct mgmt_pending_cmd *cmd;
2803 cmd = find_pairing(conn);
2805 cmd->cmd_complete(cmd, status);
2806 mgmt_pending_remove(cmd);
/* BR/EDR connection callback (connect/security/disconnect confirm) used
 * while a Pair Device command is pending. Translates the HCI status via
 * mgmt_status() and completes the pending pairing command, if one exists.
 */
2810 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2812 struct mgmt_pending_cmd *cmd;
2814 BT_DBG("status %u", status);
2816 cmd = find_pairing(conn);
2818 BT_DBG("Unable to find a pending command");
2822 cmd->cmd_complete(cmd, mgmt_status(status));
2823 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb(): completes the pending Pair
 * Device command with the mapped HCI status. For LE, connection setup
 * alone does not prove pairing succeeded (see pair_device()), so this is
 * installed for the failure/teardown paths.
 */
2826 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2828 struct mgmt_pending_cmd *cmd;
2830 BT_DBG("status %u", status);
2835 cmd = find_pairing(conn);
2837 BT_DBG("Unable to find a pending command");
2841 cmd->cmd_complete(cmd, mgmt_status(status));
2842 mgmt_pending_remove(cmd);
/* MGMT Pair Device command handler.
 *
 * Validates the address type and IO capability, rejects when the adapter
 * is powered off or the device is already paired, then initiates either a
 * BR/EDR ACL connection (dedicated bonding, medium security) or an LE
 * connect-by-scan. For LE, connection parameters are pre-registered so the
 * device is remembered for future connections. A pending command is
 * created with pairing_complete as its completion routine and the
 * connection's confirm callbacks are pointed at the pairing handlers.
 */
2845 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2848 struct mgmt_cp_pair_device *cp = data;
2849 struct mgmt_rp_pair_device rp;
2850 struct mgmt_pending_cmd *cmd;
2851 u8 sec_level, auth_type;
2852 struct hci_conn *conn;
2857 memset(&rp, 0, sizeof(rp));
2858 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2859 rp.addr.type = cp->addr.type;
2861 if (!bdaddr_type_is_valid(cp->addr.type))
2862 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2863 MGMT_STATUS_INVALID_PARAMS,
2866 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2867 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2868 MGMT_STATUS_INVALID_PARAMS,
2873 if (!hdev_is_powered(hdev)) {
2874 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2875 MGMT_STATUS_NOT_POWERED, &rp,
2880 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2881 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2882 MGMT_STATUS_ALREADY_PAIRED, &rp,
2887 sec_level = BT_SECURITY_MEDIUM;
2888 auth_type = HCI_AT_DEDICATED_BONDING;
2890 if (cp->addr.type == BDADDR_BREDR) {
2891 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2894 u8 addr_type = le_addr_type(cp->addr.type);
2895 struct hci_conn_params *p;
2897 /* When pairing a new device, it is expected to remember
2898 * this device for future connections. Adding the connection
2899 * parameter information ahead of time allows tracking
2900 * of the slave preferred values and will speed up any
2901 * further connection establishment.
2903 * If connection parameters already exist, then they
2904 * will be kept and this function does nothing.
2906 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2908 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2909 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2911 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2912 addr_type, sec_level,
2913 HCI_LE_CONN_TIMEOUT);
/* Map connection-attempt errors to mgmt status codes. */
2919 if (PTR_ERR(conn) == -EBUSY)
2920 status = MGMT_STATUS_BUSY;
2921 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2922 status = MGMT_STATUS_NOT_SUPPORTED;
2923 else if (PTR_ERR(conn) == -ECONNREFUSED)
2924 status = MGMT_STATUS_REJECTED;
2926 status = MGMT_STATUS_CONNECT_FAILED;
2928 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2929 status, &rp, sizeof(rp));
/* A connect-confirm callback already set means another pairing is in
 * progress on this connection; report BUSY.
 */
2933 if (conn->connect_cfm_cb) {
2934 hci_conn_drop(conn);
2935 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2936 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2940 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2943 hci_conn_drop(conn);
2947 cmd->cmd_complete = pairing_complete;
2949 /* For LE, just connecting isn't a proof that the pairing finished */
2950 if (cp->addr.type == BDADDR_BREDR) {
2951 conn->connect_cfm_cb = pairing_complete_cb;
2952 conn->security_cfm_cb = pairing_complete_cb;
2953 conn->disconn_cfm_cb = pairing_complete_cb;
2955 conn->connect_cfm_cb = le_pairing_complete_cb;
2956 conn->security_cfm_cb = le_pairing_complete_cb;
2957 conn->disconn_cfm_cb = le_pairing_complete_cb;
2960 conn->io_capability = cp->io_cap;
2961 cmd->user_data = hci_conn_get(conn);
/* If already connected and security can be elevated synchronously, the
 * pairing is effectively done — complete the command immediately.
 */
2963 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2964 hci_conn_security(conn, sec_level, auth_type, true)) {
2965 cmd->cmd_complete(cmd, 0);
2966 mgmt_pending_remove(cmd);
2972 hci_dev_unlock(hdev);
/* MGMT Cancel Pair Device command handler.
 *
 * Requires the adapter to be powered and a Pair Device command to be
 * pending for the exact address given; otherwise fails with
 * NOT_POWERED/INVALID_PARAMS. On match, completes the pending pairing
 * with MGMT_STATUS_CANCELLED and acknowledges the cancel with the address.
 */
2976 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2979 struct mgmt_addr_info *addr = data;
2980 struct mgmt_pending_cmd *cmd;
2981 struct hci_conn *conn;
2988 if (!hdev_is_powered(hdev)) {
2989 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2990 MGMT_STATUS_NOT_POWERED);
2994 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2996 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2997 MGMT_STATUS_INVALID_PARAMS);
3001 conn = cmd->user_data;
3003 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3004 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3005 MGMT_STATUS_INVALID_PARAMS);
3009 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3010 mgmt_pending_remove(cmd);
3012 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3013 addr, sizeof(*addr));
3015 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN/confirm/passkey,
 * positive and negative variants).
 *
 * @mgmt_op: the mgmt opcode being answered, used for the reply.
 * @hci_op:  the HCI command to forward the response with.
 * @passkey: only meaningful for HCI_OP_USER_PASSKEY_REPLY.
 *
 * LE addresses are routed to SMP via smp_user_confirm_reply() and answered
 * immediately; BR/EDR responses are forwarded to the controller as an HCI
 * command tracked by a pending mgmt command with addr_cmd_complete.
 */
3019 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3020 struct mgmt_addr_info *addr, u16 mgmt_op,
3021 u16 hci_op, __le32 passkey)
3023 struct mgmt_pending_cmd *cmd;
3024 struct hci_conn *conn;
3029 if (!hdev_is_powered(hdev)) {
3030 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3031 MGMT_STATUS_NOT_POWERED, addr,
3036 if (addr->type == BDADDR_BREDR)
3037 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3039 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3040 le_addr_type(addr->type));
3043 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3044 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go through SMP, not HCI. */
3049 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3050 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3052 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3053 MGMT_STATUS_SUCCESS, addr,
3056 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3057 MGMT_STATUS_FAILED, addr,
3063 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3069 cmd->cmd_complete = addr_cmd_complete;
3071 /* Continue with pairing via HCI */
3072 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3073 struct hci_cp_user_passkey_reply cp;
3075 bacpy(&cp.bdaddr, &addr->bdaddr);
3076 cp.passkey = passkey;
3077 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3079 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3083 mgmt_pending_remove(cmd);
3086 hci_dev_unlock(hdev);
/* MGMT PIN Code Negative Reply: thin wrapper mapping to the
 * HCI PIN Code Negative Reply via user_pairing_resp().
 */
3090 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3091 void *data, u16 len)
3093 struct mgmt_cp_pin_code_neg_reply *cp = data;
3097 return user_pairing_resp(sk, hdev, &cp->addr,
3098 MGMT_OP_PIN_CODE_NEG_REPLY,
3099 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT User Confirmation Reply: validates the fixed command length, then
 * forwards to user_pairing_resp() with the matching HCI opcode.
 */
3102 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3105 struct mgmt_cp_user_confirm_reply *cp = data;
3109 if (len != sizeof(*cp))
3110 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3111 MGMT_STATUS_INVALID_PARAMS);
3113 return user_pairing_resp(sk, hdev, &cp->addr,
3114 MGMT_OP_USER_CONFIRM_REPLY,
3115 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT User Confirmation Negative Reply: wrapper around
 * user_pairing_resp() with the negative-reply HCI opcode.
 */
3118 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3119 void *data, u16 len)
3121 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3125 return user_pairing_resp(sk, hdev, &cp->addr,
3126 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3127 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT User Passkey Reply: wrapper that forwards the user-entered passkey
 * via user_pairing_resp(); the passkey is consumed by the
 * HCI_OP_USER_PASSKEY_REPLY branch there.
 */
3130 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3133 struct mgmt_cp_user_passkey_reply *cp = data;
3137 return user_pairing_resp(sk, hdev, &cp->addr,
3138 MGMT_OP_USER_PASSKEY_REPLY,
3139 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT User Passkey Negative Reply: wrapper around user_pairing_resp()
 * with the negative-reply HCI opcode and no passkey.
 */
3142 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3143 void *data, u16 len)
3145 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3149 return user_pairing_resp(sk, hdev, &cp->addr,
3150 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3151 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Expire the current advertising instance when data it carries (matching
 * any of the bits in @flags, e.g. local name or appearance) has changed.
 *
 * Cancels the running advertising timeout and reschedules the next
 * instance so the updated data is picked up. Does nothing when the current
 * instance does not advertise any of the changed fields.
 */
3154 static void adv_expire(struct hci_dev *hdev, u32 flags)
3156 struct adv_info *adv_instance;
3157 struct hci_request req;
3160 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3164 /* stop if current instance doesn't need to be changed */
3165 if (!(adv_instance->flags & flags))
3168 cancel_adv_timeout(hdev);
3170 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3174 hci_req_init(&req, hdev);
3175 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3180 hci_req_run(&req, NULL);
/* HCI request completion handler for Set Local Name.
 *
 * Resolves the pending MGMT_OP_SET_LOCAL_NAME command: reports a status on
 * HCI failure, otherwise a command-complete. If LE advertising is active,
 * expires advertising instances that carry the local name so they get
 * refreshed with the new value.
 */
3183 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3185 struct mgmt_cp_set_local_name *cp;
3186 struct mgmt_pending_cmd *cmd;
3188 BT_DBG("status 0x%02x", status);
3192 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3199 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3200 mgmt_status(status));
3202 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3205 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3206 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3209 mgmt_pending_remove(cmd);
3212 hci_dev_unlock(hdev);
/* MGMT Set Local Name command handler.
 *
 * Short-circuits with an immediate command-complete when both the long and
 * short names are unchanged. When the adapter is powered off the names are
 * only stored and a Local Name Changed event is emitted. Otherwise an HCI
 * request is queued to update the controller name/EIR and, when LE
 * advertising is on, the scan response data (which carries the name).
 */
3215 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3218 struct mgmt_cp_set_local_name *cp = data;
3219 struct mgmt_pending_cmd *cmd;
3220 struct hci_request req;
3227 /* If the old values are the same as the new ones just return a
3228 * direct command complete event.
3230 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3231 !memcmp(hdev->short_name, cp->short_name,
3232 sizeof(hdev->short_name))) {
3233 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3238 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3240 if (!hdev_is_powered(hdev)) {
3241 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3243 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3248 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3249 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3250 ext_info_changed(hdev, sk);
3255 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3261 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3263 hci_req_init(&req, hdev);
3265 if (lmp_bredr_capable(hdev)) {
3266 __hci_req_update_name(&req);
3267 __hci_req_update_eir(&req);
3270 /* The name is stored in the scan response data and so
3271 * no need to udpate the advertising data here.
3273 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3274 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3276 err = hci_req_run(&req, set_name_complete);
3278 mgmt_pending_remove(cmd);
3281 hci_dev_unlock(hdev);
/* MGMT Set Appearance command handler (LE-only).
 *
 * Stores the new appearance value; if it changed and LE advertising is
 * active, expires instances carrying the appearance so they get refreshed,
 * and notifies extended-info listeners. Always replies success after the
 * LE-capability check.
 *
 * NOTE(review): the local variable is (mis)spelled "apperance" — its
 * declaration is outside this view, so the name is kept as-is here.
 */
3285 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3288 struct mgmt_cp_set_appearance *cp = data;
3294 if (!lmp_le_capable(hdev))
3295 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3296 MGMT_STATUS_NOT_SUPPORTED);
3298 apperance = le16_to_cpu(cp->appearance);
3302 if (hdev->appearance != apperance) {
3303 hdev->appearance = apperance;
3305 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3306 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3308 ext_info_changed(hdev, sk);
3311 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3314 hci_dev_unlock(hdev);
/* MGMT Get PHY Configuration command handler: reports supported, selected
 * and configurable PHYs as little-endian bitmasks.
 *
 * Note: "confguration" in the struct name is the spelling used by the
 * mgmt.h UAPI header; do not "fix" it here.
 */
3319 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3320 void *data, u16 len)
3322 struct mgmt_rp_get_phy_confguration rp;
3324 BT_DBG("sock %p %s", sk, hdev->name);
3328 memset(&rp, 0, sizeof(rp));
3330 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3331 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3332 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3334 hci_dev_unlock(hdev);
3336 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event carrying the currently
 * selected PHYs to all mgmt listeners except @skip.
 */
3340 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3342 struct mgmt_ev_phy_configuration_changed ev;
3344 memset(&ev, 0, sizeof(ev));
3346 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3348 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* HCI request completion handler for LE Set Default PHY.
 *
 * Resolves the pending MGMT_OP_SET_PHY_CONFIGURATION command: a status on
 * HCI failure, otherwise a command-complete followed by a PHY
 * Configuration Changed broadcast (skipping the requesting socket).
 */
3352 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3353 u16 opcode, struct sk_buff *skb)
3355 struct mgmt_pending_cmd *cmd;
3357 BT_DBG("status 0x%02x", status);
3361 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3366 mgmt_cmd_status(cmd->sk, hdev->id,
3367 MGMT_OP_SET_PHY_CONFIGURATION,
3368 mgmt_status(status));
3370 mgmt_cmd_complete(cmd->sk, hdev->id,
3371 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3374 mgmt_phy_configuration_changed(hdev, cmd->sk);
3377 mgmt_pending_remove(cmd);
3380 hci_dev_unlock(hdev);
/* MGMT Set PHY Configuration command handler.
 *
 * Validates that the selected PHYs are a subset of the supported ones and
 * that all non-configurable (always-on) PHYs remain selected. BR/EDR PHY
 * bits are translated into the ACL packet-type mask (note the inverted
 * polarity for EDR bits: a cleared HCI_2DHx/HCI_3DHx bit ENABLES that
 * packet type). If only BR/EDR bits changed, the result is reported
 * directly; if LE bits changed, an HCI LE Set Default PHY request is
 * issued and completed asynchronously via set_default_phy_complete().
 */
3383 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3384 void *data, u16 len)
3386 struct mgmt_cp_set_phy_confguration *cp = data;
3387 struct hci_cp_le_set_default_phy cp_phy;
3388 struct mgmt_pending_cmd *cmd;
3389 struct hci_request req;
3390 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3391 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3392 bool changed = false;
3395 BT_DBG("sock %p %s", sk, hdev->name);
3397 configurable_phys = get_configurable_phys(hdev);
3398 supported_phys = get_supported_phys(hdev);
3399 selected_phys = __le32_to_cpu(cp->selected_phys);
3401 if (selected_phys & ~supported_phys)
3402 return mgmt_cmd_status(sk, hdev->id,
3403 MGMT_OP_SET_PHY_CONFIGURATION,
3404 MGMT_STATUS_INVALID_PARAMS);
3406 unconfigure_phys = supported_phys & ~configurable_phys;
/* All non-configurable PHYs must stay selected. */
3408 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3409 return mgmt_cmd_status(sk, hdev->id,
3410 MGMT_OP_SET_PHY_CONFIGURATION,
3411 MGMT_STATUS_INVALID_PARAMS);
3413 if (selected_phys == get_selected_phys(hdev))
3414 return mgmt_cmd_complete(sk, hdev->id,
3415 MGMT_OP_SET_PHY_CONFIGURATION,
3420 if (!hdev_is_powered(hdev)) {
3421 err = mgmt_cmd_status(sk, hdev->id,
3422 MGMT_OP_SET_PHY_CONFIGURATION,
3423 MGMT_STATUS_REJECTED);
3427 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3428 err = mgmt_cmd_status(sk, hdev->id,
3429 MGMT_OP_SET_PHY_CONFIGURATION,
/* Build the BR/EDR ACL packet-type mask from the basic-rate slot bits. */
3434 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3435 pkt_type |= (HCI_DH3 | HCI_DM3);
3437 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3439 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3440 pkt_type |= (HCI_DH5 | HCI_DM5);
3442 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* EDR bits are "shall not be used" flags: clearing enables the PHY. */
3444 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3445 pkt_type &= ~HCI_2DH1;
3447 pkt_type |= HCI_2DH1;
3449 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3450 pkt_type &= ~HCI_2DH3;
3452 pkt_type |= HCI_2DH3;
3454 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3455 pkt_type &= ~HCI_2DH5;
3457 pkt_type |= HCI_2DH5;
3459 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3460 pkt_type &= ~HCI_3DH1;
3462 pkt_type |= HCI_3DH1;
3464 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3465 pkt_type &= ~HCI_3DH3;
3467 pkt_type |= HCI_3DH3;
3469 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3470 pkt_type &= ~HCI_3DH5;
3472 pkt_type |= HCI_3DH5;
3474 if (pkt_type != hdev->pkt_type) {
3475 hdev->pkt_type = pkt_type;
/* No LE PHY change requested: answer now, no HCI round-trip needed. */
3479 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3480 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3482 mgmt_phy_configuration_changed(hdev, sk);
3484 err = mgmt_cmd_complete(sk, hdev->id,
3485 MGMT_OP_SET_PHY_CONFIGURATION,
3491 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3498 hci_req_init(&req, hdev);
3500 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no TX/RX preference". */
3502 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3503 cp_phy.all_phys |= 0x01;
3505 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3506 cp_phy.all_phys |= 0x02;
3508 if (selected_phys & MGMT_PHY_LE_1M_TX)
3509 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3511 if (selected_phys & MGMT_PHY_LE_2M_TX)
3512 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3514 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3515 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3517 if (selected_phys & MGMT_PHY_LE_1M_RX)
3518 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3520 if (selected_phys & MGMT_PHY_LE_2M_RX)
3521 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3523 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3524 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3526 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3528 err = hci_req_run_skb(&req, set_default_phy_complete);
3530 mgmt_pending_remove(cmd);
3533 hci_dev_unlock(hdev);
/* HCI completion handler for Read Local OOB (Extended) Data.
 *
 * Validates the returned skb length against the expected response struct,
 * then copies hash/randomizer values into the mgmt reply. For the legacy
 * (non-extended) command only the P-192 values are present, so the reply
 * is truncated to exclude the P-256 fields. Finishes with either a status
 * (on failure) or a command-complete, and removes the pending command.
 */
3538 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3539 u16 opcode, struct sk_buff *skb)
3541 struct mgmt_rp_read_local_oob_data mgmt_rp;
3542 size_t rp_size = sizeof(mgmt_rp);
3543 struct mgmt_pending_cmd *cmd;
3545 BT_DBG("%s status %u", hdev->name, status);
3547 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3551 if (status || !skb) {
3552 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3553 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3557 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
3559 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3560 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
3562 if (skb->len < sizeof(*rp)) {
3563 mgmt_cmd_status(cmd->sk, hdev->id,
3564 MGMT_OP_READ_LOCAL_OOB_DATA,
3565 MGMT_STATUS_FAILED);
3569 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3570 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Legacy command: no P-256 data, shrink the reply accordingly. */
3572 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3574 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3576 if (skb->len < sizeof(*rp)) {
3577 mgmt_cmd_status(cmd->sk, hdev->id,
3578 MGMT_OP_READ_LOCAL_OOB_DATA,
3579 MGMT_STATUS_FAILED);
3583 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3584 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3586 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3587 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3590 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3591 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3594 mgmt_pending_remove(cmd);
/* MGMT Read Local OOB Data command handler.
 *
 * Requires a powered, SSP-capable adapter and no duplicate pending
 * request. Issues the extended HCI command when BR/EDR Secure Connections
 * is enabled (to obtain P-256 values as well), the legacy command
 * otherwise; the reply is assembled in read_local_oob_data_complete().
 */
3597 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3598 void *data, u16 data_len)
3600 struct mgmt_pending_cmd *cmd;
3601 struct hci_request req;
3604 BT_DBG("%s", hdev->name);
3608 if (!hdev_is_powered(hdev)) {
3609 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3610 MGMT_STATUS_NOT_POWERED);
3614 if (!lmp_ssp_capable(hdev)) {
3615 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3616 MGMT_STATUS_NOT_SUPPORTED);
3620 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3621 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3626 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3632 hci_req_init(&req, hdev);
3634 if (bredr_sc_enabled(hdev))
3635 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3637 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3639 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3641 mgmt_pending_remove(cmd);
3644 hci_dev_unlock(hdev);
/* MGMT Add Remote OOB Data command handler.
 *
 * Distinguishes the legacy (P-192 only, BR/EDR only) and extended
 * (P-192 + P-256) command formats by the payload length. For LE in the
 * extended format, the P-192 values must be all-zero (legacy SMP OOB is
 * not implemented). Zero-valued hash/randomizer pairs disable OOB data for
 * the corresponding curve. The parsed values are stored via
 * hci_add_remote_oob_data().
 */
3648 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3649 void *data, u16 len)
3651 struct mgmt_addr_info *addr = data;
3654 BT_DBG("%s ", hdev->name);
3656 if (!bdaddr_type_is_valid(addr->type))
3657 return mgmt_cmd_complete(sk, hdev->id,
3658 MGMT_OP_ADD_REMOTE_OOB_DATA,
3659 MGMT_STATUS_INVALID_PARAMS,
3660 addr, sizeof(*addr));
3664 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3665 struct mgmt_cp_add_remote_oob_data *cp = data;
/* Legacy format carries P-192 only and is restricted to BR/EDR. */
3668 if (cp->addr.type != BDADDR_BREDR) {
3669 err = mgmt_cmd_complete(sk, hdev->id,
3670 MGMT_OP_ADD_REMOTE_OOB_DATA,
3671 MGMT_STATUS_INVALID_PARAMS,
3672 &cp->addr, sizeof(cp->addr));
3676 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3677 cp->addr.type, cp->hash,
3678 cp->rand, NULL, NULL);
3680 status = MGMT_STATUS_FAILED;
3682 status = MGMT_STATUS_SUCCESS;
3684 err = mgmt_cmd_complete(sk, hdev->id,
3685 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3686 &cp->addr, sizeof(cp->addr));
3687 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3688 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3689 u8 *rand192, *hash192, *rand256, *hash256;
3692 if (bdaddr_type_is_le(cp->addr.type)) {
3693 /* Enforce zero-valued 192-bit parameters as
3694 * long as legacy SMP OOB isn't implemented.
3696 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3697 memcmp(cp->hash192, ZERO_KEY, 16)) {
3698 err = mgmt_cmd_complete(sk, hdev->id,
3699 MGMT_OP_ADD_REMOTE_OOB_DATA,
3700 MGMT_STATUS_INVALID_PARAMS,
3701 addr, sizeof(*addr));
3708 /* In case one of the P-192 values is set to zero,
3709 * then just disable OOB data for P-192.
3711 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3712 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3716 rand192 = cp->rand192;
3717 hash192 = cp->hash192;
3721 /* In case one of the P-256 values is set to zero, then just
3722 * disable OOB data for P-256.
3724 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3725 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3729 rand256 = cp->rand256;
3730 hash256 = cp->hash256;
3733 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3734 cp->addr.type, hash192, rand192,
3737 status = MGMT_STATUS_FAILED;
3739 status = MGMT_STATUS_SUCCESS;
3741 err = mgmt_cmd_complete(sk, hdev->id,
3742 MGMT_OP_ADD_REMOTE_OOB_DATA,
3743 status, &cp->addr, sizeof(cp->addr));
3745 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
3747 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3748 MGMT_STATUS_INVALID_PARAMS);
3752 hci_dev_unlock(hdev);
/* MGMT Remove Remote OOB Data command handler (BR/EDR only).
 *
 * BDADDR_ANY clears all stored remote OOB data; otherwise removes the
 * entry for the given address, reporting INVALID_PARAMS when no entry
 * existed.
 */
3756 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3757 void *data, u16 len)
3759 struct mgmt_cp_remove_remote_oob_data *cp = data;
3763 BT_DBG("%s", hdev->name);
3765 if (cp->addr.type != BDADDR_BREDR)
3766 return mgmt_cmd_complete(sk, hdev->id,
3767 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3768 MGMT_STATUS_INVALID_PARAMS,
3769 &cp->addr, sizeof(cp->addr));
3773 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3774 hci_remote_oob_data_clear(hdev);
3775 status = MGMT_STATUS_SUCCESS;
3779 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3781 status = MGMT_STATUS_INVALID_PARAMS;
3783 status = MGMT_STATUS_SUCCESS;
3786 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3787 status, &cp->addr, sizeof(cp->addr));
3789 hci_dev_unlock(hdev);
/* Complete whichever start-discovery variant is pending (regular, service,
 * or limited) with the mapped HCI status and remove it from the pending
 * list. Called from the discovery state machine.
 */
3793 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3795 struct mgmt_pending_cmd *cmd;
3797 BT_DBG("status %d", status);
3801 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3803 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3806 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3809 cmd->cmd_complete(cmd, mgmt_status(status));
3810 mgmt_pending_remove(cmd);
3813 hci_dev_unlock(hdev);
/* Check whether the given discovery type is usable on this adapter.
 *
 * On failure, *mgmt_status is set to the status to report (LE or BR/EDR
 * not supported, or INVALID_PARAMS for unknown types). INTERLEAVED
 * requires LE support and falls through into the BR/EDR check as well.
 */
3816 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3817 uint8_t *mgmt_status)
3820 case DISCOV_TYPE_LE:
3821 *mgmt_status = mgmt_le_support(hdev);
3825 case DISCOV_TYPE_INTERLEAVED:
3826 *mgmt_status = mgmt_le_support(hdev);
3829 /* Intentional fall-through */
3830 case DISCOV_TYPE_BREDR:
3831 *mgmt_status = mgmt_bredr_support(hdev);
3836 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* Shared implementation for Start Discovery and Start Limited Discovery.
 *
 * Rejects when the adapter is off, discovery is already running (or
 * periodic inquiry is active), or the type is unsupported. Resets the
 * discovery filter, records the type and limited-mode flag, registers the
 * pending command and kicks the discovery update work item; completion is
 * reported via mgmt_start_discovery_complete().
 */
3843 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3844 u16 op, void *data, u16 len)
3846 struct mgmt_cp_start_discovery *cp = data;
3847 struct mgmt_pending_cmd *cmd;
3851 BT_DBG("%s", hdev->name);
3855 if (!hdev_is_powered(hdev)) {
3856 err = mgmt_cmd_complete(sk, hdev->id, op,
3857 MGMT_STATUS_NOT_POWERED,
3858 &cp->type, sizeof(cp->type));
3862 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3863 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3864 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3865 &cp->type, sizeof(cp->type));
3869 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3870 err = mgmt_cmd_complete(sk, hdev->id, op, status,
3871 &cp->type, sizeof(cp->type));
3875 /* Clear the discovery filter first to free any previously
3876 * allocated memory for the UUID list.
3878 hci_discovery_filter_clear(hdev);
3880 hdev->discovery.type = cp->type;
3881 hdev->discovery.report_invalid_rssi = false;
3882 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3883 hdev->discovery.limited = true;
3885 hdev->discovery.limited = false;
3887 cmd = mgmt_pending_add(sk, op, hdev, data, len);
3893 cmd->cmd_complete = generic_cmd_complete;
3895 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3896 queue_work(hdev->req_workqueue, &hdev->discov_update);
3900 hci_dev_unlock(hdev);
/* MGMT Start Discovery: thin wrapper around start_discovery_internal(). */
3904 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3905 void *data, u16 len)
3907 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT Start Limited Discovery: wrapper around start_discovery_internal()
 * with the limited-discovery opcode (sets hdev->discovery.limited there).
 */
3911 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
3912 void *data, u16 len)
3914 return start_discovery_internal(sk, hdev,
3915 MGMT_OP_START_LIMITED_DISCOVERY,
/* Completion callback for Start Service Discovery: replies with the
 * command's own opcode/parameters and the given status.
 */
3919 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
3922 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
/* MGMT Start Service Discovery command handler.
 *
 * Like start_discovery_internal() but additionally installs a result
 * filter: an RSSI threshold and an optional UUID list. The UUID count is
 * bounded so that sizeof(*cp) + 16 * uuid_count cannot overflow u16, and
 * the total command length must match exactly. The UUID list is duplicated
 * into hdev->discovery.uuids (freed later by hci_discovery_filter_clear).
 */
3926 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3927 void *data, u16 len)
3929 struct mgmt_cp_start_service_discovery *cp = data;
3930 struct mgmt_pending_cmd *cmd;
3931 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3932 u16 uuid_count, expected_len;
3936 BT_DBG("%s", hdev->name);
3940 if (!hdev_is_powered(hdev)) {
3941 err = mgmt_cmd_complete(sk, hdev->id,
3942 MGMT_OP_START_SERVICE_DISCOVERY,
3943 MGMT_STATUS_NOT_POWERED,
3944 &cp->type, sizeof(cp->type));
3948 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3949 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3950 err = mgmt_cmd_complete(sk, hdev->id,
3951 MGMT_OP_START_SERVICE_DISCOVERY,
3952 MGMT_STATUS_BUSY, &cp->type,
3957 uuid_count = __le16_to_cpu(cp->uuid_count);
3958 if (uuid_count > max_uuid_count) {
3959 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
3961 err = mgmt_cmd_complete(sk, hdev->id,
3962 MGMT_OP_START_SERVICE_DISCOVERY,
3963 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3968 expected_len = sizeof(*cp) + uuid_count * 16;
3969 if (expected_len != len) {
3970 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
3972 err = mgmt_cmd_complete(sk, hdev->id,
3973 MGMT_OP_START_SERVICE_DISCOVERY,
3974 MGMT_STATUS_INVALID_PARAMS, &cp->type,
3979 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3980 err = mgmt_cmd_complete(sk, hdev->id,
3981 MGMT_OP_START_SERVICE_DISCOVERY,
3982 status, &cp->type, sizeof(cp->type));
3986 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
3993 cmd->cmd_complete = service_discovery_cmd_complete;
3995 /* Clear the discovery filter first to free any previously
3996 * allocated memory for the UUID list.
3998 hci_discovery_filter_clear(hdev);
4000 hdev->discovery.result_filtering = true;
4001 hdev->discovery.type = cp->type;
4002 hdev->discovery.rssi = cp->rssi;
4003 hdev->discovery.uuid_count = uuid_count;
4005 if (uuid_count > 0) {
4006 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4008 if (!hdev->discovery.uuids) {
4009 err = mgmt_cmd_complete(sk, hdev->id,
4010 MGMT_OP_START_SERVICE_DISCOVERY,
4012 &cp->type, sizeof(cp->type));
4013 mgmt_pending_remove(cmd);
4018 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4019 queue_work(hdev->req_workqueue, &hdev->discov_update);
4023 hci_dev_unlock(hdev);
/* Complete a pending Stop Discovery command with the mapped HCI status and
 * remove it from the pending list. Called from the discovery state
 * machine.
 */
4027 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4029 struct mgmt_pending_cmd *cmd;
4031 BT_DBG("status %d", status);
4035 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4037 cmd->cmd_complete(cmd, mgmt_status(status));
4038 mgmt_pending_remove(cmd);
4041 hci_dev_unlock(hdev);
/* MGMT Stop Discovery command handler.
 *
 * Rejects when no discovery is active or when the requested type does not
 * match the one currently running. Otherwise registers a pending command,
 * moves the discovery state machine to STOPPING and schedules the
 * discovery update work; mgmt_stop_discovery_complete() finishes the
 * command.
 */
4044 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4047 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4048 struct mgmt_pending_cmd *cmd;
4051 BT_DBG("%s", hdev->name);
4055 if (!hci_discovery_active(hdev)) {
4056 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4057 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4058 sizeof(mgmt_cp->type));
4062 if (hdev->discovery.type != mgmt_cp->type) {
4063 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4064 MGMT_STATUS_INVALID_PARAMS,
4065 &mgmt_cp->type, sizeof(mgmt_cp->type));
4069 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4075 cmd->cmd_complete = generic_cmd_complete;
4077 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4078 queue_work(hdev->req_workqueue, &hdev->discov_update);
4082 hci_dev_unlock(hdev);
/* MGMT Confirm Name command handler.
 *
 * Only valid while discovery is active. Looks the address up in the
 * inquiry cache (name-unknown entries) and either marks the name as known
 * or as needed (triggering a remote name resolve via
 * hci_inquiry_cache_update_resolve).
 */
4086 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4089 struct mgmt_cp_confirm_name *cp = data;
4090 struct inquiry_entry *e;
4093 BT_DBG("%s", hdev->name);
4097 if (!hci_discovery_active(hdev)) {
4098 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4099 MGMT_STATUS_FAILED, &cp->addr,
4104 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4106 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4107 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4112 if (cp->name_known) {
4113 e->name_state = NAME_KNOWN;
4116 e->name_state = NAME_NEEDED;
4117 hci_inquiry_cache_update_resolve(hdev, e);
4120 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4121 &cp->addr, sizeof(cp->addr));
4124 hci_dev_unlock(hdev);
/* MGMT Block Device command handler: adds the address to the adapter
 * blacklist and, on success, emits a Device Blocked event to other
 * listeners before replying.
 */
4128 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4131 struct mgmt_cp_block_device *cp = data;
4135 BT_DBG("%s", hdev->name);
4137 if (!bdaddr_type_is_valid(cp->addr.type))
4138 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4139 MGMT_STATUS_INVALID_PARAMS,
4140 &cp->addr, sizeof(cp->addr));
4144 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4147 status = MGMT_STATUS_FAILED;
4151 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4153 status = MGMT_STATUS_SUCCESS;
4156 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4157 &cp->addr, sizeof(cp->addr));
4159 hci_dev_unlock(hdev);
/* MGMT Unblock Device command handler: removes the address from the
 * adapter blacklist (INVALID_PARAMS when it was not listed) and emits a
 * Device Unblocked event on success.
 */
4164 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4167 struct mgmt_cp_unblock_device *cp = data;
4171 BT_DBG("%s", hdev->name);
4173 if (!bdaddr_type_is_valid(cp->addr.type))
4174 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4175 MGMT_STATUS_INVALID_PARAMS,
4176 &cp->addr, sizeof(cp->addr));
4180 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4183 status = MGMT_STATUS_INVALID_PARAMS;
4187 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4189 status = MGMT_STATUS_SUCCESS;
4192 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4193 &cp->addr, sizeof(cp->addr));
4195 hci_dev_unlock(hdev);
/* MGMT Set Device ID command handler.
 *
 * Validates the Device ID source (0x0000-0x0002 per the DI profile),
 * stores source/vendor/product/version on the adapter, replies, and queues
 * an EIR update so the new Device ID record is advertised.
 */
4200 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4203 struct mgmt_cp_set_device_id *cp = data;
4204 struct hci_request req;
4208 BT_DBG("%s", hdev->name);
4210 source = __le16_to_cpu(cp->source);
4212 if (source > 0x0002)
4213 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4214 MGMT_STATUS_INVALID_PARAMS);
4218 hdev->devid_source = source;
4219 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4220 hdev->devid_product = __le16_to_cpu(cp->product);
4221 hdev->devid_version = __le16_to_cpu(cp->version);
4223 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
4226 hci_req_init(&req, hdev);
4227 __hci_req_update_eir(&req);
4228 hci_req_run(&req, NULL);
4230 hci_dev_unlock(hdev);
/* Completion callback for re-enabling instance advertising: only logs the
 * status (no pending mgmt command to resolve here).
 */
4235 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4238 BT_DBG("status %d", status);
/* HCI request completion handler for Set Advertising.
 *
 * On failure, answers all pending SET_ADVERTISING commands with the mapped
 * error status. On success, synchronizes the HCI_ADVERTISING mgmt flag
 * with the controller state, answers the pending commands with the new
 * settings and broadcasts New Settings. If the global advertising switch
 * was just turned off while advertising instances exist, the current (or
 * first) instance is rescheduled to restore multi-instance advertising.
 */
4241 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4244 struct cmd_lookup match = { NULL, hdev };
4245 struct hci_request req;
4247 struct adv_info *adv_instance;
4253 u8 mgmt_err = mgmt_status(status);
4255 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4256 cmd_status_rsp, &mgmt_err);
4260 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4261 hci_dev_set_flag(hdev, HCI_ADVERTISING);
4263 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4265 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4268 new_settings(hdev, match.sk);
4273 /* If "Set Advertising" was just disabled and instance advertising was
4274 * set up earlier, then re-enable multi-instance advertising.
4276 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4277 list_empty(&hdev->adv_instances))
4280 instance = hdev->cur_adv_instance;
4282 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
4283 struct adv_info, list);
4287 instance = adv_instance->instance;
4290 hci_req_init(&req, hdev);
4292 err = __hci_req_schedule_adv_instance(&req, instance, true);
4295 err = hci_req_run(&req, enable_advertising_instance);
4298 bt_dev_err(hdev, "failed to re-configure advertising");
4301 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_ADVERTISING. cp->val: 0x00 = off, 0x01 = on,
 * 0x02 = on + connectable. If the controller is unpowered, the setting
 * already matches, an LE connection exists, or an active LE scan is
 * running, only the flags are toggled and a response is sent without HCI
 * traffic; otherwise the advertising state is (re)programmed via an HCI
 * request. NOTE(review): excerpt — locking and some braces are elided.
 */
4304 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4307 	struct mgmt_mode *cp = data;
4308 	struct mgmt_pending_cmd *cmd;
4309 	struct hci_request req;
4313 	BT_DBG("request for %s", hdev->name);
4315 	status = mgmt_le_support(hdev);
4317 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4320 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4321 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4322 				       MGMT_STATUS_INVALID_PARAMS);
4328 	/* The following conditions are ones which mean that we should
4329 	 * not do any HCI communication but directly send a mgmt
4330 	 * response to user space (after toggling the flag if
4333 	if (!hdev_is_powered(hdev) ||
4334 	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4335 	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4336 	    hci_conn_num(hdev, LE_LINK) > 0 ||
4337 	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4338 	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Software-only toggle: instance 0 is the Set Advertising instance. */
4342 			hdev->cur_adv_instance = 0x00;
4343 			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4344 			if (cp->val == 0x02)
4345 				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4347 				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4349 			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4350 			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4353 		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4358 			err = new_settings(hdev, sk);
/* Serialize with other advertising/LE state changes already in flight. */
4363 	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4364 	    pending_find(MGMT_OP_SET_LE, hdev)) {
4365 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4370 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4376 	hci_req_init(&req, hdev);
4378 	if (cp->val == 0x02)
4379 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4381 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4383 	cancel_adv_timeout(hdev);
4386 		/* Switch to instance "0" for the Set Advertising setting.
4387 		 * We cannot use update_[adv|scan_rsp]_data() here as the
4388 		 * HCI_ADVERTISING flag is not yet set.
4390 		hdev->cur_adv_instance = 0x00;
/* Extended advertising capable controllers use the ext-adv request path. */
4392 		if (ext_adv_capable(hdev)) {
4393 			__hci_req_start_ext_adv(&req, 0x00);
4395 			__hci_req_update_adv_data(&req, 0x00);
4396 			__hci_req_update_scan_rsp_data(&req, 0x00);
4397 			__hci_req_enable_advertising(&req);
4400 		__hci_req_disable_advertising(&req);
4403 	err = hci_req_run(&req, set_advertising_complete);
4405 		mgmt_pending_remove(cmd);
4408 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: only allowed while powered off on an
 * LE-capable controller. A non-zero address must be a valid static random
 * address (not BDADDR_NONE, top two bits set). On success the address is
 * stored and New Settings is emitted.
 * NOTE(review): excerpt — hci_dev_lock/return lines are not shown.
 */
4412 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4413 			      void *data, u16 len)
4415 	struct mgmt_cp_set_static_address *cp = data;
4418 	BT_DBG("%s", hdev->name);
4420 	if (!lmp_le_capable(hdev))
4421 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4422 				       MGMT_STATUS_NOT_SUPPORTED);
4424 	if (hdev_is_powered(hdev))
4425 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4426 				       MGMT_STATUS_REJECTED);
4428 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4429 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4430 			return mgmt_cmd_status(sk, hdev->id,
4431 					       MGMT_OP_SET_STATIC_ADDRESS,
4432 					       MGMT_STATUS_INVALID_PARAMS);
4434 		/* Two most significant bits shall be set */
4435 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4436 			return mgmt_cmd_status(sk, hdev->id,
4437 					       MGMT_OP_SET_STATIC_ADDRESS,
4438 					       MGMT_STATUS_INVALID_PARAMS);
4443 	bacpy(&hdev->static_addr, &cp->bdaddr);
4445 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4449 	err = new_settings(hdev, sk);
4452 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: validate LE scan interval/window
 * (0x0004..0x4000, window <= interval), store them, and if a passive
 * background scan is currently running restart it so the new parameters
 * take effect. NOTE(review): excerpt — lock/return lines are not shown.
 */
4456 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4457 			   void *data, u16 len)
4459 	struct mgmt_cp_set_scan_params *cp = data;
4460 	__u16 interval, window;
4463 	BT_DBG("%s", hdev->name);
4465 	if (!lmp_le_capable(hdev))
4466 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4467 				       MGMT_STATUS_NOT_SUPPORTED);
4469 	interval = __le16_to_cpu(cp->interval);
4471 	if (interval < 0x0004 || interval > 0x4000)
4472 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4473 				       MGMT_STATUS_INVALID_PARAMS);
4475 	window = __le16_to_cpu(cp->window);
4477 	if (window < 0x0004 || window > 0x4000)
4478 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4479 				       MGMT_STATUS_INVALID_PARAMS);
/* The scan window must fit inside the scan interval. */
4481 	if (window > interval)
4482 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4483 				       MGMT_STATUS_INVALID_PARAMS);
4487 	hdev->le_scan_interval = interval;
4488 	hdev->le_scan_window = window;
4490 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4493 	/* If background scan is running, restart it so new parameters are
4496 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4497 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4498 		struct hci_request req;
4500 		hci_req_init(&req, hdev);
4502 		hci_req_add_le_scan_disable(&req);
4503 		hci_req_add_le_passive_scan(&req);
4505 		hci_req_run(&req, NULL);
4508 	hci_dev_unlock(hdev);
/* Completion handler for MGMT_OP_SET_FAST_CONNECTABLE: on error send a
 * command status; on success sync HCI_FAST_CONNECTABLE with the requested
 * value, answer with the current settings and emit New Settings.
 */
4513 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4516 	struct mgmt_pending_cmd *cmd;
4518 	BT_DBG("status 0x%02x", status);
4522 	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4527 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4528 				mgmt_status(status));
4530 		struct mgmt_mode *cp = cmd->param;
4533 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4535 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4537 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4538 		new_settings(hdev, cmd->sk);
4541 	mgmt_pending_remove(cmd);
4544 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: requires BR/EDR enabled and HCI
 * version >= 1.2. If unpowered, just toggle the flag; otherwise queue the
 * page-scan parameter change and complete asynchronously in
 * fast_connectable_complete().
 * NOTE(review): excerpt — lock and some goto/return lines are not shown.
 */
4547 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4548 				void *data, u16 len)
4550 	struct mgmt_mode *cp = data;
4551 	struct mgmt_pending_cmd *cmd;
4552 	struct hci_request req;
4555 	BT_DBG("%s", hdev->name);
4557 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4558 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
4559 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4560 				       MGMT_STATUS_NOT_SUPPORTED);
4562 	if (cp->val != 0x00 && cp->val != 0x01)
4563 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4564 				       MGMT_STATUS_INVALID_PARAMS);
4568 	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4569 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Requested state already active: answer without touching the controller. */
4574 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4575 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4580 	if (!hdev_is_powered(hdev)) {
4581 		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4582 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4584 		new_settings(hdev, sk);
4588 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4595 	hci_req_init(&req, hdev);
4597 	__hci_req_write_fast_connectable(&req, cp->val);
4599 	err = hci_req_run(&req, fast_connectable_complete);
4601 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4602 				      MGMT_STATUS_FAILED);
4603 		mgmt_pending_remove(cmd);
4607 	hci_dev_unlock(hdev);
/* Completion handler for MGMT_OP_SET_BREDR: on failure, roll back the
 * HCI_BREDR_ENABLED flag that set_bredr() optimistically set and report
 * the error; on success answer with current settings and emit New
 * Settings.
 */
4612 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4614 	struct mgmt_pending_cmd *cmd;
4616 	BT_DBG("status 0x%02x", status);
4620 	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4625 		u8 mgmt_err = mgmt_status(status);
4627 		/* We need to restore the flag if related HCI commands
4630 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4632 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4634 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4635 		new_settings(hdev, cmd->sk);
4638 	mgmt_pending_remove(cmd);
4641 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR on a dual-mode (BR/EDR + LE) controller.
 * Requires LE to be enabled. Unpowered: just toggle flags (clearing the
 * BR/EDR-only flags when disabling). Powered: disabling is rejected, and
 * re-enabling is rejected when a static address or secure connections is
 * in use; otherwise the flag is set optimistically and scan/advertising
 * data is reprogrammed, completing in set_bredr_complete().
 * NOTE(review): excerpt — lock and several branch/brace lines are elided.
 */
4644 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4646 	struct mgmt_mode *cp = data;
4647 	struct mgmt_pending_cmd *cmd;
4648 	struct hci_request req;
4651 	BT_DBG("request for %s", hdev->name);
4653 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4654 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4655 				       MGMT_STATUS_NOT_SUPPORTED);
4657 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4658 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4659 				       MGMT_STATUS_REJECTED);
4661 	if (cp->val != 0x00 && cp->val != 0x01)
4662 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4663 				       MGMT_STATUS_INVALID_PARAMS);
4667 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4668 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4672 	if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR also clears every BR/EDR-only setting. */
4674 			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4675 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4676 			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4677 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4678 			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4681 		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4683 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4687 		err = new_settings(hdev, sk);
4691 	/* Reject disabling when powered on */
4693 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4694 				      MGMT_STATUS_REJECTED);
4697 	/* When configuring a dual-mode controller to operate
4698 	 * with LE only and using a static address, then switching
4699 	 * BR/EDR back on is not allowed.
4701 	 * Dual-mode controllers shall operate with the public
4702 	 * address as its identity address for BR/EDR and LE. So
4703 	 * reject the attempt to create an invalid configuration.
4705 	 * The same restrictions applies when secure connections
4706 	 * has been enabled. For BR/EDR this is a controller feature
4707 	 * while for LE it is a host stack feature. This means that
4708 	 * switching BR/EDR back on when secure connections has been
4709 	 * enabled is not a supported transaction.
4711 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4712 	    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4713 	     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4714 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4715 				      MGMT_STATUS_REJECTED);
4720 	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4721 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4726 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4732 	/* We need to flip the bit already here so that
4733 	 * hci_req_update_adv_data generates the correct flags.
4735 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4737 	hci_req_init(&req, hdev);
4739 	__hci_req_write_fast_connectable(&req, false);
4740 	__hci_req_update_scan(&req);
4742 	/* Since only the advertising data flags will change, there
4743 	 * is no need to update the scan response data.
4745 	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);
4747 	err = hci_req_run(&req, set_bredr_complete);
4749 		mgmt_pending_remove(cmd);
4752 	hci_dev_unlock(hdev);
/* Completion handler for MGMT_OP_SET_SECURE_CONN: on failure report the
 * error; otherwise set HCI_SC_ENABLED/HCI_SC_ONLY according to the
 * requested value (off / SC enabled / SC-only mode) and emit New
 * Settings. NOTE(review): the switch/case lines selecting between the
 * three flag combinations are elided in this excerpt.
 */
4756 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4758 	struct mgmt_pending_cmd *cmd;
4759 	struct mgmt_mode *cp;
4761 	BT_DBG("%s status %u", hdev->name, status);
4765 	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4770 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4771 				mgmt_status(status));
/* val 0x00: secure connections fully disabled. */
4779 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4780 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x01: SC enabled, mixed mode. */
4783 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4784 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x02: SC-only mode. */
4787 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4788 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
4792 	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4793 	new_settings(hdev, cmd->sk);
4796 	mgmt_pending_remove(cmd);
4798 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SECURE_CONN. cp->val: 0x00 = off, 0x01 = on,
 * 0x02 = SC-only. If the controller is unpowered, not SC capable, or
 * BR/EDR is disabled, only the host flags are toggled; otherwise
 * HCI_OP_WRITE_SC_SUPPORT is sent and the flags are finalized in
 * sc_enable_complete(). NOTE(review): excerpt — lock and some branch
 * lines are not shown.
 */
4801 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4802 			   void *data, u16 len)
4804 	struct mgmt_mode *cp = data;
4805 	struct mgmt_pending_cmd *cmd;
4806 	struct hci_request req;
4810 	BT_DBG("request for %s", hdev->name);
4812 	if (!lmp_sc_capable(hdev) &&
4813 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4814 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4815 				       MGMT_STATUS_NOT_SUPPORTED);
/* SC over BR/EDR requires SSP to be enabled first. */
4817 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4818 	    lmp_sc_capable(hdev) &&
4819 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4820 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4821 				       MGMT_STATUS_REJECTED);
4823 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4824 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4825 				       MGMT_STATUS_INVALID_PARAMS);
4829 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4830 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4834 			changed = !hci_dev_test_and_set_flag(hdev,
4836 			if (cp->val == 0x02)
4837 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
4839 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4841 			changed = hci_dev_test_and_clear_flag(hdev,
4843 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4846 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4851 			err = new_settings(hdev, sk);
4856 	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4857 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Requested state already matches: answer without HCI traffic. */
4864 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4865 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4866 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4870 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4876 	hci_req_init(&req, hdev);
4877 	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4878 	err = hci_req_run(&req, sc_enable_complete);
4880 		mgmt_pending_remove(cmd);
4885 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEBUG_KEYS. cp->val: 0x00 = discard debug keys,
 * 0x01 = keep them, 0x02 = keep and actively use debug keys. When the
 * "use" state changes on a powered, SSP-enabled controller, SSP debug
 * mode is toggled via HCI_OP_WRITE_SSP_DEBUG_MODE.
 * NOTE(review): excerpt — lock and the keep/clear branch lines are
 * partially elided.
 */
4889 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4890 			  void *data, u16 len)
4892 	struct mgmt_mode *cp = data;
4893 	bool changed, use_changed;
4896 	BT_DBG("request for %s", hdev->name);
4898 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4899 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4900 				       MGMT_STATUS_INVALID_PARAMS);
4905 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4907 		changed = hci_dev_test_and_clear_flag(hdev,
4908 						      HCI_KEEP_DEBUG_KEYS);
4910 	if (cp->val == 0x02)
4911 		use_changed = !hci_dev_test_and_set_flag(hdev,
4912 							 HCI_USE_DEBUG_KEYS);
4914 		use_changed = hci_dev_test_and_clear_flag(hdev,
4915 							  HCI_USE_DEBUG_KEYS);
4917 	if (hdev_is_powered(hdev) && use_changed &&
4918 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4919 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4920 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4921 			     sizeof(mode), &mode);
4924 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4929 	err = new_settings(hdev, sk);
4932 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PRIVACY: only while powered off. cp->privacy:
 * 0x00 = off, 0x01 = privacy on, 0x02 = limited privacy. Stores/clears
 * the local IRK, marks the RPA expired so a fresh one is generated, and
 * enables RPA resolving (user space supporting this command is expected
 * to handle IRKs). NOTE(review): excerpt — lock lines are not shown.
 */
4936 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4939 	struct mgmt_cp_set_privacy *cp = cp_data;
4943 	BT_DBG("request for %s", hdev->name);
4945 	if (!lmp_le_capable(hdev))
4946 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4947 				       MGMT_STATUS_NOT_SUPPORTED);
4949 	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
4950 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4951 				       MGMT_STATUS_INVALID_PARAMS);
4953 	if (hdev_is_powered(hdev))
4954 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4955 				       MGMT_STATUS_REJECTED);
4959 	/* If user space supports this command it is also expected to
4960 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4962 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
4965 		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
4966 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA on next use. */
4967 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4968 		hci_adv_instances_set_rpa_expired(hdev, true);
4969 		if (cp->privacy == 0x02)
4970 			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
4972 			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4974 		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
4975 		memset(hdev->irk, 0, sizeof(hdev->irk));
4976 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
4977 		hci_adv_instances_set_rpa_expired(hdev, false);
4978 		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
4981 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4986 	err = new_settings(hdev, sk);
4989 	hci_dev_unlock(hdev);
/* Validate one mgmt_irk_info entry: public LE addresses are always
 * accepted; random LE addresses must be static random addresses (the two
 * most significant address bits set). NOTE(review): excerpt — the return
 * statements are elided here.
 */
4993 static bool irk_is_valid(struct mgmt_irk_info *irk)
4995 	switch (irk->addr.type) {
4996 	case BDADDR_LE_PUBLIC:
4999 	case BDADDR_LE_RANDOM:
5000 		/* Two most significant bits shall be set */
5001 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_IRKS: bounds-check irk_count against the maximum
 * that fits in a u16 payload, verify the payload length with
 * struct_size(), validate every entry, then atomically replace the SMP
 * IRK store and enable RPA resolving.
 * NOTE(review): excerpt — lock and a trailing hci_add_irk argument line
 * are not shown.
 */
5009 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5012 	struct mgmt_cp_load_irks *cp = cp_data;
5013 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5014 				   sizeof(struct mgmt_irk_info));
5015 	u16 irk_count, expected_len;
5018 	BT_DBG("request for %s", hdev->name);
5020 	if (!lmp_le_capable(hdev))
5021 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5022 				       MGMT_STATUS_NOT_SUPPORTED);
5024 	irk_count = __le16_to_cpu(cp->irk_count);
5025 	if (irk_count > max_irk_count) {
5026 		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5028 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5029 				       MGMT_STATUS_INVALID_PARAMS);
/* Reject payloads whose length disagrees with the declared count. */
5032 	expected_len = struct_size(cp, irks, irk_count);
5033 	if (expected_len != len) {
5034 		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5036 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5037 				       MGMT_STATUS_INVALID_PARAMS);
5040 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate all entries before mutating any state. */
5042 	for (i = 0; i < irk_count; i++) {
5043 		struct mgmt_irk_info *key = &cp->irks[i];
5045 		if (!irk_is_valid(key))
5046 			return mgmt_cmd_status(sk, hdev->id,
5048 					       MGMT_STATUS_INVALID_PARAMS);
5053 	hci_smp_irks_clear(hdev);
5055 	for (i = 0; i < irk_count; i++) {
5056 		struct mgmt_irk_info *irk = &cp->irks[i];
5058 		hci_add_irk(hdev, &irk->addr.bdaddr,
5059 			    le_addr_type(irk->addr.type), irk->val,
5063 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5065 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5067 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_ADVERTISING_PARAMS (vendor extension; presumably
 * Tizen-specific, given the mgmt_tizen.h include — TODO confirm).
 * Rejected while HCI_ADVERTISING is active. Validates interval_min <=
 * interval_max within 0x0020..0x4000 and stores interval, filter policy
 * and advertising type on hdev.
 * NOTE(review): excerpt — lock/return lines are not shown.
 */
5073 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
5074 				  void *data, u16 len)
5076 	struct mgmt_cp_set_advertising_params *cp = data;
5081 	BT_DBG("%s", hdev->name);
5083 	if (!lmp_le_capable(hdev))
5084 		return mgmt_cmd_status(sk, hdev->id,
5085 				       MGMT_OP_SET_ADVERTISING_PARAMS,
5086 				       MGMT_STATUS_NOT_SUPPORTED);
5088 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
5089 		return mgmt_cmd_status(sk, hdev->id,
5090 				       MGMT_OP_SET_ADVERTISING_PARAMS,
5093 	min_interval = __le16_to_cpu(cp->interval_min);
5094 	max_interval = __le16_to_cpu(cp->interval_max);
5096 	if (min_interval > max_interval ||
5097 	    min_interval < 0x0020 || max_interval > 0x4000)
5098 		return mgmt_cmd_status(sk, hdev->id,
5099 				       MGMT_OP_SET_ADVERTISING_PARAMS,
5100 				       MGMT_STATUS_INVALID_PARAMS);
5104 	hdev->le_adv_min_interval = min_interval;
5105 	hdev->le_adv_max_interval = max_interval;
5106 	hdev->adv_filter_policy = cp->filter_policy;
5107 	hdev->adv_type = cp->type;
5109 	err = mgmt_cmd_complete(sk, hdev->id,
5110 				MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
5112 	hci_dev_unlock(hdev);
/* Completion handler for MGMT_OP_SET_ADVERTISING_DATA: report the HCI
 * status (error) or echo the request parameters back on success, then
 * drop the pending command.
 */
5117 static void set_advertising_data_complete(struct hci_dev *hdev,
5118 					  u8 status, u16 opcode)
5120 	struct mgmt_cp_set_advertising_data *cp;
5121 	struct mgmt_pending_cmd *cmd;
5123 	BT_DBG("status 0x%02x", status);
5127 	cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
5134 		mgmt_cmd_status(cmd->sk, hdev->id,
5135 				MGMT_OP_SET_ADVERTISING_DATA,
5136 				mgmt_status(status));
5138 		mgmt_cmd_complete(cmd->sk, hdev->id,
5139 				  MGMT_OP_SET_ADVERTISING_DATA, 0,
5142 	mgmt_pending_remove(cmd);
5145 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_ADVERTISING_DATA (vendor extension): copy raw
 * advertising data (up to HCI_MAX_AD_LENGTH) into an
 * HCI_OP_LE_SET_ADV_DATA command and run it, completing in
 * set_advertising_data_complete().
 * NOTE(review): excerpt — lock, adv.length assignment and goto/return
 * lines are not shown.
 */
5148 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
5149 				void *data, u16 len)
5151 	struct mgmt_pending_cmd *cmd;
5152 	struct hci_request req;
5153 	struct mgmt_cp_set_advertising_data *cp = data;
5154 	struct hci_cp_le_set_adv_data adv;
5157 	BT_DBG("%s", hdev->name);
5159 	if (!lmp_le_capable(hdev)) {
5160 		return mgmt_cmd_status(sk, hdev->id,
5161 				       MGMT_OP_SET_ADVERTISING_DATA,
5162 				       MGMT_STATUS_NOT_SUPPORTED);
5167 	if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
5168 		err = mgmt_cmd_status(sk, hdev->id,
5169 				      MGMT_OP_SET_ADVERTISING_DATA,
/* Payload must fit into the legacy 31-byte advertising data block. */
5174 	if (len > HCI_MAX_AD_LENGTH) {
5175 		err = mgmt_cmd_status(sk, hdev->id,
5176 				      MGMT_OP_SET_ADVERTISING_DATA,
5177 				      MGMT_STATUS_INVALID_PARAMS);
5181 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
5188 	hci_req_init(&req, hdev);
5190 	memset(&adv, 0, sizeof(adv));
5191 	memcpy(adv.data, cp->data, len);
5194 	hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
5196 	err = hci_req_run(&req, set_advertising_data_complete);
5198 		mgmt_pending_remove(cmd);
5201 	hci_dev_unlock(hdev);
/* Completion handler for MGMT_OP_SET_SCAN_RSP_DATA: mirror of
 * set_advertising_data_complete() for the scan response data command.
 */
5206 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
5209 	struct mgmt_cp_set_scan_rsp_data *cp;
5210 	struct mgmt_pending_cmd *cmd;
5212 	BT_DBG("status 0x%02x", status);
5216 	cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
5223 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
5224 				mgmt_status(status));
5226 		mgmt_cmd_complete(cmd->sk, hdev->id,
5227 				  MGMT_OP_SET_SCAN_RSP_DATA, 0,
5230 	mgmt_pending_remove(cmd);
5233 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_RSP_DATA (vendor extension): copy raw scan
 * response data (up to HCI_MAX_AD_LENGTH) into an
 * HCI_OP_LE_SET_SCAN_RSP_DATA command and run it, completing in
 * set_scan_rsp_data_complete().
 * NOTE(review): excerpt — lock, rsp.length assignment and goto/return
 * lines are not shown.
 */
5236 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
5239 	struct mgmt_pending_cmd *cmd;
5240 	struct hci_request req;
5241 	struct mgmt_cp_set_scan_rsp_data *cp = data;
5242 	struct hci_cp_le_set_scan_rsp_data rsp;
5245 	BT_DBG("%s", hdev->name);
5247 	if (!lmp_le_capable(hdev))
5248 		return mgmt_cmd_status(sk, hdev->id,
5249 				       MGMT_OP_SET_SCAN_RSP_DATA,
5250 				       MGMT_STATUS_NOT_SUPPORTED);
5254 	if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
5255 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
5260 	if (len > HCI_MAX_AD_LENGTH) {
5261 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
5262 				      MGMT_STATUS_INVALID_PARAMS);
5266 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
5272 	hci_req_init(&req, hdev);
5274 	memset(&rsp, 0, sizeof(rsp));
5275 	memcpy(rsp.data, cp->data, len);
5278 	hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
5280 	err = hci_req_run(&req, set_scan_rsp_data_complete);
5282 		mgmt_pending_remove(cmd);
5285 	hci_dev_unlock(hdev);
5290 /* Adv White List feature */
/* Completion handler for MGMT_OP_ADD_DEV_WHITE_LIST (vendor extension):
 * report the HCI status or echo the request parameters on success, then
 * drop the pending command.
 */
5291 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5293 	struct mgmt_cp_add_dev_white_list *cp;
5294 	struct mgmt_pending_cmd *cmd;
5296 	BT_DBG("status 0x%02x", status);
5300 	cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
5307 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5308 				mgmt_status(status));
5310 		mgmt_cmd_complete(cmd->sk, hdev->id,
5311 				  MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
5313 	mgmt_pending_remove(cmd);
5316 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_DEV_WHITE_LIST (vendor extension): forward the
 * request payload directly as HCI_OP_LE_ADD_TO_WHITE_LIST; requires an
 * LE-capable, powered controller.
 * NOTE(review): excerpt — lock and goto/return lines are not shown.
 */
5319 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
5320 			  void *data, u16 len)
5322 	struct mgmt_pending_cmd *cmd;
5323 	struct mgmt_cp_add_dev_white_list *cp = data;
5324 	struct hci_request req;
5327 	BT_DBG("%s", hdev->name);
5329 	if (!lmp_le_capable(hdev))
5330 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5331 				       MGMT_STATUS_NOT_SUPPORTED);
5333 	if (!hdev_is_powered(hdev))
5334 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5335 				       MGMT_STATUS_REJECTED);
5339 	if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
5340 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
5345 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
5351 	hci_req_init(&req, hdev);
/* The mgmt payload is laid out to match the HCI command parameters. */
5353 	hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
5355 	err = hci_req_run(&req, add_white_list_complete);
5357 		mgmt_pending_remove(cmd);
5362 	hci_dev_unlock(hdev);
/* Completion handler for MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST (vendor
 * extension): report the HCI status or complete successfully, then drop
 * the pending command.
 */
5367 static void remove_from_white_list_complete(struct hci_dev *hdev,
5368 					    u8 status, u16 opcode)
5370 	struct mgmt_cp_remove_dev_from_white_list *cp;
5371 	struct mgmt_pending_cmd *cmd;
5373 	BT_DBG("status 0x%02x", status);
5377 	cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
5384 		mgmt_cmd_status(cmd->sk, hdev->id,
5385 				MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5386 				mgmt_status(status));
5388 		mgmt_cmd_complete(cmd->sk, hdev->id,
5389 				  MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
5392 	mgmt_pending_remove(cmd);
5395 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST (vendor extension): forward
 * the payload as HCI_OP_LE_DEL_FROM_WHITE_LIST; requires an LE-capable,
 * powered controller.
 * NOTE(review): excerpt — lock and goto/return lines are not shown.
 */
5398 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
5399 				  void *data, u16 len)
5401 	struct mgmt_pending_cmd *cmd;
5402 	struct mgmt_cp_remove_dev_from_white_list *cp = data;
5403 	struct hci_request req;
5406 	BT_DBG("%s", hdev->name);
5408 	if (!lmp_le_capable(hdev))
5409 		return mgmt_cmd_status(sk, hdev->id,
5410 				       MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5411 				       MGMT_STATUS_NOT_SUPPORTED);
5413 	if (!hdev_is_powered(hdev))
5414 		return mgmt_cmd_status(sk, hdev->id,
5415 				       MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5416 				       MGMT_STATUS_REJECTED);
5420 	if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
5421 		err = mgmt_cmd_status(sk, hdev->id,
5422 				      MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5427 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
5434 	hci_req_init(&req, hdev);
5436 	hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
5438 	err = hci_req_run(&req, remove_from_white_list_complete);
5440 		mgmt_pending_remove(cmd);
5445 	hci_dev_unlock(hdev);
/* Completion handler for MGMT_OP_CLEAR_DEV_WHITE_LIST (vendor
 * extension): report the HCI status or complete successfully, then drop
 * the pending command.
 */
5450 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
5453 	struct mgmt_pending_cmd *cmd;
5455 	BT_DBG("status 0x%02x", status);
5459 	cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
5464 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
5465 				mgmt_status(status));
5467 		mgmt_cmd_complete(cmd->sk, hdev->id,
5468 				  MGMT_OP_CLEAR_DEV_WHITE_LIST,
5471 	mgmt_pending_remove(cmd);
5474 	hci_dev_unlock(hdev);
/* Handle MGMT_OP_CLEAR_DEV_WHITE_LIST (vendor extension): issue
 * HCI_OP_LE_CLEAR_WHITE_LIST (no parameters); requires an LE-capable,
 * powered controller.
 * NOTE(review): excerpt — lock and goto/return lines are not shown.
 */
5477 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
5478 			    void *data, u16 len)
5480 	struct mgmt_pending_cmd *cmd;
5481 	struct hci_request req;
5484 	BT_DBG("%s", hdev->name);
5486 	if (!lmp_le_capable(hdev))
5487 		return mgmt_cmd_status(sk, hdev->id,
5488 				       MGMT_OP_CLEAR_DEV_WHITE_LIST,
5489 				       MGMT_STATUS_NOT_SUPPORTED);
5491 	if (!hdev_is_powered(hdev))
5492 		return mgmt_cmd_status(sk, hdev->id,
5493 				       MGMT_OP_CLEAR_DEV_WHITE_LIST,
5494 				       MGMT_STATUS_REJECTED);
5498 	if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
5499 		err = mgmt_cmd_status(sk, hdev->id,
5500 				      MGMT_OP_CLEAR_DEV_WHITE_LIST,
5505 	cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
5512 	hci_req_init(&req, hdev);
5514 	hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
5516 	err = hci_req_run(&req, clear_white_list_complete);
5518 		mgmt_pending_remove(cmd);
5523 	hci_dev_unlock(hdev);
/* Completion handler for the RSSI-threshold HCI request issued by
 * mgmt_set_rssi_threshold() (vendor extension): report the status on the
 * pending MGMT_OP_SET_RSSI_ENABLE command and drop it.
 */
5528 static void set_rssi_threshold_complete(struct hci_dev *hdev,
5529 					u8 status, u16 opcode)
5531 	struct mgmt_pending_cmd *cmd;
5533 	BT_DBG("status 0x%02x", status);
5537 	cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5542 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5543 				mgmt_status(status));
5545 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
5548 	mgmt_pending_remove(cmd);
5551 	hci_dev_unlock(hdev);
/* Completion handler for the RSSI-disable HCI request (vendor
 * extension): report the status on the pending MGMT_OP_SET_RSSI_DISABLE
 * command and drop it.
 */
5554 static void set_rssi_disable_complete(struct hci_dev *hdev,
5555 				      u8 status, u16 opcode)
5557 	struct mgmt_pending_cmd *cmd;
5559 	BT_DBG("status 0x%02x", status);
5563 	cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5568 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5569 				mgmt_status(status));
5571 		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5574 	mgmt_pending_remove(cmd);
5577 	hci_dev_unlock(hdev);
/* Program RSSI alert thresholds for an existing connection (vendor
 * extension, invoked from mgmt_enable_rssi_cc() after RSSI monitoring is
 * enabled). Looks up the ACL/LE connection for cp->bdaddr, fills a
 * vendor HCI "set RSSI threshold" command (sub-opcode 0x0B, alert mask
 * 0x07, low/in-range/high thresholds from the request) and runs it,
 * completing in set_rssi_threshold_complete().
 * NOTE(review): excerpt — lock, goto labels and return lines are not
 * shown; the vendor sub-opcode semantics come from the Tizen extension
 * headers — confirm against mgmt_tizen.h.
 */
5580 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
5581 			    void *data, u16 len)
5584 	struct hci_cp_set_rssi_threshold th = { 0, };
5585 	struct mgmt_cp_set_enable_rssi *cp = data;
5586 	struct hci_conn *conn;
5587 	struct mgmt_pending_cmd *cmd;
5588 	struct hci_request req;
/* A pending ENABLE command must already exist for this flow. */
5593 	cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5595 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5596 				      MGMT_STATUS_FAILED);
5600 	if (!lmp_le_capable(hdev)) {
5601 		mgmt_pending_remove(cmd);
5602 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5603 				      MGMT_STATUS_NOT_SUPPORTED);
5607 	if (!hdev_is_powered(hdev)) {
5608 		BT_DBG("%s", hdev->name);
5609 		mgmt_pending_remove(cmd);
5610 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5611 				      MGMT_STATUS_NOT_POWERED);
/* link_type 0x01 selects the LE link, anything else BR/EDR (ACL). */
5615 	if (cp->link_type == 0x01)
5616 		dest_type = LE_LINK;
5618 		dest_type = ACL_LINK;
5620 	/* Get LE/ACL link handle info */
5621 	conn = hci_conn_hash_lookup_ba(hdev,
5622 				       dest_type, &cp->bdaddr);
/* No such connection: complete with a non-zero status byte. */
5625 		err = mgmt_cmd_complete(sk, hdev->id,
5626 					MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
5627 		mgmt_pending_remove(cmd);
5631 	hci_req_init(&req, hdev);
5633 	th.hci_le_ext_opcode = 0x0B;
5635 	th.conn_handle = conn->handle;
5636 	th.alert_mask = 0x07;
5637 	th.low_th = cp->low_th;
5638 	th.in_range_th = cp->in_range_th;
5639 	th.high_th = cp->high_th;
5641 	hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
5642 	err = hci_req_run(&req, set_rssi_threshold_complete);
5645 		mgmt_pending_remove(cmd);
5646 		BT_ERR("Error in requesting hci_req_run");
5651 	hci_dev_unlock(hdev);
/* Report successful RSSI-monitoring enablement to user space (vendor
 * extension): complete MGMT_OP_SET_RSSI_ENABLE with the controller's
 * response, broadcast MGMT_EV_RSSI_ENABLED, and mark the connection's
 * RSSI-monitoring state. Any remaining pending command is dropped.
 * NOTE(review): excerpt — lock and conditional lines around the state
 * update are not shown.
 */
5655 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
5656 			      void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
5658 	struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
5659 	struct mgmt_cp_set_enable_rssi *cp = data;
5660 	struct mgmt_pending_cmd *cmd;
5665 	mgmt_rp.status = rp->status;
5666 	mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
5667 	mgmt_rp.bt_address = cp->bdaddr;
5668 	mgmt_rp.link_type = cp->link_type;
5670 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5671 			  MGMT_STATUS_SUCCESS, &mgmt_rp,
5672 			  sizeof(struct mgmt_cc_rsp_enable_rssi));
5674 	mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
5675 		   sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Only one link per transport is monitored: clear others, mark this one. */
5677 	hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
5678 	hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
5679 				&mgmt_rp.bt_address, true);
5683 	cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5685 		mgmt_pending_remove(cmd);
5687 	hci_dev_unlock(hdev);
/* Report successful RSSI-monitoring disablement to user space (vendor
 * extension): complete MGMT_OP_SET_RSSI_DISABLE, broadcast
 * MGMT_EV_RSSI_DISABLED, and clear the connection's RSSI-monitoring
 * state. Any remaining pending command is dropped.
 * NOTE(review): excerpt — lock lines are not shown.
 */
5690 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
5691 			       void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
5693 	struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
5694 	struct mgmt_cp_disable_rssi *cp = data;
5695 	struct mgmt_pending_cmd *cmd;
5700 	mgmt_rp.status = rp->status;
5701 	mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
5702 	mgmt_rp.bt_address = cp->bdaddr;
5703 	mgmt_rp.link_type = cp->link_type;
5705 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5706 			  MGMT_STATUS_SUCCESS, &mgmt_rp,
5707 			  sizeof(struct mgmt_cc_rsp_enable_rssi));
5709 	mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
5710 		   sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
5712 	hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
5713 				&mgmt_rp.bt_address, false);
5717 	cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5719 		mgmt_pending_remove(cmd);
5721 	hci_dev_unlock(hdev);
/* Fully disable RSSI monitoring on the controller (vendor extension,
 * invoked from mgmt_enable_rssi_cc() once no other link is monitored):
 * send the vendor enable-RSSI command with sub-opcode 0x01 and all
 * feature/data bytes zeroed, completing in set_rssi_disable_complete().
 * A pending MGMT_OP_SET_RSSI_DISABLE command must already exist.
 * NOTE(review): excerpt — lock, goto labels and return lines are not
 * shown; sub-opcode semantics per the Tizen extension — confirm.
 */
5724 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
5725 				 void *data, u16 len)
5727 	struct mgmt_pending_cmd *cmd;
5728 	struct hci_request req;
5729 	struct hci_cp_set_enable_rssi cp_en = { 0, };
5732 	BT_DBG("Set Disable RSSI.");
/* Sub-opcode 0x01 with zeroed feature bits turns monitoring off. */
5734 	cp_en.hci_le_ext_opcode = 0x01;
5735 	cp_en.le_enable_cs_Features = 0x00;
5736 	cp_en.data[0] = 0x00;
5737 	cp_en.data[1] = 0x00;
5738 	cp_en.data[2] = 0x00;
5742 	cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5744 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5745 				      MGMT_STATUS_FAILED);
5749 	if (!lmp_le_capable(hdev)) {
5750 		mgmt_pending_remove(cmd);
5751 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5752 				      MGMT_STATUS_NOT_SUPPORTED);
5756 	if (!hdev_is_powered(hdev)) {
5757 		BT_DBG("%s", hdev->name);
5758 		mgmt_pending_remove(cmd);
5759 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
5760 				      MGMT_STATUS_NOT_POWERED);
5764 	hci_req_init(&req, hdev);
5766 	BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
5767 	       sizeof(struct hci_cp_set_enable_rssi),
5768 	       cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
5769 	       cp_en.data[0], cp_en.data[1], cp_en.data[2]);
5771 	hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
5772 	err = hci_req_run(&req, set_rssi_disable_complete);
5775 		mgmt_pending_remove(cmd);
5776 		BT_ERR("Error in requesting hci_req_run");
5781 	hci_dev_unlock(hdev);
/* Command-complete dispatcher for the vendor enable-RSSI HCI command:
 * decides, based on whether an ENABLE or DISABLE mgmt command is
 * pending and on rp->le_ext_opcode, whether to set thresholds next,
 * report enable success, report disable success, or fully disable
 * monitoring when no other link is still being monitored.
 * NOTE(review): excerpt — several case labels, braces and early returns
 * are elided; control flow here must be verified against full source.
 */
5785 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
5787 	struct hci_cc_rsp_enable_rssi *rp = response;
5788 	struct mgmt_pending_cmd *cmd_enable = NULL;
5789 	struct mgmt_pending_cmd *cmd_disable = NULL;
5790 	struct mgmt_cp_set_enable_rssi *cp_en;
5791 	struct mgmt_cp_disable_rssi *cp_dis;
5794 	cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5795 	cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
5796 	hci_dev_unlock(hdev);
5799 		BT_DBG("Enable Request");
5802 		BT_DBG("Disable Request");
5805 		cp_en = cmd_enable->param;
5810 		switch (rp->le_ext_opcode) {
/* Monitoring just enabled: program the thresholds for the link. */
5812 			BT_DBG("RSSI enabled.. Setting Threshold...");
5813 			mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
5814 						cp_en, sizeof(*cp_en));
/* Threshold set: the enable sequence is complete. */
5818 			BT_DBG("Sending RSSI enable success");
5819 			mgmt_rssi_enable_success(cmd_enable->sk, hdev,
5820 						 cp_en, rp, rp->status);
5824 	} else if (cmd_disable) {
5825 		cp_dis = cmd_disable->param;
5830 		switch (rp->le_ext_opcode) {
5832 			BT_DBG("Sending RSSI disable success");
5833 			mgmt_rssi_disable_success(cmd_disable->sk, hdev,
5834 						  cp_dis, rp, rp->status);
5839 			 * Only unset RSSI Threshold values for the Link if
5840 			 * RSSI is monitored for other BREDR or LE Links
5842 			if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
5843 				BT_DBG("Unset Threshold. Other links being monitored");
5844 				mgmt_rssi_disable_success(cmd_disable->sk, hdev,
5845 							  cp_dis, rp, rp->status);
5847 				BT_DBG("Unset Threshold. Disabling...");
5848 				mgmt_set_disable_rssi(cmd_disable->sk, hdev,
5849 						      cp_dis, sizeof(*cp_dis));
/*
 * hci_request completion callback for the RSSI-enable request: resolve the
 * pending MGMT_OP_SET_RSSI_ENABLE command with status (failure) or
 * command-complete (success), then remove it.
 */
5856 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
5859 struct mgmt_pending_cmd *cmd;
5861 BT_DBG("status 0x%02x", status);
5865 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
5870 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5871 mgmt_status(status));
5873 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
5876 mgmt_pending_remove(cmd);
5879 hci_dev_unlock(hdev);
/*
 * MGMT_OP_SET_RSSI_ENABLE handler: enable vendor RSSI monitoring.
 * Validates LE capability / powered state / no duplicate pending command,
 * then either sets thresholds directly (if monitoring is already active on
 * some link) or sends the HCI_OP_ENABLE_RSSI vendor command with feature
 * byte 0x04, completed by set_rssi_enable_complete.
 */
5882 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
5883 void *data, u16 len)
5885 struct mgmt_pending_cmd *cmd;
5886 struct hci_request req;
5887 struct mgmt_cp_set_enable_rssi *cp = data;
5888 struct hci_cp_set_enable_rssi cp_en = { 0, };
5891 BT_DBG("Set Enable RSSI.");
/* Vendor sub-opcode 0x01, feature byte 0x04 requests "enable". */
5893 cp_en.hci_le_ext_opcode = 0x01;
5894 cp_en.le_enable_cs_Features = 0x04;
5895 cp_en.data[0] = 0x00;
5896 cp_en.data[1] = 0x00;
5897 cp_en.data[2] = 0x00;
5901 if (!lmp_le_capable(hdev)) {
5902 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5903 MGMT_STATUS_NOT_SUPPORTED);
5907 if (!hdev_is_powered(hdev)) {
5908 BT_DBG("%s", hdev->name);
5909 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5910 MGMT_STATUS_NOT_POWERED);
5914 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
5915 BT_DBG("%s", hdev->name);
5916 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
5921 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
5924 BT_DBG("%s", hdev->name);
5929 /* If RSSI is already enabled directly set Threshold values */
5930 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
5931 hci_dev_unlock(hdev);
5932 BT_DBG("RSSI Enabled. Directly set Threshold");
5933 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
5937 hci_req_init(&req, hdev);
5939 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
5940 sizeof(struct hci_cp_set_enable_rssi),
5941 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
5942 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
5944 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
5945 err = hci_req_run(&req, set_rssi_enable_complete);
5948 mgmt_pending_remove(cmd);
5949 BT_ERR("Error in requesting hci_req_run");
5954 hci_dev_unlock(hdev);
/*
 * hci_request completion callback for the raw-RSSI read: complete the
 * pending MGMT_OP_GET_RAW_RSSI command (returning the 1-byte HCI status)
 * and remove it.  The actual RSSI value is delivered separately via
 * mgmt_raw_rssi_response().
 */
5959 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5961 struct mgmt_pending_cmd *cmd;
5963 BT_DBG("status 0x%02x", status);
5967 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
5971 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
5972 MGMT_STATUS_SUCCESS, &status, 1);
5974 mgmt_pending_remove(cmd);
5977 hci_dev_unlock(hdev);
/*
 * MGMT_OP_GET_RAW_RSSI handler: look up the LE/BR-EDR connection for the
 * given address (link_type 0x01 selects LE_LINK, otherwise ACL_LINK) and
 * issue the HCI_OP_GET_RAW_RSSI vendor command for its handle.
 */
5980 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
5983 struct mgmt_pending_cmd *cmd;
5984 struct hci_request req;
5985 struct mgmt_cp_get_raw_rssi *cp = data;
5986 struct hci_cp_get_raw_rssi hci_cp;
5988 struct hci_conn *conn;
5992 BT_DBG("Get Raw RSSI.");
5996 if (!lmp_le_capable(hdev)) {
5997 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
5998 MGMT_STATUS_NOT_SUPPORTED);
6002 if (cp->link_type == 0x01)
6003 dest_type = LE_LINK;
6005 dest_type = ACL_LINK;
6007 /* Get LE/BREDR link handle info */
6008 conn = hci_conn_hash_lookup_ba(hdev,
6009 dest_type, &cp->bt_address);
6011 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6012 MGMT_STATUS_NOT_CONNECTED);
6015 hci_cp.conn_handle = conn->handle;
6017 if (!hdev_is_powered(hdev)) {
6018 BT_DBG("%s", hdev->name);
6019 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6020 MGMT_STATUS_NOT_POWERED);
6024 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
6025 BT_DBG("%s", hdev->name);
6026 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
6031 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
6033 BT_DBG("%s", hdev->name);
6038 hci_req_init(&req, hdev);
6040 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
6041 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
6042 err = hci_req_run(&req, get_raw_rssi_complete);
6045 mgmt_pending_remove(cmd);
6046 BT_ERR("Error in requesting hci_req_run");
6050 hci_dev_unlock(hdev);
/*
 * Forward a raw-RSSI HCI response to userspace as MGMT_EV_RAW_RSSI,
 * resolving the connection handle back to its peer address and link type
 * (0x01 = LE, 0x00 = BR/EDR).
 */
6055 void mgmt_raw_rssi_response(struct hci_dev *hdev,
6056 struct hci_cc_rp_get_raw_rssi *rp, int success)
6058 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
6059 struct hci_conn *conn;
6061 mgmt_rp.status = rp->status;
6062 mgmt_rp.rssi_dbm = rp->rssi_dbm;
6064 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
6068 bacpy(&mgmt_rp.bt_address, &conn->dst);
6069 if (conn->type == LE_LINK)
6070 mgmt_rp.link_type = 0x01;
6072 mgmt_rp.link_type = 0x00;
6074 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
6075 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/*
 * hci_request completion callback for the per-link threshold-disable
 * request: complete and remove the pending MGMT_OP_SET_RSSI_DISABLE
 * command, echoing the 1-byte HCI status.
 */
6078 static void set_disable_threshold_complete(struct hci_dev *hdev,
6079 u8 status, u16 opcode)
6081 struct mgmt_pending_cmd *cmd;
6083 BT_DBG("status 0x%02x", status);
6087 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
6091 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6092 MGMT_STATUS_SUCCESS, &status, 1);
6094 mgmt_pending_remove(cmd);
6097 hci_dev_unlock(hdev);
6100 /** Removes monitoring for a link*/
/*
 * MGMT_OP_SET_RSSI_DISABLE handler: clear the RSSI alert threshold for one
 * connection by sending vendor sub-opcode 0x0B with a zeroed alert mask and
 * in-range threshold via HCI_OP_ENABLE_RSSI.
 * Note: replies with command-complete (not status) when the address is not
 * connected — presumably "nothing to disable" counts as done; TODO confirm.
 */
6101 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
6102 void *data, u16 len)
6105 struct hci_cp_set_rssi_threshold th = { 0, };
6106 struct mgmt_cp_disable_rssi *cp = data;
6107 struct hci_conn *conn;
6108 struct mgmt_pending_cmd *cmd;
6109 struct hci_request req;
6112 BT_DBG("Set Disable RSSI.");
6116 if (!lmp_le_capable(hdev)) {
6117 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6118 MGMT_STATUS_NOT_SUPPORTED);
6122 /* Get LE/ACL link handle info*/
6123 if (cp->link_type == 0x01)
6124 dest_type = LE_LINK;
6126 dest_type = ACL_LINK;
6128 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
6130 err = mgmt_cmd_complete(sk, hdev->id,
6131 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* Sub-opcode 0x0B = set threshold; zero mask/threshold clears monitoring. */
6135 th.hci_le_ext_opcode = 0x0B;
6137 th.conn_handle = conn->handle;
6138 th.alert_mask = 0x00;
6140 th.in_range_th = 0x00;
6143 if (!hdev_is_powered(hdev)) {
6144 BT_DBG("%s", hdev->name);
6145 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6150 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
6151 BT_DBG("%s", hdev->name);
6152 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
6157 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
6160 BT_DBG("%s", hdev->name);
6165 hci_req_init(&req, hdev);
6167 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
6168 err = hci_req_run(&req, set_disable_threshold_complete);
6170 mgmt_pending_remove(cmd);
6171 BT_ERR("Error in requesting hci_req_run");
6176 hci_dev_unlock(hdev);
/*
 * Translate a vendor RSSI-alert HCI event into MGMT_EV_RSSI_ALERT for
 * userspace: resolve the handle to its peer address/link type and forward
 * the alert type and RSSI (dBm).  Logs an error if the handle is unknown.
 */
6181 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
6182 s8 alert_type, s8 rssi_dbm)
6184 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
6185 struct hci_conn *conn;
6187 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
6188 conn_handle, alert_type, rssi_dbm);
6190 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
6193 BT_ERR("RSSI alert Error: Device not found for handle");
6196 bacpy(&mgmt_ev.bdaddr, &conn->dst);
6198 if (conn->type == LE_LINK)
6199 mgmt_ev.link_type = 0x01;
6201 mgmt_ev.link_type = 0x00;
6203 mgmt_ev.alert_type = alert_type;
6204 mgmt_ev.rssi_dbm = rssi_dbm;
6206 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
6207 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/*
 * Report failure of a pending MGMT_OP_START_LE_DISCOVERY: reset the LE
 * discovery state machine to STOPPED and complete the pending command
 * with the mapped mgmt status plus the discovery type.
 */
6211 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
6213 struct mgmt_pending_cmd *cmd;
6217 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
6219 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
6223 type = hdev->le_discovery.type;
6225 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
6226 mgmt_status(status), &type, sizeof(type));
6227 mgmt_pending_remove(cmd);
/*
 * Completion callback for the LE-discovery start request: on failure,
 * unwind via mgmt_start_le_discovery_failed(); on success, move to
 * DISCOVERY_FINDING and queue le_scan_disable to end the scan after
 * `timeout` (value set in elided lines — can't tell from this view).
 */
6232 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
6235 unsigned long timeout = 0;
6237 BT_DBG("status %d", status);
6241 mgmt_start_le_discovery_failed(hdev, status);
6242 hci_dev_unlock(hdev);
6247 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
6248 hci_dev_unlock(hdev);
6250 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
6251 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
6256 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/*
 * MGMT_OP_START_LE_DISCOVERY handler: validate power/state/type, stop any
 * background scan, pick a (resolvable or unresolvable) private address,
 * then program LE scan parameters and enable active scanning; completion
 * is handled by start_le_discovery_complete.
 *
 * Fix: `memset(&param_cp, ...)` had been corrupted by an HTML-entity
 * mangling of "&para" into 'u00b6' ("paragraph sign"), which does not
 * compile; restored the address-of operator.
 */
6259 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
6260 void *data, u16 len)
6262 struct mgmt_cp_start_le_discovery *cp = data;
6263 struct mgmt_pending_cmd *cmd;
6264 struct hci_cp_le_set_scan_param param_cp;
6265 struct hci_cp_le_set_scan_enable enable_cp;
6266 struct hci_request req;
6267 u8 status, own_addr_type;
6270 BT_DBG("%s", hdev->name);
/* Preconditions: powered, discovery idle, and LE-only discovery type. */
6274 if (!hdev_is_powered(hdev)) {
6275 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6276 MGMT_STATUS_NOT_POWERED);
6280 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
6281 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6286 if (cp->type != DISCOV_TYPE_LE) {
6287 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6288 MGMT_STATUS_INVALID_PARAMS);
6292 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
6298 hdev->le_discovery.type = cp->type;
6300 hci_req_init(&req, hdev);
6302 status = mgmt_le_support(hdev);
6304 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6306 mgmt_pending_remove(cmd);
6310 /* If controller is scanning, it means the background scanning
6311 * is running. Thus, we should temporarily stop it in order to
6312 * set the discovery scanning parameters.
6314 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
6315 hci_req_add_le_scan_disable(&req);
6317 memset(&param_cp, 0, sizeof(param_cp));
6319 /* All active scans will be done with either a resolvable
6320 * private address (when privacy feature has been enabled)
6321 * or unresolvable private address.
6323 err = hci_update_random_address(&req, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
6325 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
6326 MGMT_STATUS_FAILED);
6327 mgmt_pending_remove(cmd);
/* Scan parameters come from the hdev tunables (see le_set_scan_params). */
6331 param_cp.type = hdev->le_scan_type;
6332 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
6333 param_cp.window = cpu_to_le16(hdev->le_scan_window);
6334 param_cp.own_address_type = own_addr_type;
6335 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
6338 memset(&enable_cp, 0, sizeof(enable_cp));
6339 enable_cp.enable = LE_SCAN_ENABLE;
6340 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
6342 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
6345 err = hci_req_run(&req, start_le_discovery_complete);
6347 mgmt_pending_remove(cmd);
6349 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
6352 hci_dev_unlock(hdev);
/*
 * Report failure of a pending MGMT_OP_STOP_LE_DISCOVERY: complete the
 * pending command with the mapped status and the current discovery type.
 */
6356 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
6358 struct mgmt_pending_cmd *cmd;
6361 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
6365 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
6366 mgmt_status(status), &hdev->le_discovery.type,
6367 sizeof(hdev->le_discovery.type));
6368 mgmt_pending_remove(cmd);
/*
 * Completion callback for the LE-discovery stop request: on failure report
 * via mgmt_stop_le_discovery_failed(), otherwise mark discovery STOPPED.
 */
6373 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
6376 BT_DBG("status %d", status);
6381 mgmt_stop_le_discovery_failed(hdev, status);
6385 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
6388 hci_dev_unlock(hdev);
/*
 * MGMT_OP_STOP_LE_DISCOVERY handler: reject if LE discovery is not active
 * or the requested type does not match, otherwise cancel the scheduled
 * scan-disable work, queue an LE scan disable and move to STOPPING;
 * completion is handled by stop_le_discovery_complete.
 */
6391 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
6392 void *data, u16 len)
6394 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
6395 struct mgmt_pending_cmd *cmd;
6396 struct hci_request req;
6399 BT_DBG("%s", hdev->name);
6403 if (!hci_le_discovery_active(hdev)) {
6404 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
6405 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6406 sizeof(mgmt_cp->type));
6410 if (hdev->le_discovery.type != mgmt_cp->type) {
6411 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
6412 MGMT_STATUS_INVALID_PARAMS,
6413 &mgmt_cp->type, sizeof(mgmt_cp->type));
6417 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
6423 hci_req_init(&req, hdev);
/* Only FINDING is a stoppable state here; anything else fails. */
6425 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
6426 BT_DBG("unknown le discovery state %u",
6427 hdev->le_discovery.state);
6429 mgmt_pending_remove(cmd);
6430 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
6431 MGMT_STATUS_FAILED, &mgmt_cp->type,
6432 sizeof(mgmt_cp->type));
6436 cancel_delayed_work(&hdev->le_scan_disable);
6437 hci_req_add_le_scan_disable(&req);
6439 err = hci_req_run(&req, stop_le_discovery_complete);
6441 mgmt_pending_remove(cmd);
6443 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
6446 hci_dev_unlock(hdev);
6450 /* Separate LE discovery */
/*
 * Notify userspace of an LE discovery state change: complete whichever
 * start/stop command is still pending (echoing the discovery type), then
 * broadcast MGMT_EV_DISCOVERING with the new state.
 */
6451 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
6453 struct mgmt_ev_discovering ev;
6454 struct mgmt_pending_cmd *cmd;
6456 BT_DBG("%s le discovering %u", hdev->name, discovering);
6459 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
6461 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
6464 u8 type = hdev->le_discovery.type;
6466 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6468 mgmt_pending_remove(cmd);
6471 memset(&ev, 0, sizeof(ev));
6472 ev.type = hdev->le_discovery.type;
6473 ev.discovering = discovering;
6475 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/*
 * Cancel a pending LE connection attempt by sending
 * HCI_OP_LE_CREATE_CONN_CANCEL directly (no request queue); only logs
 * on send failure.
 */
6478 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
6479 void *data, u16 len)
6483 BT_DBG("%s", hdev->name);
6487 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
6489 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
6491 hci_dev_unlock(hdev);
/*
 * Validate LE connection-parameter-update values against the spec ranges:
 * interval 6..3200 with min <= max, supervision timeout 10..3200, timeout
 * strictly longer than the max interval, and latency <= 499 and within
 * what the timeout allows.  (Return statements are elided in this view;
 * presumably each failed check returns an error — TODO confirm.)
 */
6496 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
6501 if (min > max || min < 6 || max > 3200)
6504 if (to_multiplier < 10 || to_multiplier > 3200)
6507 if (max >= to_multiplier * 8)
6510 max_latency = (to_multiplier * 8 / max) - 1;
6512 if (latency > 499 || latency > max_latency)
/*
 * MGMT_OP_LE_CONN_UPDATE handler: decode the little-endian parameters,
 * validate them with check_le_conn_update_param(), look up the LE
 * connection for the address, and issue hci_le_conn_update().
 * Note: hci_le_conn_update() is called after hci_dev_unlock() — relies on
 * the conn pointer staying valid; TODO confirm against locking rules.
 */
6518 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
6521 struct mgmt_cp_le_conn_update *cp = data;
6523 struct hci_conn *conn;
6524 u16 min, max, latency, supervision_timeout;
6527 if (!hdev_is_powered(hdev))
6528 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
6529 MGMT_STATUS_NOT_POWERED);
6531 min = __le16_to_cpu(cp->conn_interval_min);
6532 max = __le16_to_cpu(cp->conn_interval_max);
6533 latency = __le16_to_cpu(cp->conn_latency);
6534 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
6536 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
6537 min, max, latency, supervision_timeout);
6539 err = check_le_conn_update_param(min, max, latency,
6540 supervision_timeout);
6543 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
6544 MGMT_STATUS_INVALID_PARAMS);
6548 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
6550 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
6551 MGMT_STATUS_NOT_CONNECTED);
6552 hci_dev_unlock(hdev);
6556 hci_dev_unlock(hdev);
6558 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
6560 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
/*
 * hci_request completion callback for set_manufacturer_data(): resolve the
 * pending MGMT_OP_SET_MANUFACTURER_DATA command with status (failure) or
 * command-complete (success), then remove it.
 */
6564 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
6567 struct mgmt_cp_set_manufacturer_data *cp;
6568 struct mgmt_pending_cmd *cmd;
6570 BT_DBG("status 0x%02x", status);
6574 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
6581 mgmt_cmd_status(cmd->sk, hdev->id,
6582 MGMT_OP_SET_MANUFACTURER_DATA,
6583 mgmt_status(status));
6585 mgmt_cmd_complete(cmd->sk, hdev->id,
6586 MGMT_OP_SET_MANUFACTURER_DATA, 0,
6589 mgmt_pending_remove(cmd);
6592 hci_dev_unlock(hdev);
/*
 * MGMT_OP_SET_MANUFACTURER_DATA handler: update the manufacturer-specific
 * EIR data.  cp->data[0] is a length byte (payload is cp->data[0]-1 bytes
 * starting at cp->data[2]) and cp->data[1] must be the 0xFF EIR
 * "manufacturer specific" tag.  The previous data is saved so it can be
 * restored (visible in the tail, lines 6676-6680) if the EIR update fails.
 */
6595 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
6596 void *data, u16 len)
6598 struct mgmt_pending_cmd *cmd;
6599 struct hci_request req;
6600 struct mgmt_cp_set_manufacturer_data *cp = data;
6601 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
6605 BT_DBG("%s", hdev->name);
6607 if (!lmp_bredr_capable(hdev))
6608 return mgmt_cmd_status(sk, hdev->id,
6609 MGMT_OP_SET_MANUFACTURER_DATA,
6610 MGMT_STATUS_NOT_SUPPORTED);
6612 if (cp->data[0] == 0 ||
6613 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
6614 return mgmt_cmd_status(sk, hdev->id,
6615 MGMT_OP_SET_MANUFACTURER_DATA,
6616 MGMT_STATUS_INVALID_PARAMS);
6618 if (cp->data[1] != 0xFF)
6619 return mgmt_cmd_status(sk, hdev->id,
6620 MGMT_OP_SET_MANUFACTURER_DATA,
6621 MGMT_STATUS_NOT_SUPPORTED);
6625 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
6626 err = mgmt_cmd_status(sk, hdev->id,
6627 MGMT_OP_SET_MANUFACTURER_DATA,
6632 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
6639 hci_req_init(&req, hdev);
6641 /* if new data is same as previous data then return command
6644 if (hdev->manufacturer_len == cp->data[0] - 1 &&
6645 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
6646 mgmt_pending_remove(cmd);
6647 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
6648 0, cp, sizeof(*cp));
/* Keep a copy of the old data for rollback on EIR-update failure. */
6653 old_len = hdev->manufacturer_len;
6655 memcpy(old_data, hdev->manufacturer_data, old_len);
6657 hdev->manufacturer_len = cp->data[0] - 1;
6658 if (hdev->manufacturer_len > 0)
6659 memcpy(hdev->manufacturer_data, cp->data + 2,
6660 hdev->manufacturer_len);
6662 __hci_req_update_eir(&req);
6664 err = hci_req_run(&req, set_manufacturer_data_complete);
6666 mgmt_pending_remove(cmd);
6671 hci_dev_unlock(hdev);
/* Rollback path: restore the previous manufacturer data. */
6676 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
6677 hdev->manufacturer_len = old_len;
6678 if (hdev->manufacturer_len > 0)
6679 memcpy(hdev->manufacturer_data, old_data,
6680 hdev->manufacturer_len);
6681 hci_dev_unlock(hdev);
/*
 * MGMT_OP_LE_SET_SCAN_PARAMS handler: validate interval/window against the
 * spec range 0x0004..0x4000 (and window <= interval), store the new scan
 * type/interval/window on hdev, and restart the background passive scan if
 * one is running so the new parameters take effect.
 */
6685 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
6686 void *data, u16 len)
6688 struct mgmt_cp_le_set_scan_params *cp = data;
6689 __u16 interval, window;
6692 BT_DBG("%s", hdev->name);
6694 if (!lmp_le_capable(hdev))
6695 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
6696 MGMT_STATUS_NOT_SUPPORTED);
6698 interval = __le16_to_cpu(cp->interval);
6700 if (interval < 0x0004 || interval > 0x4000)
6701 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
6702 MGMT_STATUS_INVALID_PARAMS);
6704 window = __le16_to_cpu(cp->window);
6706 if (window < 0x0004 || window > 0x4000)
6707 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
6708 MGMT_STATUS_INVALID_PARAMS);
6710 if (window > interval)
6711 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
6712 MGMT_STATUS_INVALID_PARAMS);
6716 hdev->le_scan_type = cp->type;
6717 hdev->le_scan_interval = interval;
6718 hdev->le_scan_window = window;
6720 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
6723 /* If background scan is running, restart it so new parameters are
6726 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6727 hdev->discovery.state == DISCOVERY_STOPPED) {
6728 struct hci_request req;
6730 hci_req_init(&req, hdev);
6732 hci_req_add_le_scan_disable(&req);
6733 hci_req_add_le_passive_scan(&req);
6735 hci_req_run(&req, NULL);
6738 hci_dev_unlock(hdev);
/*
 * MGMT_OP_SET_VOICE_SETTING handler: record voice setting and SCO role on
 * the ACL connection and pre-configure the SCO codec path — wideband (WBC)
 * when voice_setting == 0x0063 (mSBC/transparent — TODO confirm encoding),
 * narrowband (NBC) otherwise, with separate gateway variants for the
 * non-handsfree role.  Refuses if a SCO link to a different peer exists.
 */
6743 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
6744 void *data, u16 len)
6746 struct mgmt_cp_set_voice_setting *cp = data;
6747 struct hci_conn *conn;
6748 struct hci_conn *sco_conn;
6752 BT_DBG("%s", hdev->name);
6754 if (!lmp_bredr_capable(hdev)) {
6755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
6756 MGMT_STATUS_NOT_SUPPORTED);
6761 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
6763 err = mgmt_cmd_complete(sk, hdev->id,
6764 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
6768 conn->voice_setting = cp->voice_setting;
6769 conn->sco_role = cp->sco_role;
6771 sco_conn = hci_conn_hash_lookup_sco(hdev);
6772 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
6773 BT_ERR("There is other SCO connection.");
6777 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
6778 if (conn->voice_setting == 0x0063)
6779 sco_connect_set_wbc(hdev);
6781 sco_connect_set_nbc(hdev);
6783 if (conn->voice_setting == 0x0063)
6784 sco_connect_set_gw_wbc(hdev);
6786 sco_connect_set_gw_nbc(hdev);
6790 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
6794 hci_dev_unlock(hdev);
/*
 * MGMT_OP_GET_ADV_TX_POWER handler: return the cached advertising TX power
 * (hdev->adv_tx_power) in a kmalloc'd reply struct.  The kfree of rp is in
 * lines elided from this view — presumably after the unlock; TODO confirm.
 */
6798 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
6799 void *data, u16 len)
6801 struct mgmt_rp_get_adv_tx_power *rp;
6805 BT_DBG("%s", hdev->name);
6809 rp_len = sizeof(*rp);
6810 rp = kmalloc(rp_len, GFP_KERNEL);
6816 rp->adv_tx_power = hdev->adv_tx_power;
6818 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
6824 hci_dev_unlock(hdev);
/* Broadcast a controller hardware error code as MGMT_EV_HARDWARE_ERROR. */
6829 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
6831 struct mgmt_ev_hardware_error ev;
6833 ev.error_code = err_code;
6834 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a payload-less MGMT_EV_TX_TIMEOUT_ERROR event. */
6837 void mgmt_tx_timeout_error(struct hci_dev *hdev)
6839 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
/*
 * Broadcast a vendor multi-advertising state change
 * (instance, reason, connection handle) as MGMT_EV_MULTI_ADV_STATE_CHANGED.
 */
6842 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
6843 u8 state_change_reason, u16 connection_handle)
6845 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
6847 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
6848 adv_instance, state_change_reason, connection_handle);
6850 mgmt_ev.adv_instance = adv_instance;
6851 mgmt_ev.state_change_reason = state_change_reason;
6852 mgmt_ev.connection_handle = connection_handle;
6854 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
6855 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
6858 #endif /* TIZEN_BT */
/*
 * Validate one LTK entry from MGMT_OP_LOAD_LONG_TERM_KEYS: master flag
 * must be 0/1, address type must be LE public or LE random with the two
 * most significant address bits set (static random address).
 */
6860 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6862 if (key->master != 0x00 && key->master != 0x01)
6865 switch (key->addr.type) {
6866 case BDADDR_LE_PUBLIC:
6869 case BDADDR_LE_RANDOM:
6870 /* Two most significant bits shall be set */
6871 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/*
 * MGMT_OP_LOAD_LONG_TERM_KEYS handler: validate key count / payload length
 * / each key with ltk_is_valid(), then replace the device's SMP LTK store:
 * clear it and re-add every key, mapping the mgmt key type to SMP type
 * (master/slave legacy LTK or P-256 variants) and authenticated flag.
 */
6879 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6880 void *cp_data, u16 len)
6882 struct mgmt_cp_load_long_term_keys *cp = cp_data;
6883 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6884 sizeof(struct mgmt_ltk_info));
6885 u16 key_count, expected_len;
6888 BT_DBG("request for %s", hdev->name);
6890 if (!lmp_le_capable(hdev))
6891 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6892 MGMT_STATUS_NOT_SUPPORTED);
6894 key_count = __le16_to_cpu(cp->key_count);
6895 if (key_count > max_key_count) {
6896 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6898 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6899 MGMT_STATUS_INVALID_PARAMS);
6902 expected_len = struct_size(cp, keys, key_count);
6903 if (expected_len != len) {
6904 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6906 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6907 MGMT_STATUS_INVALID_PARAMS);
6910 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Reject the whole load if any single key is malformed. */
6912 for (i = 0; i < key_count; i++) {
6913 struct mgmt_ltk_info *key = &cp->keys[i];
6915 if (!ltk_is_valid(key))
6916 return mgmt_cmd_status(sk, hdev->id,
6917 MGMT_OP_LOAD_LONG_TERM_KEYS,
6918 MGMT_STATUS_INVALID_PARAMS);
6923 hci_smp_ltks_clear(hdev);
6925 for (i = 0; i < key_count; i++) {
6926 struct mgmt_ltk_info *key = &cp->keys[i];
6927 u8 type, authenticated;
6929 switch (key->type) {
6930 case MGMT_LTK_UNAUTHENTICATED:
6931 authenticated = 0x00;
6932 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6934 case MGMT_LTK_AUTHENTICATED:
6935 authenticated = 0x01;
6936 type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
6938 case MGMT_LTK_P256_UNAUTH:
6939 authenticated = 0x00;
6940 type = SMP_LTK_P256;
6942 case MGMT_LTK_P256_AUTH:
6943 authenticated = 0x01;
6944 type = SMP_LTK_P256;
6946 case MGMT_LTK_P256_DEBUG:
6947 authenticated = 0x00;
6948 type = SMP_LTK_P256_DEBUG;
6954 hci_add_ltk(hdev, &key->addr.bdaddr,
6955 le_addr_type(key->addr.type), type, authenticated,
6956 key->val, key->enc_size, key->ediv, key->rand);
6959 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6962 hci_dev_unlock(hdev);
/*
 * cmd_complete hook for MGMT_OP_GET_CONN_INFO: reply with the connection's
 * cached rssi/tx_power/max_tx_power on success, or the INVALID sentinels on
 * failure, then drop the conn reference taken when the command was queued.
 */
6967 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
6969 struct hci_conn *conn = cmd->user_data;
6970 struct mgmt_rp_get_conn_info rp;
6973 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
6975 if (status == MGMT_STATUS_SUCCESS) {
6976 rp.rssi = conn->rssi;
6977 rp.tx_power = conn->tx_power;
6978 rp.max_tx_power = conn->max_tx_power;
6980 rp.rssi = HCI_RSSI_INVALID;
6981 rp.tx_power = HCI_TX_POWER_INVALID;
6982 rp.max_tx_power = HCI_TX_POWER_INVALID;
6985 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
6986 status, &rp, sizeof(rp));
6988 hci_conn_drop(conn);
/*
 * Completion callback for the Read RSSI / Read TX Power request built in
 * get_conn_info(): recover the connection handle from the last-sent HCI
 * command, find the matching pending MGMT_OP_GET_CONN_INFO command for
 * that connection and complete it.
 */
6994 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
6997 struct hci_cp_read_rssi *cp;
6998 struct mgmt_pending_cmd *cmd;
6999 struct hci_conn *conn;
7003 BT_DBG("status 0x%02x", hci_status);
7007 /* Commands sent in request are either Read RSSI or Read Transmit Power
7008 * Level so we check which one was last sent to retrieve connection
7009 * handle. Both commands have handle as first parameter so it's safe to
7010 * cast data on the same command struct.
7012 * First command sent is always Read RSSI and we fail only if it fails.
7013 * In other case we simply override error to indicate success as we
7014 * already remembered if TX power value is actually valid.
7016 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
7018 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
7019 status = MGMT_STATUS_SUCCESS;
7021 status = mgmt_status(hci_status);
7025 bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
7029 handle = __le16_to_cpu(cp->handle);
7030 conn = hci_conn_hash_lookup_handle(hdev, handle);
7032 bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
7037 cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
7041 cmd->cmd_complete(cmd, status);
7042 mgmt_pending_remove(cmd);
7045 hci_dev_unlock(hdev);
/*
 * MGMT_OP_GET_CONN_INFO handler: return RSSI / TX power / max TX power for
 * a connection.  If the cached values are fresh (within a randomized
 * conn_info age window) answer from hci_conn directly; otherwise build a
 * Read RSSI (+ Read TX Power as needed) request and defer the reply to
 * conn_info_refresh_complete via conn_info_cmd_complete.
 */
7048 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
7051 struct mgmt_cp_get_conn_info *cp = data;
7052 struct mgmt_rp_get_conn_info rp;
7053 struct hci_conn *conn;
7054 unsigned long conn_info_age;
7057 BT_DBG("%s", hdev->name);
7059 memset(&rp, 0, sizeof(rp));
7060 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7061 rp.addr.type = cp->addr.type;
7063 if (!bdaddr_type_is_valid(cp->addr.type))
7064 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7065 MGMT_STATUS_INVALID_PARAMS,
7070 if (!hdev_is_powered(hdev)) {
7071 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7072 MGMT_STATUS_NOT_POWERED, &rp,
7077 if (cp->addr.type == BDADDR_BREDR)
7078 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7081 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7083 if (!conn || conn->state != BT_CONNECTED) {
7084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7085 MGMT_STATUS_NOT_CONNECTED, &rp,
7090 if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
7091 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7092 MGMT_STATUS_BUSY, &rp, sizeof(rp));
7096 /* To avoid client trying to guess when to poll again for information we
7097 * calculate conn info age as random value between min/max set in hdev.
7099 conn_info_age = hdev->conn_info_min_age +
7100 prandom_u32_max(hdev->conn_info_max_age -
7101 hdev->conn_info_min_age);
7103 /* Query controller to refresh cached values if they are too old or were
7106 if (time_after(jiffies, conn->conn_info_timestamp +
7107 msecs_to_jiffies(conn_info_age)) ||
7108 !conn->conn_info_timestamp) {
7109 struct hci_request req;
7110 struct hci_cp_read_tx_power req_txp_cp;
7111 struct hci_cp_read_rssi req_rssi_cp;
7112 struct mgmt_pending_cmd *cmd;
7114 hci_req_init(&req, hdev);
7115 req_rssi_cp.handle = cpu_to_le16(conn->handle);
7116 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
7119 /* For LE links TX power does not change thus we don't need to
7120 * query for it once value is known.
7122 if (!bdaddr_type_is_le(cp->addr.type) ||
7123 conn->tx_power == HCI_TX_POWER_INVALID) {
7124 req_txp_cp.handle = cpu_to_le16(conn->handle);
7125 req_txp_cp.type = 0x00;
7126 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7127 sizeof(req_txp_cp), &req_txp_cp);
7130 /* Max TX power needs to be read only once per connection */
7131 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
7132 req_txp_cp.handle = cpu_to_le16(conn->handle);
7133 req_txp_cp.type = 0x01;
7134 hci_req_add(&req, HCI_OP_READ_TX_POWER,
7135 sizeof(req_txp_cp), &req_txp_cp);
7138 err = hci_req_run(&req, conn_info_refresh_complete);
7142 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold a conn reference for the async completion path. */
7149 hci_conn_hold(conn);
7150 cmd->user_data = hci_conn_get(conn);
7151 cmd->cmd_complete = conn_info_cmd_complete;
7153 conn->conn_info_timestamp = jiffies;
7155 /* Cache is valid, just reply with values cached in hci_conn */
7156 rp.rssi = conn->rssi;
7157 rp.tx_power = conn->tx_power;
7158 rp.max_tx_power = conn->max_tx_power;
7160 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
7161 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
7165 hci_dev_unlock(hdev);
/*
 * cmd_complete hook for MGMT_OP_GET_CLOCK_INFO: build the reply from the
 * device's local clock and (when a connection was involved) the piconet
 * clock and accuracy, then release the conn reference.
 */
7169 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
7171 struct hci_conn *conn = cmd->user_data;
7172 struct mgmt_rp_get_clock_info rp;
7173 struct hci_dev *hdev;
7176 memset(&rp, 0, sizeof(rp));
7177 memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
7182 hdev = hci_dev_get(cmd->index);
7184 rp.local_clock = cpu_to_le32(hdev->clock);
7189 rp.piconet_clock = cpu_to_le32(conn->clock);
7190 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
7194 err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
7198 hci_conn_drop(conn);
/*
 * Completion callback for the Read Clock request: if the piconet clock was
 * read (hci_cp->which != 0) resolve the connection from the handle, then
 * complete and remove the matching pending MGMT_OP_GET_CLOCK_INFO command.
 */
7205 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7207 struct hci_cp_read_clock *hci_cp;
7208 struct mgmt_pending_cmd *cmd;
7209 struct hci_conn *conn;
7211 BT_DBG("%s status %u", hdev->name, status);
7215 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
7219 if (hci_cp->which) {
7220 u16 handle = __le16_to_cpu(hci_cp->handle);
7221 conn = hci_conn_hash_lookup_handle(hdev, handle);
7226 cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
7230 cmd->cmd_complete(cmd, mgmt_status(status));
7231 mgmt_pending_remove(cmd);
7234 hci_dev_unlock(hdev);
/*
 * MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): always read the local
 * clock (which = 0x00 via the zeroed hci_cp); additionally read the
 * piconet clock (which = 0x01) when a peer address was supplied and a
 * connection exists.  Reply is assembled in clock_info_cmd_complete.
 */
7237 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
7240 struct mgmt_cp_get_clock_info *cp = data;
7241 struct mgmt_rp_get_clock_info rp;
7242 struct hci_cp_read_clock hci_cp;
7243 struct mgmt_pending_cmd *cmd;
7244 struct hci_request req;
7245 struct hci_conn *conn;
7248 BT_DBG("%s", hdev->name);
7250 memset(&rp, 0, sizeof(rp));
7251 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
7252 rp.addr.type = cp->addr.type;
7254 if (cp->addr.type != BDADDR_BREDR)
7255 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7256 MGMT_STATUS_INVALID_PARAMS,
7261 if (!hdev_is_powered(hdev)) {
7262 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
7263 MGMT_STATUS_NOT_POWERED, &rp,
7268 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7269 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7271 if (!conn || conn->state != BT_CONNECTED) {
7272 err = mgmt_cmd_complete(sk, hdev->id,
7273 MGMT_OP_GET_CLOCK_INFO,
7274 MGMT_STATUS_NOT_CONNECTED,
7282 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
7288 cmd->cmd_complete = clock_info_cmd_complete;
7290 hci_req_init(&req, hdev);
7292 memset(&hci_cp, 0, sizeof(hci_cp));
7293 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
7296 hci_conn_hold(conn);
7297 cmd->user_data = hci_conn_get(conn);
7299 hci_cp.handle = cpu_to_le16(conn->handle);
7300 hci_cp.which = 0x01; /* Piconet clock */
7301 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
7304 err = hci_req_run(&req, get_clock_info_complete);
7306 mgmt_pending_remove(cmd);
7309 hci_dev_unlock(hdev);
7313 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7315 struct hci_conn *conn;
7317 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7321 if (conn->dst_type != type)
7324 if (conn->state != BT_CONNECTED)
7330 /* This function requires the caller holds hdev->lock */
7331 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7332 u8 addr_type, u8 auto_connect)
7334 struct hci_conn_params *params;
7336 params = hci_conn_params_add(hdev, addr, addr_type);
7340 if (params->auto_connect == auto_connect)
7343 list_del_init(¶ms->action);
7345 switch (auto_connect) {
7346 case HCI_AUTO_CONN_DISABLED:
7347 case HCI_AUTO_CONN_LINK_LOSS:
7348 /* If auto connect is being disabled when we're trying to
7349 * connect to device, keep connecting.
7351 if (params->explicit_connect)
7352 list_add(¶ms->action, &hdev->pend_le_conns);
7354 case HCI_AUTO_CONN_REPORT:
7355 if (params->explicit_connect)
7356 list_add(¶ms->action, &hdev->pend_le_conns);
7358 list_add(¶ms->action, &hdev->pend_le_reports);
7360 case HCI_AUTO_CONN_DIRECT:
7361 case HCI_AUTO_CONN_ALWAYS:
7362 if (!is_connected(hdev, addr, addr_type))
7363 list_add(¶ms->action, &hdev->pend_le_conns);
7367 params->auto_connect = auto_connect;
7369 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
7375 static void device_added(struct sock *sk, struct hci_dev *hdev,
7376 bdaddr_t *bdaddr, u8 type, u8 action)
7378 struct mgmt_ev_device_added ev;
7380 bacpy(&ev.addr.bdaddr, bdaddr);
7381 ev.addr.type = type;
7384 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_ADD_DEVICE handler: for BR/EDR adds the peer to the connection
 * whitelist; for LE configures auto-connect policy (0x00 report, 0x01
 * direct, 0x02 always) via hci_conn_params_set().
 * NOTE(review): elided listing — hci_dev_lock(), goto/unlock paths, the
 * "added:" label and closing braces are not visible between the lines.
 */
7387 static int add_device(struct sock *sk, struct hci_dev *hdev,
7388 void *data, u16 len)
7390 struct mgmt_cp_add_device *cp = data;
7391 u8 auto_conn, addr_type;
7394 BT_DBG("%s", hdev->name);
/* Reject invalid address types and the all-zero BDADDR_ANY address. */
7396 if (!bdaddr_type_is_valid(cp->addr.type) ||
7397 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7398 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7399 MGMT_STATUS_INVALID_PARAMS,
7400 &cp->addr, sizeof(cp->addr));
/* Only the three defined actions are accepted. */
7402 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7403 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7404 MGMT_STATUS_INVALID_PARAMS,
7405 &cp->addr, sizeof(cp->addr));
7409 if (cp->addr.type == BDADDR_BREDR) {
7410 /* Only incoming connections action is supported for now */
7411 if (cp->action != 0x01) {
7412 err = mgmt_cmd_complete(sk, hdev->id,
7414 MGMT_STATUS_INVALID_PARAMS,
7415 &cp->addr, sizeof(cp->addr));
7419 err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Page-scan settings may need updating after a whitelist change. */
7424 hci_req_update_scan(hdev);
7429 addr_type = le_addr_type(cp->addr.type);
7431 if (cp->action == 0x02)
7432 auto_conn = HCI_AUTO_CONN_ALWAYS;
7433 else if (cp->action == 0x01)
7434 auto_conn = HCI_AUTO_CONN_DIRECT;
7436 auto_conn = HCI_AUTO_CONN_REPORT;
7438 /* Kernel internally uses conn_params with resolvable private
7439 * address, but Add Device allows only identity addresses.
7440 * Make sure it is enforced before calling
7441 * hci_conn_params_lookup.
 */
7443 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7444 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7445 MGMT_STATUS_INVALID_PARAMS,
7446 &cp->addr, sizeof(cp->addr));
7450 /* If the connection parameters don't exist for this device,
7451 * they will be created and configured with defaults.
 */
7453 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7455 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7456 MGMT_STATUS_FAILED, &cp->addr,
/* New LE params may require restarting passive scanning. */
7461 hci_update_background_scan(hdev);
7464 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7466 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7467 MGMT_STATUS_SUCCESS, &cp->addr,
7471 hci_dev_unlock(hdev);
7475 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7476 bdaddr_t *bdaddr, u8 type)
7478 struct mgmt_ev_device_removed ev;
7480 bacpy(&ev.addr.bdaddr, bdaddr);
7481 ev.addr.type = type;
7483 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_REMOVE_DEVICE handler: with a concrete address, removes that
 * device from the whitelist (BR/EDR) or deletes its conn params (LE);
 * with BDADDR_ANY, clears the whole whitelist and all removable LE
 * connection parameters.
 * NOTE(review): elided listing — lock/unlock, goto labels, "continue"
 * statements and closing braces are missing between the numbered lines.
 * NOTE(review): "¶ms"/"&p" corruption below is mojibake — "&para" was
 * rendered as the entity '¶'; the identifiers are "&params"/"&p->...".
 */
7486 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7487 void *data, u16 len)
7489 struct mgmt_cp_remove_device *cp = data;
7492 BT_DBG("%s", hdev->name);
/* Non-ANY address: remove one specific device. */
7496 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7497 struct hci_conn_params *params;
7500 if (!bdaddr_type_is_valid(cp->addr.type)) {
7501 err = mgmt_cmd_complete(sk, hdev->id,
7502 MGMT_OP_REMOVE_DEVICE,
7503 MGMT_STATUS_INVALID_PARAMS,
7504 &cp->addr, sizeof(cp->addr));
7508 if (cp->addr.type == BDADDR_BREDR) {
7509 err = hci_bdaddr_list_del(&hdev->whitelist,
7513 err = mgmt_cmd_complete(sk, hdev->id,
7514 MGMT_OP_REMOVE_DEVICE,
7515 MGMT_STATUS_INVALID_PARAMS,
7521 hci_req_update_scan(hdev);
7523 device_removed(sk, hdev, &cp->addr.bdaddr,
7528 addr_type = le_addr_type(cp->addr.type);
7530 /* Kernel internally uses conn_params with resolvable private
7531 * address, but Remove Device allows only identity addresses.
7532 * Make sure it is enforced before calling
7533 * hci_conn_params_lookup.
 */
7535 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7536 err = mgmt_cmd_complete(sk, hdev->id,
7537 MGMT_OP_REMOVE_DEVICE,
7538 MGMT_STATUS_INVALID_PARAMS,
7539 &cp->addr, sizeof(cp->addr));
7543 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7546 err = mgmt_cmd_complete(sk, hdev->id,
7547 MGMT_OP_REMOVE_DEVICE,
7548 MGMT_STATUS_INVALID_PARAMS,
7549 &cp->addr, sizeof(cp->addr));
/* Disabled/explicit-connect params were never "added" via mgmt, so
 * removing them here would be inconsistent with the API contract.
 */
7553 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7554 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7555 err = mgmt_cmd_complete(sk, hdev->id,
7556 MGMT_OP_REMOVE_DEVICE,
7557 MGMT_STATUS_INVALID_PARAMS,
7558 &cp->addr, sizeof(cp->addr));
7562 list_del(&params->action);
7563 list_del(&params->list);
7565 hci_update_background_scan(hdev);
7567 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: bulk removal of whitelist and LE conn params. */
7569 struct hci_conn_params *p, *tmp;
7570 struct bdaddr_list *b, *btmp;
/* For the wildcard form the address type must be 0. */
7572 if (cp->addr.type) {
7573 err = mgmt_cmd_complete(sk, hdev->id,
7574 MGMT_OP_REMOVE_DEVICE,
7575 MGMT_STATUS_INVALID_PARAMS,
7576 &cp->addr, sizeof(cp->addr));
7580 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
7581 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7586 hci_req_update_scan(hdev);
7588 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7589 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7591 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Params with a pending explicit connect are only downgraded to
 * EXPLICIT, not deleted, so the connect attempt can finish.
 */
7592 if (p->explicit_connect) {
7593 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7596 list_del(&p->action);
7601 BT_DBG("All LE connection parameters were removed");
7603 hci_update_background_scan(hdev);
7607 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7608 MGMT_STATUS_SUCCESS, &cp->addr,
7611 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replaces the stored LE connection
 * parameters with the list supplied by userspace. Invalid entries are
 * skipped with an error log rather than failing the whole command.
 * NOTE(review): elided listing — lock/unlock, "continue" statements and
 * closing braces are missing between the numbered lines.
 * NOTE(review): "¶m" below is mojibake for "&param" ('&para' rendered
 * as the '¶' entity).
 */
7615 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7618 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeps param_count * entry size within a u16 total length. */
7619 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7620 sizeof(struct mgmt_conn_param));
7621 u16 param_count, expected_len;
7624 if (!lmp_le_capable(hdev))
7625 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7626 MGMT_STATUS_NOT_SUPPORTED);
7628 param_count = __le16_to_cpu(cp->param_count);
7629 if (param_count > max_param_count) {
7630 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7633 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared entry count exactly. */
7636 expected_len = struct_size(cp, params, param_count);
7637 if (expected_len != len) {
7638 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7641 MGMT_STATUS_INVALID_PARAMS);
7644 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Wipe previously-disabled params before loading the new set. */
7648 hci_conn_params_clear_disabled(hdev);
7650 for (i = 0; i < param_count; i++) {
7651 struct mgmt_conn_param *param = &cp->params[i];
7652 struct hci_conn_params *hci_param;
7653 u16 min, max, latency, timeout;
7656 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
7659 if (param->addr.type == BDADDR_LE_PUBLIC) {
7660 addr_type = ADDR_LE_DEV_PUBLIC;
7661 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7662 addr_type = ADDR_LE_DEV_RANDOM;
7664 bt_dev_err(hdev, "ignoring invalid connection parameters");
7668 min = le16_to_cpu(param->min_interval);
7669 max = le16_to_cpu(param->max_interval);
7670 latency = le16_to_cpu(param->latency);
7671 timeout = le16_to_cpu(param->timeout);
7673 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7674 min, max, latency, timeout);
/* Range/consistency validation per the LE connection parameter limits. */
7676 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7677 bt_dev_err(hdev, "ignoring invalid connection parameters");
7681 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
7684 bt_dev_err(hdev, "failed to add connection parameters");
7688 hci_param->conn_min_interval = min;
7689 hci_param->conn_max_interval = max;
7690 hci_param->conn_latency = latency;
7691 hci_param->supervision_timeout = timeout;
7694 hci_dev_unlock(hdev);
7696 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggles the HCI_EXT_CONFIGURED
 * flag on controllers with the EXTERNAL_CONFIG quirk and, when the
 * configured state changes, moves the index between the configured and
 * unconfigured lists (mgmt_index_removed/added).
 * NOTE(review): elided listing — lock/unlock, the "changed" checks and
 * goto/unlock paths are missing between the numbered lines.
 */
7700 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7701 void *data, u16 len)
7703 struct mgmt_cp_set_external_config *cp = data;
7707 BT_DBG("%s", hdev->name);
/* Config source cannot be changed while the controller is powered. */
7709 if (hdev_is_powered(hdev))
7710 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7711 MGMT_STATUS_REJECTED);
7713 if (cp->config != 0x00 && cp->config != 0x01)
7714 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7715 MGMT_STATUS_INVALID_PARAMS);
7717 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7718 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7719 MGMT_STATUS_NOT_SUPPORTED);
/* test_and_set/clear gives us whether the flag actually changed. */
7724 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7726 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7728 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7735 err = new_options(hdev, sk);
/* If the configured state flipped, re-announce the index on the other
 * (configured vs unconfigured) index list.
 */
7737 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7738 mgmt_index_removed(hdev);
7740 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7741 hci_dev_set_flag(hdev, HCI_CONFIG);
7742 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7744 queue_work(hdev->req_workqueue, &hdev->power_on);
7746 set_bit(HCI_RAW, &hdev->flags);
7747 mgmt_index_added(hdev);
7752 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: stores the public address to be
 * programmed into a controller that needs one (set_bdaddr driver hook)
 * and, if that completes the configuration, re-registers the index as a
 * configured controller and powers it on.
 * NOTE(review): elided listing — lock/unlock, the "changed" checks and
 * goto/unlock paths are missing between the numbered lines.
 */
7756 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7757 void *data, u16 len)
7759 struct mgmt_cp_set_public_address *cp = data;
7763 BT_DBG("%s", hdev->name);
7765 if (hdev_is_powered(hdev))
7766 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7767 MGMT_STATUS_REJECTED);
7769 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7770 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7771 MGMT_STATUS_INVALID_PARAMS);
/* Only meaningful for controllers whose driver can program a BD_ADDR. */
7773 if (!hdev->set_bdaddr)
7774 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7775 MGMT_STATUS_NOT_SUPPORTED);
7779 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7780 bacpy(&hdev->public_addr, &cp->bdaddr);
7782 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7789 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7790 err = new_options(hdev, sk);
/* Address was the last missing piece: flip to configured and power on. */
7792 if (is_configured(hdev)) {
7793 mgmt_index_removed(hdev);
7795 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7797 hci_dev_set_flag(hdev, HCI_CONFIG);
7798 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7800 queue_work(hdev->req_workqueue, &hdev->power_on);
7804 hci_dev_unlock(hdev);
/* Tizen extension: emit a Device Name Update event carrying the remote
 * name as an EIR_NAME_COMPLETE field.
 * NOTE(review): elided listing — the backing "buf" declaration (the
 * storage *ev points into) and the name-length argument are not visible
 * between the numbered lines.
 */
7809 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
7813 struct mgmt_ev_device_name_update *ev = (void *)buf;
7819 bacpy(&ev->addr.bdaddr, bdaddr);
/* Remote name events here are BR/EDR only. */
7820 ev->addr.type = BDADDR_BREDR;
7822 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7825 ev->eir_len = cpu_to_le16(eir_len);
7827 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
7828 sizeof(*ev) + eir_len, NULL);
/* Tizen extension: emit a Connection Update Failed event for an LE
 * connection parameter update that the controller rejected.
 * NOTE(review): elided listing — the use of the @status parameter
 * (presumably an "ev.status = status;" assignment at the elided line
 * 7838) is not visible; confirm against the full source.
 */
7831 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7832 u8 link_type, u8 addr_type, u8 status)
7834 struct mgmt_ev_conn_update_failed ev;
7836 bacpy(&ev.addr.bdaddr, bdaddr);
7837 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7840 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
7841 &ev, sizeof(ev), NULL);
7844 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
7845 u8 link_type, u8 addr_type, u16 conn_interval,
7846 u16 conn_latency, u16 supervision_timeout)
7848 struct mgmt_ev_conn_updated ev;
7850 bacpy(&ev.addr.bdaddr, bdaddr);
7851 ev.addr.type = link_to_bdaddr(link_type, addr_type);
7852 ev.conn_interval = cpu_to_le16(conn_interval);
7853 ev.conn_latency = cpu_to_le16(conn_latency);
7854 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
7856 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
7857 &ev, sizeof(ev), NULL);
7860 /* le device found event - Pass adv type */
/* Tizen extension: emit an LE Device Found event including the raw
 * advertising type. The EIR payload is assembled from @eir, an optional
 * synthesized Class of Device field, and the scan response data.
 * NOTE(review): elided listing — the "buf" declaration, rssi assignment
 * and some conditionals between the numbered lines are not visible.
 */
7861 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7862 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
7863 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
7866 struct mgmt_ev_le_device_found *ev = (void *)buf;
/* Only report while some form of discovery is active. */
7869 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
7872 /* Make sure that the buffer is big enough. The 5 extra bytes
7873 * are for the potential CoD field.
 */
7875 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7878 memset(buf, 0, sizeof(buf));
7880 bacpy(&ev->addr.bdaddr, bdaddr);
7881 ev->addr.type = link_to_bdaddr(link_type, addr_type);
7883 ev->flags = cpu_to_le32(flags);
7884 ev->adv_type = adv_type;
7887 memcpy(ev->eir, eir, eir_len);
/* Append CoD only if the incoming EIR does not already carry one. */
7889 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
7890 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7893 if (scan_rsp_len > 0)
7894 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7896 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7897 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7899 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Completion handler for Read Local OOB Extended Data: parses the HCI
 * reply (legacy or secure-connections variant), builds an EIR-encoded
 * mgmt reply, answers the pending command and broadcasts the updated
 * OOB data to interested sockets.
 * NOTE(review): elided listing — error branches, the h192/r192/h256/r256
 * assignments and goto labels between the numbered lines are not visible.
 */
7903 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
7904 u16 opcode, struct sk_buff *skb)
7906 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7907 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7908 u8 *h192, *r192, *h256, *r256;
7909 struct mgmt_pending_cmd *cmd;
7913 BT_DBG("%s status %u", hdev->name, status);
7915 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
7919 mgmt_cp = cmd->param;
7922 status = mgmt_status(status);
/* Legacy (P-192 only) OOB data reply. */
7929 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
7930 struct hci_rp_read_local_oob_data *rp;
7932 if (skb->len != sizeof(*rp)) {
7933 status = MGMT_STATUS_FAILED;
7936 status = MGMT_STATUS_SUCCESS;
7937 rp = (void *)skb->data;
/* 5 bytes CoD field + 18 bytes each for hash and randomizer. */
7939 eir_len = 5 + 18 + 18;
/* Extended (P-192 + P-256) OOB data reply. */
7946 struct hci_rp_read_local_oob_ext_data *rp;
7948 if (skb->len != sizeof(*rp)) {
7949 status = MGMT_STATUS_FAILED;
7952 status = MGMT_STATUS_SUCCESS;
7953 rp = (void *)skb->data;
/* SC-only mode exposes just the P-256 values. */
7955 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7956 eir_len = 5 + 18 + 18;
7960 eir_len = 5 + 18 + 18 + 18 + 18;
7970 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7977 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7978 hdev->dev_class, 3);
7981 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7982 EIR_SSP_HASH_C192, h192, 16);
7983 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7984 EIR_SSP_RAND_R192, r192, 16);
7988 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7989 EIR_SSP_HASH_C256, h256, 16);
7990 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7991 EIR_SSP_RAND_R256, r256, 16);
7995 mgmt_rp->type = mgmt_cp->type;
7996 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7998 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7999 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
8000 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
8001 if (err < 0 || status)
/* Requester implicitly subscribes to future OOB data updates. */
8004 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
8006 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8007 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
8008 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
8011 mgmt_pending_remove(cmd);
/* Queue the appropriate HCI Read Local OOB (Extended) Data command for a
 * pending Read Local OOB Extended Data mgmt request; completion arrives
 * in read_local_oob_ext_data_complete().
 * NOTE(review): elided listing — the !cmd error return and final return
 * statement between the numbered lines are not visible.
 */
8014 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
8015 struct mgmt_cp_read_local_oob_ext_data *cp)
8017 struct mgmt_pending_cmd *cmd;
8018 struct hci_request req;
8021 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
8026 hci_req_init(&req, hdev);
/* SC-capable BR/EDR controllers return both P-192 and P-256 values. */
8028 if (bredr_sc_enabled(hdev))
8029 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
8031 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
8033 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
8035 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler: returns EIR-encoded local
 * out-of-band pairing data. BR/EDR requests go asynchronously through
 * read_local_ssp_oob_req(); LE requests are answered synchronously with
 * address, role, optional SC confirm/random values and flags.
 * NOTE(review): elided listing — lock/unlock, goto labels ("complete",
 * "done"), kfree calls and several else branches are not visible between
 * the numbered lines.
 */
8042 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
8043 void *data, u16 data_len)
8045 struct mgmt_cp_read_local_oob_ext_data *cp = data;
8046 struct mgmt_rp_read_local_oob_ext_data *rp;
8049 u8 status, flags, role, addr[7], hash[16], rand[16];
8052 BT_DBG("%s", hdev->name);
/* Validate the requested transport(s) and compute the reply EIR size. */
8054 if (hdev_is_powered(hdev)) {
8056 case BIT(BDADDR_BREDR):
8057 status = mgmt_bredr_support(hdev);
8063 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8064 status = mgmt_le_support(hdev);
/* LE reply: addr(9) + role(3) + hash(18) + rand(18) + flags(3). */
8068 eir_len = 9 + 3 + 18 + 18 + 3;
8071 status = MGMT_STATUS_INVALID_PARAMS;
8076 status = MGMT_STATUS_NOT_POWERED;
8080 rp_len = sizeof(*rp) + eir_len;
8081 rp = kmalloc(rp_len, GFP_ATOMIC);
8092 case BIT(BDADDR_BREDR):
/* BR/EDR with SSP: answer asynchronously via the HCI request. */
8093 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
8094 err = read_local_ssp_oob_req(hdev, sk, cp);
8095 hci_dev_unlock(hdev);
8099 status = MGMT_STATUS_FAILED;
8102 eir_len = eir_append_data(rp->eir, eir_len,
8104 hdev->dev_class, 3);
8107 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
8108 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
8109 smp_generate_oob(hdev, hash, rand) < 0) {
8110 hci_dev_unlock(hdev);
8111 status = MGMT_STATUS_FAILED;
8115 /* This should return the active RPA, but since the RPA
8116 * is only programmed on demand, it is really hard to fill
8117 * this in at the moment. For now disallow retrieving
8118 * local out-of-band data when privacy is in use.
8120 * Returning the identity address will not help here since
8121 * pairing happens before the identity resolving key is
8122 * known and thus the connection establishment happens
8123 * based on the RPA and not the identity address.
 */
8125 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
8126 hci_dev_unlock(hdev);
8127 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address by the same rules used elsewhere for
 * LE own-address selection.
 */
8131 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
8132 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
8133 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
8134 bacmp(&hdev->static_addr, BDADDR_ANY))) {
8135 memcpy(addr, &hdev->static_addr, 6);
8138 memcpy(addr, &hdev->bdaddr, 6);
8142 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
8143 addr, sizeof(addr));
/* Role byte depends on whether we are currently advertising. */
8145 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8150 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
8151 &role, sizeof(role));
8153 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
8154 eir_len = eir_append_data(rp->eir, eir_len,
8156 hash, sizeof(hash));
8158 eir_len = eir_append_data(rp->eir, eir_len,
8160 rand, sizeof(rand));
8163 flags = mgmt_get_adv_discov_flags(hdev);
8165 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
8166 flags |= LE_AD_NO_BREDR;
8168 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
8169 &flags, sizeof(flags));
8173 hci_dev_unlock(hdev);
8175 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
8177 status = MGMT_STATUS_SUCCESS;
8180 rp->type = cp->type;
8181 rp->eir_len = cpu_to_le16(eir_len);
8183 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
8184 status, rp, sizeof(*rp) + eir_len);
8185 if (err < 0 || status)
8188 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
8189 rp, sizeof(*rp) + eir_len,
8190 HCI_MGMT_OOB_DATA_EVENTS, sk);
8198 static u32 get_supported_adv_flags(struct hci_dev *hdev)
8202 flags |= MGMT_ADV_FLAG_CONNECTABLE;
8203 flags |= MGMT_ADV_FLAG_DISCOV;
8204 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
8205 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
8206 flags |= MGMT_ADV_FLAG_APPEARANCE;
8207 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
8209 /* In extended adv TX_POWER returned from Set Adv Param
8210 * will be always valid.
8212 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
8213 ext_adv_capable(hdev))
8214 flags |= MGMT_ADV_FLAG_TX_POWER;
8216 if (ext_adv_capable(hdev)) {
8217 flags |= MGMT_ADV_FLAG_SEC_1M;
8219 if (hdev->le_features[1] & HCI_LE_PHY_2M)
8220 flags |= MGMT_ADV_FLAG_SEC_2M;
8222 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
8223 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: reports supported advertising flags,
 * data-size limits, the instance limit and the list of currently
 * configured advertising instance identifiers.
 * NOTE(review): elided listing — lock, the -ENOMEM return, instance
 * pointer increment and kfree between the numbered lines are not visible.
 */
8229 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
8230 void *data, u16 data_len)
8232 struct mgmt_rp_read_adv_features *rp;
8235 struct adv_info *adv_instance;
8236 u32 supported_flags;
8239 BT_DBG("%s", hdev->name);
8241 if (!lmp_le_capable(hdev))
8242 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8243 MGMT_STATUS_REJECTED);
/* One trailing byte per configured instance for the instance array. */
8247 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
8248 rp = kmalloc(rp_len, GFP_ATOMIC);
8250 hci_dev_unlock(hdev);
8254 supported_flags = get_supported_adv_flags(hdev);
8256 rp->supported_flags = cpu_to_le32(supported_flags);
8257 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
8258 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
8259 rp->max_instances = HCI_MAX_ADV_INSTANCES;
8260 rp->num_instances = hdev->adv_instance_cnt;
/* Fill the flexible array with the identifiers of known instances. */
8262 instance = rp->instance;
8263 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
8264 *instance = adv_instance->instance;
8268 hci_dev_unlock(hdev);
8270 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
8271 MGMT_STATUS_SUCCESS, rp, rp_len);
8278 static u8 calculate_name_len(struct hci_dev *hdev)
8280 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
8282 return append_local_name(hdev, buf, 0);
8285 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
8288 u8 max_len = HCI_MAX_AD_LENGTH;
8291 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
8292 MGMT_ADV_FLAG_LIMITED_DISCOV |
8293 MGMT_ADV_FLAG_MANAGED_FLAGS))
8296 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
8299 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
8300 max_len -= calculate_name_len(hdev);
8302 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
8309 static bool flags_managed(u32 adv_flags)
8311 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8312 MGMT_ADV_FLAG_LIMITED_DISCOV |
8313 MGMT_ADV_FLAG_MANAGED_FLAGS);
8316 static bool tx_power_managed(u32 adv_flags)
8318 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8321 static bool name_managed(u32 adv_flags)
8323 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8326 static bool appearance_managed(u32 adv_flags)
8328 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising / scan-response TLV data: overall
 * length must fit the per-instance budget, fields must be well-formed,
 * and fields the kernel manages itself (flags, TX power, name,
 * appearance) must not be supplied by userspace.
 * NOTE(review): elided listing — the length check against max_len, the
 * "return false" bodies and the final "return true" are not visible
 * between the numbered lines.
 */
8331 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8332 u8 len, bool is_adv_data)
8337 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8342 /* Make sure that the data is correctly formatted. */
8343 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* Flags may appear in scan responses but never in managed adv data. */
8346 if (data[i + 1] == EIR_FLAGS &&
8347 (!is_adv_data || flags_managed(adv_flags)))
8350 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8353 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8356 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8359 if (data[i + 1] == EIR_APPEARANCE &&
8360 appearance_managed(adv_flags))
8363 /* If the current field length would exceed the total data
8364 * length, then it's invalid.
 */
8366 if (i + cur_len >= len)
/* HCI request completion for Add Advertising: on failure, tears down any
 * instances that were still pending (emitting Advertising Removed), then
 * answers the pending mgmt command with the translated status.
 * NOTE(review): elided listing — lock, the status checks guarding the
 * cleanup loop, "continue" statements and goto/unlock paths are not
 * visible between the numbered lines.
 */
8373 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
8376 struct mgmt_pending_cmd *cmd;
8377 struct mgmt_cp_add_advertising *cp;
8378 struct mgmt_rp_add_advertising rp;
8379 struct adv_info *adv_instance, *n;
8382 BT_DBG("status %d", status);
8386 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
8388 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
8389 if (!adv_instance->pending)
/* On success the instance simply graduates from "pending". */
8393 adv_instance->pending = false;
8397 instance = adv_instance->instance;
/* Stop the expiry timer if the failing instance was being advertised. */
8399 if (hdev->cur_adv_instance == instance)
8400 cancel_adv_timeout(hdev);
8402 hci_remove_adv_instance(hdev, instance);
8403 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
8410 rp.instance = cp->instance;
8413 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8414 mgmt_status(status));
8416 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8417 mgmt_status(status), &rp, sizeof(rp));
8419 mgmt_pending_remove(cmd);
8422 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_ADVERTISING handler: validates flags and TLV payloads,
 * registers (or replaces) an advertising instance and, when possible,
 * schedules it for transmission via an HCI request; otherwise completes
 * immediately.
 * NOTE(review): elided listing — lock/unlock, goto labels ("unlock"),
 * several error checks and closing braces are not visible between the
 * numbered lines.
 */
8425 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8426 void *data, u16 data_len)
8428 struct mgmt_cp_add_advertising *cp = data;
8429 struct mgmt_rp_add_advertising rp;
8431 u32 supported_flags, phy_flags;
8433 u16 timeout, duration;
8434 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
8435 u8 schedule_instance = 0;
8436 struct adv_info *next_instance;
8438 struct mgmt_pending_cmd *cmd;
8439 struct hci_request req;
8441 BT_DBG("%s", hdev->name);
8443 status = mgmt_le_support(hdev);
8445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8448 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
8449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8450 MGMT_STATUS_INVALID_PARAMS)
8452 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8453 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8454 MGMT_STATUS_INVALID_PARAMS);
8456 flags = __le32_to_cpu(cp->flags);
8457 timeout = __le16_to_cpu(cp->timeout);
8458 duration = __le16_to_cpu(cp->duration);
8460 /* The current implementation only supports a subset of the specified
8461 * flags. Also need to check mutual exclusiveness of sec flags.
 */
8463 supported_flags = get_supported_adv_flags(hdev);
8464 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than one
 * secondary-PHY bit is set — at most one PHY may be selected.
 */
8465 if (flags & ~supported_flags ||
8466 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8467 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8468 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs the power-off timer machinery, i.e. a powered device. */
8472 if (timeout && !hdev_is_powered(hdev)) {
8473 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8474 MGMT_STATUS_REJECTED);
8478 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8479 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8480 pending_find(MGMT_OP_SET_LE, hdev)) {
8481 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8486 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8487 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8488 cp->scan_rsp_len, false)) {
8489 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8490 MGMT_STATUS_INVALID_PARAMS);
8494 err = hci_add_adv_instance(hdev, cp->instance, flags,
8495 cp->adv_data_len, cp->data,
8497 cp->data + cp->adv_data_len,
8500 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8501 MGMT_STATUS_FAILED);
8505 /* Only trigger an advertising added event if a new instance was
8508 if (hdev->adv_instance_cnt > prev_instance_cnt)
8509 mgmt_advertising_added(sk, hdev, cp->instance);
8511 if (hdev->cur_adv_instance == cp->instance) {
8512 /* If the currently advertised instance is being changed then
8513 * cancel the current advertising and schedule the next
8514 * instance. If there is only one instance then the overridden
8515 * advertising data will be visible right away.
 */
8517 cancel_adv_timeout(hdev);
8519 next_instance = hci_get_next_instance(hdev, cp->instance);
8521 schedule_instance = next_instance->instance;
8522 } else if (!hdev->adv_instance_timeout) {
8523 /* Immediately advertise the new instance if no other
8524 * instance is currently being advertised.
 */
8526 schedule_instance = cp->instance;
8529 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8530 * there is no instance to be advertised then we have no HCI
8531 * communication to make. Simply return.
 */
8533 if (!hdev_is_powered(hdev) ||
8534 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8535 !schedule_instance) {
8536 rp.instance = cp->instance;
8537 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8538 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8542 /* We're good to go, update advertising data, parameters, and start
 */
8545 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8552 hci_req_init(&req, hdev);
8554 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
8557 err = hci_req_run(&req, add_advertising_complete);
8560 mgmt_pending_remove(cmd);
8563 hci_dev_unlock(hdev);
/* HCI request completion for Remove Advertising: always reports success
 * to the pending command (see comment below), since the instance removal
 * itself has already happened.
 * NOTE(review): elided listing — lock, the !cmd check, cp assignment and
 * goto/unlock paths are not visible between the numbered lines.
 */
8568 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
8571 struct mgmt_pending_cmd *cmd;
8572 struct mgmt_cp_remove_advertising *cp;
8573 struct mgmt_rp_remove_advertising rp;
8575 BT_DBG("status %d", status);
8579 /* A failure status here only means that we failed to disable
8580 * advertising. Otherwise, the advertising instance has been removed,
8581 * so report success.
 */
8583 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
8588 rp.instance = cp->instance;
8590 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
8592 mgmt_pending_remove(cmd);
8595 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler: removes one advertising instance
 * (or all, when instance is 0) and disables advertising if none remain.
 * Completes immediately when no HCI traffic is required.
 * NOTE(review): elided listing — lock/unlock, goto labels ("unlock") and
 * closing braces between the numbered lines are not visible.
 */
8598 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8599 void *data, u16 data_len)
8601 struct mgmt_cp_remove_advertising *cp = data;
8602 struct mgmt_rp_remove_advertising rp;
8603 struct mgmt_pending_cmd *cmd;
8604 struct hci_request req;
8607 BT_DBG("%s", hdev->name);
/* A non-zero instance must refer to an existing instance. */
8611 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8612 err = mgmt_cmd_status(sk, hdev->id,
8613 MGMT_OP_REMOVE_ADVERTISING,
8614 MGMT_STATUS_INVALID_PARAMS);
/* Serialize against other advertising-related pending operations. */
8618 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
8619 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
8620 pending_find(MGMT_OP_SET_LE, hdev)) {
8621 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8626 if (list_empty(&hdev->adv_instances)) {
8627 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8628 MGMT_STATUS_INVALID_PARAMS);
8632 hci_req_init(&req, hdev);
8634 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
8636 if (list_empty(&hdev->adv_instances))
8637 __hci_req_disable_advertising(&req);
8639 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8640 * flag is set or the device isn't powered then we have no HCI
8641 * communication to make. Simply return.
 */
8643 if (skb_queue_empty(&req.cmd_q) ||
8644 !hdev_is_powered(hdev) ||
8645 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
8646 hci_req_purge(&req);
8647 rp.instance = cp->instance;
8648 err = mgmt_cmd_complete(sk, hdev->id,
8649 MGMT_OP_REMOVE_ADVERTISING,
8650 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8654 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8661 err = hci_req_run(&req, remove_advertising_complete);
8663 mgmt_pending_remove(cmd);
8666 hci_dev_unlock(hdev);
8671 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8672 void *data, u16 data_len)
8674 struct mgmt_cp_get_adv_size_info *cp = data;
8675 struct mgmt_rp_get_adv_size_info rp;
8676 u32 flags, supported_flags;
8679 BT_DBG("%s", hdev->name);
8681 if (!lmp_le_capable(hdev))
8682 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8683 MGMT_STATUS_REJECTED);
8685 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
8686 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8687 MGMT_STATUS_INVALID_PARAMS);
8689 flags = __le32_to_cpu(cp->flags);
8691 /* The current implementation only supports a subset of the specified
8694 supported_flags = get_supported_adv_flags(hdev);
8695 if (flags & ~supported_flags)
8696 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8697 MGMT_STATUS_INVALID_PARAMS);
8699 rp.instance = cp->instance;
8700 rp.flags = cp->flags;
8701 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8702 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8704 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8705 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8710 static const struct hci_mgmt_handler mgmt_handlers[] = {
8711 { NULL }, /* 0x0000 (no command) */
8712 { read_version, MGMT_READ_VERSION_SIZE,
8714 HCI_MGMT_UNTRUSTED },
8715 { read_commands, MGMT_READ_COMMANDS_SIZE,
8717 HCI_MGMT_UNTRUSTED },
8718 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8720 HCI_MGMT_UNTRUSTED },
8721 { read_controller_info, MGMT_READ_INFO_SIZE,
8722 HCI_MGMT_UNTRUSTED },
8723 { set_powered, MGMT_SETTING_SIZE },
8724 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8725 { set_connectable, MGMT_SETTING_SIZE },
8726 { set_fast_connectable, MGMT_SETTING_SIZE },
8727 { set_bondable, MGMT_SETTING_SIZE },
8728 { set_link_security, MGMT_SETTING_SIZE },
8729 { set_ssp, MGMT_SETTING_SIZE },
8730 { set_hs, MGMT_SETTING_SIZE },
8731 { set_le, MGMT_SETTING_SIZE },
8732 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8733 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8734 { add_uuid, MGMT_ADD_UUID_SIZE },
8735 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8736 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8738 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8740 { disconnect, MGMT_DISCONNECT_SIZE },
8741 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8742 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8743 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8744 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8745 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8746 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8747 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8748 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8749 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8750 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8751 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8752 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8753 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8755 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8756 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8757 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8758 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8759 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8760 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8761 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8762 { set_advertising, MGMT_SETTING_SIZE },
8763 { set_bredr, MGMT_SETTING_SIZE },
8764 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8765 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8766 { set_secure_conn, MGMT_SETTING_SIZE },
8767 { set_debug_keys, MGMT_SETTING_SIZE },
8768 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8769 { load_irks, MGMT_LOAD_IRKS_SIZE,
8771 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8772 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8773 { add_device, MGMT_ADD_DEVICE_SIZE },
8774 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8775 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8777 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8779 HCI_MGMT_UNTRUSTED },
8780 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8781 HCI_MGMT_UNCONFIGURED |
8782 HCI_MGMT_UNTRUSTED },
8783 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8784 HCI_MGMT_UNCONFIGURED },
8785 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8786 HCI_MGMT_UNCONFIGURED },
8787 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8789 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8790 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8792 HCI_MGMT_UNTRUSTED },
8793 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8794 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8796 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8797 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8798 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8799 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8800 HCI_MGMT_UNTRUSTED },
8801 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8802 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8803 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
/* Tizen-specific mgmt command dispatch table (vendor extension channel).
 * NOTE(review): the HCI_MGMT_VAR_LEN flags on set_advertising_data and
 * set_scan_rsp_data and the TIZEN_BT guard are reconstructed from context —
 * confirm against the Tizen kernel tree.
 */
#ifdef TIZEN_BT
static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ set_advertising_params,  MGMT_SET_ADVERTISING_PARAMS_SIZE },
	{ set_advertising_data,    MGMT_SET_ADV_MIN_APP_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_scan_rsp_data,       MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_white_list,          MGMT_ADD_DEV_WHITE_LIST_SIZE },
	{ remove_from_white_list,  MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
	{ clear_white_list,        MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
	{ set_enable_rssi,         MGMT_SET_RSSI_ENABLE_SIZE },
	{ get_raw_rssi,            MGMT_GET_RAW_RSSI_SIZE },
	{ set_disable_threshold,   MGMT_SET_RSSI_DISABLE_SIZE },
	{ start_le_discovery,      MGMT_START_LE_DISCOVERY_SIZE },
	{ stop_le_discovery,       MGMT_STOP_LE_DISCOVERY_SIZE },
	{ disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
	{ le_conn_update,          MGMT_LE_CONN_UPDATE_SIZE },
	{ set_manufacturer_data,   MGMT_SET_MANUFACTURER_DATA_SIZE },
	{ le_set_scan_params,      MGMT_LE_SET_SCAN_PARAMS_SIZE },
	{ set_voice_setting,       MGMT_SET_VOICE_SETTING_SIZE },
	{ get_adv_tx_power,        MGMT_GET_ADV_TX_POWER_SIZE },
};
#endif
8831 void mgmt_index_added(struct hci_dev *hdev)
8833 struct mgmt_ev_ext_index ev;
8835 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8838 switch (hdev->dev_type) {
8840 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8841 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8842 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8845 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8846 HCI_MGMT_INDEX_EVENTS);
8859 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8860 HCI_MGMT_EXT_INDEX_EVENTS);
8863 void mgmt_index_removed(struct hci_dev *hdev)
8865 struct mgmt_ev_ext_index ev;
8866 u8 status = MGMT_STATUS_INVALID_INDEX;
8868 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8871 switch (hdev->dev_type) {
8873 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8875 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8876 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8877 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8880 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8881 HCI_MGMT_INDEX_EVENTS);
8894 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8895 HCI_MGMT_EXT_INDEX_EVENTS);
8898 /* This function requires the caller holds hdev->lock */
8899 static void restart_le_actions(struct hci_dev *hdev)
8901 struct hci_conn_params *p;
8903 list_for_each_entry(p, &hdev->le_conn_params, list) {
8904 /* Needed for AUTO_OFF case where might not "really"
8905 * have been powered off.
8907 list_del_init(&p->action);
8909 switch (p->auto_connect) {
8910 case HCI_AUTO_CONN_DIRECT:
8911 case HCI_AUTO_CONN_ALWAYS:
8912 list_add(&p->action, &hdev->pend_le_conns);
8914 case HCI_AUTO_CONN_REPORT:
8915 list_add(&p->action, &hdev->pend_le_reports);
8923 void mgmt_power_on(struct hci_dev *hdev, int err)
8925 struct cmd_lookup match = { NULL, hdev };
8927 BT_DBG("err %d", err);
8932 restart_le_actions(hdev);
8933 hci_update_background_scan(hdev);
8936 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8938 new_settings(hdev, match.sk);
8943 hci_dev_unlock(hdev);
8946 void __mgmt_power_off(struct hci_dev *hdev)
8948 struct cmd_lookup match = { NULL, hdev };
8949 u8 status, zero_cod[] = { 0, 0, 0 };
8951 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8953 /* If the power off is because of hdev unregistration let
8954 * use the appropriate INVALID_INDEX status. Otherwise use
8955 * NOT_POWERED. We cover both scenarios here since later in
8956 * mgmt_index_removed() any hci_conn callbacks will have already
8957 * been triggered, potentially causing misleading DISCONNECTED
8960 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8961 status = MGMT_STATUS_INVALID_INDEX;
8963 status = MGMT_STATUS_NOT_POWERED;
8965 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8967 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8968 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8969 zero_cod, sizeof(zero_cod),
8970 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8971 ext_info_changed(hdev, NULL);
8974 new_settings(hdev, match.sk);
8980 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8982 struct mgmt_pending_cmd *cmd;
8985 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8989 if (err == -ERFKILL)
8990 status = MGMT_STATUS_RFKILLED;
8992 status = MGMT_STATUS_FAILED;
8994 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8996 mgmt_pending_remove(cmd);
8999 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9002 struct mgmt_ev_new_link_key ev;
9004 memset(&ev, 0, sizeof(ev));
9006 ev.store_hint = persistent;
9007 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9008 ev.key.addr.type = BDADDR_BREDR;
9009 ev.key.type = key->type;
9010 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9011 ev.key.pin_len = key->pin_len;
9013 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9016 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9018 switch (ltk->type) {
9021 if (ltk->authenticated)
9022 return MGMT_LTK_AUTHENTICATED;
9023 return MGMT_LTK_UNAUTHENTICATED;
9025 if (ltk->authenticated)
9026 return MGMT_LTK_P256_AUTH;
9027 return MGMT_LTK_P256_UNAUTH;
9028 case SMP_LTK_P256_DEBUG:
9029 return MGMT_LTK_P256_DEBUG;
9032 return MGMT_LTK_UNAUTHENTICATED;
9035 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9037 struct mgmt_ev_new_long_term_key ev;
9039 memset(&ev, 0, sizeof(ev));
9041 /* Devices using resolvable or non-resolvable random addresses
9042 * without providing an identity resolving key don't require
9043 * to store long term keys. Their addresses will change the
9046 * Only when a remote device provides an identity address
9047 * make sure the long term key is stored. If the remote
9048 * identity is known, the long term keys are internally
9049 * mapped to the identity address. So allow static random
9050 * and public addresses here.
9052 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9053 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9054 ev.store_hint = 0x00;
9056 ev.store_hint = persistent;
9058 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9059 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9060 ev.key.type = mgmt_ltk_type(key);
9061 ev.key.enc_size = key->enc_size;
9062 ev.key.ediv = key->ediv;
9063 ev.key.rand = key->rand;
9065 if (key->type == SMP_LTK)
9068 /* Make sure we copy only the significant bytes based on the
9069 * encryption key size, and set the rest of the value to zeroes.
9071 memcpy(ev.key.val, key->val, key->enc_size);
9072 memset(ev.key.val + key->enc_size, 0,
9073 sizeof(ev.key.val) - key->enc_size);
9075 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
9078 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9080 struct mgmt_ev_new_irk ev;
9082 memset(&ev, 0, sizeof(ev));
9084 ev.store_hint = persistent;
9086 bacpy(&ev.rpa, &irk->rpa);
9087 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9088 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9089 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9091 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9094 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9097 struct mgmt_ev_new_csrk ev;
9099 memset(&ev, 0, sizeof(ev));
9101 /* Devices using resolvable or non-resolvable random addresses
9102 * without providing an identity resolving key don't require
9103 * to store signature resolving keys. Their addresses will change
9104 * the next time around.
9106 * Only when a remote device provides an identity address
9107 * make sure the signature resolving key is stored. So allow
9108 * static random and public addresses here.
9110 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9111 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9112 ev.store_hint = 0x00;
9114 ev.store_hint = persistent;
9116 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9117 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9118 ev.key.type = csrk->type;
9119 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9121 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9124 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9125 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9126 u16 max_interval, u16 latency, u16 timeout)
9128 struct mgmt_ev_new_conn_param ev;
9130 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9133 memset(&ev, 0, sizeof(ev));
9134 bacpy(&ev.addr.bdaddr, bdaddr);
9135 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9136 ev.store_hint = store_hint;
9137 ev.min_interval = cpu_to_le16(min_interval);
9138 ev.max_interval = cpu_to_le16(max_interval);
9139 ev.latency = cpu_to_le16(latency);
9140 ev.timeout = cpu_to_le16(timeout);
9142 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9145 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9146 u32 flags, u8 *name, u8 name_len)
9149 struct mgmt_ev_device_connected *ev = (void *) buf;
9152 bacpy(&ev->addr.bdaddr, &conn->dst);
9153 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9155 ev->flags = __cpu_to_le32(flags);
9157 /* We must ensure that the EIR Data fields are ordered and
9158 * unique. Keep it simple for now and avoid the problem by not
9159 * adding any BR/EDR data to the LE adv.
9161 if (conn->le_adv_data_len > 0) {
9162 memcpy(&ev->eir[eir_len],
9163 conn->le_adv_data, conn->le_adv_data_len);
9164 eir_len = conn->le_adv_data_len;
9167 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
9170 if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
9171 eir_len = eir_append_data(ev->eir, eir_len,
9173 conn->dev_class, 3);
9176 ev->eir_len = cpu_to_le16(eir_len);
9178 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
9179 sizeof(*ev) + eir_len, NULL);
9182 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9184 struct sock **sk = data;
9186 cmd->cmd_complete(cmd, 0);
9191 mgmt_pending_remove(cmd);
9194 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9196 struct hci_dev *hdev = data;
9197 struct mgmt_cp_unpair_device *cp = cmd->param;
9199 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9201 cmd->cmd_complete(cmd, 0);
9202 mgmt_pending_remove(cmd);
9205 bool mgmt_powering_down(struct hci_dev *hdev)
9207 struct mgmt_pending_cmd *cmd;
9208 struct mgmt_mode *cp;
9210 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9221 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9222 u8 link_type, u8 addr_type, u8 reason,
9223 bool mgmt_connected)
9225 struct mgmt_ev_device_disconnected ev;
9226 struct sock *sk = NULL;
9228 /* The connection is still in hci_conn_hash so test for 1
9229 * instead of 0 to know if this is the last one.
9231 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9232 cancel_delayed_work(&hdev->power_off);
9233 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9236 if (!mgmt_connected)
9239 if (link_type != ACL_LINK && link_type != LE_LINK)
9242 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9244 bacpy(&ev.addr.bdaddr, bdaddr);
9245 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9248 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9253 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9257 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9258 u8 link_type, u8 addr_type, u8 status)
9260 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9261 struct mgmt_cp_disconnect *cp;
9262 struct mgmt_pending_cmd *cmd;
9264 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9267 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9273 if (bacmp(bdaddr, &cp->addr.bdaddr))
9276 if (cp->addr.type != bdaddr_type)
9279 cmd->cmd_complete(cmd, mgmt_status(status));
9280 mgmt_pending_remove(cmd);
9283 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9284 u8 addr_type, u8 status)
9286 struct mgmt_ev_connect_failed ev;
9288 /* The connection is still in hci_conn_hash so test for 1
9289 * instead of 0 to know if this is the last one.
9291 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9292 cancel_delayed_work(&hdev->power_off);
9293 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9296 bacpy(&ev.addr.bdaddr, bdaddr);
9297 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9298 ev.status = mgmt_status(status);
9300 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9303 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9305 struct mgmt_ev_pin_code_request ev;
9307 bacpy(&ev.addr.bdaddr, bdaddr);
9308 ev.addr.type = BDADDR_BREDR;
9311 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9314 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9317 struct mgmt_pending_cmd *cmd;
9319 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9323 cmd->cmd_complete(cmd, mgmt_status(status));
9324 mgmt_pending_remove(cmd);
9327 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9330 struct mgmt_pending_cmd *cmd;
9332 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9336 cmd->cmd_complete(cmd, mgmt_status(status));
9337 mgmt_pending_remove(cmd);
9340 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9341 u8 link_type, u8 addr_type, u32 value,
9344 struct mgmt_ev_user_confirm_request ev;
9346 BT_DBG("%s", hdev->name);
9348 bacpy(&ev.addr.bdaddr, bdaddr);
9349 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9350 ev.confirm_hint = confirm_hint;
9351 ev.value = cpu_to_le32(value);
9353 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9357 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9358 u8 link_type, u8 addr_type)
9360 struct mgmt_ev_user_passkey_request ev;
9362 BT_DBG("%s", hdev->name);
9364 bacpy(&ev.addr.bdaddr, bdaddr);
9365 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9367 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9371 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9372 u8 link_type, u8 addr_type, u8 status,
9375 struct mgmt_pending_cmd *cmd;
9377 cmd = pending_find(opcode, hdev);
9381 cmd->cmd_complete(cmd, mgmt_status(status));
9382 mgmt_pending_remove(cmd);
9387 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9388 u8 link_type, u8 addr_type, u8 status)
9390 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9391 status, MGMT_OP_USER_CONFIRM_REPLY);
9394 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9395 u8 link_type, u8 addr_type, u8 status)
9397 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9399 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9402 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9403 u8 link_type, u8 addr_type, u8 status)
9405 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9406 status, MGMT_OP_USER_PASSKEY_REPLY);
9409 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9410 u8 link_type, u8 addr_type, u8 status)
9412 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9414 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9417 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9418 u8 link_type, u8 addr_type, u32 passkey,
9421 struct mgmt_ev_passkey_notify ev;
9423 BT_DBG("%s", hdev->name);
9425 bacpy(&ev.addr.bdaddr, bdaddr);
9426 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9427 ev.passkey = __cpu_to_le32(passkey);
9428 ev.entered = entered;
9430 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9433 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9435 struct mgmt_ev_auth_failed ev;
9436 struct mgmt_pending_cmd *cmd;
9437 u8 status = mgmt_status(hci_status);
9439 bacpy(&ev.addr.bdaddr, &conn->dst);
9440 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9443 cmd = find_pairing(conn);
9445 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9446 cmd ? cmd->sk : NULL);
9449 cmd->cmd_complete(cmd, status);
9450 mgmt_pending_remove(cmd);
9454 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9456 struct cmd_lookup match = { NULL, hdev };
9460 u8 mgmt_err = mgmt_status(status);
9461 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9462 cmd_status_rsp, &mgmt_err);
9466 if (test_bit(HCI_AUTH, &hdev->flags))
9467 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9469 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9471 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9475 new_settings(hdev, match.sk);
9481 static void clear_eir(struct hci_request *req)
9483 struct hci_dev *hdev = req->hdev;
9484 struct hci_cp_write_eir cp;
9486 if (!lmp_ext_inq_capable(hdev))
9489 memset(hdev->eir, 0, sizeof(hdev->eir));
9491 memset(&cp, 0, sizeof(cp));
9493 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
9496 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
9498 struct cmd_lookup match = { NULL, hdev };
9499 struct hci_request req;
9500 bool changed = false;
9503 u8 mgmt_err = mgmt_status(status);
9505 if (enable && hci_dev_test_and_clear_flag(hdev,
9507 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9508 new_settings(hdev, NULL);
9511 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
9517 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
9519 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
9521 changed = hci_dev_test_and_clear_flag(hdev,
9524 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
9527 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
9530 new_settings(hdev, match.sk);
9535 hci_req_init(&req, hdev);
9537 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
9538 if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
9539 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
9540 sizeof(enable), &enable);
9541 __hci_req_update_eir(&req);
9546 hci_req_run(&req, NULL);
9549 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9551 struct cmd_lookup *match = data;
9553 if (match->sk == NULL) {
9554 match->sk = cmd->sk;
9555 sock_hold(match->sk);
9559 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9562 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9564 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9565 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9566 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9569 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9570 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9571 ext_info_changed(hdev, NULL);
9578 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9580 struct mgmt_cp_set_local_name ev;
9581 struct mgmt_pending_cmd *cmd;
9586 memset(&ev, 0, sizeof(ev));
9587 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9588 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9590 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9592 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9594 /* If this is a HCI command related to powering on the
9595 * HCI dev don't send any mgmt signals.
9597 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9601 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9602 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9603 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9606 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9610 for (i = 0; i < uuid_count; i++) {
9611 if (!memcmp(uuid, uuids[i], 16))
9618 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9622 while (parsed < eir_len) {
9623 u8 field_len = eir[0];
9630 if (eir_len - parsed < field_len + 1)
9634 case EIR_UUID16_ALL:
9635 case EIR_UUID16_SOME:
9636 for (i = 0; i + 3 <= field_len; i += 2) {
9637 memcpy(uuid, bluetooth_base_uuid, 16);
9638 uuid[13] = eir[i + 3];
9639 uuid[12] = eir[i + 2];
9640 if (has_uuid(uuid, uuid_count, uuids))
9644 case EIR_UUID32_ALL:
9645 case EIR_UUID32_SOME:
9646 for (i = 0; i + 5 <= field_len; i += 4) {
9647 memcpy(uuid, bluetooth_base_uuid, 16);
9648 uuid[15] = eir[i + 5];
9649 uuid[14] = eir[i + 4];
9650 uuid[13] = eir[i + 3];
9651 uuid[12] = eir[i + 2];
9652 if (has_uuid(uuid, uuid_count, uuids))
9656 case EIR_UUID128_ALL:
9657 case EIR_UUID128_SOME:
9658 for (i = 0; i + 17 <= field_len; i += 16) {
9659 memcpy(uuid, eir + i + 2, 16);
9660 if (has_uuid(uuid, uuid_count, uuids))
9666 parsed += field_len + 1;
9667 eir += field_len + 1;
9673 static void restart_le_scan(struct hci_dev *hdev)
9675 /* If controller is not scanning we are done. */
9676 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9679 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9680 hdev->discovery.scan_start +
9681 hdev->discovery.scan_duration))
9684 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9685 DISCOV_LE_RESTART_DELAY);
9688 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9689 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9691 /* If a RSSI threshold has been specified, and
9692 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9693 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9694 * is set, let it through for further processing, as we might need to
9697 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9698 * the results are also dropped.
9700 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9701 (rssi == HCI_RSSI_INVALID ||
9702 (rssi < hdev->discovery.rssi &&
9703 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9706 if (hdev->discovery.uuid_count != 0) {
9707 /* If a list of UUIDs is provided in filter, results with no
9708 * matching UUID should be dropped.
9710 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9711 hdev->discovery.uuids) &&
9712 !eir_has_uuids(scan_rsp, scan_rsp_len,
9713 hdev->discovery.uuid_count,
9714 hdev->discovery.uuids))
9718 /* If duplicate filtering does not report RSSI changes, then restart
9719 * scanning to ensure updated result with updated RSSI values.
9721 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9722 restart_le_scan(hdev);
9724 /* Validate RSSI value against the RSSI threshold once more. */
9725 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9726 rssi < hdev->discovery.rssi)
9733 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9734 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9735 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9738 struct mgmt_ev_device_found *ev = (void *)buf;
9741 /* Don't send events for a non-kernel initiated discovery. With
9742 * LE one exception is if we have pend_le_reports > 0 in which
9743 * case we're doing passive scanning and want these events.
9745 if (!hci_discovery_active(hdev)) {
9746 if (link_type == ACL_LINK)
9748 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
9752 if (hdev->discovery.result_filtering) {
9753 /* We are using service discovery */
9754 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9759 if (hdev->discovery.limited) {
9760 /* Check for limited discoverable bit */
9762 if (!(dev_class[1] & 0x20))
9765 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9766 if (!flags || !(flags[0] & LE_AD_LIMITED))
9771 /* Make sure that the buffer is big enough. The 5 extra bytes
9772 * are for the potential CoD field.
9774 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9777 memset(buf, 0, sizeof(buf));
9779 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9780 * RSSI value was reported as 0 when not available. This behavior
9781 * is kept when using device discovery. This is required for full
9782 * backwards compatibility with the API.
9784 * However when using service discovery, the value 127 will be
9785 * returned when the RSSI is not available.
9787 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9788 link_type == ACL_LINK)
9791 bacpy(&ev->addr.bdaddr, bdaddr);
9792 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9794 ev->flags = cpu_to_le32(flags);
9797 /* Copy EIR or advertising data into event */
9798 memcpy(ev->eir, eir, eir_len);
9800 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9802 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9805 if (scan_rsp_len > 0)
9806 /* Append scan response data to event */
9807 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9809 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9810 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9812 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9815 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9816 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9818 struct mgmt_ev_device_found *ev;
9819 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
9822 ev = (struct mgmt_ev_device_found *) buf;
9824 memset(buf, 0, sizeof(buf));
9826 bacpy(&ev->addr.bdaddr, bdaddr);
9827 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9830 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9833 ev->eir_len = cpu_to_le16(eir_len);
9835 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
9838 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9840 struct mgmt_ev_discovering ev;
9842 BT_DBG("%s discovering %u", hdev->name, discovering);
9844 memset(&ev, 0, sizeof(ev));
9845 ev.type = hdev->discovery.type;
9846 ev.discovering = discovering;
9848 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9851 static struct hci_mgmt_chan chan = {
9852 .channel = HCI_CHANNEL_CONTROL,
9853 .handler_count = ARRAY_SIZE(mgmt_handlers),
9854 .handlers = mgmt_handlers,
9856 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
9857 .tizen_handlers = tizen_mgmt_handlers,
9859 .hdev_init = mgmt_init_hdev,
9864 return hci_mgmt_chan_register(&chan);
9867 void mgmt_exit(void)
9869 hci_mgmt_chan_unregister(&chan);