/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI Management interface */
#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"

#define MGMT_VERSION	1
#define MGMT_REVISION	22
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
	MGMT_OP_SET_MESH_RECEIVER,
	MGMT_OP_MESH_READ_FEATURES,
	MGMT_OP_MESH_SEND,
	MGMT_OP_MESH_SEND_CANCEL,
};
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}

static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}
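/* Illustrative note (editor's addition, not part of the original file):
 * the two-level conversion above handles both error domains. A negative
 * errno from the hci_sync machinery goes through mgmt_errno_status(),
 * while a positive HCI status code indexes mgmt_status_table[]:
 *
 *	mgmt_status(-EBUSY);	// -> MGMT_STATUS_BUSY via mgmt_errno_status()
 *	mgmt_status(0x02);	// "No Connection" -> MGMT_STATUS_NOT_CONNECTED
 *	mgmt_status(0xff);	// beyond the table -> MGMT_STATUS_FAILED
 */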
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;
	else
		return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}

static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
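/* Sketch (editor's addition): the reply built above is a fixed header
 * followed by num_commands and then num_events little-endian 16-bit
 * opcodes, packed back to back, roughly:
 *
 *	struct mgmt_rp_read_commands {
 *		__le16 num_commands;
 *		__le16 num_events;
 *		__le16 opcodes[];	// commands first, then events
 *	};
 *
 * put_unaligned_le16() is used because the opcodes[] entries are not
 * guaranteed to be naturally aligned within the allocated buffer.
 */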
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
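/* Note (editor's addition): the entry type values assigned above encode the
 * controller class in the Extended Index List reply: 0x00 is a configured
 * primary controller, 0x01 an unconfigured primary controller and 0x02 an
 * AMP controller. Raw-only devices and devices still in setup are skipped,
 * so they never appear with any type.
 */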
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
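/* Worked example (editor's addition): a controller flagged with
 * HCI_QUIRK_INVALID_BDADDR and no public address configured yet yields
 *
 *	get_missing_options(hdev) == cpu_to_le32(MGMT_OPTION_PUBLIC_ADDRESS)
 *
 * and is_configured() stays false until user space issues
 * MGMT_OP_SET_PUBLIC_ADDRESS and hdev->public_addr becomes non-zero.
 */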
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}

static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
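/* Note (editor's addition): the EDR bits in hdev->pkt_type have inverted
 * semantics - HCI_2DH1, HCI_3DH5 and friends mark packet types that shall
 * NOT be used. That is why a 2M/3M PHY is reported as selected only when
 * the corresponding bit is clear, e.g.:
 *
 *	if (!(hdev->pkt_type & HCI_2DH1))
 *		selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
 *
 * whereas the basic-rate DM3/DH3/DM5/DH5 bits are plain "may use" bits.
 */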
static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}

static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then whether
	 * the address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	if (cis_central_capable(hdev))
		settings |= MGMT_SETTING_CIS_CENTRAL;

	if (cis_peripheral_capable(hdev))
		settings |= MGMT_SETTING_CIS_PERIPHERAL;

	if (bis_capable(hdev))
		settings |= MGMT_SETTING_ISO_BROADCASTER;

	if (sync_recv_capable(hdev))
		settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;

	return settings;
}
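/* Worked example (editor's addition): for an LE-only controller with no
 * BR/EDR address (bacmp(&hdev->bdaddr, BDADDR_ANY) == 0) and a static
 * address programmed, the block above sets MGMT_SETTING_STATIC_ADDRESS.
 * On a dual-mode controller with BR/EDR enabled and a public address the
 * flag stays clear even if hdev->static_addr is set, because the static
 * address would not actually be used.
 */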
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);

static void mesh_send_complete(struct hci_dev *hdev,
			       struct mgmt_mesh_tx *mesh_tx, bool silent)
{
	u8 handle = mesh_tx->handle;

	if (!silent)
		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
			   sizeof(handle), NULL);

	mgmt_mesh_remove(mesh_tx);
}

static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx;

	hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
	hci_disable_advertising_sync(hdev);
	mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (mesh_tx)
		mesh_send_complete(hdev, mesh_tx, false);

	return 0;
}
static int mesh_send_sync(struct hci_dev *hdev, void *data);
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
static void mesh_next(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);

	if (!mesh_tx)
		return;

	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
				 mesh_send_start_complete);

	if (err < 0)
		mesh_send_complete(hdev, mesh_tx, false);
	else
		hci_dev_set_flag(hdev, HCI_MESH_SENDING);
}

static void mesh_send_done(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    mesh_send_done.work);

	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
		return;

	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
}
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it.
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);

	hci_dev_set_flag(hdev, HCI_MGMT);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}

static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for the AUTO_OFF case where the device might
		 * not "really" have been powered off.
		 */
		hci_pend_le_list_del_init(p);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_list_add(p, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			hci_pend_le_list_add(p, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}

static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel potentially blocking sync operation before power off */
	if (cp->val == 0x00) {
		__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
		err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
					 mgmt_set_powered_complete);
	} else {
		/* Use hci_cmd_sync_submit since hdev might not be running */
		err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
					  mgmt_set_powered_complete);
	}

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}

struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;
	else
		return MGMT_STATUS_SUCCESS;
}
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	u8 status = mgmt_status(err);
	struct sock *sk = cmd->sk;

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	mgmt_pending_remove(cmd);
	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
}
static int set_mesh_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_mesh *cp = cmd->param;
	size_t len = cmd->param_len;

	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

	if (cp->enable)
		hci_dev_set_flag(hdev, HCI_MESH);
	else
		hci_dev_clear_flag(hdev, HCI_MESH);

	len -= sizeof(*cp);

	/* If filters don't fit, forward all adv pkts */
	if (len <= sizeof(hdev->mesh_ad_types))
		memcpy(hdev->mesh_ad_types, cp->ad_types, len);

	hci_update_passive_scan_sync(hdev);
	return 0;
}
static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_set_mesh *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->enable != 0x00 && cp->enable != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
					 set_mesh_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
	return err;
}
static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	unsigned long mesh_send_interval;
	u8 mgmt_err = mgmt_status(err);

	/* Report any errors here, but don't report completion */

	if (mgmt_err) {
		hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
		/* Send Complete Error Code for handle */
		mesh_send_complete(hdev, mesh_tx, false);
		return;
	}

	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
			   mesh_send_interval);
}
static int mesh_send_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_mesh_tx *mesh_tx = data;
	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
	struct adv_info *adv, *next_instance;
	u8 instance = hdev->le_num_of_adv_sets + 1;
	u16 timeout, duration;
	int err = 0;

	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
		return MGMT_STATUS_BUSY;

	timeout = 1000;
	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
	adv = hci_add_adv_instance(hdev, instance, 0,
				   send->adv_data_len, send->adv_data,
				   0, NULL,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval,
				   mesh_tx->handle);

	if (!IS_ERR(adv))
		mesh_tx->instance = instance;
	else
		err = PTR_ERR(adv);

	if (hdev->cur_adv_instance == instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, instance);
		if (next_instance)
			instance = next_instance->instance;
		else
			instance = 0;
	} else if (hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other, or
		 * let it go naturally from queue if ADV is already happening
		 */
		instance = 0;
	}

	if (instance)
		return hci_schedule_adv_instance_sync(hdev, instance, true);

	return err;
}
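/* Note (editor's addition): mesh TX deliberately claims advertising instance
 * hdev->le_num_of_adv_sets + 1, one past the range handed out for regular
 * advertising instances, so a mesh packet never collides with a user
 * configured instance. Each packet stays on air for roughly send->cnt times
 * the maximum advertising interval before mesh_send_done tears it down.
 */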
2339 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2341 struct mgmt_rp_mesh_read_features *rp = data;
2343 if (rp->used_handles >= rp->max_handles)
2346 rp->handles[rp->used_handles++] = mesh_tx->handle;
2349 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2350 void *data, u16 len)
2352 struct mgmt_rp_mesh_read_features rp;
2354 if (!lmp_le_capable(hdev) ||
2355 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2356 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2357 MGMT_STATUS_NOT_SUPPORTED);
2359 memset(&rp, 0, sizeof(rp));
2360 rp.index = cpu_to_le16(hdev->id);
2361 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2362 rp.max_handles = MESH_HANDLES_MAX;
2367 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2369 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2370 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2372 hci_dev_unlock(hdev);
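/* Synchronous worker for Mesh Send Cancel: a zero handle cancels every
 * outstanding transmission owned by the requesting socket, while a
 * non-zero handle cancels only the matching transmission, and only if
 * it belongs to that socket.
 */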
2376 static int send_cancel(struct hci_dev *hdev, void *data)
2378 struct mgmt_pending_cmd *cmd = data;
2379 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2380 struct mgmt_mesh_tx *mesh_tx;
2382 if (!cancel->handle) {
2384 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2387 mesh_send_complete(hdev, mesh_tx, false);
2390 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2392 if (mesh_tx && mesh_tx->sk == cmd->sk)
2393 mesh_send_complete(hdev, mesh_tx, false);
2396 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2398 mgmt_pending_free(cmd);
2403 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2404 void *data, u16 len)
2406 struct mgmt_pending_cmd *cmd;
2409 if (!lmp_le_capable(hdev) ||
2410 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2411 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2412 MGMT_STATUS_NOT_SUPPORTED);
2414 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2416 MGMT_STATUS_REJECTED);
2419 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2423 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2426 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2427 MGMT_STATUS_FAILED);
2430 mgmt_pending_free(cmd);
2433 hci_dev_unlock(hdev);
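/* Handle MGMT_OP_MESH_SEND: validate support, state and payload length,
 * check that a transmit handle is still available, then queue
 * mesh_send_sync() and reply with the handle assigned to this
 * transmission.
 */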
2437 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2439 struct mgmt_mesh_tx *mesh_tx;
2440 struct mgmt_cp_mesh_send *send = data;
2441 struct mgmt_rp_mesh_read_features rp;
2445 if (!lmp_le_capable(hdev) ||
2446 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2448 MGMT_STATUS_NOT_SUPPORTED);
2449 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2450 len <= MGMT_MESH_SEND_SIZE ||
2451 len > (MGMT_MESH_SEND_SIZE + 31))
2452 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2453 MGMT_STATUS_REJECTED);
2457 memset(&rp, 0, sizeof(rp));
2458 rp.max_handles = MESH_HANDLES_MAX;
2460 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2462 if (rp.max_handles <= rp.used_handles) {
2463 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2468 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2469 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2474 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2475 mesh_send_start_complete);
2478 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2479 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2480 MGMT_STATUS_FAILED);
2484 mgmt_mesh_remove(mesh_tx);
2487 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2489 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2490 &mesh_tx->handle, 1);
2494 hci_dev_unlock(hdev);
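/* Handle MGMT_OP_SET_LE: toggle the Low Energy setting, either directly
 * when the controller is powered off or the value is unchanged, or via
 * a queued set_le_sync() request otherwise.
 */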
2498 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2500 struct mgmt_mode *cp = data;
2501 struct mgmt_pending_cmd *cmd;
2505 bt_dev_dbg(hdev, "sock %p", sk);
2507 if (!lmp_le_capable(hdev))
2508 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2509 MGMT_STATUS_NOT_SUPPORTED);
2511 if (cp->val != 0x00 && cp->val != 0x01)
2512 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2513 MGMT_STATUS_INVALID_PARAMS);
2515 /* Bluetooth single-mode LE-only controllers, or dual-mode
2516 * controllers configured as LE-only devices, do not allow
2517 * switching LE off. These either have LE enabled explicitly
2518 * or have had BR/EDR switched off previously.
2520 * Trying to enable LE when it is already enabled gracefully
2521 * sends a positive response. Trying to disable it, however,
2522 * results in rejection.
2524 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2525 if (cp->val == 0x01)
2526 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2528 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2529 MGMT_STATUS_REJECTED);
2535 enabled = lmp_host_le_capable(hdev);
2537 if (!hdev_is_powered(hdev) || val == enabled) {
2538 bool changed = false;
2540 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2541 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2545 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2546 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2550 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2555 err = new_settings(hdev, sk);
2560 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2561 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2562 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2567 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2571 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2575 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2576 MGMT_STATUS_FAILED);
2579 mgmt_pending_remove(cmd);
2583 hci_dev_unlock(hdev);
2587 /* This is a helper function to test for pending mgmt commands that can
2588 * cause CoD or EIR HCI commands. We can only allow one such pending
2589 * mgmt command at a time since otherwise we cannot easily track what
2590 * the current values are and will be, and based on that decide whether
2591 * a new HCI command needs to be sent and, if so, with what value.
2593 static bool pending_eir_or_class(struct hci_dev *hdev)
2595 struct mgmt_pending_cmd *cmd;
2597 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2598 switch (cmd->opcode) {
2599 case MGMT_OP_ADD_UUID:
2600 case MGMT_OP_REMOVE_UUID:
2601 case MGMT_OP_SET_DEV_CLASS:
2602 case MGMT_OP_SET_POWERED:
2610 static const u8 bluetooth_base_uuid[] = {
2611 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2612 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
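/* Determine the shortest representation of a 128-bit UUID: UUIDs based
 * on the Bluetooth Base UUID can be compressed to the 32-bit or 16-bit
 * value carried in octets 12-15.
 */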
2615 static u8 get_uuid_size(const u8 *uuid)
2619 if (memcmp(uuid, bluetooth_base_uuid, 12))
2622 val = get_unaligned_le32(&uuid[12]);
2629 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2631 struct mgmt_pending_cmd *cmd = data;
2633 bt_dev_dbg(hdev, "err %d", err);
2635 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2636 mgmt_status(err), hdev->dev_class, 3);
2638 mgmt_pending_free(cmd);
2641 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2645 err = hci_update_class_sync(hdev);
2649 return hci_update_eir_sync(hdev);
2652 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2654 struct mgmt_cp_add_uuid *cp = data;
2655 struct mgmt_pending_cmd *cmd;
2656 struct bt_uuid *uuid;
2659 bt_dev_dbg(hdev, "sock %p", sk);
2663 if (pending_eir_or_class(hdev)) {
2664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2669 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2675 memcpy(uuid->uuid, cp->uuid, 16);
2676 uuid->svc_hint = cp->svc_hint;
2677 uuid->size = get_uuid_size(cp->uuid);
2679 list_add_tail(&uuid->list, &hdev->uuids);
2681 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2687 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2689 mgmt_pending_free(cmd);
2694 hci_dev_unlock(hdev);
2698 static bool enable_service_cache(struct hci_dev *hdev)
2700 if (!hdev_is_powered(hdev))
2703 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2704 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2712 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2716 err = hci_update_class_sync(hdev);
2720 return hci_update_eir_sync(hdev);
2723 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2726 struct mgmt_cp_remove_uuid *cp = data;
2727 struct mgmt_pending_cmd *cmd;
2728 struct bt_uuid *match, *tmp;
2729 static const u8 bt_uuid_any[] = {
2730 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2734 bt_dev_dbg(hdev, "sock %p", sk);
2738 if (pending_eir_or_class(hdev)) {
2739 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2744 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2745 hci_uuids_clear(hdev);
2747 if (enable_service_cache(hdev)) {
2748 err = mgmt_cmd_complete(sk, hdev->id,
2749 MGMT_OP_REMOVE_UUID,
2750 0, hdev->dev_class, 3);
2759 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2760 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2763 list_del(&match->list);
2769 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2770 MGMT_STATUS_INVALID_PARAMS);
2775 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2781 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2782 mgmt_class_complete);
2784 mgmt_pending_free(cmd);
2787 hci_dev_unlock(hdev);
2791 static int set_class_sync(struct hci_dev *hdev, void *data)
2795 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2796 cancel_delayed_work_sync(&hdev->service_cache);
2797 err = hci_update_eir_sync(hdev);
2803 return hci_update_class_sync(hdev);
2806 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2809 struct mgmt_cp_set_dev_class *cp = data;
2810 struct mgmt_pending_cmd *cmd;
2813 bt_dev_dbg(hdev, "sock %p", sk);
2815 if (!lmp_bredr_capable(hdev))
2816 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2817 MGMT_STATUS_NOT_SUPPORTED);
2821 if (pending_eir_or_class(hdev)) {
2822 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2827 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2828 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2829 MGMT_STATUS_INVALID_PARAMS);
2833 hdev->major_class = cp->major;
2834 hdev->minor_class = cp->minor;
2836 if (!hdev_is_powered(hdev)) {
2837 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2838 hdev->dev_class, 3);
2842 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2848 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2849 mgmt_class_complete);
2851 mgmt_pending_free(cmd);
2854 hci_dev_unlock(hdev);
2858 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2861 struct mgmt_cp_load_link_keys *cp = data;
2862 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2863 sizeof(struct mgmt_link_key_info));
2864 u16 key_count, expected_len;
2868 bt_dev_dbg(hdev, "sock %p", sk);
2870 if (!lmp_bredr_capable(hdev))
2871 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2872 MGMT_STATUS_NOT_SUPPORTED);
2874 key_count = __le16_to_cpu(cp->key_count);
2875 if (key_count > max_key_count) {
2876 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2878 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2879 MGMT_STATUS_INVALID_PARAMS);
2882 expected_len = struct_size(cp, keys, key_count);
2883 if (expected_len != len) {
2884 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2886 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2887 MGMT_STATUS_INVALID_PARAMS);
2890 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2891 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2892 MGMT_STATUS_INVALID_PARAMS);
2894 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2897 for (i = 0; i < key_count; i++) {
2898 struct mgmt_link_key_info *key = &cp->keys[i];
2900 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2901 return mgmt_cmd_status(sk, hdev->id,
2902 MGMT_OP_LOAD_LINK_KEYS,
2903 MGMT_STATUS_INVALID_PARAMS);
2908 hci_link_keys_clear(hdev);
2911 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2913 changed = hci_dev_test_and_clear_flag(hdev,
2914 HCI_KEEP_DEBUG_KEYS);
2917 new_settings(hdev, NULL);
2919 for (i = 0; i < key_count; i++) {
2920 struct mgmt_link_key_info *key = &cp->keys[i];
2922 if (hci_is_blocked_key(hdev,
2923 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2925 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2930 /* Always ignore debug keys and require a new pairing if
2931 * the user wants to use them.
2933 if (key->type == HCI_LK_DEBUG_COMBINATION)
2936 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2937 key->type, key->pin_len, NULL);
2940 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2942 hci_dev_unlock(hdev);
2947 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2948 u8 addr_type, struct sock *skip_sk)
2950 struct mgmt_ev_device_unpaired ev;
2952 bacpy(&ev.addr.bdaddr, bdaddr);
2953 ev.addr.type = addr_type;
2955 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2959 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2961 struct mgmt_pending_cmd *cmd = data;
2962 struct mgmt_cp_unpair_device *cp = cmd->param;
2965 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2967 cmd->cmd_complete(cmd, err);
2968 mgmt_pending_free(cmd);
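/* Synchronous worker for Unpair Device: if the device is still
 * connected, terminate the link so that unpairing also drops the
 * connection.
 */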
2971 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2973 struct mgmt_pending_cmd *cmd = data;
2974 struct mgmt_cp_unpair_device *cp = cmd->param;
2975 struct hci_conn *conn;
2977 if (cp->addr.type == BDADDR_BREDR)
2978 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2981 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2982 le_addr_type(cp->addr.type));
2987 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2990 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2993 struct mgmt_cp_unpair_device *cp = data;
2994 struct mgmt_rp_unpair_device rp;
2995 struct hci_conn_params *params;
2996 struct mgmt_pending_cmd *cmd;
2997 struct hci_conn *conn;
3001 memset(&rp, 0, sizeof(rp));
3002 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3003 rp.addr.type = cp->addr.type;
3005 if (!bdaddr_type_is_valid(cp->addr.type))
3006 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3007 MGMT_STATUS_INVALID_PARAMS,
3010 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3011 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3012 MGMT_STATUS_INVALID_PARAMS,
3017 if (!hdev_is_powered(hdev)) {
3018 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3019 MGMT_STATUS_NOT_POWERED, &rp,
3024 if (cp->addr.type == BDADDR_BREDR) {
3025 /* If disconnection is requested, then look up the
3026 * connection. If the remote device is connected, the
3027 * connection will later be used to terminate the link.
3029 * Explicitly setting it to NULL ensures that the link
3030 * is not terminated.
3033 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3038 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3040 err = mgmt_cmd_complete(sk, hdev->id,
3041 MGMT_OP_UNPAIR_DEVICE,
3042 MGMT_STATUS_NOT_PAIRED, &rp,
3050 /* LE address type */
3051 addr_type = le_addr_type(cp->addr.type);
3053 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3054 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3056 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3057 MGMT_STATUS_NOT_PAIRED, &rp,
3062 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3064 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3069 /* Defer cleaning up the connection parameters until the connection
3070 * closes, to give a chance of keeping them if a re-pairing happens.
3072 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3074 /* Disable auto-connection parameters if present */
3075 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3077 if (params->explicit_connect)
3078 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3080 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3083 /* If disconnection is not requested, then clear the connection
3084 * variable so that the link is not terminated.
3086 if (!cp->disconnect)
3090 /* If the connection variable is set, then termination of the
3091 * link is requested.
3094 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3096 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3100 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3107 cmd->cmd_complete = addr_cmd_complete;
3109 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3110 unpair_device_complete);
3112 mgmt_pending_free(cmd);
3115 hci_dev_unlock(hdev);
3119 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3122 struct mgmt_cp_disconnect *cp = data;
3123 struct mgmt_rp_disconnect rp;
3124 struct mgmt_pending_cmd *cmd;
3125 struct hci_conn *conn;
3128 bt_dev_dbg(hdev, "sock %p", sk);
3130 memset(&rp, 0, sizeof(rp));
3131 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3132 rp.addr.type = cp->addr.type;
3134 if (!bdaddr_type_is_valid(cp->addr.type))
3135 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3136 MGMT_STATUS_INVALID_PARAMS,
3141 if (!test_bit(HCI_UP, &hdev->flags)) {
3142 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3143 MGMT_STATUS_NOT_POWERED, &rp,
3148 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3149 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3150 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3154 if (cp->addr.type == BDADDR_BREDR)
3155 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3158 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3159 le_addr_type(cp->addr.type));
3161 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3162 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3163 MGMT_STATUS_NOT_CONNECTED, &rp,
3168 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3174 cmd->cmd_complete = generic_cmd_complete;
3176 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3178 mgmt_pending_remove(cmd);
3181 hci_dev_unlock(hdev);
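/* Map an HCI link type and address type to the corresponding mgmt
 * BDADDR_* address type, falling back to BDADDR_LE_RANDOM for unknown
 * LE address types and to BDADDR_BREDR for non-LE links.
 */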
3185 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3187 switch (link_type) {
3189 switch (addr_type) {
3190 case ADDR_LE_DEV_PUBLIC:
3191 return BDADDR_LE_PUBLIC;
3194 /* Fallback to LE Random address type */
3195 return BDADDR_LE_RANDOM;
3199 /* Fallback to BR/EDR type */
3200 return BDADDR_BREDR;
3204 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3207 struct mgmt_rp_get_connections *rp;
3212 bt_dev_dbg(hdev, "sock %p", sk);
3216 if (!hdev_is_powered(hdev)) {
3217 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3218 MGMT_STATUS_NOT_POWERED);
3223 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3224 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3228 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3235 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3236 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3238 bacpy(&rp->addr[i].bdaddr, &c->dst);
3239 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3240 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3245 rp->conn_count = cpu_to_le16(i);
3247 /* Recalculate length in case of filtered SCO connections, etc */
3248 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3249 struct_size(rp, addr, i));
3254 hci_dev_unlock(hdev);
3258 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3259 struct mgmt_cp_pin_code_neg_reply *cp)
3261 struct mgmt_pending_cmd *cmd;
3264 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3269 cmd->cmd_complete = addr_cmd_complete;
3271 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3272 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3274 mgmt_pending_remove(cmd);
3279 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3282 struct hci_conn *conn;
3283 struct mgmt_cp_pin_code_reply *cp = data;
3284 struct hci_cp_pin_code_reply reply;
3285 struct mgmt_pending_cmd *cmd;
3288 bt_dev_dbg(hdev, "sock %p", sk);
3292 if (!hdev_is_powered(hdev)) {
3293 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3294 MGMT_STATUS_NOT_POWERED);
3298 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3300 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3301 MGMT_STATUS_NOT_CONNECTED);
3305 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3306 struct mgmt_cp_pin_code_neg_reply ncp;
3308 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3310 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3312 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3314 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3315 MGMT_STATUS_INVALID_PARAMS);
3320 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3326 cmd->cmd_complete = addr_cmd_complete;
3328 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3329 reply.pin_len = cp->pin_len;
3330 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3332 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3334 mgmt_pending_remove(cmd);
3337 hci_dev_unlock(hdev);
3341 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3344 struct mgmt_cp_set_io_capability *cp = data;
3346 bt_dev_dbg(hdev, "sock %p", sk);
3348 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3349 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3350 MGMT_STATUS_INVALID_PARAMS);
3354 hdev->io_capability = cp->io_capability;
3356 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3358 hci_dev_unlock(hdev);
3360 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3364 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3366 struct hci_dev *hdev = conn->hdev;
3367 struct mgmt_pending_cmd *cmd;
3369 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3370 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3373 if (cmd->user_data != conn)
3382 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3384 struct mgmt_rp_pair_device rp;
3385 struct hci_conn *conn = cmd->user_data;
3388 bacpy(&rp.addr.bdaddr, &conn->dst);
3389 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3391 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3392 status, &rp, sizeof(rp));
3394 /* So we don't get further callbacks for this connection */
3395 conn->connect_cfm_cb = NULL;
3396 conn->security_cfm_cb = NULL;
3397 conn->disconn_cfm_cb = NULL;
3399 hci_conn_drop(conn);
3401 /* The device is paired so there is no need to remove
3402 * its connection parameters anymore.
3404 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3411 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3413 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3414 struct mgmt_pending_cmd *cmd;
3416 cmd = find_pairing(conn);
3418 cmd->cmd_complete(cmd, status);
3419 mgmt_pending_remove(cmd);
3423 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3425 struct mgmt_pending_cmd *cmd;
3427 BT_DBG("status %u", status);
3429 cmd = find_pairing(conn);
3431 BT_DBG("Unable to find a pending command");
3435 cmd->cmd_complete(cmd, mgmt_status(status));
3436 mgmt_pending_remove(cmd);
3439 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3441 struct mgmt_pending_cmd *cmd;
3443 BT_DBG("status %u", status);
3448 cmd = find_pairing(conn);
3450 BT_DBG("Unable to find a pending command");
3454 cmd->cmd_complete(cmd, mgmt_status(status));
3455 mgmt_pending_remove(cmd);
3458 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3461 struct mgmt_cp_pair_device *cp = data;
3462 struct mgmt_rp_pair_device rp;
3463 struct mgmt_pending_cmd *cmd;
3464 u8 sec_level, auth_type;
3465 struct hci_conn *conn;
3468 bt_dev_dbg(hdev, "sock %p", sk);
3470 memset(&rp, 0, sizeof(rp));
3471 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3472 rp.addr.type = cp->addr.type;
3474 if (!bdaddr_type_is_valid(cp->addr.type))
3475 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3476 MGMT_STATUS_INVALID_PARAMS,
3479 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3480 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3481 MGMT_STATUS_INVALID_PARAMS,
3486 if (!hdev_is_powered(hdev)) {
3487 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3488 MGMT_STATUS_NOT_POWERED, &rp,
3493 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3494 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3495 MGMT_STATUS_ALREADY_PAIRED, &rp,
3500 sec_level = BT_SECURITY_MEDIUM;
3501 auth_type = HCI_AT_DEDICATED_BONDING;
3503 if (cp->addr.type == BDADDR_BREDR) {
3504 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3505 auth_type, CONN_REASON_PAIR_DEVICE);
3507 u8 addr_type = le_addr_type(cp->addr.type);
3508 struct hci_conn_params *p;
3510 /* When pairing a new device, it is expected that the device is
3511 * remembered for future connections. Adding the connection
3512 * parameter information ahead of time allows tracking of the
3513 * peripheral's preferred values and speeds up any further
3514 * connection establishment.
3516 * If connection parameters already exist, then they are kept and
3517 * this function does nothing.
3519 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3521 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3522 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3524 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3525 sec_level, HCI_LE_CONN_TIMEOUT,
3526 CONN_REASON_PAIR_DEVICE);
3532 if (PTR_ERR(conn) == -EBUSY)
3533 status = MGMT_STATUS_BUSY;
3534 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3535 status = MGMT_STATUS_NOT_SUPPORTED;
3536 else if (PTR_ERR(conn) == -ECONNREFUSED)
3537 status = MGMT_STATUS_REJECTED;
3539 status = MGMT_STATUS_CONNECT_FAILED;
3541 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3542 status, &rp, sizeof(rp));
3546 if (conn->connect_cfm_cb) {
3547 hci_conn_drop(conn);
3548 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3549 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3553 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3556 hci_conn_drop(conn);
3560 cmd->cmd_complete = pairing_complete;
3562 /* For LE, just connecting isn't proof that the pairing finished */
3563 if (cp->addr.type == BDADDR_BREDR) {
3564 conn->connect_cfm_cb = pairing_complete_cb;
3565 conn->security_cfm_cb = pairing_complete_cb;
3566 conn->disconn_cfm_cb = pairing_complete_cb;
3568 conn->connect_cfm_cb = le_pairing_complete_cb;
3569 conn->security_cfm_cb = le_pairing_complete_cb;
3570 conn->disconn_cfm_cb = le_pairing_complete_cb;
3573 conn->io_capability = cp->io_cap;
3574 cmd->user_data = hci_conn_get(conn);
3576 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3577 hci_conn_security(conn, sec_level, auth_type, true)) {
3578 cmd->cmd_complete(cmd, 0);
3579 mgmt_pending_remove(cmd);
3585 hci_dev_unlock(hdev);
3589 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3592 struct mgmt_addr_info *addr = data;
3593 struct mgmt_pending_cmd *cmd;
3594 struct hci_conn *conn;
3597 bt_dev_dbg(hdev, "sock %p", sk);
3601 if (!hdev_is_powered(hdev)) {
3602 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3603 MGMT_STATUS_NOT_POWERED);
3607 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3609 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3610 MGMT_STATUS_INVALID_PARAMS);
3614 conn = cmd->user_data;
3616 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3617 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3618 MGMT_STATUS_INVALID_PARAMS);
3622 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3623 mgmt_pending_remove(cmd);
3625 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3626 addr, sizeof(*addr));
3628 /* Since the user doesn't want to proceed with the connection, abort
3629 * any ongoing pairing and then terminate the link if it was created
3630 * because of the Pair Device action.
3632 if (addr->type == BDADDR_BREDR)
3633 hci_remove_link_key(hdev, &addr->bdaddr);
3635 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3636 le_addr_type(addr->type));
3638 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3639 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3642 hci_dev_unlock(hdev);
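/* Common helper for the user confirmation, user passkey and PIN code
 * negative replies: for LE addresses the response is handed to SMP,
 * while for BR/EDR the corresponding HCI command is sent to the
 * controller.
 */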
3646 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3647 struct mgmt_addr_info *addr, u16 mgmt_op,
3648 u16 hci_op, __le32 passkey)
3650 struct mgmt_pending_cmd *cmd;
3651 struct hci_conn *conn;
3656 if (!hdev_is_powered(hdev)) {
3657 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3658 MGMT_STATUS_NOT_POWERED, addr,
3663 if (addr->type == BDADDR_BREDR)
3664 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3666 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3667 le_addr_type(addr->type));
3670 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3671 MGMT_STATUS_NOT_CONNECTED, addr,
3676 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3677 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3679 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3680 MGMT_STATUS_SUCCESS, addr,
3683 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3684 MGMT_STATUS_FAILED, addr,
3690 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3696 cmd->cmd_complete = addr_cmd_complete;
3698 /* Continue with pairing via HCI */
3699 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3700 struct hci_cp_user_passkey_reply cp;
3702 bacpy(&cp.bdaddr, &addr->bdaddr);
3703 cp.passkey = passkey;
3704 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3706 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3710 mgmt_pending_remove(cmd);
3713 hci_dev_unlock(hdev);
3717 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3718 void *data, u16 len)
3720 struct mgmt_cp_pin_code_neg_reply *cp = data;
3722 bt_dev_dbg(hdev, "sock %p", sk);
3724 return user_pairing_resp(sk, hdev, &cp->addr,
3725 MGMT_OP_PIN_CODE_NEG_REPLY,
3726 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3729 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3732 struct mgmt_cp_user_confirm_reply *cp = data;
3734 bt_dev_dbg(hdev, "sock %p", sk);
3736 if (len != sizeof(*cp))
3737 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3738 MGMT_STATUS_INVALID_PARAMS);
3740 return user_pairing_resp(sk, hdev, &cp->addr,
3741 MGMT_OP_USER_CONFIRM_REPLY,
3742 HCI_OP_USER_CONFIRM_REPLY, 0);
3745 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3746 void *data, u16 len)
3748 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3750 bt_dev_dbg(hdev, "sock %p", sk);
3752 return user_pairing_resp(sk, hdev, &cp->addr,
3753 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3754 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3757 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3760 struct mgmt_cp_user_passkey_reply *cp = data;
3762 bt_dev_dbg(hdev, "sock %p", sk);
3764 return user_pairing_resp(sk, hdev, &cp->addr,
3765 MGMT_OP_USER_PASSKEY_REPLY,
3766 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3769 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3770 void *data, u16 len)
3772 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3774 bt_dev_dbg(hdev, "sock %p", sk);
3776 return user_pairing_resp(sk, hdev, &cp->addr,
3777 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3778 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
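/* Expire the current advertising instance if it carries data affected
 * by the given flags (for example the local name or the appearance)
 * and schedule the next instance, so that the updated value becomes
 * visible.
 */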
3781 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3783 struct adv_info *adv_instance;
3785 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3789 /* stop if current instance doesn't need to be changed */
3790 if (!(adv_instance->flags & flags))
3793 cancel_adv_timeout(hdev);
3795 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3799 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3804 static int name_changed_sync(struct hci_dev *hdev, void *data)
3806 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3809 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3811 struct mgmt_pending_cmd *cmd = data;
3812 struct mgmt_cp_set_local_name *cp = cmd->param;
3813 u8 status = mgmt_status(err);
3815 bt_dev_dbg(hdev, "err %d", err);
3817 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3821 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3824 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3827 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3828 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3831 mgmt_pending_remove(cmd);
3834 static int set_name_sync(struct hci_dev *hdev, void *data)
3836 if (lmp_bredr_capable(hdev)) {
3837 hci_update_name_sync(hdev);
3838 hci_update_eir_sync(hdev);
3841 /* The name is stored in the scan response data, so there is
3842 * no need to update the advertising data here.
3844 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3845 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3850 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3853 struct mgmt_cp_set_local_name *cp = data;
3854 struct mgmt_pending_cmd *cmd;
3857 bt_dev_dbg(hdev, "sock %p", sk);
3861 /* If the old values are the same as the new ones, just return a
3862 * direct command complete event.
3864 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3865 !memcmp(hdev->short_name, cp->short_name,
3866 sizeof(hdev->short_name))) {
3867 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3872 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3874 if (!hdev_is_powered(hdev)) {
3875 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3877 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3882 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3883 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3884 ext_info_changed(hdev, sk);
3889 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3893 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3897 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3898 MGMT_STATUS_FAILED);
3901 mgmt_pending_remove(cmd);
3906 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3909 hci_dev_unlock(hdev);
3913 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3915 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3918 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3921 struct mgmt_cp_set_appearance *cp = data;
3925 bt_dev_dbg(hdev, "sock %p", sk);
3927 if (!lmp_le_capable(hdev))
3928 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3929 MGMT_STATUS_NOT_SUPPORTED);
3931 appearance = le16_to_cpu(cp->appearance);
3935 if (hdev->appearance != appearance) {
3936 hdev->appearance = appearance;
3938 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3939 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3942 ext_info_changed(hdev, sk);
3945 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3948 hci_dev_unlock(hdev);
3953 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3954 void *data, u16 len)
3956 struct mgmt_rp_get_phy_configuration rp;
3958 bt_dev_dbg(hdev, "sock %p", sk);
3962 memset(&rp, 0, sizeof(rp));
3964 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3965 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3966 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3968 hci_dev_unlock(hdev);
3970 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3974 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3976 struct mgmt_ev_phy_configuration_changed ev;
3978 memset(&ev, 0, sizeof(ev));
3980 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3982 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3986 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3988 struct mgmt_pending_cmd *cmd = data;
3989 struct sk_buff *skb = cmd->skb;
3990 u8 status = mgmt_status(err);
3992 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3997 status = MGMT_STATUS_FAILED;
3998 else if (IS_ERR(skb))
3999 status = mgmt_status(PTR_ERR(skb));
4001 status = mgmt_status(skb->data[0]);
4004 bt_dev_dbg(hdev, "status %d", status);
4007 mgmt_cmd_status(cmd->sk, hdev->id,
4008 MGMT_OP_SET_PHY_CONFIGURATION, status);
4010 mgmt_cmd_complete(cmd->sk, hdev->id,
4011 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4014 mgmt_phy_configuration_changed(hdev, cmd->sk);
4017 if (skb && !IS_ERR(skb))
4020 mgmt_pending_remove(cmd);
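/* Synchronous worker for Set PHY Configuration: translate the selected
 * MGMT_PHY_LE_* bits into an HCI LE Set Default PHY command and issue
 * it to the controller.
 */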
4023 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4025 struct mgmt_pending_cmd *cmd = data;
4026 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4027 struct hci_cp_le_set_default_phy cp_phy;
4028 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4030 memset(&cp_phy, 0, sizeof(cp_phy));
4032 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4033 cp_phy.all_phys |= 0x01;
4035 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4036 cp_phy.all_phys |= 0x02;
4038 if (selected_phys & MGMT_PHY_LE_1M_TX)
4039 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4041 if (selected_phys & MGMT_PHY_LE_2M_TX)
4042 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4044 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4045 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4047 if (selected_phys & MGMT_PHY_LE_1M_RX)
4048 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4050 if (selected_phys & MGMT_PHY_LE_2M_RX)
4051 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4053 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4054 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4056 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4057 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4062 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4063 void *data, u16 len)
4065 struct mgmt_cp_set_phy_configuration *cp = data;
4066 struct mgmt_pending_cmd *cmd;
4067 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4068 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4069 bool changed = false;
4072 bt_dev_dbg(hdev, "sock %p", sk);
4074 configurable_phys = get_configurable_phys(hdev);
4075 supported_phys = get_supported_phys(hdev);
4076 selected_phys = __le32_to_cpu(cp->selected_phys);
4078 if (selected_phys & ~supported_phys)
4079 return mgmt_cmd_status(sk, hdev->id,
4080 MGMT_OP_SET_PHY_CONFIGURATION,
4081 MGMT_STATUS_INVALID_PARAMS);
4083 unconfigure_phys = supported_phys & ~configurable_phys;
4085 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4086 return mgmt_cmd_status(sk, hdev->id,
4087 MGMT_OP_SET_PHY_CONFIGURATION,
4088 MGMT_STATUS_INVALID_PARAMS);
4090 if (selected_phys == get_selected_phys(hdev))
4091 return mgmt_cmd_complete(sk, hdev->id,
4092 MGMT_OP_SET_PHY_CONFIGURATION,
4097 if (!hdev_is_powered(hdev)) {
4098 err = mgmt_cmd_status(sk, hdev->id,
4099 MGMT_OP_SET_PHY_CONFIGURATION,
4100 MGMT_STATUS_REJECTED);
4104 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4105 err = mgmt_cmd_status(sk, hdev->id,
4106 MGMT_OP_SET_PHY_CONFIGURATION,
4111 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4112 pkt_type |= (HCI_DH3 | HCI_DM3);
4114 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4116 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4117 pkt_type |= (HCI_DH5 | HCI_DM5);
4119 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4121 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4122 pkt_type &= ~HCI_2DH1;
4124 pkt_type |= HCI_2DH1;
4126 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4127 pkt_type &= ~HCI_2DH3;
4129 pkt_type |= HCI_2DH3;
4131 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4132 pkt_type &= ~HCI_2DH5;
4134 pkt_type |= HCI_2DH5;
4136 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4137 pkt_type &= ~HCI_3DH1;
4139 pkt_type |= HCI_3DH1;
4141 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4142 pkt_type &= ~HCI_3DH3;
4144 pkt_type |= HCI_3DH3;
4146 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4147 pkt_type &= ~HCI_3DH5;
4149 pkt_type |= HCI_3DH5;
4151 if (pkt_type != hdev->pkt_type) {
4152 hdev->pkt_type = pkt_type;
4156 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4157 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4159 mgmt_phy_configuration_changed(hdev, sk);
4161 err = mgmt_cmd_complete(sk, hdev->id,
4162 MGMT_OP_SET_PHY_CONFIGURATION,
4168 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4173 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4174 set_default_phy_complete);
4177 err = mgmt_cmd_status(sk, hdev->id,
4178 MGMT_OP_SET_PHY_CONFIGURATION,
4179 MGMT_STATUS_FAILED);
4182 mgmt_pending_remove(cmd);
4186 hci_dev_unlock(hdev);
4191 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4194 int err = MGMT_STATUS_SUCCESS;
4195 struct mgmt_cp_set_blocked_keys *keys = data;
4196 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4197 sizeof(struct mgmt_blocked_key_info));
4198 u16 key_count, expected_len;
4201 bt_dev_dbg(hdev, "sock %p", sk);
4203 key_count = __le16_to_cpu(keys->key_count);
4204 if (key_count > max_key_count) {
4205 bt_dev_err(hdev, "too big key_count value %u", key_count);
4206 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4207 MGMT_STATUS_INVALID_PARAMS);
4210 expected_len = struct_size(keys, keys, key_count);
4211 if (expected_len != len) {
4212 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4214 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4215 MGMT_STATUS_INVALID_PARAMS);
4220 hci_blocked_keys_clear(hdev);
4222 for (i = 0; i < key_count; ++i) {
4223 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4226 err = MGMT_STATUS_NO_RESOURCES;
4230 b->type = keys->keys[i].type;
4231 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4232 list_add_rcu(&b->list, &hdev->blocked_keys);
4234 hci_dev_unlock(hdev);
4236 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4240 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4241 void *data, u16 len)
4243 struct mgmt_mode *cp = data;
4245 bool changed = false;
4247 bt_dev_dbg(hdev, "sock %p", sk);
4249 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4250 return mgmt_cmd_status(sk, hdev->id,
4251 MGMT_OP_SET_WIDEBAND_SPEECH,
4252 MGMT_STATUS_NOT_SUPPORTED);
4254 if (cp->val != 0x00 && cp->val != 0x01)
4255 return mgmt_cmd_status(sk, hdev->id,
4256 MGMT_OP_SET_WIDEBAND_SPEECH,
4257 MGMT_STATUS_INVALID_PARAMS);
4261 if (hdev_is_powered(hdev) &&
4262 !!cp->val != hci_dev_test_flag(hdev,
4263 HCI_WIDEBAND_SPEECH_ENABLED)) {
4264 err = mgmt_cmd_status(sk, hdev->id,
4265 MGMT_OP_SET_WIDEBAND_SPEECH,
4266 MGMT_STATUS_REJECTED);
4271 changed = !hci_dev_test_and_set_flag(hdev,
4272 HCI_WIDEBAND_SPEECH_ENABLED);
4274 changed = hci_dev_test_and_clear_flag(hdev,
4275 HCI_WIDEBAND_SPEECH_ENABLED);
4277 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4282 err = new_settings(hdev, sk);
4285 hci_dev_unlock(hdev);
4289 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4290 void *data, u16 data_len)
4293 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4296 u8 tx_power_range[2];
4298 bt_dev_dbg(hdev, "sock %p", sk);
4300 memset(&buf, 0, sizeof(buf));
4304 /* When the Read Simple Pairing Options command is supported,
4305 * remote public key validation is supported as well.
4307 * Alternatively, when the Microsoft extensions are available,
4308 * they can also indicate support for public key validation.
4310 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4311 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4313 flags |= 0x02; /* Remote public key validation (LE) */
4315 /* When the Read Encryption Key Size command is supported, the
4316 * encryption key size is enforced.
4318 if (hdev->commands[20] & 0x10)
4319 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4321 flags |= 0x08; /* Encryption key size enforcement (LE) */
4323 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4326 /* When the Read Simple Pairing Options command is supported, the
4327 * maximum encryption key size information is also provided.
4329 if (hdev->commands[41] & 0x08)
4330 cap_len = eir_append_le16(rp->cap, cap_len,
4331 MGMT_CAP_MAX_ENC_KEY_SIZE,
4332 hdev->max_enc_key_size);
4334 cap_len = eir_append_le16(rp->cap, cap_len,
4335 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4336 SMP_MAX_ENC_KEY_SIZE);
4338 /* Append the min/max LE TX power parameters if we were able to
4339 * fetch them from the controller
4341 if (hdev->commands[38] & 0x80) {
4342 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4343 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4344 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4348 rp->cap_len = cpu_to_le16(cap_len);
4350 hci_dev_unlock(hdev);
4352 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4353 rp, sizeof(*rp) + cap_len);
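/* UUIDs identifying the experimental features that can be queried and
 * toggled through the Read/Set Experimental Features commands.
 */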
4356 #ifdef CONFIG_BT_FEATURE_DEBUG
4357 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4358 static const u8 debug_uuid[16] = {
4359 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4360 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4364 /* 330859bc-7506-492d-9370-9a6f0614037f */
4365 static const u8 quality_report_uuid[16] = {
4366 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4367 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4370 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4371 static const u8 offload_codecs_uuid[16] = {
4372 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4373 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4376 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4377 static const u8 le_simultaneous_roles_uuid[16] = {
4378 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4379 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4382 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4383 static const u8 rpa_resolution_uuid[16] = {
4384 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4385 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4388 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4389 static const u8 iso_socket_uuid[16] = {
4390 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4391 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4394 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4395 static const u8 mgmt_mesh_uuid[16] = {
4396 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4397 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
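/* Handle MGMT_OP_READ_EXP_FEATURES_INFO: build the list of experimental
 * features applicable to this controller (or to the global,
 * non-controller index) together with their current enable flags.
 */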
4400 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4401 void *data, u16 data_len)
4403 struct mgmt_rp_read_exp_features_info *rp;
4409 bt_dev_dbg(hdev, "sock %p", sk);
4411 /* Enough space for 7 features */
4412 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4413 rp = kzalloc(len, GFP_KERNEL);
4417 #ifdef CONFIG_BT_FEATURE_DEBUG
4419 flags = bt_dbg_get() ? BIT(0) : 0;
4421 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4422 rp->features[idx].flags = cpu_to_le32(flags);
4427 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4428 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4433 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4434 rp->features[idx].flags = cpu_to_le32(flags);
4438 if (hdev && ll_privacy_capable(hdev)) {
4439 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4440 flags = BIT(0) | BIT(1);
4444 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4445 rp->features[idx].flags = cpu_to_le32(flags);
4449 if (hdev && (aosp_has_quality_report(hdev) ||
4450 hdev->set_quality_report)) {
4451 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4456 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4457 rp->features[idx].flags = cpu_to_le32(flags);
4461 if (hdev && hdev->get_data_path_id) {
4462 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4467 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4468 rp->features[idx].flags = cpu_to_le32(flags);
4472 if (IS_ENABLED(CONFIG_BT_LE)) {
4473 flags = iso_enabled() ? BIT(0) : 0;
4474 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4475 rp->features[idx].flags = cpu_to_le32(flags);
4479 if (hdev && lmp_le_capable(hdev)) {
4480 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4485 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4486 rp->features[idx].flags = cpu_to_le32(flags);
4490 rp->feature_count = cpu_to_le16(idx);
4492 /* After reading the experimental features information, enable
4493 * the events to update the client on any future change.
4495 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4497 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4498 MGMT_OP_READ_EXP_FEATURES_INFO,
4499 0, rp, sizeof(*rp) + (20 * idx));
4505 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4508 struct mgmt_ev_exp_feature_changed ev;
4510 memset(&ev, 0, sizeof(ev));
4511 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4512 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4514 /* Do we need to be atomic with the conn_flags? */
4515 if (enabled && privacy_mode_capable(hdev))
4516 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4518 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4520 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4522 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4526 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4527 bool enabled, struct sock *skip)
4529 struct mgmt_ev_exp_feature_changed ev;
4531 memset(&ev, 0, sizeof(ev));
4532 memcpy(ev.uuid, uuid, 16);
4533 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4535 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4537 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
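/* Table entry tying an experimental feature UUID to its set handler;
 * set_exp_feature() walks this table to dispatch the request.
 */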
4540 #define EXP_FEAT(_uuid, _set_func) \
4543 .set_func = _set_func, \
4546 /* The zero key uuid is special. Multiple exp features are set through it. */
4547 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4548 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4550 struct mgmt_rp_set_exp_feature rp;
4552 memset(rp.uuid, 0, 16);
4553 rp.flags = cpu_to_le32(0);
4555 #ifdef CONFIG_BT_FEATURE_DEBUG
4557 bool changed = bt_dbg_get();
4562 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4566 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4569 changed = hci_dev_test_and_clear_flag(hdev,
4570 HCI_ENABLE_LL_PRIVACY);
4572 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4576 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4578 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4579 MGMT_OP_SET_EXP_FEATURE, 0,
4583 #ifdef CONFIG_BT_FEATURE_DEBUG
4584 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4585 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4587 struct mgmt_rp_set_exp_feature rp;
4592 /* Command requires use of the non-controller index */
4594 return mgmt_cmd_status(sk, hdev->id,
4595 MGMT_OP_SET_EXP_FEATURE,
4596 MGMT_STATUS_INVALID_INDEX);
4598 /* Parameters are limited to a single octet */
4599 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4600 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4601 MGMT_OP_SET_EXP_FEATURE,
4602 MGMT_STATUS_INVALID_PARAMS);
4604 /* Only boolean on/off is supported */
4605 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4606 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4607 MGMT_OP_SET_EXP_FEATURE,
4608 MGMT_STATUS_INVALID_PARAMS);
4610 val = !!cp->param[0];
4611 changed = val ? !bt_dbg_get() : bt_dbg_get();
4614 memcpy(rp.uuid, debug_uuid, 16);
4615 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4617 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4619 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4620 MGMT_OP_SET_EXP_FEATURE, 0,
4624 exp_feature_changed(hdev, debug_uuid, val, sk);
4630 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4631 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4633 struct mgmt_rp_set_exp_feature rp;
4637 /* Command requires use of the controller index */
4639 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4640 MGMT_OP_SET_EXP_FEATURE,
4641 MGMT_STATUS_INVALID_INDEX);
4643 /* Parameters are limited to a single octet */
4644 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4645 return mgmt_cmd_status(sk, hdev->id,
4646 MGMT_OP_SET_EXP_FEATURE,
4647 MGMT_STATUS_INVALID_PARAMS);
4649 /* Only boolean on/off is supported */
4650 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4651 return mgmt_cmd_status(sk, hdev->id,
4652 MGMT_OP_SET_EXP_FEATURE,
4653 MGMT_STATUS_INVALID_PARAMS);
4655 val = !!cp->param[0];
4658 changed = !hci_dev_test_and_set_flag(hdev,
4659 HCI_MESH_EXPERIMENTAL);
4661 hci_dev_clear_flag(hdev, HCI_MESH);
4662 changed = hci_dev_test_and_clear_flag(hdev,
4663 HCI_MESH_EXPERIMENTAL);
4666 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4667 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4669 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4671 err = mgmt_cmd_complete(sk, hdev->id,
4672 MGMT_OP_SET_EXP_FEATURE, 0,
4676 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4681 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4682 struct mgmt_cp_set_exp_feature *cp,
4685 struct mgmt_rp_set_exp_feature rp;
4690 /* Command requires use of the controller index */
4692 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4693 MGMT_OP_SET_EXP_FEATURE,
4694 MGMT_STATUS_INVALID_INDEX);
4696 /* Changes can only be made when the controller is powered down */
4697 if (hdev_is_powered(hdev))
4698 return mgmt_cmd_status(sk, hdev->id,
4699 MGMT_OP_SET_EXP_FEATURE,
4700 MGMT_STATUS_REJECTED);
4702 /* Parameters are limited to a single octet */
4703 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4704 return mgmt_cmd_status(sk, hdev->id,
4705 MGMT_OP_SET_EXP_FEATURE,
4706 MGMT_STATUS_INVALID_PARAMS);
4708 /* Only boolean on/off is supported */
4709 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4710 return mgmt_cmd_status(sk, hdev->id,
4711 MGMT_OP_SET_EXP_FEATURE,
4712 MGMT_STATUS_INVALID_PARAMS);
4714 val = !!cp->param[0];
4717 changed = !hci_dev_test_and_set_flag(hdev,
4718 HCI_ENABLE_LL_PRIVACY);
4719 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4721 /* Enable LL privacy + supported settings changed */
4722 flags = BIT(0) | BIT(1);
4724 changed = hci_dev_test_and_clear_flag(hdev,
4725 HCI_ENABLE_LL_PRIVACY);
4727 /* Disable LL privacy + supported settings changed */
4731 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4732 rp.flags = cpu_to_le32(flags);
4734 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4736 err = mgmt_cmd_complete(sk, hdev->id,
4737 MGMT_OP_SET_EXP_FEATURE, 0,
4741 exp_ll_privacy_feature_changed(val, hdev, sk);
4746 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4747 struct mgmt_cp_set_exp_feature *cp,
4750 struct mgmt_rp_set_exp_feature rp;
4754 /* Command requires use of a valid controller index */
4756 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4757 MGMT_OP_SET_EXP_FEATURE,
4758 MGMT_STATUS_INVALID_INDEX);
4760 /* Parameters are limited to a single octet */
4761 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4762 return mgmt_cmd_status(sk, hdev->id,
4763 MGMT_OP_SET_EXP_FEATURE,
4764 MGMT_STATUS_INVALID_PARAMS);
4766 /* Only boolean on/off is supported */
4767 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4768 return mgmt_cmd_status(sk, hdev->id,
4769 MGMT_OP_SET_EXP_FEATURE,
4770 MGMT_STATUS_INVALID_PARAMS);
4772 hci_req_sync_lock(hdev);
4774 val = !!cp->param[0];
4775 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4777 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4778 err = mgmt_cmd_status(sk, hdev->id,
4779 MGMT_OP_SET_EXP_FEATURE,
4780 MGMT_STATUS_NOT_SUPPORTED);
4781 goto unlock_quality_report;
4785 if (hdev->set_quality_report)
4786 err = hdev->set_quality_report(hdev, val);
4788 err = aosp_set_quality_report(hdev, val);
4791 err = mgmt_cmd_status(sk, hdev->id,
4792 MGMT_OP_SET_EXP_FEATURE,
4793 MGMT_STATUS_FAILED);
4794 goto unlock_quality_report;
4798 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4800 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4803 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4805 memcpy(rp.uuid, quality_report_uuid, 16);
4806 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4807 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4809 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4813 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4815 unlock_quality_report:
4816 hci_req_sync_unlock(hdev);
4820 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4821 struct mgmt_cp_set_exp_feature *cp,
4826 struct mgmt_rp_set_exp_feature rp;
4828 /* Command requires use of a valid controller index */
4830 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4831 MGMT_OP_SET_EXP_FEATURE,
4832 MGMT_STATUS_INVALID_INDEX);
4834 /* Parameters are limited to a single octet */
4835 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4836 return mgmt_cmd_status(sk, hdev->id,
4837 MGMT_OP_SET_EXP_FEATURE,
4838 MGMT_STATUS_INVALID_PARAMS);
4840 /* Only boolean on/off is supported */
4841 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4842 return mgmt_cmd_status(sk, hdev->id,
4843 MGMT_OP_SET_EXP_FEATURE,
4844 MGMT_STATUS_INVALID_PARAMS);
4846 val = !!cp->param[0];
4847 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4849 if (!hdev->get_data_path_id) {
4850 return mgmt_cmd_status(sk, hdev->id,
4851 MGMT_OP_SET_EXP_FEATURE,
4852 MGMT_STATUS_NOT_SUPPORTED);
4857 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4859 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4862 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4865 memcpy(rp.uuid, offload_codecs_uuid, 16);
4866 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4867 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4868 err = mgmt_cmd_complete(sk, hdev->id,
4869 MGMT_OP_SET_EXP_FEATURE, 0,
4873 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4878 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4879 struct mgmt_cp_set_exp_feature *cp,
4884 struct mgmt_rp_set_exp_feature rp;
4886 /* Command requires use of a valid controller index */
4888 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4889 MGMT_OP_SET_EXP_FEATURE,
4890 MGMT_STATUS_INVALID_INDEX);
4892 /* Parameters are limited to a single octet */
4893 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4894 return mgmt_cmd_status(sk, hdev->id,
4895 MGMT_OP_SET_EXP_FEATURE,
4896 MGMT_STATUS_INVALID_PARAMS);
4898 /* Only boolean on/off is supported */
4899 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4900 return mgmt_cmd_status(sk, hdev->id,
4901 MGMT_OP_SET_EXP_FEATURE,
4902 MGMT_STATUS_INVALID_PARAMS);
4904 val = !!cp->param[0];
4905 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4907 if (!hci_dev_le_state_simultaneous(hdev)) {
4908 return mgmt_cmd_status(sk, hdev->id,
4909 MGMT_OP_SET_EXP_FEATURE,
4910 MGMT_STATUS_NOT_SUPPORTED);
4915 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4917 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4920 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4923 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4924 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4925 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4926 err = mgmt_cmd_complete(sk, hdev->id,
4927 MGMT_OP_SET_EXP_FEATURE, 0,
4931 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4937 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4938 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4940 struct mgmt_rp_set_exp_feature rp;
4941 bool val, changed = false;
4944 /* Command requires the non-controller index */
4946 return mgmt_cmd_status(sk, hdev->id,
4947 MGMT_OP_SET_EXP_FEATURE,
4948 MGMT_STATUS_INVALID_INDEX);
4950 /* Parameters are limited to a single octet */
4951 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4952 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4953 MGMT_OP_SET_EXP_FEATURE,
4954 MGMT_STATUS_INVALID_PARAMS);
4956 /* Only boolean on/off is supported */
4957 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4958 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4959 MGMT_OP_SET_EXP_FEATURE,
4960 MGMT_STATUS_INVALID_PARAMS);
4962 val = !!cp->param[0];
4971 memcpy(rp.uuid, iso_socket_uuid, 16);
4972 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4974 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4976 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4977 MGMT_OP_SET_EXP_FEATURE, 0,
4981 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4987 static const struct mgmt_exp_feature {
4989 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4990 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4991 } exp_features[] = {
4992 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4993 #ifdef CONFIG_BT_FEATURE_DEBUG
4994 EXP_FEAT(debug_uuid, set_debug_func),
4996 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
4997 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4998 EXP_FEAT(quality_report_uuid, set_quality_report_func),
4999 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5000 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5002 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5005 /* end with a null feature */
5006 EXP_FEAT(NULL, NULL)
5009 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5010 void *data, u16 data_len)
5012 struct mgmt_cp_set_exp_feature *cp = data;
5015 bt_dev_dbg(hdev, "sock %p", sk);
5017 for (i = 0; exp_features[i].uuid; i++) {
5018 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5019 return exp_features[i].set_func(sk, hdev, cp, data_len);
5022 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5023 MGMT_OP_SET_EXP_FEATURE,
5024 MGMT_STATUS_NOT_SUPPORTED);
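/* Illustrative note (a sketch against the mgmt.h definitions, which are
 * assumed here): the Set Experimental Feature payload is a 16-byte UUID
 * followed by feature-specific parameters, i.e.
 *
 *   octets 0-15: feature UUID, matched against exp_features[] above
 *   octet 16:    0x00/0x01 for the boolean features, which is why the
 *                handlers insist on data_len == MGMT_SET_EXP_FEATURE_SIZE + 1
 */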
5027 static u32 get_params_flags(struct hci_dev *hdev,
5028 struct hci_conn_params *params)
5030 u32 flags = hdev->conn_flags;
5032 /* Devices using RPAs can only be programmed in the acceptlist if
5033 * LL Privacy has been enabled, otherwise they cannot mark
5034 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5036 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5037 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5038 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5043 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5046 struct mgmt_cp_get_device_flags *cp = data;
5047 struct mgmt_rp_get_device_flags rp;
5048 struct bdaddr_list_with_flags *br_params;
5049 struct hci_conn_params *params;
5050 u32 supported_flags;
5051 u32 current_flags = 0;
5052 u8 status = MGMT_STATUS_INVALID_PARAMS;
5054 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5055 &cp->addr.bdaddr, cp->addr.type);
5059 supported_flags = hdev->conn_flags;
5061 memset(&rp, 0, sizeof(rp));
5063 if (cp->addr.type == BDADDR_BREDR) {
5064 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5070 current_flags = br_params->flags;
5072 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5073 le_addr_type(cp->addr.type));
5077 supported_flags = get_params_flags(hdev, params);
5078 current_flags = params->flags;
5081 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5082 rp.addr.type = cp->addr.type;
5083 rp.supported_flags = cpu_to_le32(supported_flags);
5084 rp.current_flags = cpu_to_le32(current_flags);
5086 status = MGMT_STATUS_SUCCESS;
5089 hci_dev_unlock(hdev);
5091 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5095 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5096 bdaddr_t *bdaddr, u8 bdaddr_type,
5097 u32 supported_flags, u32 current_flags)
5099 struct mgmt_ev_device_flags_changed ev;
5101 bacpy(&ev.addr.bdaddr, bdaddr);
5102 ev.addr.type = bdaddr_type;
5103 ev.supported_flags = cpu_to_le32(supported_flags);
5104 ev.current_flags = cpu_to_le32(current_flags);
5106 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5109 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5112 struct mgmt_cp_set_device_flags *cp = data;
5113 struct bdaddr_list_with_flags *br_params;
5114 struct hci_conn_params *params;
5115 u8 status = MGMT_STATUS_INVALID_PARAMS;
5116 u32 supported_flags;
5117 u32 current_flags = __le32_to_cpu(cp->current_flags);
5119 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5120 &cp->addr.bdaddr, cp->addr.type, current_flags);
5122 /* NOTE: conn_flags can change concurrently; hci_dev_lock() should arguably be taken before this read. */
5123 supported_flags = hdev->conn_flags;
5125 if ((supported_flags | current_flags) != supported_flags) {
5126 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5127 current_flags, supported_flags);
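/* Worked example of the mask check above: with supported_flags 0x3,
 * a request for 0x5 gives (0x3 | 0x5) == 0x7 != 0x3, i.e. bit 0x4 is
 * outside the supported set, so the request is rejected. */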
5133 if (cp->addr.type == BDADDR_BREDR) {
5134 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5139 br_params->flags = current_flags;
5140 status = MGMT_STATUS_SUCCESS;
5142 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5143 &cp->addr.bdaddr, cp->addr.type);
5149 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5150 le_addr_type(cp->addr.type));
5152 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5153 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5157 supported_flags = get_params_flags(hdev, params);
5159 if ((supported_flags | current_flags) != supported_flags) {
5160 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5161 current_flags, supported_flags);
5165 WRITE_ONCE(params->flags, current_flags);
5166 status = MGMT_STATUS_SUCCESS;
5168 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5171 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5172 hci_update_passive_scan(hdev);
5175 hci_dev_unlock(hdev);
5178 if (status == MGMT_STATUS_SUCCESS)
5179 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5180 supported_flags, current_flags);
5182 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5183 &cp->addr, sizeof(cp->addr));
5186 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5189 struct mgmt_ev_adv_monitor_added ev;
5191 ev.monitor_handle = cpu_to_le16(handle);
5193 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5196 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5198 struct mgmt_ev_adv_monitor_removed ev;
5199 struct mgmt_pending_cmd *cmd;
5200 struct sock *sk_skip = NULL;
5201 struct mgmt_cp_remove_adv_monitor *cp;
5203 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5207 if (cp->monitor_handle)
5211 ev.monitor_handle = cpu_to_le16(handle);
5213 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
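/* Design note: when a Remove Advertising Monitor command is pending for
 * an explicit handle, its socket is recorded as sk_skip so the requester
 * does not receive an event echoing its own removal; it learns the
 * outcome from the command reply instead. */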
5216 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5217 void *data, u16 len)
5219 struct adv_monitor *monitor = NULL;
5220 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5223 __u32 supported = 0;
5225 __u16 num_handles = 0;
5226 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5228 BT_DBG("request for %s", hdev->name);
5232 if (msft_monitor_supported(hdev))
5233 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5235 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5236 handles[num_handles++] = monitor->handle;
5238 hci_dev_unlock(hdev);
5240 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5241 rp = kmalloc(rp_size, GFP_KERNEL);
5245 /* All supported features are currently enabled */
5246 enabled = supported;
5248 rp->supported_features = cpu_to_le32(supported);
5249 rp->enabled_features = cpu_to_le32(enabled);
5250 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5251 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5252 rp->num_handles = cpu_to_le16(num_handles);
5254 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5256 err = mgmt_cmd_complete(sk, hdev->id,
5257 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5258 MGMT_STATUS_SUCCESS, rp, rp_size);
5265 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5266 void *data, int status)
5268 struct mgmt_rp_add_adv_patterns_monitor rp;
5269 struct mgmt_pending_cmd *cmd = data;
5270 struct adv_monitor *monitor = cmd->user_data;
5274 rp.monitor_handle = cpu_to_le16(monitor->handle);
5277 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5278 hdev->adv_monitors_cnt++;
5279 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5280 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5281 hci_update_passive_scan(hdev);
5284 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5285 mgmt_status(status), &rp, sizeof(rp));
5286 mgmt_pending_remove(cmd);
5288 hci_dev_unlock(hdev);
5289 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5290 rp.monitor_handle, status);
5293 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5295 struct mgmt_pending_cmd *cmd = data;
5296 struct adv_monitor *monitor = cmd->user_data;
5298 return hci_add_adv_monitor(hdev, monitor);
5301 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5302 struct adv_monitor *m, u8 status,
5303 void *data, u16 len, u16 op)
5305 struct mgmt_pending_cmd *cmd;
5313 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5314 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5315 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5316 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5317 status = MGMT_STATUS_BUSY;
5321 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5323 status = MGMT_STATUS_NO_RESOURCES;
5328 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5329 mgmt_add_adv_patterns_monitor_complete);
5332 status = MGMT_STATUS_NO_RESOURCES;
5334 status = MGMT_STATUS_FAILED;
5339 hci_dev_unlock(hdev);
5344 hci_free_adv_monitor(hdev, m);
5345 hci_dev_unlock(hdev);
5346 return mgmt_cmd_status(sk, hdev->id, op, status);
5349 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5350 struct mgmt_adv_rssi_thresholds *rssi)
5353 m->rssi.low_threshold = rssi->low_threshold;
5354 m->rssi.low_threshold_timeout =
5355 __le16_to_cpu(rssi->low_threshold_timeout);
5356 m->rssi.high_threshold = rssi->high_threshold;
5357 m->rssi.high_threshold_timeout =
5358 __le16_to_cpu(rssi->high_threshold_timeout);
5359 m->rssi.sampling_period = rssi->sampling_period;
5361 /* Default values. These numbers are the least constraining
5362 * parameters for the MSFT API to work, so it behaves as if there
5363 * were no RSSI parameters to consider. May need to be changed
5364 * if other APIs are to be supported.
5366 m->rssi.low_threshold = -127;
5367 m->rssi.low_threshold_timeout = 60;
5368 m->rssi.high_threshold = -127;
5369 m->rssi.high_threshold_timeout = 0;
5370 m->rssi.sampling_period = 0;
5374 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5375 struct mgmt_adv_pattern *patterns)
5377 u8 offset = 0, length = 0;
5378 struct adv_pattern *p = NULL;
5381 for (i = 0; i < pattern_count; i++) {
5382 offset = patterns[i].offset;
5383 length = patterns[i].length;
5384 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5385 length > HCI_MAX_EXT_AD_LENGTH ||
5386 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5387 return MGMT_STATUS_INVALID_PARAMS;
5389 p = kmalloc(sizeof(*p), GFP_KERNEL);
5391 return MGMT_STATUS_NO_RESOURCES;
5393 p->ad_type = patterns[i].ad_type;
5394 p->offset = patterns[i].offset;
5395 p->length = patterns[i].length;
5396 memcpy(p->value, patterns[i].value, p->length);
5398 INIT_LIST_HEAD(&p->list);
5399 list_add(&p->list, &m->patterns);
5402 return MGMT_STATUS_SUCCESS;
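/* Worked example of the bounds check above, assuming HCI_MAX_EXT_AD_LENGTH
 * is 251: a pattern with offset 250 and length 2 passes the individual
 * limits, but 250 + 2 > 251, so it is rejected since it could never match
 * within a single extended advertising report. */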
5405 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5406 void *data, u16 len)
5408 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5409 struct adv_monitor *m = NULL;
5410 u8 status = MGMT_STATUS_SUCCESS;
5411 size_t expected_size = sizeof(*cp);
5413 BT_DBG("request for %s", hdev->name);
5415 if (len <= sizeof(*cp)) {
5416 status = MGMT_STATUS_INVALID_PARAMS;
5420 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5421 if (len != expected_size) {
5422 status = MGMT_STATUS_INVALID_PARAMS;
5426 m = kzalloc(sizeof(*m), GFP_KERNEL);
5428 status = MGMT_STATUS_NO_RESOURCES;
5432 INIT_LIST_HEAD(&m->patterns);
5434 parse_adv_monitor_rssi(m, NULL);
5435 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5438 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5439 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
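/* A sketch of the variable-length layout validated above: for
 * pattern_count == 2, len must be exactly
 *   sizeof(struct mgmt_cp_add_adv_patterns_monitor) +
 *           2 * sizeof(struct mgmt_adv_pattern)
 * and anything shorter or longer fails with MGMT_STATUS_INVALID_PARAMS. */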
5442 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5443 void *data, u16 len)
5445 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5446 struct adv_monitor *m = NULL;
5447 u8 status = MGMT_STATUS_SUCCESS;
5448 size_t expected_size = sizeof(*cp);
5450 BT_DBG("request for %s", hdev->name);
5452 if (len <= sizeof(*cp)) {
5453 status = MGMT_STATUS_INVALID_PARAMS;
5457 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5458 if (len != expected_size) {
5459 status = MGMT_STATUS_INVALID_PARAMS;
5463 m = kzalloc(sizeof(*m), GFP_KERNEL);
5465 status = MGMT_STATUS_NO_RESOURCES;
5469 INIT_LIST_HEAD(&m->patterns);
5471 parse_adv_monitor_rssi(m, &cp->rssi);
5472 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5475 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5476 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5479 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5480 void *data, int status)
5482 struct mgmt_rp_remove_adv_monitor rp;
5483 struct mgmt_pending_cmd *cmd = data;
5484 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5488 rp.monitor_handle = cp->monitor_handle;
5491 hci_update_passive_scan(hdev);
5493 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5494 mgmt_status(status), &rp, sizeof(rp));
5495 mgmt_pending_remove(cmd);
5497 hci_dev_unlock(hdev);
5498 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5499 rp.monitor_handle, status);
5502 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5504 struct mgmt_pending_cmd *cmd = data;
5505 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5506 u16 handle = __le16_to_cpu(cp->monitor_handle);
5509 return hci_remove_all_adv_monitor(hdev);
5511 return hci_remove_single_adv_monitor(hdev, handle);
5514 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5515 void *data, u16 len)
5517 struct mgmt_pending_cmd *cmd;
5522 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5523 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5524 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5525 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5526 status = MGMT_STATUS_BUSY;
5530 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5532 status = MGMT_STATUS_NO_RESOURCES;
5536 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5537 mgmt_remove_adv_monitor_complete);
5540 mgmt_pending_remove(cmd);
5543 status = MGMT_STATUS_NO_RESOURCES;
5545 status = MGMT_STATUS_FAILED;
5550 hci_dev_unlock(hdev);
5555 hci_dev_unlock(hdev);
5556 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5560 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5562 struct mgmt_rp_read_local_oob_data mgmt_rp;
5563 size_t rp_size = sizeof(mgmt_rp);
5564 struct mgmt_pending_cmd *cmd = data;
5565 struct sk_buff *skb = cmd->skb;
5566 u8 status = mgmt_status(err);
5570 status = MGMT_STATUS_FAILED;
5571 else if (IS_ERR(skb))
5572 status = mgmt_status(PTR_ERR(skb));
5574 status = mgmt_status(skb->data[0]);
5577 bt_dev_dbg(hdev, "status %d", status);
5580 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5584 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5586 if (!bredr_sc_enabled(hdev)) {
5587 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5589 if (skb->len < sizeof(*rp)) {
5590 mgmt_cmd_status(cmd->sk, hdev->id,
5591 MGMT_OP_READ_LOCAL_OOB_DATA,
5592 MGMT_STATUS_FAILED);
5596 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5597 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5599 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5601 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5603 if (skb->len < sizeof(*rp)) {
5604 mgmt_cmd_status(cmd->sk, hdev->id,
5605 MGMT_OP_READ_LOCAL_OOB_DATA,
5606 MGMT_STATUS_FAILED);
5610 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5611 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5613 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5614 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5617 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5618 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5621 if (skb && !IS_ERR(skb))
5624 mgmt_pending_free(cmd);
5627 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5629 struct mgmt_pending_cmd *cmd = data;
5631 if (bredr_sc_enabled(hdev))
5632 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5634 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5636 if (IS_ERR(cmd->skb))
5637 return PTR_ERR(cmd->skb);
5642 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5643 void *data, u16 data_len)
5645 struct mgmt_pending_cmd *cmd;
5648 bt_dev_dbg(hdev, "sock %p", sk);
5652 if (!hdev_is_powered(hdev)) {
5653 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5654 MGMT_STATUS_NOT_POWERED);
5658 if (!lmp_ssp_capable(hdev)) {
5659 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5660 MGMT_STATUS_NOT_SUPPORTED);
5664 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5668 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5669 read_local_oob_data_complete);
5672 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5673 MGMT_STATUS_FAILED);
5676 mgmt_pending_free(cmd);
5680 hci_dev_unlock(hdev);
5684 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5685 void *data, u16 len)
5687 struct mgmt_addr_info *addr = data;
5690 bt_dev_dbg(hdev, "sock %p", sk);
5692 if (!bdaddr_type_is_valid(addr->type))
5693 return mgmt_cmd_complete(sk, hdev->id,
5694 MGMT_OP_ADD_REMOTE_OOB_DATA,
5695 MGMT_STATUS_INVALID_PARAMS,
5696 addr, sizeof(*addr));
5700 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5701 struct mgmt_cp_add_remote_oob_data *cp = data;
5704 if (cp->addr.type != BDADDR_BREDR) {
5705 err = mgmt_cmd_complete(sk, hdev->id,
5706 MGMT_OP_ADD_REMOTE_OOB_DATA,
5707 MGMT_STATUS_INVALID_PARAMS,
5708 &cp->addr, sizeof(cp->addr));
5712 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5713 cp->addr.type, cp->hash,
5714 cp->rand, NULL, NULL);
5716 status = MGMT_STATUS_FAILED;
5718 status = MGMT_STATUS_SUCCESS;
5720 err = mgmt_cmd_complete(sk, hdev->id,
5721 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5722 &cp->addr, sizeof(cp->addr));
5723 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5724 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5725 u8 *rand192, *hash192, *rand256, *hash256;
5728 if (bdaddr_type_is_le(cp->addr.type)) {
5729 /* Enforce zero-valued 192-bit parameters as
5730 * long as legacy SMP OOB isn't implemented.
5732 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5733 memcmp(cp->hash192, ZERO_KEY, 16)) {
5734 err = mgmt_cmd_complete(sk, hdev->id,
5735 MGMT_OP_ADD_REMOTE_OOB_DATA,
5736 MGMT_STATUS_INVALID_PARAMS,
5737 addr, sizeof(*addr));
5744 /* If one of the P-192 values is set to zero,
5745 * then just disable OOB data for P-192.
5747 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5748 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5752 rand192 = cp->rand192;
5753 hash192 = cp->hash192;
5757 /* If one of the P-256 values is set to zero, then just
5758 * disable OOB data for P-256.
5760 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5761 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5765 rand256 = cp->rand256;
5766 hash256 = cp->hash256;
5769 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5770 cp->addr.type, hash192, rand192,
5773 status = MGMT_STATUS_FAILED;
5775 status = MGMT_STATUS_SUCCESS;
5777 err = mgmt_cmd_complete(sk, hdev->id,
5778 MGMT_OP_ADD_REMOTE_OOB_DATA,
5779 status, &cp->addr, sizeof(cp->addr));
5781 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5783 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5784 MGMT_STATUS_INVALID_PARAMS);
5788 hci_dev_unlock(hdev);
5792 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5793 void *data, u16 len)
5795 struct mgmt_cp_remove_remote_oob_data *cp = data;
5799 bt_dev_dbg(hdev, "sock %p", sk);
5801 if (cp->addr.type != BDADDR_BREDR)
5802 return mgmt_cmd_complete(sk, hdev->id,
5803 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5804 MGMT_STATUS_INVALID_PARAMS,
5805 &cp->addr, sizeof(cp->addr));
5809 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5810 hci_remote_oob_data_clear(hdev);
5811 status = MGMT_STATUS_SUCCESS;
5815 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5817 status = MGMT_STATUS_INVALID_PARAMS;
5819 status = MGMT_STATUS_SUCCESS;
5822 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5823 status, &cp->addr, sizeof(cp->addr));
5825 hci_dev_unlock(hdev);
5829 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5831 struct mgmt_pending_cmd *cmd;
5833 bt_dev_dbg(hdev, "status %u", status);
5837 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5839 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5842 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5845 cmd->cmd_complete(cmd, mgmt_status(status));
5846 mgmt_pending_remove(cmd);
5849 hci_dev_unlock(hdev);
5852 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5853 uint8_t *mgmt_status)
5856 case DISCOV_TYPE_LE:
5857 *mgmt_status = mgmt_le_support(hdev);
5861 case DISCOV_TYPE_INTERLEAVED:
5862 *mgmt_status = mgmt_le_support(hdev);
5866 case DISCOV_TYPE_BREDR:
5867 *mgmt_status = mgmt_bredr_support(hdev);
5872 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5879 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5881 struct mgmt_pending_cmd *cmd = data;
5883 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5884 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5885 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5888 bt_dev_dbg(hdev, "err %d", err);
5890 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5892 mgmt_pending_remove(cmd);
5894 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
5898 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5900 return hci_start_discovery_sync(hdev);
5903 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5904 u16 op, void *data, u16 len)
5906 struct mgmt_cp_start_discovery *cp = data;
5907 struct mgmt_pending_cmd *cmd;
5911 bt_dev_dbg(hdev, "sock %p", sk);
5915 if (!hdev_is_powered(hdev)) {
5916 err = mgmt_cmd_complete(sk, hdev->id, op,
5917 MGMT_STATUS_NOT_POWERED,
5918 &cp->type, sizeof(cp->type));
5922 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5923 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5924 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5925 &cp->type, sizeof(cp->type));
5929 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5930 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5931 &cp->type, sizeof(cp->type));
5935 /* Can't start discovery when it is paused */
5936 if (hdev->discovery_paused) {
5937 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5938 &cp->type, sizeof(cp->type));
5942 /* Clear the discovery filter first to free any previously
5943 * allocated memory for the UUID list.
5945 hci_discovery_filter_clear(hdev);
5947 hdev->discovery.type = cp->type;
5948 hdev->discovery.report_invalid_rssi = false;
5949 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5950 hdev->discovery.limited = true;
5952 hdev->discovery.limited = false;
5954 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5960 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5961 start_discovery_complete);
5963 mgmt_pending_remove(cmd);
5967 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5970 hci_dev_unlock(hdev);
5974 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5975 void *data, u16 len)
5977 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5981 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5982 void *data, u16 len)
5984 return start_discovery_internal(sk, hdev,
5985 MGMT_OP_START_LIMITED_DISCOVERY,
5989 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5990 void *data, u16 len)
5992 struct mgmt_cp_start_service_discovery *cp = data;
5993 struct mgmt_pending_cmd *cmd;
5994 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5995 u16 uuid_count, expected_len;
5999 bt_dev_dbg(hdev, "sock %p", sk);
6003 if (!hdev_is_powered(hdev)) {
6004 err = mgmt_cmd_complete(sk, hdev->id,
6005 MGMT_OP_START_SERVICE_DISCOVERY,
6006 MGMT_STATUS_NOT_POWERED,
6007 &cp->type, sizeof(cp->type));
6011 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6012 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6013 err = mgmt_cmd_complete(sk, hdev->id,
6014 MGMT_OP_START_SERVICE_DISCOVERY,
6015 MGMT_STATUS_BUSY, &cp->type,
6020 if (hdev->discovery_paused) {
6021 err = mgmt_cmd_complete(sk, hdev->id,
6022 MGMT_OP_START_SERVICE_DISCOVERY,
6023 MGMT_STATUS_BUSY, &cp->type,
6028 uuid_count = __le16_to_cpu(cp->uuid_count);
6029 if (uuid_count > max_uuid_count) {
6030 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6032 err = mgmt_cmd_complete(sk, hdev->id,
6033 MGMT_OP_START_SERVICE_DISCOVERY,
6034 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6039 expected_len = sizeof(*cp) + uuid_count * 16;
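/* e.g. filtering on two services: expected_len = sizeof(*cp) + 2 * 16,
 * since each entry in cp->uuids[] is a raw 128-bit (16-byte) UUID. */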
6040 if (expected_len != len) {
6041 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6043 err = mgmt_cmd_complete(sk, hdev->id,
6044 MGMT_OP_START_SERVICE_DISCOVERY,
6045 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6050 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6051 err = mgmt_cmd_complete(sk, hdev->id,
6052 MGMT_OP_START_SERVICE_DISCOVERY,
6053 status, &cp->type, sizeof(cp->type));
6057 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6064 /* Clear the discovery filter first to free any previously
6065 * allocated memory for the UUID list.
6067 hci_discovery_filter_clear(hdev);
6069 hdev->discovery.result_filtering = true;
6070 hdev->discovery.type = cp->type;
6071 hdev->discovery.rssi = cp->rssi;
6072 hdev->discovery.uuid_count = uuid_count;
6074 if (uuid_count > 0) {
6075 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6077 if (!hdev->discovery.uuids) {
6078 err = mgmt_cmd_complete(sk, hdev->id,
6079 MGMT_OP_START_SERVICE_DISCOVERY,
6081 &cp->type, sizeof(cp->type));
6082 mgmt_pending_remove(cmd);
6087 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6088 start_discovery_complete);
6090 mgmt_pending_remove(cmd);
6094 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6097 hci_dev_unlock(hdev);
6101 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6103 struct mgmt_pending_cmd *cmd;
6105 bt_dev_dbg(hdev, "status %u", status);
6109 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6111 cmd->cmd_complete(cmd, mgmt_status(status));
6112 mgmt_pending_remove(cmd);
6115 hci_dev_unlock(hdev);
6118 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6120 struct mgmt_pending_cmd *cmd = data;
6122 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6125 bt_dev_dbg(hdev, "err %d", err);
6127 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6129 mgmt_pending_remove(cmd);
6132 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6135 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6137 return hci_stop_discovery_sync(hdev);
6140 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6143 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6144 struct mgmt_pending_cmd *cmd;
6147 bt_dev_dbg(hdev, "sock %p", sk);
6151 if (!hci_discovery_active(hdev)) {
6152 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6153 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6154 sizeof(mgmt_cp->type));
6158 if (hdev->discovery.type != mgmt_cp->type) {
6159 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6160 MGMT_STATUS_INVALID_PARAMS,
6161 &mgmt_cp->type, sizeof(mgmt_cp->type));
6165 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6171 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6172 stop_discovery_complete);
6174 mgmt_pending_remove(cmd);
6178 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6181 hci_dev_unlock(hdev);
6185 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6188 struct mgmt_cp_confirm_name *cp = data;
6189 struct inquiry_entry *e;
6192 bt_dev_dbg(hdev, "sock %p", sk);
6196 if (!hci_discovery_active(hdev)) {
6197 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6198 MGMT_STATUS_FAILED, &cp->addr,
6203 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6205 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6206 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6211 if (cp->name_known) {
6212 e->name_state = NAME_KNOWN;
6215 e->name_state = NAME_NEEDED;
6216 hci_inquiry_cache_update_resolve(hdev, e);
6219 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6220 &cp->addr, sizeof(cp->addr));
6223 hci_dev_unlock(hdev);
6227 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6230 struct mgmt_cp_block_device *cp = data;
6234 bt_dev_dbg(hdev, "sock %p", sk);
6236 if (!bdaddr_type_is_valid(cp->addr.type))
6237 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6238 MGMT_STATUS_INVALID_PARAMS,
6239 &cp->addr, sizeof(cp->addr));
6243 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6246 status = MGMT_STATUS_FAILED;
6250 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6252 status = MGMT_STATUS_SUCCESS;
6255 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6256 &cp->addr, sizeof(cp->addr));
6258 hci_dev_unlock(hdev);
6263 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6266 struct mgmt_cp_unblock_device *cp = data;
6270 bt_dev_dbg(hdev, "sock %p", sk);
6272 if (!bdaddr_type_is_valid(cp->addr.type))
6273 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6274 MGMT_STATUS_INVALID_PARAMS,
6275 &cp->addr, sizeof(cp->addr));
6279 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6282 status = MGMT_STATUS_INVALID_PARAMS;
6286 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6288 status = MGMT_STATUS_SUCCESS;
6291 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6292 &cp->addr, sizeof(cp->addr));
6294 hci_dev_unlock(hdev);
6299 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6301 return hci_update_eir_sync(hdev);
6304 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6307 struct mgmt_cp_set_device_id *cp = data;
6311 bt_dev_dbg(hdev, "sock %p", sk);
6313 source = __le16_to_cpu(cp->source);
6315 if (source > 0x0002)
6316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6317 MGMT_STATUS_INVALID_PARAMS);
6321 hdev->devid_source = source;
6322 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6323 hdev->devid_product = __le16_to_cpu(cp->product);
6324 hdev->devid_version = __le16_to_cpu(cp->version);
6326 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6329 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6331 hci_dev_unlock(hdev);
6336 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6339 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6341 bt_dev_dbg(hdev, "status %d", err);
6344 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6346 struct cmd_lookup match = { NULL, hdev };
6348 struct adv_info *adv_instance;
6349 u8 status = mgmt_status(err);
6352 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6353 cmd_status_rsp, &status);
6357 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6358 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6360 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6362 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6365 new_settings(hdev, match.sk);
6370 /* If "Set Advertising" was just disabled and instance advertising was
6371 * set up earlier, then re-enable multi-instance advertising.
6373 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6374 list_empty(&hdev->adv_instances))
6377 instance = hdev->cur_adv_instance;
6379 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6380 struct adv_info, list);
6384 instance = adv_instance->instance;
6387 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6389 enable_advertising_instance(hdev, err);
6392 static int set_adv_sync(struct hci_dev *hdev, void *data)
6394 struct mgmt_pending_cmd *cmd = data;
6395 struct mgmt_mode *cp = cmd->param;
6398 if (cp->val == 0x02)
6399 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6401 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6403 cancel_adv_timeout(hdev);
6406 /* Switch to instance "0" for the Set Advertising setting.
6407 * We cannot use update_[adv|scan_rsp]_data() here as the
6408 * HCI_ADVERTISING flag is not yet set.
6410 hdev->cur_adv_instance = 0x00;
6412 if (ext_adv_capable(hdev)) {
6413 hci_start_ext_adv_sync(hdev, 0x00);
6415 hci_update_adv_data_sync(hdev, 0x00);
6416 hci_update_scan_rsp_data_sync(hdev, 0x00);
6417 hci_enable_advertising_sync(hdev);
6420 hci_disable_advertising_sync(hdev);
6426 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6429 struct mgmt_mode *cp = data;
6430 struct mgmt_pending_cmd *cmd;
6434 bt_dev_dbg(hdev, "sock %p", sk);
6436 status = mgmt_le_support(hdev);
6438 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6441 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6443 MGMT_STATUS_INVALID_PARAMS);
6445 if (hdev->advertising_paused)
6446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6453 /* The following conditions mean that we should not do any HCI
6454 * communication but directly send a mgmt response to user space
6455 * (after toggling the flag if needed). */
6458 if (!hdev_is_powered(hdev) ||
6459 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6460 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6461 hci_dev_test_flag(hdev, HCI_MESH) ||
6462 hci_conn_num(hdev, LE_LINK) > 0 ||
6463 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6464 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6468 hdev->cur_adv_instance = 0x00;
6469 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6470 if (cp->val == 0x02)
6471 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6473 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6475 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6476 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6479 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6484 err = new_settings(hdev, sk);
6489 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6490 pending_find(MGMT_OP_SET_LE, hdev)) {
6491 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6496 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6500 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6501 set_advertising_complete);
6504 mgmt_pending_remove(cmd);
6507 hci_dev_unlock(hdev);
6511 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6512 void *data, u16 len)
6514 struct mgmt_cp_set_static_address *cp = data;
6517 bt_dev_dbg(hdev, "sock %p", sk);
6519 if (!lmp_le_capable(hdev))
6520 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6521 MGMT_STATUS_NOT_SUPPORTED);
6523 if (hdev_is_powered(hdev))
6524 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6525 MGMT_STATUS_REJECTED);
6527 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6528 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6529 return mgmt_cmd_status(sk, hdev->id,
6530 MGMT_OP_SET_STATIC_ADDRESS,
6531 MGMT_STATUS_INVALID_PARAMS);
6533 /* Two most significant bits shall be set */
6534 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6535 return mgmt_cmd_status(sk, hdev->id,
6536 MGMT_OP_SET_STATIC_ADDRESS,
6537 MGMT_STATUS_INVALID_PARAMS);
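/* Worked example: a static random address needs its two most significant
 * bits set (b[5] & 0xc0 == 0xc0), so e.g. C2:34:56:78:9A:BC is accepted
 * while 42:34:56:78:9A:BC is rejected. */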
6542 bacpy(&hdev->static_addr, &cp->bdaddr);
6544 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6548 err = new_settings(hdev, sk);
6551 hci_dev_unlock(hdev);
6555 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6556 void *data, u16 len)
6558 struct mgmt_cp_set_scan_params *cp = data;
6559 __u16 interval, window;
6562 bt_dev_dbg(hdev, "sock %p", sk);
6564 if (!lmp_le_capable(hdev))
6565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6566 MGMT_STATUS_NOT_SUPPORTED);
6568 interval = __le16_to_cpu(cp->interval);
6570 if (interval < 0x0004 || interval > 0x4000)
6571 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6572 MGMT_STATUS_INVALID_PARAMS);
6574 window = __le16_to_cpu(cp->window);
6576 if (window < 0x0004 || window > 0x4000)
6577 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6578 MGMT_STATUS_INVALID_PARAMS);
6580 if (window > interval)
6581 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6582 MGMT_STATUS_INVALID_PARAMS);
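/* The interval and window are in 0.625 ms units per the Core spec, so the
 * accepted range 0x0004-0x4000 maps to 2.5 ms - 10.24 s, and the window
 * (time actually spent scanning) may never exceed the interval (the
 * period at which scanning recurs). */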
6586 hdev->le_scan_interval = interval;
6587 hdev->le_scan_window = window;
6589 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6592 /* If background scan is running, restart it so the new parameters are loaded. */
6595 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6596 hdev->discovery.state == DISCOVERY_STOPPED)
6597 hci_update_passive_scan(hdev);
6599 hci_dev_unlock(hdev);
6604 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6606 struct mgmt_pending_cmd *cmd = data;
6608 bt_dev_dbg(hdev, "err %d", err);
6611 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6614 struct mgmt_mode *cp = cmd->param;
6617 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6619 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6621 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6622 new_settings(hdev, cmd->sk);
6625 mgmt_pending_free(cmd);
6628 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6630 struct mgmt_pending_cmd *cmd = data;
6631 struct mgmt_mode *cp = cmd->param;
6633 return hci_write_fast_connectable_sync(hdev, cp->val);
6636 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6637 void *data, u16 len)
6639 struct mgmt_mode *cp = data;
6640 struct mgmt_pending_cmd *cmd;
6643 bt_dev_dbg(hdev, "sock %p", sk);
6645 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6646 hdev->hci_ver < BLUETOOTH_VER_1_2)
6647 return mgmt_cmd_status(sk, hdev->id,
6648 MGMT_OP_SET_FAST_CONNECTABLE,
6649 MGMT_STATUS_NOT_SUPPORTED);
6651 if (cp->val != 0x00 && cp->val != 0x01)
6652 return mgmt_cmd_status(sk, hdev->id,
6653 MGMT_OP_SET_FAST_CONNECTABLE,
6654 MGMT_STATUS_INVALID_PARAMS);
6658 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6659 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6663 if (!hdev_is_powered(hdev)) {
6664 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6665 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6666 new_settings(hdev, sk);
6670 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6675 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6676 fast_connectable_complete);
6679 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6680 MGMT_STATUS_FAILED);
6683 mgmt_pending_free(cmd);
6687 hci_dev_unlock(hdev);
6692 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6694 struct mgmt_pending_cmd *cmd = data;
6696 bt_dev_dbg(hdev, "err %d", err);
6699 u8 mgmt_err = mgmt_status(err);
6701 /* We need to restore the flag if the related HCI commands failed. */
6704 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6706 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6708 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6709 new_settings(hdev, cmd->sk);
6712 mgmt_pending_free(cmd);
6715 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6719 status = hci_write_fast_connectable_sync(hdev, false);
6722 status = hci_update_scan_sync(hdev);
6724 /* Since only the advertising data flags will change, there
6725 * is no need to update the scan response data.
6728 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6733 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6735 struct mgmt_mode *cp = data;
6736 struct mgmt_pending_cmd *cmd;
6739 bt_dev_dbg(hdev, "sock %p", sk);
6741 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6742 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6743 MGMT_STATUS_NOT_SUPPORTED);
6745 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6746 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6747 MGMT_STATUS_REJECTED);
6749 if (cp->val != 0x00 && cp->val != 0x01)
6750 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6751 MGMT_STATUS_INVALID_PARAMS);
6755 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6756 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6760 if (!hdev_is_powered(hdev)) {
6762 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6763 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6764 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6765 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6766 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6769 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6771 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6775 err = new_settings(hdev, sk);
6779 /* Reject disabling when powered on */
6781 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6782 MGMT_STATUS_REJECTED);
6785 /* When configuring a dual-mode controller to operate
6786 * with LE only and using a static address, then switching
6787 * BR/EDR back on is not allowed.
6789 * Dual-mode controllers shall operate with the public
6790 * address as their identity address for BR/EDR and LE. So
6791 * reject the attempt to create an invalid configuration.
6793 * The same restrictions apply when secure connections
6794 * have been enabled. For BR/EDR this is a controller feature
6795 * while for LE it is a host stack feature. This means that
6796 * switching BR/EDR back on when secure connections have been
6797 * enabled is not a supported transaction.
6799 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6800 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6801 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6802 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6803 MGMT_STATUS_REJECTED);
6808 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6812 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6813 set_bredr_complete);
6816 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6817 MGMT_STATUS_FAILED);
6819 mgmt_pending_free(cmd);
6824 /* We need to flip the bit already here so that
6825 * hci_req_update_adv_data generates the correct flags.
6827 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6830 hci_dev_unlock(hdev);
6834 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6836 struct mgmt_pending_cmd *cmd = data;
6837 struct mgmt_mode *cp;
6839 bt_dev_dbg(hdev, "err %d", err);
6842 u8 mgmt_err = mgmt_status(err);
6844 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6852 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6853 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6856 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6857 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6860 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6861 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6865 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6866 new_settings(hdev, cmd->sk);
6869 mgmt_pending_free(cmd);
6872 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6874 struct mgmt_pending_cmd *cmd = data;
6875 struct mgmt_mode *cp = cmd->param;
6878 /* Force write of val */
6879 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6881 return hci_write_sc_support_sync(hdev, val);
6884 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6885 void *data, u16 len)
6887 struct mgmt_mode *cp = data;
6888 struct mgmt_pending_cmd *cmd;
6892 bt_dev_dbg(hdev, "sock %p", sk);
6894 if (!lmp_sc_capable(hdev) &&
6895 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6896 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6897 MGMT_STATUS_NOT_SUPPORTED);
6899 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6900 lmp_sc_capable(hdev) &&
6901 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6902 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6903 MGMT_STATUS_REJECTED);
6905 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6906 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6907 MGMT_STATUS_INVALID_PARAMS);
6911 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6912 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6916 changed = !hci_dev_test_and_set_flag(hdev,
6918 if (cp->val == 0x02)
6919 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6921 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6923 changed = hci_dev_test_and_clear_flag(hdev,
6925 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6928 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6933 err = new_settings(hdev, sk);
6940 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6941 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6942 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6946 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6950 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6951 set_secure_conn_complete);
6954 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6955 MGMT_STATUS_FAILED);
6957 mgmt_pending_free(cmd);
6961 hci_dev_unlock(hdev);
6965 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6966 void *data, u16 len)
6968 struct mgmt_mode *cp = data;
6969 bool changed, use_changed;
6972 bt_dev_dbg(hdev, "sock %p", sk);
6974 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6975 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6976 MGMT_STATUS_INVALID_PARAMS);
6981 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6983 changed = hci_dev_test_and_clear_flag(hdev,
6984 HCI_KEEP_DEBUG_KEYS);
6986 if (cp->val == 0x02)
6987 use_changed = !hci_dev_test_and_set_flag(hdev,
6988 HCI_USE_DEBUG_KEYS);
6990 use_changed = hci_dev_test_and_clear_flag(hdev,
6991 HCI_USE_DEBUG_KEYS);
6993 if (hdev_is_powered(hdev) && use_changed &&
6994 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6995 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6996 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6997 sizeof(mode), &mode);
7000 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7005 err = new_settings(hdev, sk);
7008 hci_dev_unlock(hdev);
7012 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7015 struct mgmt_cp_set_privacy *cp = cp_data;
7019 bt_dev_dbg(hdev, "sock %p", sk);
7021 if (!lmp_le_capable(hdev))
7022 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7023 MGMT_STATUS_NOT_SUPPORTED);
7025 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7026 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7027 MGMT_STATUS_INVALID_PARAMS);
7029 if (hdev_is_powered(hdev))
7030 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7031 MGMT_STATUS_REJECTED);
7035 /* If user space supports this command it is also expected to
7036 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7038 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7041 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7042 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7043 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7044 hci_adv_instances_set_rpa_expired(hdev, true);
7045 if (cp->privacy == 0x02)
7046 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7048 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7050 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7051 memset(hdev->irk, 0, sizeof(hdev->irk));
7052 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7053 hci_adv_instances_set_rpa_expired(hdev, false);
7054 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7057 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7062 err = new_settings(hdev, sk);
7065 hci_dev_unlock(hdev);
7069 static bool irk_is_valid(struct mgmt_irk_info *irk)
7071 switch (irk->addr.type) {
7072 case BDADDR_LE_PUBLIC:
7075 case BDADDR_LE_RANDOM:
7076 /* Two most significant bits shall be set */
7077 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7085 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7088 struct mgmt_cp_load_irks *cp = cp_data;
7089 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7090 sizeof(struct mgmt_irk_info));
7091 u16 irk_count, expected_len;
7094 bt_dev_dbg(hdev, "sock %p", sk);
7096 if (!lmp_le_capable(hdev))
7097 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7098 MGMT_STATUS_NOT_SUPPORTED);
7100 irk_count = __le16_to_cpu(cp->irk_count);
7101 if (irk_count > max_irk_count) {
7102 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7104 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7105 MGMT_STATUS_INVALID_PARAMS);
7108 expected_len = struct_size(cp, irks, irk_count);
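/* struct_size(cp, irks, irk_count) evaluates to sizeof(*cp) +
 * irk_count * sizeof(struct mgmt_irk_info), with overflow checking, so
 * e.g. a load of two IRKs must arrive as exactly that many bytes. */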
7109 if (expected_len != len) {
7110 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7112 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7113 MGMT_STATUS_INVALID_PARAMS);
7116 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7118 for (i = 0; i < irk_count; i++) {
7119 struct mgmt_irk_info *key = &cp->irks[i];
7121 if (!irk_is_valid(key))
7122 return mgmt_cmd_status(sk, hdev->id,
7124 MGMT_STATUS_INVALID_PARAMS);
7129 hci_smp_irks_clear(hdev);
7131 for (i = 0; i < irk_count; i++) {
7132 struct mgmt_irk_info *irk = &cp->irks[i];
7134 if (hci_is_blocked_key(hdev,
7135 HCI_BLOCKED_KEY_TYPE_IRK,
7137 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7142 hci_add_irk(hdev, &irk->addr.bdaddr,
7143 le_addr_type(irk->addr.type), irk->val,
7147 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7149 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7151 hci_dev_unlock(hdev);
7156 static bool ltk_is_valid(struct mgmt_ltk_info *key)
7158 if (key->initiator != 0x00 && key->initiator != 0x01)
7161 switch (key->addr.type) {
7162 case BDADDR_LE_PUBLIC:
7165 case BDADDR_LE_RANDOM:
7166 /* Two most significant bits shall be set */
7167 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7175 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
7176 void *cp_data, u16 len)
7178 struct mgmt_cp_load_long_term_keys *cp = cp_data;
7179 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
7180 sizeof(struct mgmt_ltk_info));
7181 u16 key_count, expected_len;
7184 bt_dev_dbg(hdev, "sock %p", sk);
7186 if (!lmp_le_capable(hdev))
7187 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7188 MGMT_STATUS_NOT_SUPPORTED);
7190 key_count = __le16_to_cpu(cp->key_count);
7191 if (key_count > max_key_count) {
7192 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
7194 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7195 MGMT_STATUS_INVALID_PARAMS);
7198 expected_len = struct_size(cp, keys, key_count);
7199 if (expected_len != len) {
7200 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
7202 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
7203 MGMT_STATUS_INVALID_PARAMS);
7206 bt_dev_dbg(hdev, "key_count %u", key_count);
7208 for (i = 0; i < key_count; i++) {
7209 struct mgmt_ltk_info *key = &cp->keys[i];
7211 if (!ltk_is_valid(key))
7212 return mgmt_cmd_status(sk, hdev->id,
7213 MGMT_OP_LOAD_LONG_TERM_KEYS,
7214 MGMT_STATUS_INVALID_PARAMS);
7219 hci_smp_ltks_clear(hdev);
7221 for (i = 0; i < key_count; i++) {
7222 struct mgmt_ltk_info *key = &cp->keys[i];
7223 u8 type, authenticated;
7225 if (hci_is_blocked_key(hdev,
7226 HCI_BLOCKED_KEY_TYPE_LTK,
7228 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
7233 switch (key->type) {
7234 case MGMT_LTK_UNAUTHENTICATED:
7235 authenticated = 0x00;
7236 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7238 case MGMT_LTK_AUTHENTICATED:
7239 authenticated = 0x01;
7240 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
7242 case MGMT_LTK_P256_UNAUTH:
7243 authenticated = 0x00;
7244 type = SMP_LTK_P256;
7246 case MGMT_LTK_P256_AUTH:
7247 authenticated = 0x01;
7248 type = SMP_LTK_P256;
7250 case MGMT_LTK_P256_DEBUG:
7251 authenticated = 0x00;
7252 type = SMP_LTK_P256_DEBUG;
7258 hci_add_ltk(hdev, &key->addr.bdaddr,
7259 le_addr_type(key->addr.type), type, authenticated,
7260 key->val, key->enc_size, key->ediv, key->rand);
7263 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
7266 hci_dev_unlock(hdev);
7271 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
7273 struct mgmt_pending_cmd *cmd = data;
7274 struct hci_conn *conn = cmd->user_data;
7275 struct mgmt_cp_get_conn_info *cp = cmd->param;
7276 struct mgmt_rp_get_conn_info rp;
7279 bt_dev_dbg(hdev, "err %d", err);
7281 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
7283 status = mgmt_status(err);
7284 if (status == MGMT_STATUS_SUCCESS) {
7285 rp.rssi = conn->rssi;
7286 rp.tx_power = conn->tx_power;
7287 rp.max_tx_power = conn->max_tx_power;
7289 rp.rssi = HCI_RSSI_INVALID;
7290 rp.tx_power = HCI_TX_POWER_INVALID;
7291 rp.max_tx_power = HCI_TX_POWER_INVALID;
7294 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
7297 mgmt_pending_free(cmd);
7300 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
7302 struct mgmt_pending_cmd *cmd = data;
7303 struct mgmt_cp_get_conn_info *cp = cmd->param;
7304 struct hci_conn *conn;
7308 /* Make sure we are still connected */
7309 if (cp->addr.type == BDADDR_BREDR)
7310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
7313 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
7315 if (!conn || conn->state != BT_CONNECTED)
7316 return MGMT_STATUS_NOT_CONNECTED;
7318 cmd->user_data = conn;
7319 handle = cpu_to_le16(conn->handle);
7321 /* Refresh RSSI each time */
7322 err = hci_read_rssi_sync(hdev, handle);
7324 /* For LE links, TX power does not change, thus we don't need to
7325 * query for it once the value is known.
7327 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
7328 conn->tx_power == HCI_TX_POWER_INVALID))
7329 err = hci_read_tx_power_sync(hdev, handle, 0x00);
7331 /* Max TX power needs to be read only once per connection */
7332 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
7333 err = hci_read_tx_power_sync(hdev, handle, 0x01);
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
             u16 len)
{
    struct mgmt_cp_get_conn_info *cp = data;
    struct mgmt_rp_get_conn_info rp;
    struct hci_conn *conn;
    unsigned long conn_info_age;
    int err = 0;

    bt_dev_dbg(hdev, "sock %p", sk);

    memset(&rp, 0, sizeof(rp));
    bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
    rp.addr.type = cp->addr.type;

    if (!bdaddr_type_is_valid(cp->addr.type))
        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
                     MGMT_STATUS_INVALID_PARAMS,
                     &rp, sizeof(rp));

    hci_dev_lock(hdev);

    if (!hdev_is_powered(hdev)) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
                    MGMT_STATUS_NOT_POWERED, &rp,
                    sizeof(rp));
        goto unlock;
    }

    if (cp->addr.type == BDADDR_BREDR)
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
                           &cp->addr.bdaddr);
    else
        conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

    if (!conn || conn->state != BT_CONNECTED) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
                    MGMT_STATUS_NOT_CONNECTED, &rp,
                    sizeof(rp));
        goto unlock;
    }

    /* To avoid client trying to guess when to poll again for information we
     * calculate conn info age as random value between min/max set in hdev.
     */
    conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
                         hdev->conn_info_max_age - 1);

    /* Query controller to refresh cached values if they are too old or were
     * never read.
     */
    if (time_after(jiffies, conn->conn_info_timestamp +
               msecs_to_jiffies(conn_info_age)) ||
        !conn->conn_info_timestamp) {
        struct mgmt_pending_cmd *cmd;

        cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
                       len);
        if (!cmd) {
            err = -ENOMEM;
        } else {
            err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
                         cmd, get_conn_info_complete);
        }

        if (err < 0) {
            mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
                      MGMT_STATUS_FAILED, &rp, sizeof(rp));

            if (cmd)
                mgmt_pending_free(cmd);

            goto unlock;
        }

        conn->conn_info_timestamp = jiffies;
    } else {
        /* Cache is valid, just reply with values cached in hci_conn */
        rp.rssi = conn->rssi;
        rp.tx_power = conn->tx_power;
        rp.max_tx_power = conn->max_tx_power;

        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
                    MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
    }

unlock:
    hci_dev_unlock(hdev);
    return err;
}
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_get_clock_info *cp = cmd->param;
    struct mgmt_rp_get_clock_info rp;
    struct hci_conn *conn = cmd->user_data;
    u8 status = mgmt_status(err);

    bt_dev_dbg(hdev, "err %d", err);

    memset(&rp, 0, sizeof(rp));
    bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
    rp.addr.type = cp->addr.type;

    if (err)
        goto complete;

    rp.local_clock = cpu_to_le32(hdev->clock);

    if (conn) {
        rp.piconet_clock = cpu_to_le32(conn->clock);
        rp.accuracy = cpu_to_le16(conn->clock_accuracy);
    }

complete:
    mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
              sizeof(rp));

    mgmt_pending_free(cmd);
}
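/* HCI_Read_Clock takes a "which" selector: 0x00 reads the local
 * Bluetooth clock (the handle is ignored), 0x01 reads the piconet
 * clock of the connection identified by the handle. Both reads are
 * issued below so the reply can carry local and piconet clocks
 * together.
 */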
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_get_clock_info *cp = cmd->param;
    struct hci_cp_read_clock hci_cp;
    struct hci_conn *conn;

    memset(&hci_cp, 0, sizeof(hci_cp));
    hci_read_clock_sync(hdev, &hci_cp);

    /* Make sure connection still exists */
    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
    if (!conn || conn->state != BT_CONNECTED)
        return MGMT_STATUS_NOT_CONNECTED;

    cmd->user_data = conn;
    hci_cp.handle = cpu_to_le16(conn->handle);
    hci_cp.which = 0x01; /* Piconet clock */

    return hci_read_clock_sync(hdev, &hci_cp);
}
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
              u16 len)
{
    struct mgmt_cp_get_clock_info *cp = data;
    struct mgmt_rp_get_clock_info rp;
    struct mgmt_pending_cmd *cmd;
    struct hci_conn *conn;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    memset(&rp, 0, sizeof(rp));
    bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
    rp.addr.type = cp->addr.type;

    if (cp->addr.type != BDADDR_BREDR)
        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
                     MGMT_STATUS_INVALID_PARAMS,
                     &rp, sizeof(rp));

    hci_dev_lock(hdev);

    if (!hdev_is_powered(hdev)) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
                    MGMT_STATUS_NOT_POWERED, &rp,
                    sizeof(rp));
        goto unlock;
    }

    if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
                           &cp->addr.bdaddr);
        if (!conn || conn->state != BT_CONNECTED) {
            err = mgmt_cmd_complete(sk, hdev->id,
                        MGMT_OP_GET_CLOCK_INFO,
                        MGMT_STATUS_NOT_CONNECTED,
                        &rp, sizeof(rp));
            goto unlock;
        }
    } else {
        conn = NULL;
    }

    cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
    if (!cmd)
        err = -ENOMEM;
    else
        err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
                     get_clock_info_complete);

    if (err < 0) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
                    MGMT_STATUS_FAILED, &rp, sizeof(rp));

        if (cmd)
            mgmt_pending_free(cmd);
    }

unlock:
    hci_dev_unlock(hdev);
    return err;
}
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
    struct hci_conn *conn;

    conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
    if (!conn)
        return false;

    if (conn->dst_type != type)
        return false;

    if (conn->state != BT_CONNECTED)
        return false;

    return true;
}
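/* auto_connect policy summary (see the HCI_AUTO_CONN_* values in
 * hci_core.h): DISABLED/LINK_LOSS entries are tracked but never
 * trigger passive scanning, REPORT puts the device on the
 * pend_le_reports list so advertising reports are forwarded to
 * userspace, and DIRECT/ALWAYS put it on pend_le_conns so a
 * connection attempt is made as soon as it is seen advertising.
 */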
/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
                   u8 addr_type, u8 auto_connect)
{
    struct hci_conn_params *params;

    params = hci_conn_params_add(hdev, addr, addr_type);
    if (!params)
        return -EIO;

    if (params->auto_connect == auto_connect)
        return 0;

    hci_pend_le_list_del_init(params);

    switch (auto_connect) {
    case HCI_AUTO_CONN_DISABLED:
    case HCI_AUTO_CONN_LINK_LOSS:
        /* If auto connect is being disabled when we're trying to
         * connect to device, keep connecting.
         */
        if (params->explicit_connect)
            hci_pend_le_list_add(params, &hdev->pend_le_conns);
        break;
    case HCI_AUTO_CONN_REPORT:
        if (params->explicit_connect)
            hci_pend_le_list_add(params, &hdev->pend_le_conns);
        else
            hci_pend_le_list_add(params, &hdev->pend_le_reports);
        break;
    case HCI_AUTO_CONN_DIRECT:
    case HCI_AUTO_CONN_ALWAYS:
        if (!is_connected(hdev, addr, addr_type))
            hci_pend_le_list_add(params, &hdev->pend_le_conns);
        break;
    }

    params->auto_connect = auto_connect;

    bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
           addr, addr_type, auto_connect);

    return 0;
}
static void device_added(struct sock *sk, struct hci_dev *hdev,
             bdaddr_t *bdaddr, u8 type, u8 action)
{
    struct mgmt_ev_device_added ev;

    bacpy(&ev.addr.bdaddr, bdaddr);
    ev.addr.type = type;
    ev.action = action;

    mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static int add_device_sync(struct hci_dev *hdev, void *data)
{
    return hci_update_passive_scan_sync(hdev);
}
static int add_device(struct sock *sk, struct hci_dev *hdev,
              void *data, u16 len)
{
    struct mgmt_cp_add_device *cp = data;
    u8 auto_conn, addr_type;
    struct hci_conn_params *params;
    int err;
    u32 current_flags = 0;
    u32 supported_flags;

    bt_dev_dbg(hdev, "sock %p", sk);

    if (!bdaddr_type_is_valid(cp->addr.type) ||
        !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
                     MGMT_STATUS_INVALID_PARAMS,
                     &cp->addr, sizeof(cp->addr));

    if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
        return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
                     MGMT_STATUS_INVALID_PARAMS,
                     &cp->addr, sizeof(cp->addr));
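    /* Accepted action values (validated above): 0x00 = scan in the
     * background and report the device, 0x01 = allow incoming
     * connections (BR/EDR accept list) or directly connect when seen
     * (LE), 0x02 = always auto-connect (LE only).
     */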
    hci_dev_lock(hdev);

    if (cp->addr.type == BDADDR_BREDR) {
        /* Only incoming connections action is supported for now */
        if (cp->action != 0x01) {
            err = mgmt_cmd_complete(sk, hdev->id,
                        MGMT_OP_ADD_DEVICE,
                        MGMT_STATUS_INVALID_PARAMS,
                        &cp->addr, sizeof(cp->addr));
            goto unlock;
        }

        err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
                             &cp->addr.bdaddr,
                             cp->addr.type, 0);
        if (err)
            goto unlock;

        hci_update_scan(hdev);

        goto added;
    }

    addr_type = le_addr_type(cp->addr.type);

    if (cp->action == 0x02)
        auto_conn = HCI_AUTO_CONN_ALWAYS;
    else if (cp->action == 0x01)
        auto_conn = HCI_AUTO_CONN_DIRECT;
    else
        auto_conn = HCI_AUTO_CONN_REPORT;

    /* Kernel internally uses conn_params with resolvable private
     * address, but Add Device allows only identity addresses.
     * Make sure it is enforced before calling
     * hci_conn_params_lookup.
     */
    if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
                    MGMT_STATUS_INVALID_PARAMS,
                    &cp->addr, sizeof(cp->addr));
        goto unlock;
    }

    /* If the connection parameters don't exist for this device,
     * they will be created and configured with defaults.
     */
    if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
                auto_conn) < 0) {
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
                    MGMT_STATUS_FAILED, &cp->addr,
                    sizeof(cp->addr));
        goto unlock;
    }

    params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
                    addr_type);
    if (params)
        current_flags = params->flags;

    err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
    if (err < 0)
        goto unlock;

added:
    device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
    supported_flags = hdev->conn_flags;
    device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
                 supported_flags, current_flags);

    err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
                MGMT_STATUS_SUCCESS, &cp->addr,
                sizeof(cp->addr));

unlock:
    hci_dev_unlock(hdev);
    return err;
}
static void device_removed(struct sock *sk, struct hci_dev *hdev,
               bdaddr_t *bdaddr, u8 type)
{
    struct mgmt_ev_device_removed ev;

    bacpy(&ev.addr.bdaddr, bdaddr);
    ev.addr.type = type;

    mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
    return hci_update_passive_scan_sync(hdev);
}
static int remove_device(struct sock *sk, struct hci_dev *hdev,
             void *data, u16 len)
{
    struct mgmt_cp_remove_device *cp = data;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    hci_dev_lock(hdev);

    if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
        struct hci_conn_params *params;
        u8 addr_type;

        if (!bdaddr_type_is_valid(cp->addr.type)) {
            err = mgmt_cmd_complete(sk, hdev->id,
                        MGMT_OP_REMOVE_DEVICE,
                        MGMT_STATUS_INVALID_PARAMS,
                        &cp->addr, sizeof(cp->addr));
            goto unlock;
        }

        if (cp->addr.type == BDADDR_BREDR) {
            err = hci_bdaddr_list_del(&hdev->accept_list,
                          &cp->addr.bdaddr,
                          cp->addr.type);
            if (err) {
                err = mgmt_cmd_complete(sk, hdev->id,
                            MGMT_OP_REMOVE_DEVICE,
                            MGMT_STATUS_INVALID_PARAMS,
                            &cp->addr,
                            sizeof(cp->addr));
                goto unlock;
            }

            hci_update_scan(hdev);

            device_removed(sk, hdev, &cp->addr.bdaddr,
                       cp->addr.type);
            goto complete;
        }

        addr_type = le_addr_type(cp->addr.type);

        /* Kernel internally uses conn_params with resolvable private
         * address, but Remove Device allows only identity addresses.
         * Make sure it is enforced before calling
         * hci_conn_params_lookup.
         */
        if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
            err = mgmt_cmd_complete(sk, hdev->id,
                        MGMT_OP_REMOVE_DEVICE,
                        MGMT_STATUS_INVALID_PARAMS,
                        &cp->addr, sizeof(cp->addr));
            goto unlock;
        }

        params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
                        addr_type);
        if (!params) {
            err = mgmt_cmd_complete(sk, hdev->id,
                        MGMT_OP_REMOVE_DEVICE,
                        MGMT_STATUS_INVALID_PARAMS,
                        &cp->addr, sizeof(cp->addr));
            goto unlock;
        }

        if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
            params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
            err = mgmt_cmd_complete(sk, hdev->id,
                        MGMT_OP_REMOVE_DEVICE,
                        MGMT_STATUS_INVALID_PARAMS,
                        &cp->addr, sizeof(cp->addr));
            goto unlock;
        }

        hci_conn_params_free(params);

        device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
    } else {
        struct hci_conn_params *p, *tmp;
        struct bdaddr_list *b, *btmp;

        if (cp->addr.type) {
            err = mgmt_cmd_complete(sk, hdev->id,
                        MGMT_OP_REMOVE_DEVICE,
                        MGMT_STATUS_INVALID_PARAMS,
                        &cp->addr, sizeof(cp->addr));
            goto unlock;
        }

        list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
            device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
            list_del(&b->list);
            kfree(b);
        }

        hci_update_scan(hdev);

        list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
            if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
                continue;
            device_removed(sk, hdev, &p->addr, p->addr_type);
            if (p->explicit_connect) {
                p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
                continue;
            }
            hci_conn_params_free(p);
        }

        bt_dev_dbg(hdev, "All LE connection parameters were removed");
    }

    hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
    err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
                MGMT_STATUS_SUCCESS, &cp->addr,
                sizeof(cp->addr));
unlock:
    hci_dev_unlock(hdev);
    return err;
}
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
               u16 len)
{
    struct mgmt_cp_load_conn_param *cp = data;
    const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
                     sizeof(struct mgmt_conn_param));
    u16 param_count, expected_len;
    int i;

    if (!lmp_le_capable(hdev))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
                       MGMT_STATUS_NOT_SUPPORTED);

    param_count = __le16_to_cpu(cp->param_count);
    if (param_count > max_param_count) {
        bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
               param_count);
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
                       MGMT_STATUS_INVALID_PARAMS);
    }

    expected_len = struct_size(cp, params, param_count);
    if (expected_len != len) {
        bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
               expected_len, len);
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
                       MGMT_STATUS_INVALID_PARAMS);
    }

    bt_dev_dbg(hdev, "param_count %u", param_count);

    hci_dev_lock(hdev);

    hci_conn_params_clear_disabled(hdev);

    for (i = 0; i < param_count; i++) {
        struct mgmt_conn_param *param = &cp->params[i];
        struct hci_conn_params *hci_param;
        u16 min, max, latency, timeout;
        u8 addr_type;

        bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
               param->addr.type);

        if (param->addr.type == BDADDR_LE_PUBLIC) {
            addr_type = ADDR_LE_DEV_PUBLIC;
        } else if (param->addr.type == BDADDR_LE_RANDOM) {
            addr_type = ADDR_LE_DEV_RANDOM;
        } else {
            bt_dev_err(hdev, "ignoring invalid connection parameters");
            continue;
        }

        min = le16_to_cpu(param->min_interval);
        max = le16_to_cpu(param->max_interval);
        latency = le16_to_cpu(param->latency);
        timeout = le16_to_cpu(param->timeout);

        bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
               min, max, latency, timeout);
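        /* hci_check_conn_params() rejects values outside the ranges
         * allowed by the Core specification (interval ordering and
         * bounds, maximum peripheral latency) as well as supervision
         * timeouts too short to cover the negotiated latency.
         */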
        if (hci_check_conn_params(min, max, latency, timeout) < 0) {
            bt_dev_err(hdev, "ignoring invalid connection parameters");
            continue;
        }

        hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
                        addr_type);
        if (!hci_param) {
            bt_dev_err(hdev, "failed to add connection parameters");
            continue;
        }

        hci_param->conn_min_interval = min;
        hci_param->conn_max_interval = max;
        hci_param->conn_latency = latency;
        hci_param->supervision_timeout = timeout;
    }

    hci_dev_unlock(hdev);

    return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
                 NULL, 0);
}
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
                   void *data, u16 len)
{
    struct mgmt_cp_set_external_config *cp = data;
    bool changed;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    if (hdev_is_powered(hdev))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
                       MGMT_STATUS_REJECTED);

    if (cp->config != 0x00 && cp->config != 0x01)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
                       MGMT_STATUS_INVALID_PARAMS);

    if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
                       MGMT_STATUS_NOT_SUPPORTED);

    hci_dev_lock(hdev);

    if (cp->config)
        changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
    else
        changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);

    err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
    if (err < 0)
        goto unlock;

    if (!changed)
        goto unlock;

    err = new_options(hdev, sk);

    if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
        mgmt_index_removed(hdev);

        if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
            hci_dev_set_flag(hdev, HCI_CONFIG);
            hci_dev_set_flag(hdev, HCI_AUTO_OFF);

            queue_work(hdev->req_workqueue, &hdev->power_on);
        } else {
            set_bit(HCI_RAW, &hdev->flags);
            mgmt_index_added(hdev);
        }
    }

unlock:
    hci_dev_unlock(hdev);
    return err;
}
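/* Set Public Address is only accepted while the controller is powered
 * off and requires a driver-provided set_bdaddr hook; it is primarily
 * intended for controllers that come up without a valid public
 * address.
 */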
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
                  void *data, u16 len)
{
    struct mgmt_cp_set_public_address *cp = data;
    bool changed;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    if (hdev_is_powered(hdev))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
                       MGMT_STATUS_REJECTED);

    if (!bacmp(&cp->bdaddr, BDADDR_ANY))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
                       MGMT_STATUS_INVALID_PARAMS);

    if (!hdev->set_bdaddr)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
                       MGMT_STATUS_NOT_SUPPORTED);

    hci_dev_lock(hdev);

    changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
    bacpy(&hdev->public_addr, &cp->bdaddr);

    err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
    if (err < 0)
        goto unlock;

    if (!changed)
        goto unlock;

    if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
        err = new_options(hdev, sk);

    if (is_configured(hdev)) {
        mgmt_index_removed(hdev);

        hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

        hci_dev_set_flag(hdev, HCI_CONFIG);
        hci_dev_set_flag(hdev, HCI_AUTO_OFF);

        queue_work(hdev->req_workqueue, &hdev->power_on);
    }

unlock:
    hci_dev_unlock(hdev);
    return err;
}
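/* The extended OOB reply is packed as EIR data: Class of Device is a
 * 3-byte field (5 bytes with its length/type header) and each SSP
 * hash/randomizer is 16 bytes (18 with header), which is where the
 * "5 + 18 + 18" style length sums below come from.
 */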
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
                         int err)
{
    const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
    struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
    u8 *h192, *r192, *h256, *r256;
    struct mgmt_pending_cmd *cmd = data;
    struct sk_buff *skb = cmd->skb;
    u8 status = mgmt_status(err);
    u16 eir_len;

    if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
        return;

    if (!status) {
        if (!skb)
            status = MGMT_STATUS_FAILED;
        else if (IS_ERR(skb))
            status = mgmt_status(PTR_ERR(skb));
        else
            status = mgmt_status(skb->data[0]);
    }

    bt_dev_dbg(hdev, "status %u", status);

    mgmt_cp = cmd->param;

    if (status) {
        status = mgmt_status(status);
        eir_len = 0;

        h192 = NULL;
        r192 = NULL;
        h256 = NULL;
        r256 = NULL;
    } else if (!bredr_sc_enabled(hdev)) {
        struct hci_rp_read_local_oob_data *rp;

        if (skb->len != sizeof(*rp)) {
            status = MGMT_STATUS_FAILED;
            eir_len = 0;
        } else {
            status = MGMT_STATUS_SUCCESS;
            rp = (void *)skb->data;

            eir_len = 5 + 18 + 18;
            h192 = rp->hash;
            r192 = rp->rand;
            h256 = NULL;
            r256 = NULL;
        }
    } else {
        struct hci_rp_read_local_oob_ext_data *rp;

        if (skb->len != sizeof(*rp)) {
            status = MGMT_STATUS_FAILED;
            eir_len = 0;
        } else {
            status = MGMT_STATUS_SUCCESS;
            rp = (void *)skb->data;

            if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
                eir_len = 5 + 18 + 18;
                h192 = NULL;
                r192 = NULL;
            } else {
                eir_len = 5 + 18 + 18 + 18 + 18;
                h192 = rp->hash192;
                r192 = rp->rand192;
            }

            h256 = rp->hash256;
            r256 = rp->rand256;
        }
    }

    mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
    if (!mgmt_rp)
        goto done;

    if (eir_len == 0)
        goto send_rsp;

    eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
                  hdev->dev_class, 3);

    if (h192 && r192) {
        eir_len = eir_append_data(mgmt_rp->eir, eir_len,
                      EIR_SSP_HASH_C192, h192, 16);
        eir_len = eir_append_data(mgmt_rp->eir, eir_len,
                      EIR_SSP_RAND_R192, r192, 16);
    }

    if (h256 && r256) {
        eir_len = eir_append_data(mgmt_rp->eir, eir_len,
                      EIR_SSP_HASH_C256, h256, 16);
        eir_len = eir_append_data(mgmt_rp->eir, eir_len,
                      EIR_SSP_RAND_R256, r256, 16);
    }

send_rsp:
    mgmt_rp->type = mgmt_cp->type;
    mgmt_rp->eir_len = cpu_to_le16(eir_len);

    err = mgmt_cmd_complete(cmd->sk, hdev->id,
                MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
                mgmt_rp, sizeof(*mgmt_rp) + eir_len);
    if (err < 0 || status)
        goto done;

    hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

    err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
                 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
                 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
    if (skb && !IS_ERR(skb))
        kfree_skb(skb);

    kfree(mgmt_rp);
    mgmt_pending_remove(cmd);
}
static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
                  struct mgmt_cp_read_local_oob_ext_data *cp)
{
    struct mgmt_pending_cmd *cmd;
    int err;

    cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
                   cp, sizeof(*cp));
    if (!cmd)
        return -ENOMEM;

    err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
                 read_local_oob_ext_data_complete);
    if (err < 0) {
        mgmt_pending_remove(cmd);
        return err;
    }

    return 0;
}
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
                   void *data, u16 data_len)
{
    struct mgmt_cp_read_local_oob_ext_data *cp = data;
    struct mgmt_rp_read_local_oob_ext_data *rp;
    size_t rp_len;
    u16 eir_len;
    u8 status, flags, role, addr[7], hash[16], rand[16];
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    if (hdev_is_powered(hdev)) {
        switch (cp->type) {
        case BIT(BDADDR_BREDR):
            status = mgmt_bredr_support(hdev);
            if (status)
                eir_len = 0;
            else
                eir_len = 5;
            break;
        case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
            status = mgmt_le_support(hdev);
            if (status)
                eir_len = 0;
            else
                eir_len = 9 + 3 + 18 + 18 + 3;
            break;
        default:
            status = MGMT_STATUS_INVALID_PARAMS;
            eir_len = 0;
            break;
        }
    } else {
        status = MGMT_STATUS_NOT_POWERED;
        eir_len = 0;
    }

    rp_len = sizeof(*rp) + eir_len;
    rp = kmalloc(rp_len, GFP_ATOMIC);
    if (!rp)
        return -ENOMEM;

    if (!status && !lmp_ssp_capable(hdev)) {
        status = MGMT_STATUS_NOT_SUPPORTED;
        eir_len = 0;
    }

    if (status)
        goto complete;

    hci_dev_lock(hdev);

    eir_len = 0;
    switch (cp->type) {
    case BIT(BDADDR_BREDR):
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
            err = read_local_ssp_oob_req(hdev, sk, cp);
            hci_dev_unlock(hdev);
            if (!err)
                goto done;

            status = MGMT_STATUS_FAILED;
            goto complete;
        } else {
            eir_len = eir_append_data(rp->eir, eir_len,
                          EIR_CLASS_OF_DEV,
                          hdev->dev_class, 3);
        }
        break;
    case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
        if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
            smp_generate_oob(hdev, hash, rand) < 0) {
            hci_dev_unlock(hdev);
            status = MGMT_STATUS_FAILED;
            goto complete;
        }

        /* This should return the active RPA, but since the RPA
         * is only programmed on demand, it is really hard to fill
         * this in at the moment. For now disallow retrieving
         * local out-of-band data when privacy is in use.
         *
         * Returning the identity address will not help here since
         * pairing happens before the identity resolving key is
         * known and thus the connection establishment happens
         * based on the RPA and not the identity address.
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
            hci_dev_unlock(hdev);
            status = MGMT_STATUS_REJECTED;
            goto complete;
        }

        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
            memcpy(addr, &hdev->static_addr, 6);
            addr[6] = 0x01;
        } else {
            memcpy(addr, &hdev->bdaddr, 6);
            addr[6] = 0x00;
        }

        eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
                      addr, sizeof(addr));

        if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
            role = 0x02; /* Peripheral */
        else
            role = 0x01; /* Central */

        eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
                      &role, sizeof(role));

        if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
            eir_len = eir_append_data(rp->eir, eir_len,
                          EIR_LE_SC_CONFIRM,
                          hash, sizeof(hash));

            eir_len = eir_append_data(rp->eir, eir_len,
                          EIR_LE_SC_RANDOM,
                          rand, sizeof(rand));
        }

        flags = mgmt_get_adv_discov_flags(hdev);

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
            flags |= LE_AD_NO_BREDR;

        eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
                      &flags, sizeof(flags));
        break;
    }

    hci_dev_unlock(hdev);

    hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

    status = MGMT_STATUS_SUCCESS;

complete:
    rp->type = cp->type;
    rp->eir_len = cpu_to_le16(eir_len);

    err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
                status, rp, sizeof(*rp) + eir_len);
    if (err < 0 || status)
        goto done;

    err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
                 rp, sizeof(*rp) + eir_len,
                 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
    kfree(rp);

    return err;
}
static u32 get_supported_adv_flags(struct hci_dev *hdev)
{
    u32 flags = 0;

    flags |= MGMT_ADV_FLAG_CONNECTABLE;
    flags |= MGMT_ADV_FLAG_DISCOV;
    flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
    flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
    flags |= MGMT_ADV_FLAG_APPEARANCE;
    flags |= MGMT_ADV_FLAG_LOCAL_NAME;
    flags |= MGMT_ADV_PARAM_DURATION;
    flags |= MGMT_ADV_PARAM_TIMEOUT;
    flags |= MGMT_ADV_PARAM_INTERVALS;
    flags |= MGMT_ADV_PARAM_TX_POWER;
    flags |= MGMT_ADV_PARAM_SCAN_RSP;

    /* In extended adv TX_POWER returned from Set Adv Param
     * will be always valid.
     */
    if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
        flags |= MGMT_ADV_FLAG_TX_POWER;

    if (ext_adv_capable(hdev)) {
        flags |= MGMT_ADV_FLAG_SEC_1M;
        flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
        flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;

        if (le_2m_capable(hdev))
            flags |= MGMT_ADV_FLAG_SEC_2M;

        if (le_coded_capable(hdev))
            flags |= MGMT_ADV_FLAG_SEC_CODED;
    }

    return flags;
}
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
                 void *data, u16 data_len)
{
    struct mgmt_rp_read_adv_features *rp;
    size_t rp_len;
    int err;
    struct adv_info *adv_instance;
    u32 supported_flags;
    u8 *instance;

    bt_dev_dbg(hdev, "sock %p", sk);

    if (!lmp_le_capable(hdev))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
                       MGMT_STATUS_REJECTED);

    hci_dev_lock(hdev);

    rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
    rp = kmalloc(rp_len, GFP_ATOMIC);
    if (!rp) {
        hci_dev_unlock(hdev);
        return -ENOMEM;
    }

    supported_flags = get_supported_adv_flags(hdev);

    rp->supported_flags = cpu_to_le32(supported_flags);
    rp->max_adv_data_len = max_adv_len(hdev);
    rp->max_scan_rsp_len = max_adv_len(hdev);
    rp->max_instances = hdev->le_num_of_adv_sets;
    rp->num_instances = hdev->adv_instance_cnt;

    instance = rp->instance;
    list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
        /* Only instances 1-le_num_of_adv_sets are externally visible */
        if (adv_instance->instance <= hdev->adv_instance_cnt) {
            *instance = adv_instance->instance;
            instance++;
        } else {
            rp->num_instances--;
        }
    }

    hci_dev_unlock(hdev);

    err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
                MGMT_STATUS_SUCCESS, rp, rp_len);

    kfree(rp);

    return err;
}
static u8 calculate_name_len(struct hci_dev *hdev)
{
    u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

    return eir_append_local_name(hdev, buf, 0);
}

static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
               bool is_adv_data)
{
    u8 max_len = max_adv_len(hdev);

    if (is_adv_data) {
        if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
                 MGMT_ADV_FLAG_LIMITED_DISCOV |
                 MGMT_ADV_FLAG_MANAGED_FLAGS))
            max_len -= 3;

        if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
            max_len -= 3;
    } else {
        if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
            max_len -= calculate_name_len(hdev);

        if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
            max_len -= 4;
    }

    return max_len;
}
static bool flags_managed(u32 adv_flags)
{
    return adv_flags & (MGMT_ADV_FLAG_DISCOV |
                MGMT_ADV_FLAG_LIMITED_DISCOV |
                MGMT_ADV_FLAG_MANAGED_FLAGS);
}

static bool tx_power_managed(u32 adv_flags)
{
    return adv_flags & MGMT_ADV_FLAG_TX_POWER;
}

static bool name_managed(u32 adv_flags)
{
    return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
}

static bool appearance_managed(u32 adv_flags)
{
    return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
}
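/* Advertising data is a sequence of length-prefixed fields: each entry
 * is one length byte (covering the AD type byte plus payload), one AD
 * type byte and the payload, so the parser below advances by
 * cur_len + 1 per field. Fields the kernel generates itself (flags,
 * TX power, name, appearance) are rejected whenever the corresponding
 * MGMT flag marks them as kernel-managed.
 */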
static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
                  u8 len, bool is_adv_data)
{
    int i, cur_len;
    u8 max_len;

    max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);

    if (len > max_len)
        return false;

    /* Make sure that the data is correctly formatted. */
    for (i = 0; i < len; i += (cur_len + 1)) {
        cur_len = data[i];

        if (!cur_len)
            continue;

        if (data[i + 1] == EIR_FLAGS &&
            (!is_adv_data || flags_managed(adv_flags)))
            return false;

        if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
            return false;

        if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
            return false;

        if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
            return false;

        if (data[i + 1] == EIR_APPEARANCE &&
            appearance_managed(adv_flags))
            return false;

        /* If the current field length would exceed the total data
         * length, then it's invalid.
         */
        if (i + cur_len >= len)
            return false;
    }

    return true;
}
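/* The secondary-PHY flags (SEC_1M/SEC_2M/SEC_CODED) are mutually
 * exclusive. The check below isolates the lowest set bit with
 * phy_flags & -phy_flags and XORs it back in, so the expression is
 * non-zero exactly when more than one secondary-PHY flag is set.
 */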
static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
{
    u32 supported_flags, phy_flags;

    /* The current implementation only supports a subset of the specified
     * flags. Also need to check mutual exclusiveness of sec flags.
     */
    supported_flags = get_supported_adv_flags(hdev);
    phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
    if (adv_flags & ~supported_flags ||
        ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
        return false;

    return true;
}

static bool adv_busy(struct hci_dev *hdev)
{
    return pending_find(MGMT_OP_SET_LE, hdev);
}
static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
                 int err)
{
    struct adv_info *adv, *n;

    bt_dev_dbg(hdev, "err %d", err);

    hci_dev_lock(hdev);

    list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
        u8 instance;

        if (!adv->pending)
            continue;

        if (!err) {
            adv->pending = false;
            continue;
        }

        instance = adv->instance;

        if (hdev->cur_adv_instance == instance)
            cancel_adv_timeout(hdev);

        hci_remove_adv_instance(hdev, instance);
        mgmt_advertising_removed(sk, hdev, instance);
    }

    hci_dev_unlock(hdev);
}
static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_add_advertising *cp = cmd->param;
    struct mgmt_rp_add_advertising rp;

    memset(&rp, 0, sizeof(rp));

    rp.instance = cp->instance;

    if (err)
        mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
                mgmt_status(err));
    else
        mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
                  mgmt_status(err), &rp, sizeof(rp));

    add_adv_complete(hdev, cmd->sk, cp->instance, err);

    mgmt_pending_free(cmd);
}

static int add_advertising_sync(struct hci_dev *hdev, void *data)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_add_advertising *cp = cmd->param;

    return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
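/* Add Advertising flow: validate the request, register (or replace)
 * the instance with hci_add_adv_instance(), then either schedule it
 * through the cmd_sync machinery or, when userspace owns advertising
 * via the HCI_ADVERTISING setting or the controller is powered off,
 * simply record the instance and reply.
 */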
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
               void *data, u16 data_len)
{
    struct mgmt_cp_add_advertising *cp = data;
    struct mgmt_rp_add_advertising rp;
    u32 flags;
    u8 status;
    u16 timeout, duration;
    unsigned int prev_instance_cnt;
    u8 schedule_instance = 0;
    struct adv_info *adv, *next_instance;
    int err;
    struct mgmt_pending_cmd *cmd;

    bt_dev_dbg(hdev, "sock %p", sk);

    status = mgmt_le_support(hdev);
    if (status)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                       status);

    if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                       MGMT_STATUS_INVALID_PARAMS);

    if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                       MGMT_STATUS_INVALID_PARAMS);

    flags = __le32_to_cpu(cp->flags);
    timeout = __le16_to_cpu(cp->timeout);
    duration = __le16_to_cpu(cp->duration);

    if (!requested_adv_flags_are_valid(hdev, flags))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                       MGMT_STATUS_INVALID_PARAMS);

    hci_dev_lock(hdev);

    if (timeout && !hdev_is_powered(hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                      MGMT_STATUS_REJECTED);
        goto unlock;
    }

    if (adv_busy(hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                      MGMT_STATUS_BUSY);
        goto unlock;
    }

    if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
        !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
                   cp->scan_rsp_len, false)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                      MGMT_STATUS_INVALID_PARAMS);
        goto unlock;
    }

    prev_instance_cnt = hdev->adv_instance_cnt;

    adv = hci_add_adv_instance(hdev, cp->instance, flags,
                   cp->adv_data_len, cp->data,
                   cp->scan_rsp_len,
                   cp->data + cp->adv_data_len,
                   timeout, duration,
                   HCI_ADV_TX_POWER_NO_PREFERENCE,
                   hdev->le_adv_min_interval,
                   hdev->le_adv_max_interval, 0);
    if (IS_ERR(adv)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                      MGMT_STATUS_FAILED);
        goto unlock;
    }

    /* Only trigger an advertising added event if a new instance was
     * actually added.
     */
    if (hdev->adv_instance_cnt > prev_instance_cnt)
        mgmt_advertising_added(sk, hdev, cp->instance);

    if (hdev->cur_adv_instance == cp->instance) {
        /* If the currently advertised instance is being changed then
         * cancel the current advertising and schedule the next
         * instance. If there is only one instance then the overridden
         * advertising data will be visible right away.
         */
        cancel_adv_timeout(hdev);

        next_instance = hci_get_next_instance(hdev, cp->instance);
        if (next_instance)
            schedule_instance = next_instance->instance;
    } else if (!hdev->adv_instance_timeout) {
        /* Immediately advertise the new instance if no other
         * instance is currently being advertised.
         */
        schedule_instance = cp->instance;
    }

    /* If the HCI_ADVERTISING flag is set or the device isn't powered or
     * there is no instance to be advertised then we have no HCI
     * communication to make. Simply return.
     */
    if (!hdev_is_powered(hdev) ||
        hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
        !schedule_instance) {
        rp.instance = cp->instance;
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
                    MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
        goto unlock;
    }

    /* We're good to go, update advertising data, parameters, and start
     * advertising.
     */
    cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
                   data_len);
    if (!cmd) {
        err = -ENOMEM;
        goto unlock;
    }

    cp->instance = schedule_instance;

    err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
                 add_advertising_complete);
    if (err < 0)
        mgmt_pending_free(cmd);

unlock:
    hci_dev_unlock(hdev);

    return err;
}
static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
                    int err)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
    struct mgmt_rp_add_ext_adv_params rp;
    struct adv_info *adv;
    u32 flags;

    BT_DBG("%s", hdev->name);

    hci_dev_lock(hdev);

    adv = hci_find_adv_instance(hdev, cp->instance);
    if (!adv)
        goto unlock;

    rp.instance = cp->instance;
    rp.tx_power = adv->tx_power;

    /* While we're at it, inform userspace of the available space for this
     * advertisement, given the flags that will be used.
     */
    flags = __le32_to_cpu(cp->flags);
    rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
    rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

    if (err) {
        /* If this advertisement was previously advertising and we
         * failed to update it, we signal that it has been removed and
         * delete its structure
         */
        if (!adv->pending)
            mgmt_advertising_removed(cmd->sk, hdev, cp->instance);

        hci_remove_adv_instance(hdev, cp->instance);

        mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
                mgmt_status(err));
    } else {
        mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
                  mgmt_status(err), &rp, sizeof(rp));
    }

unlock:
    mgmt_pending_free(cmd);

    hci_dev_unlock(hdev);
}
static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_add_ext_adv_params *cp = cmd->param;

    return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
}
static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
                  void *data, u16 data_len)
{
    struct mgmt_cp_add_ext_adv_params *cp = data;
    struct mgmt_rp_add_ext_adv_params rp;
    struct mgmt_pending_cmd *cmd = NULL;
    struct adv_info *adv;
    u32 flags, min_interval, max_interval;
    u16 timeout, duration;
    u8 status;
    s8 tx_power;
    int err;

    BT_DBG("%s", hdev->name);

    status = mgmt_le_support(hdev);
    if (status)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
                       status);

    if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
                       MGMT_STATUS_INVALID_PARAMS);

    /* The purpose of breaking add_advertising into two separate MGMT calls
     * for params and data is to allow more parameters to be added to this
     * structure in the future. For this reason, we verify that we have the
     * bare minimum structure we know of when the interface was defined. Any
     * extra parameters we don't know about will be ignored in this request.
     */
    if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
                       MGMT_STATUS_INVALID_PARAMS);

    flags = __le32_to_cpu(cp->flags);

    if (!requested_adv_flags_are_valid(hdev, flags))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
                       MGMT_STATUS_INVALID_PARAMS);

    hci_dev_lock(hdev);

    /* In new interface, we require that we are powered to register */
    if (!hdev_is_powered(hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
                      MGMT_STATUS_REJECTED);
        goto unlock;
    }

    if (adv_busy(hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
                      MGMT_STATUS_BUSY);
        goto unlock;
    }

    /* Parse defined parameters from request, use defaults otherwise */
    timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
          __le16_to_cpu(cp->timeout) : 0;

    duration = (flags & MGMT_ADV_PARAM_DURATION) ?
           __le16_to_cpu(cp->duration) :
           hdev->def_multi_adv_rotation_duration;

    min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
               __le32_to_cpu(cp->min_interval) :
               hdev->le_adv_min_interval;

    max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
               __le32_to_cpu(cp->max_interval) :
               hdev->le_adv_max_interval;

    tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
           cp->tx_power :
           HCI_ADV_TX_POWER_NO_PREFERENCE;

    /* Create advertising instance with no advertising or response data */
    adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
                   timeout, duration, tx_power, min_interval,
                   max_interval, 0);
    if (IS_ERR(adv)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
                      MGMT_STATUS_FAILED);
        goto unlock;
    }

    /* Submit request for advertising params if ext adv available */
    if (ext_adv_capable(hdev)) {
        cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
                       data, data_len);
        if (!cmd) {
            err = -ENOMEM;
            hci_remove_adv_instance(hdev, cp->instance);
            goto unlock;
        }

        err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
                     add_ext_adv_params_complete);
        if (err < 0)
            mgmt_pending_free(cmd);
    } else {
        rp.instance = cp->instance;
        rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
        rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
        rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
        err = mgmt_cmd_complete(sk, hdev->id,
                    MGMT_OP_ADD_EXT_ADV_PARAMS,
                    MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
    }

unlock:
    hci_dev_unlock(hdev);

    return err;
}
static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
    struct mgmt_rp_add_advertising rp;

    add_adv_complete(hdev, cmd->sk, cp->instance, err);

    memset(&rp, 0, sizeof(rp));

    rp.instance = cp->instance;

    if (err)
        mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
                mgmt_status(err));
    else
        mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
                  mgmt_status(err), &rp, sizeof(rp));

    mgmt_pending_free(cmd);
}

static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
    int err;

    if (ext_adv_capable(hdev)) {
        err = hci_update_adv_data_sync(hdev, cp->instance);
        if (err)
            return err;

        err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
        if (err)
            return err;

        return hci_enable_ext_advertising_sync(hdev, cp->instance);
    }

    return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
}
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
                u16 data_len)
{
    struct mgmt_cp_add_ext_adv_data *cp = data;
    struct mgmt_rp_add_ext_adv_data rp;
    u8 schedule_instance = 0;
    struct adv_info *next_instance;
    struct adv_info *adv_instance;
    int err = 0;
    struct mgmt_pending_cmd *cmd;

    BT_DBG("%s", hdev->name);

    hci_dev_lock(hdev);

    adv_instance = hci_find_adv_instance(hdev, cp->instance);

    if (!adv_instance) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
                      MGMT_STATUS_INVALID_PARAMS);
        goto unlock;
    }

    /* In new interface, we require that we are powered to register */
    if (!hdev_is_powered(hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
                      MGMT_STATUS_REJECTED);
        goto clear_new_instance;
    }

    if (adv_busy(hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
                      MGMT_STATUS_BUSY);
        goto clear_new_instance;
    }

    /* Validate new data */
    if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
                   cp->adv_data_len, true) ||
        !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
                   cp->adv_data_len, cp->scan_rsp_len, false)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
                      MGMT_STATUS_INVALID_PARAMS);
        goto clear_new_instance;
    }

    /* Set the data in the advertising instance */
    hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
                  cp->data, cp->scan_rsp_len,
                  cp->data + cp->adv_data_len);

    /* If using software rotation, determine next instance to use */
    if (hdev->cur_adv_instance == cp->instance) {
        /* If the currently advertised instance is being changed
         * then cancel the current advertising and schedule the
         * next instance. If there is only one instance then the
         * overridden advertising data will be visible right
         * away.
         */
        cancel_adv_timeout(hdev);

        next_instance = hci_get_next_instance(hdev, cp->instance);
        if (next_instance)
            schedule_instance = next_instance->instance;
    } else if (!hdev->adv_instance_timeout) {
        /* Immediately advertise the new instance if no other
         * instance is currently being advertised.
         */
        schedule_instance = cp->instance;
    }

    /* If the HCI_ADVERTISING flag is set or there is no instance to
     * be advertised then we have no HCI communication to make.
     * Simply return.
     */
    if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
        if (adv_instance->pending) {
            mgmt_advertising_added(sk, hdev, cp->instance);
            adv_instance->pending = false;
        }
        rp.instance = cp->instance;
        err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
                    MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
        goto unlock;
    }

    cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
                   data_len);
    if (!cmd) {
        err = -ENOMEM;
        goto clear_new_instance;
    }

    err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
                 add_ext_adv_data_complete);
    if (err < 0) {
        mgmt_pending_free(cmd);
        goto clear_new_instance;
    }

    /* We were successful in updating data, so trigger advertising_added
     * event if this is an instance that wasn't previously advertising. If
     * a failure occurs in the requests we initiated, we will remove the
     * instance again in add_advertising_complete
     */
    if (adv_instance->pending)
        mgmt_advertising_added(sk, hdev, cp->instance);

    goto unlock;

clear_new_instance:
    hci_remove_adv_instance(hdev, cp->instance);

unlock:
    hci_dev_unlock(hdev);

    return err;
}
static void remove_advertising_complete(struct hci_dev *hdev, void *data,
                    int err)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_remove_advertising *cp = cmd->param;
    struct mgmt_rp_remove_advertising rp;

    bt_dev_dbg(hdev, "err %d", err);

    memset(&rp, 0, sizeof(rp));
    rp.instance = cp->instance;

    if (err)
        mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
                mgmt_status(err));
    else
        mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
                  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));

    mgmt_pending_free(cmd);
}
static int remove_advertising_sync(struct hci_dev *hdev, void *data)
{
    struct mgmt_pending_cmd *cmd = data;
    struct mgmt_cp_remove_advertising *cp = cmd->param;
    int err;

    err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
    if (err)
        return err;

    if (list_empty(&hdev->adv_instances))
        err = hci_disable_advertising_sync(hdev);

    return err;
}
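/* An instance of 0 requests removal of all instances; a non-zero
 * instance must exist or the command fails with Invalid Parameters.
 */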
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
                  void *data, u16 data_len)
{
    struct mgmt_cp_remove_advertising *cp = data;
    struct mgmt_pending_cmd *cmd;
    int err;

    bt_dev_dbg(hdev, "sock %p", sk);

    hci_dev_lock(hdev);

    if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
        err = mgmt_cmd_status(sk, hdev->id,
                      MGMT_OP_REMOVE_ADVERTISING,
                      MGMT_STATUS_INVALID_PARAMS);
        goto unlock;
    }

    if (pending_find(MGMT_OP_SET_LE, hdev)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
                      MGMT_STATUS_BUSY);
        goto unlock;
    }

    if (list_empty(&hdev->adv_instances)) {
        err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
                      MGMT_STATUS_INVALID_PARAMS);
        goto unlock;
    }

    cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
                   data_len);
    if (!cmd) {
        err = -ENOMEM;
        goto unlock;
    }

    err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
                 remove_advertising_complete);
    if (err < 0)
        mgmt_pending_free(cmd);

unlock:
    hci_dev_unlock(hdev);

    return err;
}
static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
                 void *data, u16 data_len)
{
    struct mgmt_cp_get_adv_size_info *cp = data;
    struct mgmt_rp_get_adv_size_info rp;
    u32 flags, supported_flags;

    bt_dev_dbg(hdev, "sock %p", sk);

    if (!lmp_le_capable(hdev))
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
                       MGMT_STATUS_REJECTED);

    if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
                       MGMT_STATUS_INVALID_PARAMS);

    flags = __le32_to_cpu(cp->flags);

    /* The current implementation only supports a subset of the specified
     * flags.
     */
    supported_flags = get_supported_adv_flags(hdev);
    if (flags & ~supported_flags)
        return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
                       MGMT_STATUS_INVALID_PARAMS);

    rp.instance = cp->instance;
    rp.flags = cp->flags;
    rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
    rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);

    return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
                 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
}
static const struct hci_mgmt_handler mgmt_handlers[] = {
    { NULL }, /* 0x0000 (no command) */
    { read_version,            MGMT_READ_VERSION_SIZE,
                        HCI_MGMT_NO_HDEV |
                        HCI_MGMT_UNTRUSTED },
    { read_commands,           MGMT_READ_COMMANDS_SIZE,
                        HCI_MGMT_NO_HDEV |
                        HCI_MGMT_UNTRUSTED },
    { read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
                        HCI_MGMT_NO_HDEV |
                        HCI_MGMT_UNTRUSTED },
    { read_controller_info,    MGMT_READ_INFO_SIZE,
                        HCI_MGMT_UNTRUSTED },
    { set_powered,             MGMT_SETTING_SIZE },
    { set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
    { set_connectable,         MGMT_SETTING_SIZE },
    { set_fast_connectable,    MGMT_SETTING_SIZE },
    { set_bondable,            MGMT_SETTING_SIZE },
    { set_link_security,       MGMT_SETTING_SIZE },
    { set_ssp,                 MGMT_SETTING_SIZE },
    { set_hs,                  MGMT_SETTING_SIZE },
    { set_le,                  MGMT_SETTING_SIZE },
    { set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
    { set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
    { add_uuid,                MGMT_ADD_UUID_SIZE },
    { remove_uuid,             MGMT_REMOVE_UUID_SIZE },
    { load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
                        HCI_MGMT_VAR_LEN },
    { load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
                        HCI_MGMT_VAR_LEN },
    { disconnect,              MGMT_DISCONNECT_SIZE },
    { get_connections,         MGMT_GET_CONNECTIONS_SIZE },
    { pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
    { pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
    { set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
    { pair_device,             MGMT_PAIR_DEVICE_SIZE },
    { cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
    { unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
    { user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
    { user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
    { user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
    { user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
    { read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
    { add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
                        HCI_MGMT_VAR_LEN },
    { remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
    { start_discovery,         MGMT_START_DISCOVERY_SIZE },
    { stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
    { confirm_name,            MGMT_CONFIRM_NAME_SIZE },
    { block_device,            MGMT_BLOCK_DEVICE_SIZE },
    { unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
    { set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
    { set_advertising,         MGMT_SETTING_SIZE },
    { set_bredr,               MGMT_SETTING_SIZE },
    { set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
    { set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
    { set_secure_conn,         MGMT_SETTING_SIZE },
    { set_debug_keys,          MGMT_SETTING_SIZE },
    { set_privacy,             MGMT_SET_PRIVACY_SIZE },
    { load_irks,               MGMT_LOAD_IRKS_SIZE,
                        HCI_MGMT_VAR_LEN },
    { get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
    { get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
    { add_device,              MGMT_ADD_DEVICE_SIZE },
    { remove_device,           MGMT_REMOVE_DEVICE_SIZE },
    { load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
                        HCI_MGMT_VAR_LEN },
    { read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
                        HCI_MGMT_NO_HDEV |
                        HCI_MGMT_UNTRUSTED },
    { read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
                        HCI_MGMT_UNCONFIGURED |
                        HCI_MGMT_UNTRUSTED },
    { set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
                        HCI_MGMT_UNCONFIGURED },
    { set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
                        HCI_MGMT_UNCONFIGURED },
    { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
                        HCI_MGMT_VAR_LEN },
    { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
    { read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
                        HCI_MGMT_NO_HDEV |
                        HCI_MGMT_UNTRUSTED },
    { read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
    { add_advertising,         MGMT_ADD_ADVERTISING_SIZE,
                        HCI_MGMT_VAR_LEN },
    { remove_advertising,      MGMT_REMOVE_ADVERTISING_SIZE },
    { get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
    { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
    { read_ext_controller_info, MGMT_READ_EXT_INFO_SIZE,
                        HCI_MGMT_UNTRUSTED },
    { set_appearance,          MGMT_SET_APPEARANCE_SIZE },
    { get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
    { set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
    { set_blocked_keys,        MGMT_OP_SET_BLOCKED_KEYS_SIZE,
                        HCI_MGMT_VAR_LEN },
    { set_wideband_speech,     MGMT_SETTING_SIZE },
    { read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
                        HCI_MGMT_UNTRUSTED },
    { read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
                        HCI_MGMT_UNTRUSTED |
                        HCI_MGMT_HDEV_OPTIONAL },
    { set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
                        HCI_MGMT_VAR_LEN |
                        HCI_MGMT_HDEV_OPTIONAL },
    { read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
                        HCI_MGMT_UNTRUSTED },
    { set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
                        HCI_MGMT_VAR_LEN },
    { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
                        HCI_MGMT_UNTRUSTED },
    { set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
                        HCI_MGMT_VAR_LEN },
    { get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
    { set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
    { read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
    { add_adv_patterns_monitor, MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
                        HCI_MGMT_VAR_LEN },
    { remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
    { add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
                        HCI_MGMT_VAR_LEN },
    { add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
                        HCI_MGMT_VAR_LEN },
    { add_adv_patterns_monitor_rssi,
                   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
                        HCI_MGMT_VAR_LEN },
    { set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE,
                        HCI_MGMT_VAR_LEN },
    { mesh_features,           MGMT_MESH_READ_FEATURES_SIZE },
    { mesh_send,               MGMT_MESH_SEND_SIZE,
                        HCI_MGMT_VAR_LEN },
    { mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },
};
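/* The Extended Index events carry a type field: 0x00 for a configured
 * primary controller, 0x01 for an unconfigured one and 0x02 for AMP
 * controllers, together with the transport bus the controller is
 * attached to.
 */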
void mgmt_index_added(struct hci_dev *hdev)
{
    struct mgmt_ev_ext_index ev;

    if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
        return;

    switch (hdev->dev_type) {
    case HCI_PRIMARY:
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
            mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
                     NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
            ev.type = 0x01;
        } else {
            mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
                     HCI_MGMT_INDEX_EVENTS);
            ev.type = 0x00;
        }
        break;
    case HCI_AMP:
        ev.type = 0x02;
        break;
    default:
        return;
    }

    ev.bus = hdev->bus;

    mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
             HCI_MGMT_EXT_INDEX_EVENTS);
}
void mgmt_index_removed(struct hci_dev *hdev)
{
    struct mgmt_ev_ext_index ev;
    u8 status = MGMT_STATUS_INVALID_INDEX;

    if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
        return;

    switch (hdev->dev_type) {
    case HCI_PRIMARY:
        mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
            mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
                     NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
            ev.type = 0x01;
        } else {
            mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
                     HCI_MGMT_INDEX_EVENTS);
            ev.type = 0x00;
        }
        break;
    case HCI_AMP:
        ev.type = 0x02;
        break;
    default:
        return;
    }

    ev.bus = hdev->bus;

    mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
             HCI_MGMT_EXT_INDEX_EVENTS);

    /* Cancel any remaining timed work */
    if (!hci_dev_test_flag(hdev, HCI_MGMT))
        return;
    cancel_delayed_work_sync(&hdev->discov_off);
    cancel_delayed_work_sync(&hdev->service_cache);
    cancel_delayed_work_sync(&hdev->rpa_expired);
}
void mgmt_power_on(struct hci_dev *hdev, int err)
{
    struct cmd_lookup match = { NULL, hdev };

    bt_dev_dbg(hdev, "err %d", err);

    hci_dev_lock(hdev);

    if (!err) {
        restart_le_actions(hdev);
        hci_update_passive_scan(hdev);
    }

    mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

    new_settings(hdev, match.sk);

    if (match.sk)
        sock_put(match.sk);

    hci_dev_unlock(hdev);
}
void __mgmt_power_off(struct hci_dev *hdev)
{
    struct cmd_lookup match = { NULL, hdev };
    u8 status, zero_cod[] = { 0, 0, 0 };

    mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

    /* If the power off is because of hdev unregistration let us use
     * the appropriate INVALID_INDEX status. Otherwise use
     * NOT_POWERED. We cover both scenarios here since later in
     * mgmt_index_removed() any hci_conn callbacks will have already
     * been triggered, potentially causing misleading DISCONNECTED
     * status responses.
     */
    if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
        status = MGMT_STATUS_INVALID_INDEX;
    else
        status = MGMT_STATUS_NOT_POWERED;

    mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

    if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
        mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
                   zero_cod, sizeof(zero_cod),
                   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
        ext_info_changed(hdev, NULL);
    }

    new_settings(hdev, match.sk);

    if (match.sk)
        sock_put(match.sk);
}
void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
{
    struct mgmt_pending_cmd *cmd;
    u8 status;

    cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
    if (!cmd)
        return;

    if (err == -ERFKILL)
        status = MGMT_STATUS_RFKILLED;
    else
        status = MGMT_STATUS_FAILED;

    mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);

    mgmt_pending_remove(cmd);
}
void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
               bool persistent)
{
    struct mgmt_ev_new_link_key ev;

    memset(&ev, 0, sizeof(ev));

    ev.store_hint = persistent;
    bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
    ev.key.addr.type = BDADDR_BREDR;
    ev.key.type = key->type;
    memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
    ev.key.pin_len = key->pin_len;

    mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
}
static u8 mgmt_ltk_type(struct smp_ltk *ltk)
{
    switch (ltk->type) {
    case SMP_LTK:
    case SMP_LTK_RESPONDER:
        if (ltk->authenticated)
            return MGMT_LTK_AUTHENTICATED;
        return MGMT_LTK_UNAUTHENTICATED;
    case SMP_LTK_P256:
        if (ltk->authenticated)
            return MGMT_LTK_P256_AUTH;
        return MGMT_LTK_P256_UNAUTH;
    case SMP_LTK_P256_DEBUG:
        return MGMT_LTK_P256_DEBUG;
    }

    return MGMT_LTK_UNAUTHENTICATED;
}
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
    struct mgmt_ev_new_long_term_key ev;

    memset(&ev, 0, sizeof(ev));

    /* Devices using resolvable or non-resolvable random addresses
     * without providing an identity resolving key don't require
     * to store long term keys. Their addresses will change the
     * next time around.
     *
     * Only when a remote device provides an identity address
     * make sure the long term key is stored. If the remote
     * identity is known, the long term keys are internally
     * mapped to the identity address. So allow static random
     * and public addresses here.
     */
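    /* A random address whose two most significant bits are both 1 is a
     * static random address; RPAs and NRPAs fail the 0xc0 mask check
     * below and are therefore never hinted for storage.
     */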
    if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
        (key->bdaddr.b[5] & 0xc0) != 0xc0)
        ev.store_hint = 0x00;
    else
        ev.store_hint = persistent;

    bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
    ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
    ev.key.type = mgmt_ltk_type(key);
    ev.key.enc_size = key->enc_size;
    ev.key.ediv = key->ediv;
    ev.key.rand = key->rand;

    if (key->type == SMP_LTK)
        ev.key.initiator = 1;

    /* Make sure we copy only the significant bytes based on the
     * encryption key size, and set the rest of the value to zeroes.
     */
    memcpy(ev.key.val, key->val, key->enc_size);
    memset(ev.key.val + key->enc_size, 0,
           sizeof(ev.key.val) - key->enc_size);

    mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
    struct mgmt_ev_new_irk ev;

    memset(&ev, 0, sizeof(ev));

    ev.store_hint = persistent;

    bacpy(&ev.rpa, &irk->rpa);
    bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
    ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
    memcpy(ev.irk.val, irk->val, sizeof(irk->val));

    mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
           bool persistent)
{
    struct mgmt_ev_new_csrk ev;

    memset(&ev, 0, sizeof(ev));

    /* Devices using resolvable or non-resolvable random addresses
     * without providing an identity resolving key don't require
     * to store signature resolving keys. Their addresses will change
     * the next time around.
     *
     * Only when a remote device provides an identity address
     * make sure the signature resolving key is stored. So allow
     * static random and public addresses here.
     */
    if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
        (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
        ev.store_hint = 0x00;
    else
        ev.store_hint = persistent;

    bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
    ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
    ev.key.type = csrk->type;
    memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

    mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
             u8 bdaddr_type, u8 store_hint, u16 min_interval,
             u16 max_interval, u16 latency, u16 timeout)
{
    struct mgmt_ev_new_conn_param ev;

    if (!hci_is_identity_address(bdaddr, bdaddr_type))
        return;

    memset(&ev, 0, sizeof(ev));
    bacpy(&ev.addr.bdaddr, bdaddr);
    ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
    ev.store_hint = store_hint;
    ev.min_interval = cpu_to_le16(min_interval);
    ev.max_interval = cpu_to_le16(max_interval);
    ev.latency = cpu_to_le16(latency);
    ev.timeout = cpu_to_le16(timeout);

    mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
               u8 *name, u8 name_len)
{
    struct sk_buff *skb;
    struct mgmt_ev_device_connected *ev;
    u16 eir_len = 0;
    u32 flags = 0;

    /* allocate buff for LE or BR/EDR adv */
    if (conn->le_adv_data_len > 0)
        skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
                     sizeof(*ev) + conn->le_adv_data_len);
    else
        skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
                     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
                     eir_precalc_len(sizeof(conn->dev_class)));

    ev = skb_put(skb, sizeof(*ev));
    bacpy(&ev->addr.bdaddr, &conn->dst);
    ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);

    if (conn->out)
        flags |= MGMT_DEV_FOUND_INITIATED_CONN;

    ev->flags = __cpu_to_le32(flags);

    /* We must ensure that the EIR Data fields are ordered and
     * unique. Keep it simple for now and avoid the problem by not
     * adding any BR/EDR data to the LE adv.
     */
    if (conn->le_adv_data_len > 0) {
        skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
        eir_len = conn->le_adv_data_len;
    } else {
        if (name)
            eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);

        if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
            eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
                            conn->dev_class, sizeof(conn->dev_class));
    }

    ev->eir_len = cpu_to_le16(eir_len);

    mgmt_event_skb(skb, NULL);
}
static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct sock **sk = data;

	cmd->cmd_complete(cmd, 0);

	*sk = cmd->sk;
	sock_hold(*sk);

	mgmt_pending_remove(cmd);
}
static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct hci_dev *hdev = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, 0);
	mgmt_pending_remove(cmd);
}
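
/* Return true if a Set Powered (off) command is currently pending, i.e.
 * the controller is in the middle of being powered down.
 */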
bool mgmt_powering_down(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;
	struct mgmt_mode *cp;

	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
	if (!cmd)
		return false;

	cp = cmd->param;
	return !cp->val;
}
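
/* Emit Device Disconnected and complete any pending Disconnect command.
 * If this was the last open connection during power-down, the deferred
 * power-off work is run immediately instead of waiting for its timeout.
 */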
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
{
	struct mgmt_ev_pin_code_request ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = BDADDR_BREDR;
	ev.secure = secure;

	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				  u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 status)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
	if (!cmd)
		return;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u32 value,
			      u8 confirm_hint)
{
	struct mgmt_ev_user_confirm_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.confirm_hint = confirm_hint;
	ev.value = cpu_to_le32(value);

	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type)
{
	struct mgmt_ev_user_passkey_request ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);

	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
			  NULL);
}
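
/* Common completion handler for the four user confirm/passkey reply
 * variants below; they differ only in the pending opcode they look up.
 */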
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				      u8 link_type, u8 addr_type, u8 status,
				      u8 opcode)
{
	struct mgmt_pending_cmd *cmd;

	cmd = pending_find(opcode, hdev);
	if (!cmd)
		return -ENOENT;

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);

	return 0;
}
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 link_type, u8 addr_type, u32 passkey,
			     u8 entered)
{
	struct mgmt_ev_passkey_notify ev;

	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.passkey = __cpu_to_le32(passkey);
	ev.entered = entered;

	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}
}
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   dev_class, 3, HCI_MGMT_DEV_CLASS_EVENTS,
				   NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct mgmt_pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
}
static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
{
	int i;

	for (i = 0; i < uuid_count; i++) {
		if (!memcmp(uuid, uuids[i], 16))
			return true;
	}

	return false;
}
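
/* Walk the EIR fields and check every 16, 32 and 128 bit UUID entry
 * against the provided filter list; 16 and 32 bit UUIDs are first
 * expanded to 128 bits using the Bluetooth base UUID.
 */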
static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
{
	u16 parsed = 0;

	while (parsed < eir_len) {
		u8 field_len = eir[0];
		u8 uuid[16];
		int i;

		if (field_len == 0)
			break;

		if (eir_len - parsed < field_len + 1)
			break;

		switch (eir[1]) {
		case EIR_UUID16_ALL:
		case EIR_UUID16_SOME:
			for (i = 0; i + 3 <= field_len; i += 2) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID32_ALL:
		case EIR_UUID32_SOME:
			for (i = 0; i + 5 <= field_len; i += 4) {
				memcpy(uuid, bluetooth_base_uuid, 16);
				uuid[15] = eir[i + 5];
				uuid[14] = eir[i + 4];
				uuid[13] = eir[i + 3];
				uuid[12] = eir[i + 2];
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		case EIR_UUID128_ALL:
		case EIR_UUID128_SOME:
			for (i = 0; i + 17 <= field_len; i += 16) {
				memcpy(uuid, eir + i + 2, 16);
				if (has_uuid(uuid, uuid_count, uuids))
					return true;
			}
			break;
		}

		parsed += field_len + 1;
		eir += field_len + 1;
	}

	return false;
}
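
/* Restart LE scanning so that duplicate filtering is reset and fresh
 * advertising reports (with updated RSSI) are delivered, but only while
 * enough of the scan window remains to make a restart worthwhile.
 */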
static void restart_le_scan(struct hci_dev *hdev)
{
	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
		       hdev->discovery.scan_start +
		       hdev->discovery.scan_duration))
		return;

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
			   DISCOV_LE_RESTART_DELAY);
}
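
/* Apply the Start Service Discovery filters (RSSI threshold and UUID
 * list) to a single result and decide whether it should be reported.
 */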
static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	/* If a RSSI threshold has been specified, and
	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
	 * is set, let it through for further processing, as we might need to
	 * restart the scan.
	 *
	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
	 * the results are also dropped.
	 */
	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
	    (rssi == HCI_RSSI_INVALID ||
	    (rssi < hdev->discovery.rssi &&
	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
		return false;

	if (hdev->discovery.uuid_count != 0) {
		/* If a list of UUIDs is provided in filter, results with no
		 * matching UUID should be dropped.
		 */
		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
				   hdev->discovery.uuids) &&
		    !eir_has_uuids(scan_rsp, scan_rsp_len,
				   hdev->discovery.uuid_count,
				   hdev->discovery.uuids))
			return false;
	}

	/* If duplicate filtering does not report RSSI changes, then restart
	 * scanning to ensure updated result with updated RSSI values.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
		restart_le_scan(hdev);

		/* Validate RSSI value against the RSSI threshold once more. */
		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
		    rssi < hdev->discovery.rssi)
			return false;
	}

	return true;
}
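
/* Notify user space that a device previously matched by an Advertisement
 * Monitor is no longer being tracked.
 */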
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
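
/* Route an advertising report to Device Found and/or Adv Monitor Device
 * Found, depending on whether discovery is active and which monitors
 * matched; the skb is consumed in all paths.
 */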
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
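
/* Report an LE advertisement to mesh user space, filtering on the AD
 * types it registered interest in (if any were configured).
 */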
static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, s8 rssi, u32 flags, u8 *eir,
			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
			      u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_mesh_device_found *ev;
	int i, j;

	if (!hdev->mesh_ad_types[0])
		goto accepted;

	/* Scan for requested AD types */
	if (eir_len > 0) {
		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == eir[i + 1])
					goto accepted;
			}
		}
	}

	if (scan_rsp_len > 0) {
		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
				if (!hdev->mesh_ad_types[j])
					break;

				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
					goto accepted;
			}
		}
	}

	return;

accepted:
	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);
	ev->instant = cpu_to_le64(instant);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_event_skb(skb, NULL);
}
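
/* Entry point for all inquiry results and advertising reports; applies
 * the discovery filters and builds the Device Found event.
 */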
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
		       u64 instant)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
				  eir, eir_len, scan_rsp, scan_rsp_len,
				  instant);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
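
/* Called when a mgmt socket is closed; cleans up any outstanding mesh
 * transmissions owned by that socket on every registered controller.
 */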
void mgmt_cleanup(struct sock *sk)
{
	struct mgmt_mesh_tx *mesh_tx;
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		do {
			mesh_tx = mgmt_mesh_next(hdev, sk);

			if (mesh_tx)
				mesh_send_complete(hdev, mesh_tx, true);
		} while (mesh_tx);
	}

	read_unlock(&hci_dev_list_lock);
}