/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012  Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */

#include <linux/module.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sock.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "smp.h"
#include "mgmt_util.h"
#include "mgmt_config.h"
#include "msft.h"
#include "eir.h"
#include "aosp.h"
#define MGMT_VERSION	1
#define MGMT_REVISION	22
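
/* The opcode and event tables below are exported verbatim through the
 * Read Management Commands reply (see read_commands() further down), so
 * userspace can probe what this kernel supports before issuing commands.
 */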
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
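
/* Untrusted (non-privileged) sockets are restricted to the read-only
 * subset below; read_commands() reports these tables instead of the
 * full ones when HCI_SOCK_TRUSTED is not set on the socket.
 */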
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};

static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"
/* HCI to MGMT error code conversion table */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
static u8 mgmt_errno_status(int err)
{
	switch (err) {
	case 0:
		return MGMT_STATUS_SUCCESS;
	case -EPERM:
		return MGMT_STATUS_REJECTED;
	case -EINVAL:
		return MGMT_STATUS_INVALID_PARAMS;
	case -EOPNOTSUPP:
		return MGMT_STATUS_NOT_SUPPORTED;
	case -EBUSY:
		return MGMT_STATUS_BUSY;
	case -ETIMEDOUT:
		return MGMT_STATUS_AUTH_FAILED;
	case -ENOMEM:
		return MGMT_STATUS_NO_RESOURCES;
	case -EISCONN:
		return MGMT_STATUS_ALREADY_CONNECTED;
	case -ENOTCONN:
		return MGMT_STATUS_DISCONNECTED;
	}

	return MGMT_STATUS_FAILED;
}
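
/* Translate a status into an MGMT status code: negative values are
 * kernel errnos and go through mgmt_errno_status(), non-negative
 * values are HCI status codes looked up in mgmt_status_table above.
 */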
static u8 mgmt_status(int err)
{
	if (err < 0)
		return mgmt_errno_status(err);

	if (err < ARRAY_SIZE(mgmt_status_table))
		return mgmt_status_table[err];

	return MGMT_STATUS_FAILED;
}
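
/* Thin wrappers around mgmt_send_event() for the control channel: the
 * plain mgmt_event() variant broadcasts only to trusted sockets, while
 * the index/limited variants let the caller pick the required socket
 * flag (e.g. one of the HCI_MGMT_*_EVENTS flags).
 */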
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}

static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}

static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}

static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}

static u8 le_addr_type(u8 mgmt_addr_type)
{
	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
		return ADDR_LE_DEV_PUBLIC;

	return ADDR_LE_DEV_RANDOM;
}

void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
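
/* The Read Management Commands reply is variable length: the two
 * counters are followed by num_commands + num_events little-endian
 * 16-bit opcodes, written with put_unaligned_le16() since the array
 * following the header carries no alignment guarantee.
 */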
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	u16 num_commands, num_events;
	size_t rp_size;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		num_commands = ARRAY_SIZE(mgmt_commands);
		num_events = ARRAY_SIZE(mgmt_events);
	} else {
		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
		num_events = ARRAY_SIZE(mgmt_untrusted_events);
	}

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_events[i], opcode);
	} else {
		__le16 *opcode = rp->opcodes;

		for (i = 0; i < num_commands; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);

		for (i = 0; i < num_events; i++, opcode++)
			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
	}

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
				rp, rp_size);
	kfree(rp);

	return err;
}
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
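
/* The extended index list carries one entry per controller with a
 * type field (0x00 configured primary, 0x01 unconfigured primary,
 * 0x02 AMP) plus the transport bus, unlike the plain index lists above
 * which each report only one category of primary controller.
 */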
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
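
/* A controller only counts as "configured" once every option flagged
 * by its quirks has been provided by userspace: an external
 * configuration when HCI_QUIRK_EXTERNAL_CONFIG is set, and a valid
 * public address when the bdaddr is invalid or must come from a
 * device property. get_missing_options() reports whatever is still
 * outstanding as a bitmask.
 */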
static bool is_configured(struct hci_dev *hdev)
{
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		return false;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		return false;

	return true;
}

static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}

static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}

static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
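
/* Build the MGMT_PHY_* bitmask from the controller's LMP features:
 * BR 1M 1-slot is implied by BR/EDR support, the multi-slot and EDR
 * bits depend on the advertised feature bits, and the LE 2M/Coded
 * bits mirror the second LE feature page.
 */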
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}

static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}

static u32 get_configurable_phys(struct hci_dev *hdev)
{
	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
}
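
/* get_supported_settings() reports what the hardware could do, while
 * get_current_settings() below reports what is enabled right now; both
 * are returned in the Read Controller Information reply, and the
 * latter is also broadcast in New Settings events.
 */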
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}

u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			return LE_AD_LIMITED;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			return LE_AD_GENERAL;
	}

	return 0;
}

bool mgmt_get_connectable(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * its final value, so check for this first.
	 */
	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;

		return cp->val;
	}

	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
}
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}

static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}

static int rpa_expired_sync(struct hci_dev *hdev, void *data)
{
	/* The generation of a new RPA and programming it into the
	 * controller happens in the hci_req_enable_advertising()
	 * function.
	 */
	if (ext_adv_capable(hdev))
		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
	else
		return hci_enable_advertising_sync(hdev);
}

static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	bt_dev_dbg(hdev, "");

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_update_discoverable(hdev);

	mgmt_new_settings(hdev);

	hci_dev_unlock(hdev);
}
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);

	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}

static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));

	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}

static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}

void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
{
	struct mgmt_ev_advertising_added ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
}

void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
			      u8 instance)
{
	struct mgmt_ev_advertising_removed ev;

	ev.instance = instance;

	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}
/* This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			break;
		}
	}
}

static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}

static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
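
/* Mode-setting commands follow a common pattern: validate parameters,
 * reject if the same command is already pending, short-circuit when
 * nothing would change, otherwise queue a *_sync work item via
 * hci_cmd_sync_queue() and finish in the *_complete callback.
 */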
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}

int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};

static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}

static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}

static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	if (cmd->cmd_complete) {
		u8 *status = data;

		cmd->cmd_complete(cmd, *status);
		mgmt_pending_remove(cmd);

		return;
	}

	cmd_status_rsp(cmd, data);
}

static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}

static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
static u8 mgmt_bredr_support(struct hci_dev *hdev)
{
	if (!lmp_bredr_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return MGMT_STATUS_REJECTED;

	return MGMT_STATUS_SUCCESS;
}

static u8 mgmt_le_support(struct hci_dev *hdev)
{
	if (!lmp_le_capable(hdev))
		return MGMT_STATUS_NOT_SUPPORTED;
	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return MGMT_STATUS_REJECTED;

	return MGMT_STATUS_SUCCESS;
}
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}

static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
					  int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
}

static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}

static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}

static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}

static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
/* This is a helper function to test for pending mgmt commands that can
 * cause CoD or EIR HCI commands. We can only allow one such pending
 * mgmt command at a time since otherwise we cannot easily track what
 * the current values are, will be, and based on that calculate if a new
 * HCI command needs to be sent and if yes with what value.
 */
static bool pending_eir_or_class(struct hci_dev *hdev)
{
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		switch (cmd->opcode) {
		case MGMT_OP_ADD_UUID:
		case MGMT_OP_REMOVE_UUID:
		case MGMT_OP_SET_DEV_CLASS:
		case MGMT_OP_SET_POWERED:
			return true;
		}
	}

	return false;
}

static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static u8 get_uuid_size(const u8 *uuid)
{
	u32 val;

	if (memcmp(uuid, bluetooth_base_uuid, 12))
		return 128;

	val = get_unaligned_le32(&uuid[12]);
	if (val > 0xffff)
		return 32;

	return 16;
}
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}

static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
static bool enable_service_cache(struct hci_dev *hdev)
{
	if (!hdev_is_powered(hdev))
		return false;

	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
				   CACHE_TIMEOUT);
		return true;
	}

	return false;
}

static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}

static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int set_class_sync(struct hci_dev *hdev, void *data)
{
	int err = 0;

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
		cancel_delayed_work_sync(&hdev->service_cache);
		err = hci_update_eir_sync(hdev);
	}

	if (err)
		return err;

	return hci_update_class_sync(hdev);
}

static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
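
/* Load Link Keys replaces the entire BR/EDR key store. The payload is
 * fully validated first (count bound derived from U16_MAX, exact
 * length via struct_size(), per-key address/type checks) before any
 * existing key is dropped, so a malformed request cannot clear the
 * store.
 */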
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
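/* Worked example for the length check above (assuming the packed wire
 * layout from mgmt.h: a 3-byte fixed part holding debug_keys plus
 * key_count, and 25 bytes per mgmt_link_key_info entry): a load of two
 * keys must arrive as exactly struct_size(cp, keys, 2) = 3 + 2 * 25 =
 * 53 bytes; any other length is rejected as invalid parameters.
 */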
static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
			   u8 addr_type, struct sock *skip_sk)
{
	struct mgmt_ev_device_unpaired ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
			  skip_sk);
}
static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;

	if (!err)
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);

	cmd->cmd_complete(cmd, err);
	mgmt_pending_free(cmd);
}
static int unpair_device_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_unpair_device *cp = cmd->param;
	struct hci_conn *conn;

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn)
		return 0;

	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}

	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
				 unpair_device_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
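/* Note the reply semantics above: when no link has to be torn down, the
 * command completes immediately and Device Unpaired is emitted right
 * away; when a connection must be aborted first, the reply is deferred
 * to unpair_device_complete() once the disconnect has run.
 */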
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
{
	switch (link_type) {
	case LE_LINK:
		switch (addr_type) {
		case ADDR_LE_DEV_PUBLIC:
			return BDADDR_LE_PUBLIC;

		default:
			/* Fallback to LE Random address type */
			return BDADDR_LE_RANDOM;
		}

	default:
		/* Fallback to BR/EDR type */
		return BDADDR_BREDR;
	}
}
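/* Summary of the mapping above: LE link + public address yields
 * BDADDR_LE_PUBLIC, LE link + anything else yields BDADDR_LE_RANDOM,
 * and every non-LE link type maps to BDADDR_BREDR.
 */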
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	int err;
	u16 i;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
				struct_size(rp, addr, i));

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_cp_set_io_capability *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->io_capability = cp->io_capability;

	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
				 NULL, 0);
}
static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct mgmt_pending_cmd *cmd;

	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
			continue;

		if (cmd->user_data != conn)
			continue;

		return cmd;
	}

	return NULL;
}
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	hci_conn_put(conn);

	return err;
}
void mgmt_smp_complete(struct hci_conn *conn, bool complete)
{
	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
	struct mgmt_pending_cmd *cmd;

	cmd = find_pairing(conn);
	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
static void pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	BT_DBG("status %u", status);

	if (!status)
		return;

	cmd = find_pairing(conn);
	if (!cmd) {
		BT_DBG("Unable to find a pending command");
		return;
	}

	cmd->cmd_complete(cmd, mgmt_status(status));
	mgmt_pending_remove(cmd);
}
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct mgmt_pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_ALREADY_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type, CONN_REASON_PAIR_DEVICE);
	} else {
		u8 addr_type = le_addr_type(cp->addr.type);
		struct hci_conn_params *p;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the peripheral preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
			p->auto_connect = HCI_AUTO_CONN_DISABLED;

		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
					   sec_level, HCI_LE_CONN_TIMEOUT,
					   CONN_REASON_PAIR_DEVICE);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else if (PTR_ERR(conn) == -EOPNOTSUPP)
			status = MGMT_STATUS_NOT_SUPPORTED;
		else if (PTR_ERR(conn) == -ECONNREFUSED)
			status = MGMT_STATUS_REJECTED;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					status, &rp, sizeof(rp));
		goto unlock;
	}

	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	cmd->cmd_complete = pairing_complete;

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = hci_conn_get(conn);

	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true)) {
		cmd->cmd_complete(cmd, 0);
		mgmt_pending_remove(cmd);
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
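/* For reference, this command is normally driven from userspace over the
 * mgmt socket, e.g. via the BlueZ btmgmt tool (shown purely as an
 * illustration; option spelling may differ between versions):
 *
 *   btmgmt pair -c 0x03 -t 0 00:11:22:33:44:55
 *
 * which maps to MGMT_OP_PAIR_DEVICE with io_cap = NoInputNoOutput (0x03)
 * and a BR/EDR address type.
 */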
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (len != sizeof(*cp))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				       MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
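/* The five thin wrappers above all funnel into user_pairing_resp(); the
 * mapping is:
 *
 *   MGMT_OP_PIN_CODE_NEG_REPLY     -> HCI_OP_PIN_CODE_NEG_REPLY
 *   MGMT_OP_USER_CONFIRM_REPLY     -> HCI_OP_USER_CONFIRM_REPLY
 *   MGMT_OP_USER_CONFIRM_NEG_REPLY -> HCI_OP_USER_CONFIRM_NEG_REPLY
 *   MGMT_OP_USER_PASSKEY_REPLY     -> HCI_OP_USER_PASSKEY_REPLY (passkey)
 *   MGMT_OP_USER_PASSKEY_NEG_REPLY -> HCI_OP_USER_PASSKEY_NEG_REPLY
 *
 * For LE address types the reply is routed to SMP instead of sending the
 * HCI opcode.
 */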
static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
	if (!adv_instance)
		return 0;

	/* stop if current instance doesn't need to be changed */
	if (!(adv_instance->flags & flags))
		return 0;

	cancel_adv_timeout(hdev);

	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
	if (!adv_instance)
		return 0;

	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);

	return 0;
}
static int name_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
}
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
static int set_name_sync(struct hci_dev *hdev, void *data)
{
	if (lmp_bredr_capable(hdev)) {
		hci_update_name_sync(hdev);
		hci_update_eir_sync(hdev);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);

	return 0;
}
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

failed:
	hci_dev_unlock(hdev);
	return err;
}
static int appearance_changed_sync(struct hci_dev *hdev, void *data)
{
	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
}
static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_appearance *cp = data;
	u16 appearance;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
				       MGMT_STATUS_NOT_SUPPORTED);

	appearance = le16_to_cpu(cp->appearance);

	hci_dev_lock(hdev);

	if (hdev->appearance != appearance) {
		hdev->appearance = appearance;

		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
					   NULL);

		ext_info_changed(hdev, sk);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
				0);

	hci_dev_unlock(hdev);

	return err;
}
static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_rp_get_phy_configuration rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
				 &rp, sizeof(rp));
}
int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
{
	struct mgmt_ev_phy_configuration_changed ev;

	memset(&ev, 0, sizeof(ev));

	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));

	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
			  sizeof(ev), skip);
}
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
static int set_default_phy_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
	struct hci_cp_le_set_default_phy cp_phy;
	u32 selected_phys = __le32_to_cpu(cp->selected_phys);

	memset(&cp_phy, 0, sizeof(cp_phy));

	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
		cp_phy.all_phys |= 0x01;

	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
		cp_phy.all_phys |= 0x02;

	if (selected_phys & MGMT_PHY_LE_1M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_TX)
		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;

	if (selected_phys & MGMT_PHY_LE_1M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;

	if (selected_phys & MGMT_PHY_LE_2M_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;

	if (selected_phys & MGMT_PHY_LE_CODED_RX)
		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;

	cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
				  sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);

	return 0;
}
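/* Note on the HCI encoding used above: in LE Set Default PHY, all_phys
 * bit 0 means "the host has no preference among TX PHYs" and bit 1 the
 * same for RX, so each bit is set precisely when the selected_phys mask
 * leaves that direction unconstrained.
 *
 * The BR/EDR handling in set_phy_configuration() below is inverted for
 * the EDR bits because HCI_2DH1 and friends are "shall not be used"
 * packet-type bits: selecting an EDR PHY clears its bit, deselecting it
 * sets the bit.
 */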
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_blocked_keys_clear(hdev);

	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b) {
			err = MGMT_STATUS_NO_RESOURCES;
			break;
		}

		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		list_add_rcu(&b->list, &hdev->blocked_keys);
	}
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				 err, NULL, 0);
}
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};

/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
};
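/* All of the experimental feature UUIDs above are stored in the
 * little-endian byte order used on the mgmt wire, i.e. the byte sequence
 * is the reverse of the canonical string form (note how debug_uuid
 * d4992530-... ends in 0x99, 0xd4).
 */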
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[122];   /* Enough space for 6 features: 2 + 20 * 6 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
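/* Reply layout for the command above: a little-endian 16-bit
 * feature_count followed by feature_count fixed-size records, each a
 * 16-byte UUID plus a 32-bit little-endian flags word (BIT(0) = feature
 * enabled), i.e. 20 bytes per record -- which is where the
 * sizeof(*rp) + (20 * idx) response length comes from.
 */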
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
					  struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));

	// Do we need to be atomic with the conn_flags?
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	else
		hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
{
	struct mgmt_ev_exp_feature_changed ev;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);

	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  &ev, sizeof(ev),
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
}
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
#ifdef CONFIG_BT_FEATURE_DEBUG
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;
	u32 flags;

	/* Command requires to use the controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];

	if (val) {
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_ENABLE_LL_PRIVACY);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

		/* Enable LL privacy + supported settings changed */
		flags = BIT(0) | BIT(1);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);

		/* Disable LL privacy + supported settings changed */
		flags = BIT(1);
	}

	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_ll_privacy_feature_changed(val, hdev, sk);

	return err;
}
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
				  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));

	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	}

	bt_dev_info(hdev, "offload codecs enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);

	return err;
}
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
					  u16 data_len)
{
	bool val, changed;
	int err;
	struct mgmt_rp_set_exp_feature rp;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));

	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	}

	if (changed) {
		if (val)
			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
		else
			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	}

	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
		    val, changed);

	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);

	return err;
}
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = cp->param[0] ? true : false;
	if (val)
		err = iso_init();
	else
		err = iso_exit();

	if (!err)
		changed = true;

	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	if (changed)
		exp_feature_changed(hdev, iso_socket_uuid, val, sk);

	return err;
}
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
#ifdef CONFIG_BT_LE
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
#endif

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_set_exp_feature *cp = data;
	size_t i = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	}

	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
}
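/* Sketch, not part of the original source: registering a new
 * experimental feature only requires a UUID constant and a setter with
 * the signature used by set_func above, plus one EXP_FEAT() entry ahead
 * of the NULL terminator in exp_features[]. The names below are
 * hypothetical and the block is compiled out.
 */
#if 0
/* 16 little-endian bytes of the feature UUID */
static const u8 example_feature_uuid[16] = { 0 };

static int set_example_feature_func(struct sock *sk, struct hci_dev *hdev,
				    struct mgmt_cp_set_exp_feature *cp,
				    u16 data_len)
{
	/* validate index/length/value, flip the flag, then reply and
	 * emit exp_feature_changed() on a change, as the setters above do
	 */
	return 0;
}

/* ...and in exp_features[]:
 *	EXP_FEAT(example_feature_uuid, set_example_feature_func),
 */
#endif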
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	u8 status = MGMT_STATUS_INVALID_PARAMS;

	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)",
		   &cp->addr.bdaddr, cp->addr.type);

	hci_dev_lock(hdev);

	supported_flags = hdev->conn_flags;

	memset(&rp, 0, sizeof(rp));

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);
		if (!br_params)
			goto done;

		current_flags = br_params->flags;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (!params)
			goto done;

		current_flags = params->flags;
	}

	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);

	status = MGMT_STATUS_SUCCESS;

done:
	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
				 &rp, sizeof(rp));
}
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
{
	struct mgmt_ev_device_flags_changed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);

	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
}
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);

	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);

	// We should take hci_dev_lock() early, I think.. conn_flags can change
	supported_flags = hdev->conn_flags;

	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
		goto done;
	}

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
							      &cp->addr.bdaddr,
							      cp->addr.type);

		if (br_params) {
			br_params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;
		} else {
			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
				    &cp->addr.bdaddr, cp->addr.type);
		}
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						le_addr_type(cp->addr.type));
		if (params) {
			/* Devices using RPAs can only be programmed into the
			 * acceptlist if LL Privacy has been enabled, otherwise
			 * they cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
			 */
			if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
			    !use_ll_privacy(hdev) &&
			    hci_find_irk_by_addr(hdev, &params->addr,
						 params->addr_type)) {
				bt_dev_warn(hdev,
					    "Cannot set wakeable for RPA");
				goto unlock;
			}

			params->flags = current_flags;
			status = MGMT_STATUS_SUCCESS;

			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
			 * has been set.
			 */
			if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
				hci_update_passive_scan(hdev);
		} else {
			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
				    &cp->addr.bdaddr,
				    le_addr_type(cp->addr.type));
		}
	}

unlock:
	hci_dev_unlock(hdev);

done:
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
}
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
				   u16 handle)
{
	struct mgmt_ev_adv_monitor_added ev;

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
}
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
{
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;

	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cmd) {
		cp = cmd->param;

		if (cp->monitor_handle)
			sk_skip = cmd->sk;
	}

	ev.monitor_handle = cpu_to_le16(handle);

	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
}
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);

	memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	hci_dev_lock(hdev);

	rp.monitor_handle = cpu_to_le16(monitor->handle);

	if (!status) {
		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
		hdev->adv_monitors_cnt++;
		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
			monitor->state = ADV_MONITOR_STATE_REGISTERED;
		hci_update_passive_scan(hdev);
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
}
static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;

	return hci_add_adv_monitor(hdev, monitor);
}
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	hci_dev_lock(hdev);

	if (status)
		goto unlock;

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
{
	if (rssi) {
		m->rssi.low_threshold = rssi->low_threshold;
		m->rssi.low_threshold_timeout =
		    __le16_to_cpu(rssi->low_threshold_timeout);
		m->rssi.high_threshold = rssi->high_threshold;
		m->rssi.high_threshold_timeout =
		    __le16_to_cpu(rssi->high_threshold_timeout);
		m->rssi.sampling_period = rssi->sampling_period;
	} else {
		/* Default values. These numbers are the least constricting
		 * parameters for MSFT API to work, so it behaves as if there
		 * are no rssi parameter to consider. May need to be changed
		 * if other API are to be supported.
		 */
		m->rssi.low_threshold = -127;
		m->rssi.low_threshold_timeout = 60;
		m->rssi.high_threshold = -127;
		m->rssi.high_threshold_timeout = 0;
		m->rssi.sampling_period = 0;
	}
}
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
{
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	int i;

	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_AD_LENGTH ||
		    length > HCI_MAX_AD_LENGTH ||
		    (offset + length) > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;

		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return MGMT_STATUS_NO_RESOURCES;

		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);

		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	}

	return MGMT_STATUS_SUCCESS;
}

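/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR and its RSSI variant: validate the
 * variable-length pattern list (and optional RSSI thresholds), then hand
 * the monitor to __add_adv_patterns_monitor() for registration.
 */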
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
}

static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
{
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);

	BT_DBG("request for %s", hdev->name);

	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (!m) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto done;
	}

	INIT_LIST_HEAD(&m->patterns);

	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);

done:
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
}

static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
{
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	hci_dev_lock(hdev);

	rp.monitor_handle = cp->monitor_handle;

	if (!status)
		hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
}

static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
	u16 handle = __le16_to_cpu(cp->monitor_handle);

	if (!handle)
		return hci_remove_all_adv_monitor(hdev);

	return hci_remove_single_adv_monitor(hdev, handle);
}

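/* MGMT_OP_REMOVE_ADV_MONITOR: handle 0 removes all monitors, any other
 * value removes a single monitor. The work is queued on the cmd_sync
 * machinery like the add path above.
 */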
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_pending_cmd *cmd;
	int err, status;

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	if (err) {
		mgmt_pending_remove(cmd);

		if (err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}

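/* Local OOB data: the controller is queried via cmd_sync and the reply is
 * translated into the mgmt representation. Without BR/EDR Secure
 * Connections only the P-192 hash/randomizer are returned and the reply
 * is shortened accordingly.
 */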
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}

static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;

	if (bredr_sc_enabled(hdev))
		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
	else
		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);

	if (IS_ERR(cmd->skb))
		return PTR_ERR(cmd->skb);

	return 0;
}

static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_remove_remote_oob_data *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;
		goto done;
	}

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	if (err < 0)
		status = MGMT_STATUS_INVALID_PARAMS;
	else
		status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
	return err;
}

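/* Device discovery. The u8-status callbacks below are invoked on discovery
 * state changes, while the int-err variants complete requests queued on
 * the cmd_sync machinery.
 */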
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

	if (!cmd)
		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
{
	switch (type) {
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	case DISCOV_TYPE_INTERLEAVED:
		*mgmt_status = mgmt_le_support(hdev);
		if (*mgmt_status)
			return false;
		fallthrough;
	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);
		if (*mgmt_status)
			return false;
		break;
	default:
		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
		return false;
	}

	return true;
}

static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
				DISCOVERY_FINDING);
}

static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}

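/* Common implementation for Start Discovery and Start Limited Discovery;
 * op selects which mgmt command is being served.
 */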
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}

static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}

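/* MGMT_OP_START_SERVICE_DISCOVERY: like Start Discovery, but with result
 * filtering by RSSI threshold and an optional UUID list that is copied
 * into hdev->discovery.
 */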
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}

void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	hci_dev_lock(hdev);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (cmd) {
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);
	}

	hci_dev_unlock(hdev);
}

static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}

static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
					sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_block_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_FAILED;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_unblock_device *cp = data;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
				  cp->addr.type);
	if (err < 0) {
		status = MGMT_STATUS_INVALID_PARAMS;
		goto done;
	}

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
		   sk);
	status = MGMT_STATUS_SUCCESS;

done:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);

	return err;
}

static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}

static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}

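/* Set Advertising. When the controller cannot be touched (powered off,
 * existing LE connection, active scanning) only the flags are toggled and
 * the new settings are reported directly; otherwise the change is queued
 * on the cmd_sync machinery via set_adv_sync().
 */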
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (err)
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
	else
		bt_dev_dbg(hdev, "status %d", err);
}

static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}

static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}

static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

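/* MGMT_OP_SET_SCAN_PARAMS: interval and window are in 0.625 ms units and
 * are limited to the range 0x0004-0x4000, with window <= interval.
 */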
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
				NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);

	return err;
}

static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}

static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}

static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_free(cmd);
}

static int set_bredr_sync(struct hci_dev *hdev, void *data)
{
	int status;

	status = hci_write_fast_connectable_sync(hdev, false);

	if (!status)
		status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	if (!status)
		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);

	return status;
}

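/* MGMT_OP_SET_BREDR: toggle BR/EDR support on a dual-mode controller.
 * Disabling while powered is rejected, and re-enabling is rejected when a
 * static address or Secure Connections is in use (see the comment below).
 */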
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as their identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restriction applies when Secure Connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when Secure Connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_free(cmd);
}

static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}

static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}

static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

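/* MGMT_OP_SET_PRIVACY: may only be changed while powered off, since it
 * switches the local identity between the public/static address and
 * resolvable private addresses generated from the provided IRK. A value
 * of 0x02 selects limited privacy mode.
 */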
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool irk_is_valid(struct mgmt_irk_info *irk)
{
	switch (irk->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static bool ltk_is_valid(struct mgmt_ltk_info *key)
{
	if (key->initiator != 0x00 && key->initiator != 0x01)
		return false;

	switch (key->addr.type) {
	case BDADDR_LE_PUBLIC:
		return true;

	case BDADDR_LE_RANDOM:
		/* Two most significant bits shall be set */
		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
			return false;
		return true;
	}

	return false;
}

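/* MGMT_OP_LOAD_LONG_TERM_KEYS: replaces the SMP LTK store with the list
 * provided by user space, skipping any entries with blocked key material.
 */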
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
				NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}

static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16 handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}

static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid the client trying to guess when to poll again for
	 * information, we calculate conn info age as a random value between
	 * min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd) {
			err = -ENOMEM;
		} else {
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);
		}

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

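/* Get Clock Info is BR/EDR only: it reads the local clock and, when a
 * peer address is given, the piconet clock of that ACL connection.
 */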
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}

static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn;

	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_read_clock_sync(hdev, &hci_cp);

	/* Make sure connection still exists */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn || conn->state != BT_CONNECTED)
		return MGMT_STATUS_NOT_CONNECTED;

	cmd->user_data = conn;
	hci_cp.handle = cpu_to_le16(conn->handle);
	hci_cp.which = 0x01; /* Piconet clock */

	return hci_read_clock_sync(hdev, &hci_cp);
}

static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}

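/* MGMT_OP_ADD_DEVICE: for BR/EDR only the "incoming connection" action is
 * supported and the address goes on the accept list; for LE the action is
 * mapped to an auto-connect policy on the connection parameters.
 */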
static void device_added(struct sock *sk, struct hci_dev *hdev,
			 bdaddr_t *bdaddr, u8 type, u8 action)
{
	struct mgmt_ev_device_added ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;
	ev.action = action;

	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
}

static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	}

	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					addr_type);
	if (params)
		current_flags = params->flags;

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	supported_flags = hdev->conn_flags;
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void device_removed(struct sock *sk, struct hci_dev *hdev,
			   bdaddr_t *bdaddr, u8 type)
{
	struct mgmt_ev_device_removed ev;

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = type;

	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
}

static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}

static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}

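/* MGMT_OP_LOAD_CONN_PARAM: bulk-load of LE connection parameters; invalid
 * entries are logged and skipped rather than failing the whole command.
 */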
7382 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7385 struct mgmt_cp_load_conn_param *cp = data;
7386 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7387 sizeof(struct mgmt_conn_param));
7388 u16 param_count, expected_len;
7391 if (!lmp_le_capable(hdev))
7392 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7393 MGMT_STATUS_NOT_SUPPORTED);
7395 param_count = __le16_to_cpu(cp->param_count);
7396 if (param_count > max_param_count) {
7397 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7400 MGMT_STATUS_INVALID_PARAMS);
7403 expected_len = struct_size(cp, params, param_count);
7404 if (expected_len != len) {
7405 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7407 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7408 MGMT_STATUS_INVALID_PARAMS);
7411 bt_dev_dbg(hdev, "param_count %u", param_count);
7415 hci_conn_params_clear_disabled(hdev);
7417 for (i = 0; i < param_count; i++) {
7418 struct mgmt_conn_param *param = &cp->params[i];
7419 struct hci_conn_params *hci_param;
7420 u16 min, max, latency, timeout;
7423 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
7426 if (param->addr.type == BDADDR_LE_PUBLIC) {
7427 addr_type = ADDR_LE_DEV_PUBLIC;
7428 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7429 addr_type = ADDR_LE_DEV_RANDOM;
7431 bt_dev_err(hdev, "ignoring invalid connection parameters");
7435 min = le16_to_cpu(param->min_interval);
7436 max = le16_to_cpu(param->max_interval);
7437 latency = le16_to_cpu(param->latency);
7438 timeout = le16_to_cpu(param->timeout);
7440 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7441 min, max, latency, timeout);
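/* hci_check_conn_params() enforces the Core Specification ranges:
 * connection intervals 0x0006-0x0c80 with min <= max, peripheral
 * latency no higher than 0x01f3, and a supervision timeout in
 * 0x000a-0x0c80 that is long enough to cover (1 + latency)
 * connection intervals.
 */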
7443 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7444 bt_dev_err(hdev, "ignoring invalid connection parameters");
7448 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7451 bt_dev_err(hdev, "failed to add connection parameters");
7455 hci_param->conn_min_interval = min;
7456 hci_param->conn_max_interval = max;
7457 hci_param->conn_latency = latency;
7458 hci_param->supervision_timeout = timeout;
7461 hci_dev_unlock(hdev);
7463 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
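/* Load Connection Parameters carries a variable-length payload, so its
 * size is validated in two steps above: param_count is first bounded so
 * that the struct_size() result still fits in the u16 expected_len,
 * then the result must match the received length exactly. As a worked
 * example (assuming the packed 2-byte count header and 15-byte
 * mgmt_conn_param entries), param_count = 3 means
 * expected_len = 2 + 3 * 15 = 47 bytes.
 */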
7467 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7468 void *data, u16 len)
7470 struct mgmt_cp_set_external_config *cp = data;
7474 bt_dev_dbg(hdev, "sock %p", sk);
7476 if (hdev_is_powered(hdev))
7477 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7478 MGMT_STATUS_REJECTED);
7480 if (cp->config != 0x00 && cp->config != 0x01)
7481 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7482 MGMT_STATUS_INVALID_PARAMS);
7484 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7485 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7486 MGMT_STATUS_NOT_SUPPORTED);
7491 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7493 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7495 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7502 err = new_options(hdev, sk);
7504 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7505 mgmt_index_removed(hdev);
7507 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7508 hci_dev_set_flag(hdev, HCI_CONFIG);
7509 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7511 queue_work(hdev->req_workqueue, &hdev->power_on);
7513 set_bit(HCI_RAW, &hdev->flags);
7514 mgmt_index_added(hdev);
7519 hci_dev_unlock(hdev);
7523 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7524 void *data, u16 len)
7526 struct mgmt_cp_set_public_address *cp = data;
7530 bt_dev_dbg(hdev, "sock %p", sk);
7532 if (hdev_is_powered(hdev))
7533 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7534 MGMT_STATUS_REJECTED);
7536 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7537 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7538 MGMT_STATUS_INVALID_PARAMS);
7540 if (!hdev->set_bdaddr)
7541 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7542 MGMT_STATUS_NOT_SUPPORTED);
7546 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7547 bacpy(&hdev->public_addr, &cp->bdaddr);
7549 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7556 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7557 err = new_options(hdev, sk);
7559 if (is_configured(hdev)) {
7560 mgmt_index_removed(hdev);
7562 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7564 hci_dev_set_flag(hdev, HCI_CONFIG);
7565 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7567 queue_work(hdev->req_workqueue, &hdev->power_on);
7571 hci_dev_unlock(hdev);
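/* Set External Configuration and Set Public Address can both supply the
 * last missing piece of a controller's configuration. Once
 * is_configured() becomes true, the controller is withdrawn as an
 * unconfigured index, powered on with HCI_CONFIG and HCI_AUTO_OFF set
 * so the init sequence runs, and then announced as a regular index.
 */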
7575 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
7578 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7579 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7580 u8 *h192, *r192, *h256, *r256;
7581 struct mgmt_pending_cmd *cmd = data;
7582 struct sk_buff *skb = cmd->skb;
7583 u8 status = mgmt_status(err);
7586 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
7591 status = MGMT_STATUS_FAILED;
7592 else if (IS_ERR(skb))
7593 status = mgmt_status(PTR_ERR(skb));
7595 status = mgmt_status(skb->data[0]);
7598 bt_dev_dbg(hdev, "status %u", status);
7600 mgmt_cp = cmd->param;
7603 status = mgmt_status(status);
7610 } else if (!bredr_sc_enabled(hdev)) {
7611 struct hci_rp_read_local_oob_data *rp;
7613 if (skb->len != sizeof(*rp)) {
7614 status = MGMT_STATUS_FAILED;
7617 status = MGMT_STATUS_SUCCESS;
7618 rp = (void *)skb->data;
7620 eir_len = 5 + 18 + 18;
7627 struct hci_rp_read_local_oob_ext_data *rp;
7629 if (skb->len != sizeof(*rp)) {
7630 status = MGMT_STATUS_FAILED;
7633 status = MGMT_STATUS_SUCCESS;
7634 rp = (void *)skb->data;
7636 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7637 eir_len = 5 + 18 + 18;
7641 eir_len = 5 + 18 + 18 + 18 + 18;
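/* Each EIR field costs one length byte, one type byte and the
 * payload: 5 covers the 3-byte Class of Device, and every 16-byte
 * SSP hash or randomizer adds 18. With Secure Connections both the
 * P-192 and P-256 pairs are present, hence the two extra 18s.
 */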
7651 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7658 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7659 hdev->dev_class, 3);
7662 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7663 EIR_SSP_HASH_C192, h192, 16);
7664 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7665 EIR_SSP_RAND_R192, r192, 16);
7669 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7670 EIR_SSP_HASH_C256, h256, 16);
7671 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7672 EIR_SSP_RAND_R256, r256, 16);
7676 mgmt_rp->type = mgmt_cp->type;
7677 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7679 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7680 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7681 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7682 if (err < 0 || status)
7685 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7687 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7688 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7689 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7691 if (skb && !IS_ERR(skb))
7695 mgmt_pending_remove(cmd);
7698 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7699 struct mgmt_cp_read_local_oob_ext_data *cp)
7701 struct mgmt_pending_cmd *cmd;
7704 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7709 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7710 read_local_oob_ext_data_complete);
7713 mgmt_pending_remove(cmd);
7720 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7721 void *data, u16 data_len)
7723 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7724 struct mgmt_rp_read_local_oob_ext_data *rp;
7727 u8 status, flags, role, addr[7], hash[16], rand[16];
7730 bt_dev_dbg(hdev, "sock %p", sk);
7732 if (hdev_is_powered(hdev)) {
7734 case BIT(BDADDR_BREDR):
7735 status = mgmt_bredr_support(hdev);
7741 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7742 status = mgmt_le_support(hdev);
7746 eir_len = 9 + 3 + 18 + 18 + 3;
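/* 9 = LE address field (6-byte address + 1 type byte), 3 = LE Role,
 * 18 + 18 = SC Confirmation and Random values, 3 = Flags; each
 * figure includes the field's length and type bytes.
 */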
7749 status = MGMT_STATUS_INVALID_PARAMS;
7754 status = MGMT_STATUS_NOT_POWERED;
7758 rp_len = sizeof(*rp) + eir_len;
7759 rp = kmalloc(rp_len, GFP_ATOMIC);
7763 if (!status && !lmp_ssp_capable(hdev)) {
7764 status = MGMT_STATUS_NOT_SUPPORTED;
7775 case BIT(BDADDR_BREDR):
7776 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7777 err = read_local_ssp_oob_req(hdev, sk, cp);
7778 hci_dev_unlock(hdev);
7782 status = MGMT_STATUS_FAILED;
7785 eir_len = eir_append_data(rp->eir, eir_len,
7787 hdev->dev_class, 3);
7790 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7791 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7792 smp_generate_oob(hdev, hash, rand) < 0) {
7793 hci_dev_unlock(hdev);
7794 status = MGMT_STATUS_FAILED;
7798 /* This should return the active RPA, but since the RPA
7799 * is only programmed on demand, it is really hard to fill
7800 * this in at the moment. For now disallow retrieving
7801 * local out-of-band data when privacy is in use.
7803 * Returning the identity address will not help here since
7804 * pairing happens before the identity resolving key is
7805 * known and thus the connection establishment happens
7806 * based on the RPA and not the identity address.
7808 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7809 hci_dev_unlock(hdev);
7810 status = MGMT_STATUS_REJECTED;
7814 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7815 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7816 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7817 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7818 memcpy(addr, &hdev->static_addr, 6);
7821 memcpy(addr, &hdev->bdaddr, 6);
7825 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7826 addr, sizeof(addr));
7828 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7833 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7834 &role, sizeof(role));
7836 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7837 eir_len = eir_append_data(rp->eir, eir_len,
7839 hash, sizeof(hash));
7841 eir_len = eir_append_data(rp->eir, eir_len,
7843 rand, sizeof(rand));
7846 flags = mgmt_get_adv_discov_flags(hdev);
7848 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7849 flags |= LE_AD_NO_BREDR;
7851 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7852 &flags, sizeof(flags));
7856 hci_dev_unlock(hdev);
7858 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7860 status = MGMT_STATUS_SUCCESS;
7863 rp->type = cp->type;
7864 rp->eir_len = cpu_to_le16(eir_len);
7866 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7867 status, rp, sizeof(*rp) + eir_len);
7868 if (err < 0 || status)
7871 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7872 rp, sizeof(*rp) + eir_len,
7873 HCI_MGMT_OOB_DATA_EVENTS, sk);
7881 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7885 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7886 flags |= MGMT_ADV_FLAG_DISCOV;
7887 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7888 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7889 flags |= MGMT_ADV_FLAG_APPEARANCE;
7890 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7891 flags |= MGMT_ADV_PARAM_DURATION;
7892 flags |= MGMT_ADV_PARAM_TIMEOUT;
7893 flags |= MGMT_ADV_PARAM_INTERVALS;
7894 flags |= MGMT_ADV_PARAM_TX_POWER;
7895 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7897 /* In extended adv, TX_POWER returned from Set Adv Param
7898 * will always be valid.
7900 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7901 ext_adv_capable(hdev))
7902 flags |= MGMT_ADV_FLAG_TX_POWER;
7904 if (ext_adv_capable(hdev)) {
7905 flags |= MGMT_ADV_FLAG_SEC_1M;
7906 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7907 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7909 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7910 flags |= MGMT_ADV_FLAG_SEC_2M;
7912 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7913 flags |= MGMT_ADV_FLAG_SEC_CODED;
7919 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7920 void *data, u16 data_len)
7922 struct mgmt_rp_read_adv_features *rp;
7925 struct adv_info *adv_instance;
7926 u32 supported_flags;
7929 bt_dev_dbg(hdev, "sock %p", sk);
7931 if (!lmp_le_capable(hdev))
7932 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7933 MGMT_STATUS_REJECTED);
7937 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7938 rp = kmalloc(rp_len, GFP_ATOMIC);
7940 hci_dev_unlock(hdev);
7944 supported_flags = get_supported_adv_flags(hdev);
7946 rp->supported_flags = cpu_to_le32(supported_flags);
7947 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7948 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7949 rp->max_instances = hdev->le_num_of_adv_sets;
7950 rp->num_instances = hdev->adv_instance_cnt;
7952 instance = rp->instance;
7953 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7954 *instance = adv_instance->instance;
7958 hci_dev_unlock(hdev);
7960 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7961 MGMT_STATUS_SUCCESS, rp, rp_len);
7968 static u8 calculate_name_len(struct hci_dev *hdev)
7970 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7972 return eir_append_local_name(hdev, buf, 0);
7975 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7978 u8 max_len = HCI_MAX_AD_LENGTH;
7981 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7982 MGMT_ADV_FLAG_LIMITED_DISCOV |
7983 MGMT_ADV_FLAG_MANAGED_FLAGS))
7986 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7989 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7990 max_len -= calculate_name_len(hdev);
7992 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
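/* Each managed field consumes its full EIR footprint (length byte +
 * type byte + payload) from the space left for user data: Flags and
 * TX Power take 3 bytes each, Appearance takes 4, and the local name
 * takes whatever eir_append_local_name() would emit.
 */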
7999 static bool flags_managed(u32 adv_flags)
8001 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
8002 MGMT_ADV_FLAG_LIMITED_DISCOV |
8003 MGMT_ADV_FLAG_MANAGED_FLAGS);
8006 static bool tx_power_managed(u32 adv_flags)
8008 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
8011 static bool name_managed(u32 adv_flags)
8013 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
8016 static bool appearance_managed(u32 adv_flags)
8018 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
8021 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
8022 u8 len, bool is_adv_data)
8027 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
8032 /* Make sure that the data is correctly formatted. */
8033 for (i = 0; i < len; i += (cur_len + 1)) {
8039 if (data[i + 1] == EIR_FLAGS &&
8040 (!is_adv_data || flags_managed(adv_flags)))
8043 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
8046 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
8049 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
8052 if (data[i + 1] == EIR_APPEARANCE &&
8053 appearance_managed(adv_flags))
8056 /* If the current field length would exceed the total data
8057 * length, then it's invalid.
8059 if (i + cur_len >= len)
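/* Worked example: { 0x02, 0x0a, 0x00, 0x05, 0x03, 0x0d, 0x18, 0x0f,
 * 0x18 } parses as a 3-byte TX Power field (cur_len 0x02, type 0x0a)
 * followed by a complete 16-bit UUID list (cur_len 0x05, type 0x03)
 * holding 0x180d and 0x180f. The TX Power field would be rejected
 * here whenever tx_power_managed() says the kernel owns that field.
 */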
8066 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8068 u32 supported_flags, phy_flags;
8070 /* The current implementation only supports a subset of the specified
8071 * flags. Also need to check mutual exclusiveness of sec flags.
8073 supported_flags = get_supported_adv_flags(hdev);
8074 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8075 if (adv_flags & ~supported_flags ||
8076 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
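/* phy_flags & -phy_flags isolates the lowest set bit, so the XOR is
 * non-zero exactly when more than one secondary-PHY flag is set:
 * SEC_1M | SEC_2M survives the mask and is rejected, while any single
 * SEC_* bit XORs to zero and passes.
 */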
8082 static bool adv_busy(struct hci_dev *hdev)
8084 return pending_find(MGMT_OP_SET_LE, hdev);
8087 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8090 struct adv_info *adv, *n;
8092 bt_dev_dbg(hdev, "err %d", err);
8096 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8103 adv->pending = false;
8107 instance = adv->instance;
8109 if (hdev->cur_adv_instance == instance)
8110 cancel_adv_timeout(hdev);
8112 hci_remove_adv_instance(hdev, instance);
8113 mgmt_advertising_removed(sk, hdev, instance);
8116 hci_dev_unlock(hdev);
8119 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8121 struct mgmt_pending_cmd *cmd = data;
8122 struct mgmt_cp_add_advertising *cp = cmd->param;
8123 struct mgmt_rp_add_advertising rp;
8125 memset(&rp, 0, sizeof(rp));
8127 rp.instance = cp->instance;
8130 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8133 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8134 mgmt_status(err), &rp, sizeof(rp));
8136 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8138 mgmt_pending_free(cmd);
8141 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8143 struct mgmt_pending_cmd *cmd = data;
8144 struct mgmt_cp_add_advertising *cp = cmd->param;
8146 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8149 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8150 void *data, u16 data_len)
8152 struct mgmt_cp_add_advertising *cp = data;
8153 struct mgmt_rp_add_advertising rp;
8156 u16 timeout, duration;
8157 unsigned int prev_instance_cnt;
8158 u8 schedule_instance = 0;
8159 struct adv_info *adv, *next_instance;
8161 struct mgmt_pending_cmd *cmd;
8163 bt_dev_dbg(hdev, "sock %p", sk);
8165 status = mgmt_le_support(hdev);
8167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8170 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8171 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8172 MGMT_STATUS_INVALID_PARAMS);
8174 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8175 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8176 MGMT_STATUS_INVALID_PARAMS);
8178 flags = __le32_to_cpu(cp->flags);
8179 timeout = __le16_to_cpu(cp->timeout);
8180 duration = __le16_to_cpu(cp->duration);
8182 if (!requested_adv_flags_are_valid(hdev, flags))
8183 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8184 MGMT_STATUS_INVALID_PARAMS);
8188 if (timeout && !hdev_is_powered(hdev)) {
8189 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8190 MGMT_STATUS_REJECTED);
8194 if (adv_busy(hdev)) {
8195 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8200 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8201 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8202 cp->scan_rsp_len, false)) {
8203 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8204 MGMT_STATUS_INVALID_PARAMS);
8208 prev_instance_cnt = hdev->adv_instance_cnt;
8210 adv = hci_add_adv_instance(hdev, cp->instance, flags,
8211 cp->adv_data_len, cp->data,
8213 cp->data + cp->adv_data_len,
8215 HCI_ADV_TX_POWER_NO_PREFERENCE,
8216 hdev->le_adv_min_interval,
8217 hdev->le_adv_max_interval);
8219 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8220 MGMT_STATUS_FAILED);
8224 /* Only trigger an advertising added event if a new instance was
8227 if (hdev->adv_instance_cnt > prev_instance_cnt)
8228 mgmt_advertising_added(sk, hdev, cp->instance);
8230 if (hdev->cur_adv_instance == cp->instance) {
8231 /* If the currently advertised instance is being changed then
8232 * cancel the current advertising and schedule the next
8233 * instance. If there is only one instance then the overridden
8234 * advertising data will be visible right away.
8236 cancel_adv_timeout(hdev);
8238 next_instance = hci_get_next_instance(hdev, cp->instance);
8240 schedule_instance = next_instance->instance;
8241 } else if (!hdev->adv_instance_timeout) {
8242 /* Immediately advertise the new instance if no other
8243 * instance is currently being advertised.
8245 schedule_instance = cp->instance;
8248 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8249 * there is no instance to be advertised then we have no HCI
8250 * communication to make. Simply return.
8252 if (!hdev_is_powered(hdev) ||
8253 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8254 !schedule_instance) {
8255 rp.instance = cp->instance;
8256 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8257 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8261 /* We're good to go, update advertising data, parameters, and start
8264 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
8271 cp->instance = schedule_instance;
8273 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8274 add_advertising_complete);
8276 mgmt_pending_free(cmd);
8279 hci_dev_unlock(hdev);
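/* The data_len check in add_advertising() mirrors the command's wire
 * format: a fixed header followed by adv_data_len bytes of advertising
 * data and scan_rsp_len bytes of scan response data, back to back in
 * cp->data. A sketch of how a sender would size such a request
 * (illustrative only; the helper name is hypothetical):
 */
static inline u16 example_add_adv_cmd_len(u8 adv_data_len, u8 scan_rsp_len)
{
	/* Fixed header plus both variable-length blobs */
	return sizeof(struct mgmt_cp_add_advertising) +
	       adv_data_len + scan_rsp_len;
}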
8284 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8287 struct mgmt_pending_cmd *cmd = data;
8288 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8289 struct mgmt_rp_add_ext_adv_params rp;
8290 struct adv_info *adv;
8293 BT_DBG("%s", hdev->name);
8297 adv = hci_find_adv_instance(hdev, cp->instance);
8301 rp.instance = cp->instance;
8302 rp.tx_power = adv->tx_power;
8304 /* While we're at it, inform userspace of the available space for this
8305 * advertisement, given the flags that will be used.
8307 flags = __le32_to_cpu(cp->flags);
8308 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8309 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8312 /* If this advertisement was previously advertising and we
8313 * failed to update it, we signal that it has been removed and
8314 * delete its structure
8317 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8319 hci_remove_adv_instance(hdev, cp->instance);
8321 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8324 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8325 mgmt_status(err), &rp, sizeof(rp));
8330 mgmt_pending_free(cmd);
8332 hci_dev_unlock(hdev);
8335 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8337 struct mgmt_pending_cmd *cmd = data;
8338 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8340 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8343 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8344 void *data, u16 data_len)
8346 struct mgmt_cp_add_ext_adv_params *cp = data;
8347 struct mgmt_rp_add_ext_adv_params rp;
8348 struct mgmt_pending_cmd *cmd = NULL;
8349 struct adv_info *adv;
8350 u32 flags, min_interval, max_interval;
8351 u16 timeout, duration;
8356 BT_DBG("%s", hdev->name);
8358 status = mgmt_le_support(hdev);
8360 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8363 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8364 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8365 MGMT_STATUS_INVALID_PARAMS);
8367 /* The purpose of breaking add_advertising into two separate MGMT calls
8368 * for params and data is to allow more parameters to be added to this
8369 * structure in the future. For this reason, we verify that we have the
8370 * bare minimum structure we know of when the interface was defined. Any
8371 * extra parameters we don't know about will be ignored in this request.
8373 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8374 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8375 MGMT_STATUS_INVALID_PARAMS);
8377 flags = __le32_to_cpu(cp->flags);
8379 if (!requested_adv_flags_are_valid(hdev, flags))
8380 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8381 MGMT_STATUS_INVALID_PARAMS);
8385 /* In new interface, we require that we are powered to register */
8386 if (!hdev_is_powered(hdev)) {
8387 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8388 MGMT_STATUS_REJECTED);
8392 if (adv_busy(hdev)) {
8393 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8398 /* Parse defined parameters from request, use defaults otherwise */
8399 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8400 __le16_to_cpu(cp->timeout) : 0;
8402 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8403 __le16_to_cpu(cp->duration) :
8404 hdev->def_multi_adv_rotation_duration;
8406 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8407 __le32_to_cpu(cp->min_interval) :
8408 hdev->le_adv_min_interval;
8410 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8411 __le32_to_cpu(cp->max_interval) :
8412 hdev->le_adv_max_interval;
8414 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8416 HCI_ADV_TX_POWER_NO_PREFERENCE;
8418 /* Create advertising instance with no advertising or response data */
8419 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
8420 timeout, duration, tx_power, min_interval,
8424 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8425 MGMT_STATUS_FAILED);
8429 /* Submit request for advertising params if ext adv available */
8430 if (ext_adv_capable(hdev)) {
8431 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8435 hci_remove_adv_instance(hdev, cp->instance);
8439 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8440 add_ext_adv_params_complete);
8442 mgmt_pending_free(cmd);
8444 rp.instance = cp->instance;
8445 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8446 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8447 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8448 err = mgmt_cmd_complete(sk, hdev->id,
8449 MGMT_OP_ADD_EXT_ADV_PARAMS,
8450 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8454 hci_dev_unlock(hdev);
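/* Under the extended interface an instance is registered in two steps:
 * Add Extended Advertising Parameters first, whose reply reports the
 * max_adv_data_len/max_scan_rsp_len available for the chosen flags,
 * then Add Extended Advertising Data with payloads sized to fit. The
 * instance created here stays pending until the data call completes.
 */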
8459 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8461 struct mgmt_pending_cmd *cmd = data;
8462 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8463 struct mgmt_rp_add_advertising rp;
8465 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8467 memset(&rp, 0, sizeof(rp));
8469 rp.instance = cp->instance;
8472 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8475 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8476 mgmt_status(err), &rp, sizeof(rp));
8478 mgmt_pending_free(cmd);
8481 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8483 struct mgmt_pending_cmd *cmd = data;
8484 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8487 if (ext_adv_capable(hdev)) {
8488 err = hci_update_adv_data_sync(hdev, cp->instance);
8492 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8496 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8499 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8502 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8505 struct mgmt_cp_add_ext_adv_data *cp = data;
8506 struct mgmt_rp_add_ext_adv_data rp;
8507 u8 schedule_instance = 0;
8508 struct adv_info *next_instance;
8509 struct adv_info *adv_instance;
8511 struct mgmt_pending_cmd *cmd;
8513 BT_DBG("%s", hdev->name);
8517 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8519 if (!adv_instance) {
8520 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8521 MGMT_STATUS_INVALID_PARAMS);
8525 /* In new interface, we require that we are powered to register */
8526 if (!hdev_is_powered(hdev)) {
8527 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8528 MGMT_STATUS_REJECTED);
8529 goto clear_new_instance;
8532 if (adv_busy(hdev)) {
8533 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8535 goto clear_new_instance;
8538 /* Validate new data */
8539 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8540 cp->adv_data_len, true) ||
8541 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8542 cp->adv_data_len, cp->scan_rsp_len, false)) {
8543 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8544 MGMT_STATUS_INVALID_PARAMS);
8545 goto clear_new_instance;
8548 /* Set the data in the advertising instance */
8549 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8550 cp->data, cp->scan_rsp_len,
8551 cp->data + cp->adv_data_len);
8553 /* If using software rotation, determine next instance to use */
8554 if (hdev->cur_adv_instance == cp->instance) {
8555 /* If the currently advertised instance is being changed
8556 * then cancel the current advertising and schedule the
8557 * next instance. If there is only one instance then the
8558 * overridden advertising data will be visible right
8561 cancel_adv_timeout(hdev);
8563 next_instance = hci_get_next_instance(hdev, cp->instance);
8565 schedule_instance = next_instance->instance;
8566 } else if (!hdev->adv_instance_timeout) {
8567 /* Immediately advertise the new instance if no other
8568 * instance is currently being advertised.
8570 schedule_instance = cp->instance;
8573 /* If the HCI_ADVERTISING flag is set or there is no instance to
8574 * be advertised then we have no HCI communication to make.
8577 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
8578 if (adv_instance->pending) {
8579 mgmt_advertising_added(sk, hdev, cp->instance);
8580 adv_instance->pending = false;
8582 rp.instance = cp->instance;
8583 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8584 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8588 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8592 goto clear_new_instance;
8595 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
8596 add_ext_adv_data_complete);
8598 mgmt_pending_free(cmd);
8599 goto clear_new_instance;
8602 /* We were successful in updating data, so trigger advertising_added
8603 * event if this is an instance that wasn't previously advertising. If
8604 * a failure occurs in the requests we initiated, we will remove the
8605 * instance again in add_advertising_complete
8607 if (adv_instance->pending)
8608 mgmt_advertising_added(sk, hdev, cp->instance);
8613 hci_remove_adv_instance(hdev, cp->instance);
8616 hci_dev_unlock(hdev);
8621 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8624 struct mgmt_pending_cmd *cmd = data;
8625 struct mgmt_cp_remove_advertising *cp = cmd->param;
8626 struct mgmt_rp_remove_advertising rp;
8628 bt_dev_dbg(hdev, "err %d", err);
8630 memset(&rp, 0, sizeof(rp));
8631 rp.instance = cp->instance;
8634 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8637 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8638 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8640 mgmt_pending_free(cmd);
8643 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8645 struct mgmt_pending_cmd *cmd = data;
8646 struct mgmt_cp_remove_advertising *cp = cmd->param;
8649 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8653 if (list_empty(&hdev->adv_instances))
8654 err = hci_disable_advertising_sync(hdev);
8659 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8660 void *data, u16 data_len)
8662 struct mgmt_cp_remove_advertising *cp = data;
8663 struct mgmt_pending_cmd *cmd;
8666 bt_dev_dbg(hdev, "sock %p", sk);
8670 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8671 err = mgmt_cmd_status(sk, hdev->id,
8672 MGMT_OP_REMOVE_ADVERTISING,
8673 MGMT_STATUS_INVALID_PARAMS);
8677 if (pending_find(MGMT_OP_SET_LE, hdev)) {
8678 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8683 if (list_empty(&hdev->adv_instances)) {
8684 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8685 MGMT_STATUS_INVALID_PARAMS);
8689 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8696 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
8697 remove_advertising_complete);
8699 mgmt_pending_free(cmd);
8702 hci_dev_unlock(hdev);
8707 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8708 void *data, u16 data_len)
8710 struct mgmt_cp_get_adv_size_info *cp = data;
8711 struct mgmt_rp_get_adv_size_info rp;
8712 u32 flags, supported_flags;
8714 bt_dev_dbg(hdev, "sock %p", sk);
8716 if (!lmp_le_capable(hdev))
8717 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8718 MGMT_STATUS_REJECTED);
8720 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8721 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8722 MGMT_STATUS_INVALID_PARAMS);
8724 flags = __le32_to_cpu(cp->flags);
8726 /* The current implementation only supports a subset of the specified
8729 supported_flags = get_supported_adv_flags(hdev);
8730 if (flags & ~supported_flags)
8731 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8732 MGMT_STATUS_INVALID_PARAMS);
8734 rp.instance = cp->instance;
8735 rp.flags = cp->flags;
8736 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8737 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8739 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8740 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8743 static const struct hci_mgmt_handler mgmt_handlers[] = {
8744 { NULL }, /* 0x0000 (no command) */
8745 { read_version, MGMT_READ_VERSION_SIZE,
8747 HCI_MGMT_UNTRUSTED },
8748 { read_commands, MGMT_READ_COMMANDS_SIZE,
8750 HCI_MGMT_UNTRUSTED },
8751 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8753 HCI_MGMT_UNTRUSTED },
8754 { read_controller_info, MGMT_READ_INFO_SIZE,
8755 HCI_MGMT_UNTRUSTED },
8756 { set_powered, MGMT_SETTING_SIZE },
8757 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8758 { set_connectable, MGMT_SETTING_SIZE },
8759 { set_fast_connectable, MGMT_SETTING_SIZE },
8760 { set_bondable, MGMT_SETTING_SIZE },
8761 { set_link_security, MGMT_SETTING_SIZE },
8762 { set_ssp, MGMT_SETTING_SIZE },
8763 { set_hs, MGMT_SETTING_SIZE },
8764 { set_le, MGMT_SETTING_SIZE },
8765 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8766 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8767 { add_uuid, MGMT_ADD_UUID_SIZE },
8768 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8769 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8771 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8773 { disconnect, MGMT_DISCONNECT_SIZE },
8774 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8775 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8776 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8777 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8778 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8779 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8780 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8781 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8782 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8783 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8784 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8785 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8786 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8788 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8789 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8790 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8791 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8792 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8793 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8794 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8795 { set_advertising, MGMT_SETTING_SIZE },
8796 { set_bredr, MGMT_SETTING_SIZE },
8797 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8798 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8799 { set_secure_conn, MGMT_SETTING_SIZE },
8800 { set_debug_keys, MGMT_SETTING_SIZE },
8801 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8802 { load_irks, MGMT_LOAD_IRKS_SIZE,
8804 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8805 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8806 { add_device, MGMT_ADD_DEVICE_SIZE },
8807 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8808 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8810 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8812 HCI_MGMT_UNTRUSTED },
8813 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8814 HCI_MGMT_UNCONFIGURED |
8815 HCI_MGMT_UNTRUSTED },
8816 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8817 HCI_MGMT_UNCONFIGURED },
8818 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8819 HCI_MGMT_UNCONFIGURED },
8820 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8822 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8823 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8825 HCI_MGMT_UNTRUSTED },
8826 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8827 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8829 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8830 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8831 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8832 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8833 HCI_MGMT_UNTRUSTED },
8834 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8835 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8836 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8837 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8839 { set_wideband_speech, MGMT_SETTING_SIZE },
8840 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8841 HCI_MGMT_UNTRUSTED },
8842 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8843 HCI_MGMT_UNTRUSTED |
8844 HCI_MGMT_HDEV_OPTIONAL },
8845 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8847 HCI_MGMT_HDEV_OPTIONAL },
8848 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8849 HCI_MGMT_UNTRUSTED },
8850 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8852 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8853 HCI_MGMT_UNTRUSTED },
8854 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8856 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8857 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8858 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8859 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8861 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8862 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8864 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8866 { add_adv_patterns_monitor_rssi,
8867 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
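/* Entries are indexed by opcode: each pairs a handler with its expected
 * parameter size (a minimum when HCI_MGMT_VAR_LEN is set, an exact size
 * otherwise) plus flags such as HCI_MGMT_UNTRUSTED (callable from
 * non-privileged sockets) and HCI_MGMT_UNCONFIGURED (permitted while
 * the controller is still unconfigured).
 */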
8871 void mgmt_index_added(struct hci_dev *hdev)
8873 struct mgmt_ev_ext_index ev;
8875 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8878 switch (hdev->dev_type) {
8880 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8881 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8882 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8885 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8886 HCI_MGMT_INDEX_EVENTS);
8899 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8900 HCI_MGMT_EXT_INDEX_EVENTS);
8903 void mgmt_index_removed(struct hci_dev *hdev)
8905 struct mgmt_ev_ext_index ev;
8906 u8 status = MGMT_STATUS_INVALID_INDEX;
8908 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8911 switch (hdev->dev_type) {
8913 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8915 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8916 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8917 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8920 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8921 HCI_MGMT_INDEX_EVENTS);
8934 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8935 HCI_MGMT_EXT_INDEX_EVENTS);
8937 /* Cancel any remaining timed work */
8938 if (!hci_dev_test_flag(hdev, HCI_MGMT))
8940 cancel_delayed_work_sync(&hdev->discov_off);
8941 cancel_delayed_work_sync(&hdev->service_cache);
8942 cancel_delayed_work_sync(&hdev->rpa_expired);
8945 void mgmt_power_on(struct hci_dev *hdev, int err)
8947 struct cmd_lookup match = { NULL, hdev };
8949 bt_dev_dbg(hdev, "err %d", err);
8954 restart_le_actions(hdev);
8955 hci_update_passive_scan(hdev);
8958 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8960 new_settings(hdev, match.sk);
8965 hci_dev_unlock(hdev);
8968 void __mgmt_power_off(struct hci_dev *hdev)
8970 struct cmd_lookup match = { NULL, hdev };
8971 u8 status, zero_cod[] = { 0, 0, 0 };
8973 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8975 /* If the power off is because of hdev unregistration let us
8976 * use the appropriate INVALID_INDEX status. Otherwise use
8977 * NOT_POWERED. We cover both scenarios here since later in
8978 * mgmt_index_removed() any hci_conn callbacks will have already
8979 * been triggered, potentially causing misleading DISCONNECTED
8982 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8983 status = MGMT_STATUS_INVALID_INDEX;
8985 status = MGMT_STATUS_NOT_POWERED;
8987 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8989 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8990 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8991 zero_cod, sizeof(zero_cod),
8992 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8993 ext_info_changed(hdev, NULL);
8996 new_settings(hdev, match.sk);
9002 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
9004 struct mgmt_pending_cmd *cmd;
9007 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9011 if (err == -ERFKILL)
9012 status = MGMT_STATUS_RFKILLED;
9014 status = MGMT_STATUS_FAILED;
9016 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
9018 mgmt_pending_remove(cmd);
9021 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
9024 struct mgmt_ev_new_link_key ev;
9026 memset(&ev, 0, sizeof(ev));
9028 ev.store_hint = persistent;
9029 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9030 ev.key.addr.type = BDADDR_BREDR;
9031 ev.key.type = key->type;
9032 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
9033 ev.key.pin_len = key->pin_len;
9035 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
9038 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
9040 switch (ltk->type) {
9042 case SMP_LTK_RESPONDER:
9043 if (ltk->authenticated)
9044 return MGMT_LTK_AUTHENTICATED;
9045 return MGMT_LTK_UNAUTHENTICATED;
9047 if (ltk->authenticated)
9048 return MGMT_LTK_P256_AUTH;
9049 return MGMT_LTK_P256_UNAUTH;
9050 case SMP_LTK_P256_DEBUG:
9051 return MGMT_LTK_P256_DEBUG;
9054 return MGMT_LTK_UNAUTHENTICATED;
9057 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
9059 struct mgmt_ev_new_long_term_key ev;
9061 memset(&ev, 0, sizeof(ev));
9063 /* Devices using resolvable or non-resolvable random addresses
9064 * without providing an identity resolving key don't need
9065 * their long term keys stored. Their addresses will change the
9068 * Only when a remote device provides an identity address do we
9069 * make sure the long term key is stored. If the remote
9070 * identity is known, the long term keys are internally
9071 * mapped to the identity address. So allow static random
9072 * and public addresses here.
9074 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9075 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9076 ev.store_hint = 0x00;
9078 ev.store_hint = persistent;
9080 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9081 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9082 ev.key.type = mgmt_ltk_type(key);
9083 ev.key.enc_size = key->enc_size;
9084 ev.key.ediv = key->ediv;
9085 ev.key.rand = key->rand;
9087 if (key->type == SMP_LTK)
9088 ev.key.initiator = 1;
9090 /* Make sure we copy only the significant bytes based on the
9091 * encryption key size, and set the rest of the value to zeroes.
9093 memcpy(ev.key.val, key->val, key->enc_size);
9094 memset(ev.key.val + key->enc_size, 0,
9095 sizeof(ev.key.val) - key->enc_size);
9097 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
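/* The (b[5] & 0xc0) != 0xc0 test above tells the LE random address
 * sub-types apart by their two most significant bits (bdaddr_t is
 * little endian, so b[5] is the MSB): 11 marks a static random address
 * whose keys are worth storing, while 01 (resolvable private) and 00
 * (non-resolvable private) change over time.
 */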
9100 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9102 struct mgmt_ev_new_irk ev;
9104 memset(&ev, 0, sizeof(ev));
9106 ev.store_hint = persistent;
9108 bacpy(&ev.rpa, &irk->rpa);
9109 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9110 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9111 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9113 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9116 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9119 struct mgmt_ev_new_csrk ev;
9121 memset(&ev, 0, sizeof(ev));
9123 /* Devices using resolvable or non-resolvable random addresses
9124 * without providing an identity resolving key don't need
9125 * their signature resolving keys stored. Their addresses will change
9126 * the next time around.
9128 * Only when a remote device provides an identity address do we
9129 * make sure the signature resolving key is stored. So allow
9130 * static random and public addresses here.
9132 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9133 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9134 ev.store_hint = 0x00;
9136 ev.store_hint = persistent;
9138 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9139 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9140 ev.key.type = csrk->type;
9141 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9143 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
9146 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9147 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9148 u16 max_interval, u16 latency, u16 timeout)
9150 struct mgmt_ev_new_conn_param ev;
9152 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9155 memset(&ev, 0, sizeof(ev));
9156 bacpy(&ev.addr.bdaddr, bdaddr);
9157 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9158 ev.store_hint = store_hint;
9159 ev.min_interval = cpu_to_le16(min_interval);
9160 ev.max_interval = cpu_to_le16(max_interval);
9161 ev.latency = cpu_to_le16(latency);
9162 ev.timeout = cpu_to_le16(timeout);
9164 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9167 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9168 u8 *name, u8 name_len)
9170 struct sk_buff *skb;
9171 struct mgmt_ev_device_connected *ev;
9175 /* allocate buffer for LE or BR/EDR adv */
9176 if (conn->le_adv_data_len > 0)
9177 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9178 sizeof(*ev) + conn->le_adv_data_len);
9180 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9181 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9182 eir_precalc_len(sizeof(conn->dev_class)));
9184 ev = skb_put(skb, sizeof(*ev));
9185 bacpy(&ev->addr.bdaddr, &conn->dst);
9186 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9189 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9191 ev->flags = __cpu_to_le32(flags);
9193 /* We must ensure that the EIR Data fields are ordered and
9194 * unique. Keep it simple for now and avoid the problem by not
9195 * adding any BR/EDR data to the LE adv.
9197 if (conn->le_adv_data_len > 0) {
9198 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9199 eir_len = conn->le_adv_data_len;
9202 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9204 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9205 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9206 conn->dev_class, sizeof(conn->dev_class));
9209 ev->eir_len = cpu_to_le16(eir_len);
9211 mgmt_event_skb(skb, NULL);
9214 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9216 struct sock **sk = data;
9218 cmd->cmd_complete(cmd, 0);
9223 mgmt_pending_remove(cmd);
9226 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9228 struct hci_dev *hdev = data;
9229 struct mgmt_cp_unpair_device *cp = cmd->param;
9231 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9233 cmd->cmd_complete(cmd, 0);
9234 mgmt_pending_remove(cmd);
9237 bool mgmt_powering_down(struct hci_dev *hdev)
9239 struct mgmt_pending_cmd *cmd;
9240 struct mgmt_mode *cp;
9242 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9253 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9254 u8 link_type, u8 addr_type, u8 reason,
9255 bool mgmt_connected)
9257 struct mgmt_ev_device_disconnected ev;
9258 struct sock *sk = NULL;
9260 /* The connection is still in hci_conn_hash so test for 1
9261 * instead of 0 to know if this is the last one.
9263 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9264 cancel_delayed_work(&hdev->power_off);
9265 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9268 if (!mgmt_connected)
9271 if (link_type != ACL_LINK && link_type != LE_LINK)
9274 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9276 bacpy(&ev.addr.bdaddr, bdaddr);
9277 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9280 /* Report disconnects due to suspend */
9281 if (hdev->suspended)
9282 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9284 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9289 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9293 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9294 u8 link_type, u8 addr_type, u8 status)
9296 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9297 struct mgmt_cp_disconnect *cp;
9298 struct mgmt_pending_cmd *cmd;
9300 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9303 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9309 if (bacmp(bdaddr, &cp->addr.bdaddr))
9312 if (cp->addr.type != bdaddr_type)
9315 cmd->cmd_complete(cmd, mgmt_status(status));
9316 mgmt_pending_remove(cmd);
9319 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9320 u8 addr_type, u8 status)
9322 struct mgmt_ev_connect_failed ev;
9324 /* The connection is still in hci_conn_hash so test for 1
9325 * instead of 0 to know if this is the last one.
9327 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9328 cancel_delayed_work(&hdev->power_off);
9329 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9332 bacpy(&ev.addr.bdaddr, bdaddr);
9333 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9334 ev.status = mgmt_status(status);
9336 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9339 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9341 struct mgmt_ev_pin_code_request ev;
9343 bacpy(&ev.addr.bdaddr, bdaddr);
9344 ev.addr.type = BDADDR_BREDR;
9347 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9350 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9353 struct mgmt_pending_cmd *cmd;
9355 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9359 cmd->cmd_complete(cmd, mgmt_status(status));
9360 mgmt_pending_remove(cmd);
9363 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9366 struct mgmt_pending_cmd *cmd;
9368 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9372 cmd->cmd_complete(cmd, mgmt_status(status));
9373 mgmt_pending_remove(cmd);
9376 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9377 u8 link_type, u8 addr_type, u32 value,
9380 struct mgmt_ev_user_confirm_request ev;
9382 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9384 bacpy(&ev.addr.bdaddr, bdaddr);
9385 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9386 ev.confirm_hint = confirm_hint;
9387 ev.value = cpu_to_le32(value);
9389 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9393 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9394 u8 link_type, u8 addr_type)
9396 struct mgmt_ev_user_passkey_request ev;
9398 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9400 bacpy(&ev.addr.bdaddr, bdaddr);
9401 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9403 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9407 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9408 u8 link_type, u8 addr_type, u8 status,
9411 struct mgmt_pending_cmd *cmd;
9413 cmd = pending_find(opcode, hdev);
9417 cmd->cmd_complete(cmd, mgmt_status(status));
9418 mgmt_pending_remove(cmd);
9423 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9424 u8 link_type, u8 addr_type, u8 status)
9426 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9427 status, MGMT_OP_USER_CONFIRM_REPLY);
9430 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9431 u8 link_type, u8 addr_type, u8 status)
9433 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9435 MGMT_OP_USER_CONFIRM_NEG_REPLY);
9438 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9439 u8 link_type, u8 addr_type, u8 status)
9441 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9442 status, MGMT_OP_USER_PASSKEY_REPLY);
9445 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9446 u8 link_type, u8 addr_type, u8 status)
9448 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9450 MGMT_OP_USER_PASSKEY_NEG_REPLY);
9453 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9454 u8 link_type, u8 addr_type, u32 passkey,
9457 struct mgmt_ev_passkey_notify ev;
9459 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9461 bacpy(&ev.addr.bdaddr, bdaddr);
9462 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9463 ev.passkey = __cpu_to_le32(passkey);
9464 ev.entered = entered;
9466 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9469 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9471 struct mgmt_ev_auth_failed ev;
9472 struct mgmt_pending_cmd *cmd;
9473 u8 status = mgmt_status(hci_status);
9475 bacpy(&ev.addr.bdaddr, &conn->dst);
9476 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9479 cmd = find_pairing(conn);
9481 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9482 cmd ? cmd->sk : NULL);
9485 cmd->cmd_complete(cmd, status);
9486 mgmt_pending_remove(cmd);
9490 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9492 struct cmd_lookup match = { NULL, hdev };
9496 u8 mgmt_err = mgmt_status(status);
9497 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9498 cmd_status_rsp, &mgmt_err);
9502 if (test_bit(HCI_AUTH, &hdev->flags))
9503 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9505 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9507 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9511 new_settings(hdev, match.sk);
9517 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9519 struct cmd_lookup *match = data;
9521 if (match->sk == NULL) {
9522 match->sk = cmd->sk;
9523 sock_hold(match->sk);
9527 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9530 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9532 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9533 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9534 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9537 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9538 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9539 ext_info_changed(hdev, NULL);
9546 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9548 struct mgmt_cp_set_local_name ev;
9549 struct mgmt_pending_cmd *cmd;
9554 memset(&ev, 0, sizeof(ev));
9555 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9556 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9558 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9560 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9562 /* If this is a HCI command related to powering on the
9563 * HCI dev don't send any mgmt signals.
9565 if (pending_find(MGMT_OP_SET_POWERED, hdev))
9569 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9570 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9571 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9574 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9578 for (i = 0; i < uuid_count; i++) {
9579 if (!memcmp(uuid, uuids[i], 16))
9586 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9590 while (parsed < eir_len) {
9591 u8 field_len = eir[0];
9598 if (eir_len - parsed < field_len + 1)
9602 case EIR_UUID16_ALL:
9603 case EIR_UUID16_SOME:
9604 for (i = 0; i + 3 <= field_len; i += 2) {
9605 memcpy(uuid, bluetooth_base_uuid, 16);
9606 uuid[13] = eir[i + 3];
9607 uuid[12] = eir[i + 2];
9608 if (has_uuid(uuid, uuid_count, uuids))
9612 case EIR_UUID32_ALL:
9613 case EIR_UUID32_SOME:
9614 for (i = 0; i + 5 <= field_len; i += 4) {
9615 memcpy(uuid, bluetooth_base_uuid, 16);
9616 uuid[15] = eir[i + 5];
9617 uuid[14] = eir[i + 4];
9618 uuid[13] = eir[i + 3];
9619 uuid[12] = eir[i + 2];
9620 if (has_uuid(uuid, uuid_count, uuids))
9624 case EIR_UUID128_ALL:
9625 case EIR_UUID128_SOME:
9626 for (i = 0; i + 17 <= field_len; i += 16) {
9627 memcpy(uuid, eir + i + 2, 16);
9628 if (has_uuid(uuid, uuid_count, uuids))
9634 parsed += field_len + 1;
9635 eir += field_len + 1;
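/* 16- and 32-bit UUIDs are shorthand for the leading 32 bits of the
 * Bluetooth base UUID 00000000-0000-1000-8000-00805f9b34fb, which is
 * why the loops above patch the top bytes of a bluetooth_base_uuid
 * copy. For example the 16-bit UUID 0x180d (Heart Rate) expands to
 * 0000180d-0000-1000-8000-00805f9b34fb.
 */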
9641 static void restart_le_scan(struct hci_dev *hdev)
9643 /* If controller is not scanning we are done. */
9644 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9647 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9648 hdev->discovery.scan_start +
9649 hdev->discovery.scan_duration))
9652 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9653 DISCOV_LE_RESTART_DELAY);
9656 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9657 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9659 /* If an RSSI threshold has been specified, and
9660 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9661 * an RSSI smaller than the RSSI threshold will be dropped. If the quirk
9662 * is set, let it through for further processing, as we might need to
9665 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9666 * the results are also dropped.
9668 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9669 (rssi == HCI_RSSI_INVALID ||
9670 (rssi < hdev->discovery.rssi &&
9671 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9674 if (hdev->discovery.uuid_count != 0) {
9675 /* If a list of UUIDs is provided in filter, results with no
9676 * matching UUID should be dropped.
9678 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9679 hdev->discovery.uuids) &&
9680 !eir_has_uuids(scan_rsp, scan_rsp_len,
9681 hdev->discovery.uuid_count,
9682 hdev->discovery.uuids))
9686 /* If duplicate filtering does not report RSSI changes, then restart
9687 * scanning to ensure updated result with updated RSSI values.
9689 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9690 restart_le_scan(hdev);
9692 /* Validate RSSI value against the RSSI threshold once more. */
9693 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9694 rssi < hdev->discovery.rssi)
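/* Illustrative example, not part of the original file: with a discovery
 * filter of rssi = -70 and the single 16-bit UUID 0x180d, a report carrying
 * that UUID in its EIR passes the UUID check, while its RSSI of -80 gets it
 * dropped unless HCI_QUIRK_STRICT_DUPLICATE_FILTER defers the decision. A
 * minimal harness using only fields referenced above (function name is
 * hypothetical):
 */
static inline bool example_filter_match(struct hci_dev *hdev)
{
	/* one EIR field: length 3, EIR_UUID16_ALL, UUID 0x180d little-endian */
	u8 eir[] = { 0x03, EIR_UUID16_ALL, 0x0d, 0x18 };

	return is_filter_match(hdev, -80, eir, sizeof(eir), NULL, 0);
}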
void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
				  bdaddr_t *bdaddr, u8 addr_type)
{
	struct mgmt_ev_adv_monitor_device_lost ev;

	ev.monitor_handle = cpu_to_le16(handle);
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = addr_type;

	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
		   NULL);
}
static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
					       struct sk_buff *skb,
					       struct sock *skip_sk,
					       u16 handle)
{
	struct sk_buff *advmon_skb;
	size_t advmon_skb_len;
	__le16 *monitor_handle;

	if (!skb)
		return;

	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
			  sizeof(struct mgmt_ev_device_found)) + skb->len;
	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
				    advmon_skb_len);
	if (!advmon_skb)
		return;

	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
	 * store monitor_handle of the matched monitor.
	 */
	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
	*monitor_handle = cpu_to_le16(handle);
	skb_put_data(advmon_skb, skb->data, skb->len);

	mgmt_event_skb(advmon_skb, skip_sk);
}
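/* Layout note, illustrative only and assuming the packed struct definitions
 * in mgmt.h: ADV_MONITOR_DEVICE_FOUND is a __le16 monitor_handle prepended
 * to an otherwise unmodified DEVICE_FOUND payload, which is why the helper
 * above can put the handle first and then copy the original skb verbatim.
 * A compile-time sketch of that assumption (helper name is hypothetical):
 */
static inline void example_advmon_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct mgmt_ev_adv_monitor_device_found) !=
		     sizeof(__le16) + sizeof(struct mgmt_ev_device_found));
}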
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
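/* Illustrative example, not part of the original file: for a Class of Device
 * of 0x5a020c the synthesized field is the 5 bytes the allocation above
 * reserves: length 0x04, type EIR_CLASS_OF_DEV (0x0d), then the three CoD
 * bytes in little-endian order (function name is hypothetical):
 */
static inline void example_eir_cod_append(void)
{
	u8 cod[3] = { 0x0c, 0x02, 0x5a };	/* CoD 0x5a020c, little-endian */
	u8 eir_cod[5];

	eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV, cod, sizeof(cod));
	/* eir_cod now holds { 0x04, 0x0d, 0x0c, 0x02, 0x5a } */
}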
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	u16 eir_len = 0;
	u32 flags = 0;

	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));

	ev = skb_put(skb, sizeof(*ev));
	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	if (name)
		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
	else
		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;

	ev->eir_len = cpu_to_le16(eir_len);
	ev->flags = cpu_to_le32(flags);

	mgmt_event_skb(skb, NULL);
}
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;

	bt_dev_dbg(hdev, "discovering %u", discovering);

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_suspending(struct hci_dev *hdev, u8 state)
{
	struct mgmt_ev_controller_suspend ev;

	ev.suspend_state = state;
	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
}
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		memset(&ev.addr, 0, sizeof(ev.addr));
	}
	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};

int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}

void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
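/* Userspace view, an illustrative sketch rather than kernel code: the control
 * channel registered by mgmt_init() is reached by binding a raw HCI socket
 * to HCI_CHANNEL_CONTROL, roughly:
 *
 *	struct sockaddr_hci addr = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = HCI_DEV_NONE,
 *		.hci_channel = HCI_CHANNEL_CONTROL,
 *	};
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * after which mgmt commands and events flow over fd.
 */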