2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
44 #define MGMT_VERSION 1
45 #define MGMT_REVISION 22
/* Opcodes accepted from trusted (privileged) mgmt control-channel sockets;
 * reported to user space by read_commands().
 * NOTE(review): gaps in the embedded numbering (49-50, 54, 56-58, ...) show
 * entries and the closing "};" were dropped in extraction — compare with
 * upstream net/bluetooth/mgmt.c before treating this list as complete.
 */
47 static const u16 mgmt_commands[] = {
48 MGMT_OP_READ_INDEX_LIST,
51 MGMT_OP_SET_DISCOVERABLE,
52 MGMT_OP_SET_CONNECTABLE,
53 MGMT_OP_SET_FAST_CONNECTABLE,
55 MGMT_OP_SET_LINK_SECURITY,
59 MGMT_OP_SET_DEV_CLASS,
60 MGMT_OP_SET_LOCAL_NAME,
63 MGMT_OP_LOAD_LINK_KEYS,
64 MGMT_OP_LOAD_LONG_TERM_KEYS,
66 MGMT_OP_GET_CONNECTIONS,
67 MGMT_OP_PIN_CODE_REPLY,
68 MGMT_OP_PIN_CODE_NEG_REPLY,
69 MGMT_OP_SET_IO_CAPABILITY,
71 MGMT_OP_CANCEL_PAIR_DEVICE,
72 MGMT_OP_UNPAIR_DEVICE,
73 MGMT_OP_USER_CONFIRM_REPLY,
74 MGMT_OP_USER_CONFIRM_NEG_REPLY,
75 MGMT_OP_USER_PASSKEY_REPLY,
76 MGMT_OP_USER_PASSKEY_NEG_REPLY,
77 MGMT_OP_READ_LOCAL_OOB_DATA,
78 MGMT_OP_ADD_REMOTE_OOB_DATA,
79 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
80 MGMT_OP_START_DISCOVERY,
81 MGMT_OP_STOP_DISCOVERY,
84 MGMT_OP_UNBLOCK_DEVICE,
85 MGMT_OP_SET_DEVICE_ID,
86 MGMT_OP_SET_ADVERTISING,
88 MGMT_OP_SET_STATIC_ADDRESS,
89 MGMT_OP_SET_SCAN_PARAMS,
90 MGMT_OP_SET_SECURE_CONN,
91 MGMT_OP_SET_DEBUG_KEYS,
94 MGMT_OP_GET_CONN_INFO,
95 MGMT_OP_GET_CLOCK_INFO,
97 MGMT_OP_REMOVE_DEVICE,
98 MGMT_OP_LOAD_CONN_PARAM,
99 MGMT_OP_READ_UNCONF_INDEX_LIST,
100 MGMT_OP_READ_CONFIG_INFO,
101 MGMT_OP_SET_EXTERNAL_CONFIG,
102 MGMT_OP_SET_PUBLIC_ADDRESS,
103 MGMT_OP_START_SERVICE_DISCOVERY,
104 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
105 MGMT_OP_READ_EXT_INDEX_LIST,
106 MGMT_OP_READ_ADV_FEATURES,
107 MGMT_OP_ADD_ADVERTISING,
108 MGMT_OP_REMOVE_ADVERTISING,
109 MGMT_OP_GET_ADV_SIZE_INFO,
110 MGMT_OP_START_LIMITED_DISCOVERY,
111 MGMT_OP_READ_EXT_INFO,
112 MGMT_OP_SET_APPEARANCE,
113 MGMT_OP_GET_PHY_CONFIGURATION,
114 MGMT_OP_SET_PHY_CONFIGURATION,
115 MGMT_OP_SET_BLOCKED_KEYS,
116 MGMT_OP_SET_WIDEBAND_SPEECH,
117 MGMT_OP_READ_CONTROLLER_CAP,
118 MGMT_OP_READ_EXP_FEATURES_INFO,
119 MGMT_OP_SET_EXP_FEATURE,
120 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
121 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
122 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
123 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
124 MGMT_OP_GET_DEVICE_FLAGS,
125 MGMT_OP_SET_DEVICE_FLAGS,
126 MGMT_OP_READ_ADV_MONITOR_FEATURES,
127 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
128 MGMT_OP_REMOVE_ADV_MONITOR,
129 MGMT_OP_ADD_EXT_ADV_PARAMS,
130 MGMT_OP_ADD_EXT_ADV_DATA,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
/* Events that may be delivered to trusted mgmt sockets; reported to user
 * space by read_commands().
 * NOTE(review): numbering gaps (136, 149, 151, 156-157) indicate entries and
 * the closing "};" were dropped in extraction.
 */
134 static const u16 mgmt_events[] = {
135 MGMT_EV_CONTROLLER_ERROR,
137 MGMT_EV_INDEX_REMOVED,
138 MGMT_EV_NEW_SETTINGS,
139 MGMT_EV_CLASS_OF_DEV_CHANGED,
140 MGMT_EV_LOCAL_NAME_CHANGED,
141 MGMT_EV_NEW_LINK_KEY,
142 MGMT_EV_NEW_LONG_TERM_KEY,
143 MGMT_EV_DEVICE_CONNECTED,
144 MGMT_EV_DEVICE_DISCONNECTED,
145 MGMT_EV_CONNECT_FAILED,
146 MGMT_EV_PIN_CODE_REQUEST,
147 MGMT_EV_USER_CONFIRM_REQUEST,
148 MGMT_EV_USER_PASSKEY_REQUEST,
150 MGMT_EV_DEVICE_FOUND,
152 MGMT_EV_DEVICE_BLOCKED,
153 MGMT_EV_DEVICE_UNBLOCKED,
154 MGMT_EV_DEVICE_UNPAIRED,
155 MGMT_EV_PASSKEY_NOTIFY,
158 MGMT_EV_DEVICE_ADDED,
159 MGMT_EV_DEVICE_REMOVED,
160 MGMT_EV_NEW_CONN_PARAM,
161 MGMT_EV_UNCONF_INDEX_ADDED,
162 MGMT_EV_UNCONF_INDEX_REMOVED,
163 MGMT_EV_NEW_CONFIG_OPTIONS,
164 MGMT_EV_EXT_INDEX_ADDED,
165 MGMT_EV_EXT_INDEX_REMOVED,
166 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
167 MGMT_EV_ADVERTISING_ADDED,
168 MGMT_EV_ADVERTISING_REMOVED,
169 MGMT_EV_EXT_INFO_CHANGED,
170 MGMT_EV_PHY_CONFIGURATION_CHANGED,
171 MGMT_EV_EXP_FEATURE_CHANGED,
172 MGMT_EV_DEVICE_FLAGS_CHANGED,
173 MGMT_EV_ADV_MONITOR_ADDED,
174 MGMT_EV_ADV_MONITOR_REMOVED,
175 MGMT_EV_CONTROLLER_SUSPEND,
176 MGMT_EV_CONTROLLER_RESUME,
177 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
178 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
/* Read-only opcodes that unprivileged (untrusted) sockets may issue — all
 * informational, none change controller state.
 * NOTE(review): line 183 and the closing "};" were dropped in extraction.
 */
181 static const u16 mgmt_untrusted_commands[] = {
182 MGMT_OP_READ_INDEX_LIST,
184 MGMT_OP_READ_UNCONF_INDEX_LIST,
185 MGMT_OP_READ_CONFIG_INFO,
186 MGMT_OP_READ_EXT_INDEX_LIST,
187 MGMT_OP_READ_EXT_INFO,
188 MGMT_OP_READ_CONTROLLER_CAP,
189 MGMT_OP_READ_EXP_FEATURES_INFO,
190 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
191 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Events that may also be delivered to untrusted sockets — no key material
 * or per-connection data, only index/settings/name notifications.
 * NOTE(review): line 195 and the closing "};" were dropped in extraction.
 */
194 static const u16 mgmt_untrusted_events[] = {
196 MGMT_EV_INDEX_REMOVED,
197 MGMT_EV_NEW_SETTINGS,
198 MGMT_EV_CLASS_OF_DEV_CHANGED,
199 MGMT_EV_LOCAL_NAME_CHANGED,
200 MGMT_EV_UNCONF_INDEX_ADDED,
201 MGMT_EV_UNCONF_INDEX_REMOVED,
202 MGMT_EV_NEW_CONFIG_OPTIONS,
203 MGMT_EV_EXT_INDEX_ADDED,
204 MGMT_EV_EXT_INDEX_REMOVED,
205 MGMT_EV_EXT_INFO_CHANGED,
206 MGMT_EV_EXP_FEATURE_CHANGED,
209 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 "\x00\x00\x00\x00\x00\x00\x00\x00"
214 /* HCI to MGMT error code conversion table */
/* Maps HCI core-spec error codes (array index = HCI status) to MGMT status
 * codes; consumed by mgmt_status() for non-negative values. Each entry's
 * trailing comment names the HCI error it translates.
 * NOTE(review): the first entry (index 0, "Success") and the closing "};"
 * appear to have been dropped in extraction.
 */
215 static const u8 mgmt_status_table[] = {
217 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
218 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
219 MGMT_STATUS_FAILED, /* Hardware Failure */
220 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
221 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
222 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
223 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
224 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
225 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
226 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
227 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
228 MGMT_STATUS_BUSY, /* Command Disallowed */
229 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
230 MGMT_STATUS_REJECTED, /* Rejected Security */
231 MGMT_STATUS_REJECTED, /* Rejected Personal */
232 MGMT_STATUS_TIMEOUT, /* Host Timeout */
233 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
234 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
235 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
236 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
237 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
238 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
239 MGMT_STATUS_BUSY, /* Repeated Attempts */
240 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
241 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
242 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
243 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
244 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
245 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
246 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
247 MGMT_STATUS_FAILED, /* Unspecified Error */
248 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
249 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
250 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
251 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
252 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
253 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
254 MGMT_STATUS_FAILED, /* Unit Link Key Used */
255 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
256 MGMT_STATUS_TIMEOUT, /* Instant Passed */
257 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
258 MGMT_STATUS_FAILED, /* Transaction Collision */
259 MGMT_STATUS_FAILED, /* Reserved for future use */
260 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
261 MGMT_STATUS_REJECTED, /* QoS Rejected */
262 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
263 MGMT_STATUS_REJECTED, /* Insufficient Security */
264 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
265 MGMT_STATUS_FAILED, /* Reserved for future use */
266 MGMT_STATUS_BUSY, /* Role Switch Pending */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_FAILED, /* Slot Violation */
269 MGMT_STATUS_FAILED, /* Role Switch Failed */
270 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
271 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
272 MGMT_STATUS_BUSY, /* Host Busy Pairing */
273 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
274 MGMT_STATUS_BUSY, /* Controller Busy */
275 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
276 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
277 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
278 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
279 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Translate a kernel errno value into a MGMT status code, falling through
 * to MGMT_STATUS_FAILED for anything unrecognized.
 * NOTE(review): the selector lines between the returns (switch statement and
 * case labels for 0/-EPERM/-EINVAL/... per the numbering gaps) were dropped
 * in extraction — restore from upstream before building.
 */
282 static u8 mgmt_errno_status(int err)
286 return MGMT_STATUS_SUCCESS;
288 return MGMT_STATUS_REJECTED;
290 return MGMT_STATUS_INVALID_PARAMS;
292 return MGMT_STATUS_NOT_SUPPORTED;
294 return MGMT_STATUS_BUSY;
296 return MGMT_STATUS_AUTH_FAILED;
298 return MGMT_STATUS_NO_RESOURCES;
300 return MGMT_STATUS_ALREADY_CONNECTED;
302 return MGMT_STATUS_DISCONNECTED;
305 return MGMT_STATUS_FAILED;
/* Convert either a negative errno (via mgmt_errno_status()) or a positive
 * HCI status (via mgmt_status_table lookup) into a MGMT status code.
 * NOTE(review): the "if (err < 0)" guard before the errno path (line 310 per
 * the numbering gap) was dropped in extraction.
 */
308 static u8 mgmt_status(int err)
311 return mgmt_errno_status(err);
313 if (err < ARRAY_SIZE(mgmt_status_table))
314 return mgmt_status_table[err];
316 return MGMT_STATUS_FAILED;
/* Send an index-related event on the control channel.
 * NOTE(review): the signature continuation and the trailing flag/skip
 * arguments of the mgmt_send_event() call were dropped in extraction.
 */
319 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
322 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Send an event only to control-channel sockets that have @flag set,
 * skipping @skip_sk.
 * NOTE(review): the final argument line of the mgmt_send_event() call was
 * dropped in extraction.
 */
326 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
327 u16 len, int flag, struct sock *skip_sk)
329 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
333 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
334 struct sock *skip_sk)
336 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
337 HCI_SOCK_TRUSTED, skip_sk);
/* Broadcast a pre-built event skb to trusted control-channel sockets.
 * NOTE(review): the trailing "skip_sk);" argument line was dropped in
 * extraction.
 */
340 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
342 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
346 static u8 le_addr_type(u8 mgmt_addr_type)
348 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349 return ADDR_LE_DEV_PUBLIC;
351 return ADDR_LE_DEV_RANDOM;
354 void mgmt_fill_version_info(void *ver)
356 struct mgmt_rp_read_version *rp = ver;
358 rp->version = MGMT_VERSION;
359 rp->revision = cpu_to_le16(MGMT_REVISION);
/* MGMT_OP_READ_VERSION handler: reply with the mgmt interface version.
 * NOTE(review): the signature continuation and the "&rp, sizeof(rp));" tail
 * of the mgmt_cmd_complete() call were dropped in extraction.
 */
362 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
365 struct mgmt_rp_read_version rp;
367 bt_dev_dbg(hdev, "sock %p", sk);
369 mgmt_fill_version_info(&rp);
371 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: reply with the opcode/event tables the
 * caller may use — the full tables for trusted sockets, the read-only
 * untrusted tables otherwise. Opcodes are serialized little-endian via
 * put_unaligned_le16() since rp->opcodes has no alignment guarantee.
 * NOTE(review): several lines were dropped in extraction — the kmalloc
 * NULL check, the "} else {" arms, and the kfree(rp)/return err tail.
 */
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
378 struct mgmt_rp_read_commands *rp;
379 u16 num_commands, num_events;
383 bt_dev_dbg(hdev, "sock %p", sk);
385 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386 num_commands = ARRAY_SIZE(mgmt_commands);
387 num_events = ARRAY_SIZE(mgmt_events);
389 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390 num_events = ARRAY_SIZE(mgmt_untrusted_events);
393 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
395 rp = kmalloc(rp_size, GFP_KERNEL);
399 rp->num_commands = cpu_to_le16(num_commands);
400 rp->num_events = cpu_to_le16(num_events);
402 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403 __le16 *opcode = rp->opcodes;
405 for (i = 0; i < num_commands; i++, opcode++)
406 put_unaligned_le16(mgmt_commands[i], opcode);
408 for (i = 0; i < num_events; i++, opcode++)
409 put_unaligned_le16(mgmt_events[i], opcode);
411 __le16 *opcode = rp->opcodes;
413 for (i = 0; i < num_commands; i++, opcode++)
414 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
416 for (i = 0; i < num_events; i++, opcode++)
417 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
420 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: list indexes of all configured primary
 * controllers. Counts first under hci_dev_list_lock, allocates with
 * GFP_ATOMIC (lock held), then fills; devices in SETUP/CONFIG/USER_CHANNEL
 * state and raw-only devices are excluded, so the second pass may yield
 * fewer entries than allocated — count is re-derived in the fill loop.
 * NOTE(review): the allocation NULL-check body, "count = 0;" reset and the
 * kfree(rp)/return tail were dropped in extraction.
 */
430 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
436 bt_dev_dbg(hdev, "sock %p", sk);
438 read_lock(&hci_dev_list_lock);
441 list_for_each_entry(d, &hci_dev_list, list) {
442 if (d->dev_type == HCI_PRIMARY &&
443 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
447 rp_len = sizeof(*rp) + (2 * count);
448 rp = kmalloc(rp_len, GFP_ATOMIC);
450 read_unlock(&hci_dev_list_lock);
455 list_for_each_entry(d, &hci_dev_list, list) {
456 if (hci_dev_test_flag(d, HCI_SETUP) ||
457 hci_dev_test_flag(d, HCI_CONFIG) ||
458 hci_dev_test_flag(d, HCI_USER_CHANNEL))
461 /* Devices marked as raw-only are neither configured
462 * nor unconfigured controllers.
464 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
467 if (d->dev_type == HCI_PRIMARY &&
468 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
469 rp->index[count++] = cpu_to_le16(d->id);
470 bt_dev_dbg(hdev, "Added hci%u", d->id);
474 rp->num_controllers = cpu_to_le16(count);
475 rp_len = sizeof(*rp) + (2 * count);
477 read_unlock(&hci_dev_list_lock);
479 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass count/fill scheme
 * as read_index_list(), but selects primary controllers that ARE flagged
 * HCI_UNCONFIGURED instead of the configured ones.
 * NOTE(review): the allocation NULL-check body, "count = 0;" reset and the
 * kfree(rp)/return tail were dropped in extraction.
 */
488 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
489 void *data, u16 data_len)
490 struct mgmt_rp_read_unconf_index_list *rp;
496 bt_dev_dbg(hdev, "sock %p", sk);
498 read_lock(&hci_dev_list_lock);
501 list_for_each_entry(d, &hci_dev_list, list) {
502 if (d->dev_type == HCI_PRIMARY &&
503 hci_dev_test_flag(d, HCI_UNCONFIGURED))
507 rp_len = sizeof(*rp) + (2 * count);
508 rp = kmalloc(rp_len, GFP_ATOMIC);
510 read_unlock(&hci_dev_list_lock);
515 list_for_each_entry(d, &hci_dev_list, list) {
516 if (hci_dev_test_flag(d, HCI_SETUP) ||
517 hci_dev_test_flag(d, HCI_CONFIG) ||
518 hci_dev_test_flag(d, HCI_USER_CHANNEL))
521 /* Devices marked as raw-only are neither configured
522 * nor unconfigured controllers.
524 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
527 if (d->dev_type == HCI_PRIMARY &&
528 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
529 rp->index[count++] = cpu_to_le16(d->id);
530 bt_dev_dbg(hdev, "Added hci%u", d->id);
534 rp->num_controllers = cpu_to_le16(count);
535 rp_len = sizeof(*rp) + (2 * count);
537 read_unlock(&hci_dev_list_lock);
539 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
540 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: list both primary and AMP
 * controllers with a per-entry type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and bus. Also switches this socket from
 * legacy index events to extended index events.
 * NOTE(review): the allocation NULL-check body, "count = 0;", the "else"
 * continue arm for unknown dev_type, and the kfree(rp)/return tail were
 * dropped in extraction.
 */
548 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
549 void *data, u16 data_len)
550 struct mgmt_rp_read_ext_index_list *rp;
555 bt_dev_dbg(hdev, "sock %p", sk);
557 read_lock(&hci_dev_list_lock);
560 list_for_each_entry(d, &hci_dev_list, list) {
561 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
565 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
567 read_unlock(&hci_dev_list_lock);
572 list_for_each_entry(d, &hci_dev_list, list) {
573 if (hci_dev_test_flag(d, HCI_SETUP) ||
574 hci_dev_test_flag(d, HCI_CONFIG) ||
575 hci_dev_test_flag(d, HCI_USER_CHANNEL))
578 /* Devices marked as raw-only are neither configured
579 * nor unconfigured controllers.
581 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
584 if (d->dev_type == HCI_PRIMARY) {
585 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
586 rp->entry[count].type = 0x01;
588 rp->entry[count].type = 0x00;
589 } else if (d->dev_type == HCI_AMP) {
590 rp->entry[count].type = 0x02;
595 rp->entry[count].bus = d->bus;
596 rp->entry[count++].index = cpu_to_le16(d->id);
597 bt_dev_dbg(hdev, "Added hci%u", d->id);
600 rp->num_controllers = cpu_to_le16(count);
602 read_unlock(&hci_dev_list_lock);
604 /* If this command is called at least once, then all the
605 * default index and unconfigured index events are disabled
606 * and from now on only extended index events are used.
608 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
609 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
610 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
612 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
613 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
614 struct_size(rp, entry, count));
/* A controller is "configured" when any externally-required configuration
 * has been completed and, where a valid public address is required
 * (INVALID_BDADDR / USE_BDADDR_PROPERTY quirks), one has been set.
 * NOTE(review): the "return false;" bodies of both checks and the final
 * "return true;" were dropped in extraction.
 */
621 static bool is_configured(struct hci_dev *hdev)
623 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
627 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration options still missing:
 * external config not yet done, and/or public address required but unset.
 * Mirrors the checks in is_configured().
 * NOTE(review): the "u32 options = 0;" declaration line was dropped in
 * extraction.
 */
635 static __le32 get_missing_options(struct hci_dev *hdev)
639 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
640 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
641 options |= MGMT_OPTION_EXTERNAL_CONFIG;
643 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
644 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
645 !bacmp(&hdev->public_addr, BDADDR_ANY))
646 options |= MGMT_OPTION_PUBLIC_ADDRESS;
648 return cpu_to_le32(options);
651 static int new_options(struct hci_dev *hdev, struct sock *skip)
653 __le32 options = get_missing_options(hdev);
655 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
656 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
/* Complete @opcode with the current missing-options bitmask as payload.
 * NOTE(review): the trailing "sizeof(options));" argument line was dropped
 * in extraction.
 */
659 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
661 __le32 options = get_missing_options(hdev);
663 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus supported and
 * missing configuration options. Public-address configuration is supported
 * only when the driver provides a set_bdaddr hook.
 * NOTE(review): the "u32 options = 0;" declaration, hci_dev_lock() call and
 * the "&rp, sizeof(rp));" tail were dropped in extraction.
 */
667 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
668 void *data, u16 data_len)
670 struct mgmt_rp_read_config_info rp;
673 bt_dev_dbg(hdev, "sock %p", sk);
677 memset(&rp, 0, sizeof(rp));
678 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
680 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
681 options |= MGMT_OPTION_EXTERNAL_CONFIG;
683 if (hdev->set_bdaddr)
684 options |= MGMT_OPTION_PUBLIC_ADDRESS;
686 rp.supported_options = cpu_to_le32(options);
687 rp.missing_options = get_missing_options(hdev);
689 hci_dev_unlock(hdev);
691 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the MGMT_PHY_* bitmask of PHYs this controller supports, derived
 * from LMP features (BR/EDR slot and EDR rate capabilities) and LE
 * features (2M / Coded PHY bits).
 * NOTE(review): several closing-brace lines were dropped in extraction;
 * nesting must be restored from upstream before building.
 */
697 static u32 get_supported_phys(struct hci_dev *hdev)
699 u32 supported_phys = 0;
701 if (lmp_bredr_capable(hdev)) {
702 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
704 if (hdev->features[0][0] & LMP_3SLOT)
705 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
707 if (hdev->features[0][0] & LMP_5SLOT)
708 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
710 if (lmp_edr_2m_capable(hdev)) {
711 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
713 if (lmp_edr_3slot_capable(hdev))
714 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
716 if (lmp_edr_5slot_capable(hdev))
717 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
719 if (lmp_edr_3m_capable(hdev)) {
720 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
722 if (lmp_edr_3slot_capable(hdev))
723 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
725 if (lmp_edr_5slot_capable(hdev))
726 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
731 if (lmp_le_capable(hdev)) {
732 supported_phys |= MGMT_PHY_LE_1M_TX;
733 supported_phys |= MGMT_PHY_LE_1M_RX;
735 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
736 supported_phys |= MGMT_PHY_LE_2M_TX;
737 supported_phys |= MGMT_PHY_LE_2M_RX;
740 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
741 supported_phys |= MGMT_PHY_LE_CODED_TX;
742 supported_phys |= MGMT_PHY_LE_CODED_RX;
746 return supported_phys;
/* Build the MGMT_PHY_* bitmask of currently selected PHYs. For BR/EDR the
 * packet-type mask is consulted; note EDR HCI_2DHx/HCI_3DHx bits are
 * "disabled-when-set", hence the negated tests, while the basic-rate
 * DM/DH bits are "enabled-when-set". LE selection comes from the default
 * TX/RX PHY preference masks.
 * NOTE(review): several closing-brace lines were dropped in extraction;
 * nesting must be restored from upstream before building.
 */
749 static u32 get_selected_phys(struct hci_dev *hdev)
751 u32 selected_phys = 0;
753 if (lmp_bredr_capable(hdev)) {
754 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
756 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
757 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
759 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
760 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
762 if (lmp_edr_2m_capable(hdev)) {
763 if (!(hdev->pkt_type & HCI_2DH1))
764 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
766 if (lmp_edr_3slot_capable(hdev) &&
767 !(hdev->pkt_type & HCI_2DH3))
768 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
770 if (lmp_edr_5slot_capable(hdev) &&
771 !(hdev->pkt_type & HCI_2DH5))
772 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
774 if (lmp_edr_3m_capable(hdev)) {
775 if (!(hdev->pkt_type & HCI_3DH1))
776 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
778 if (lmp_edr_3slot_capable(hdev) &&
779 !(hdev->pkt_type & HCI_3DH3))
780 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
782 if (lmp_edr_5slot_capable(hdev) &&
783 !(hdev->pkt_type & HCI_3DH5))
784 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
789 if (lmp_le_capable(hdev)) {
790 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
791 selected_phys |= MGMT_PHY_LE_1M_TX;
793 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
794 selected_phys |= MGMT_PHY_LE_1M_RX;
796 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
797 selected_phys |= MGMT_PHY_LE_2M_TX;
799 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
800 selected_phys |= MGMT_PHY_LE_2M_RX;
802 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
803 selected_phys |= MGMT_PHY_LE_CODED_TX;
805 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
806 selected_phys |= MGMT_PHY_LE_CODED_RX;
809 return selected_phys;
810 static u32 get_configurable_phys(struct hci_dev *hdev)
812 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the MGMT_SETTING_* bitmask of settings this controller can support,
 * derived from LMP/LE capabilities and quirks. Power, bondable, debug keys,
 * connectable and discoverable are always available.
 * NOTE(review): the "u32 settings = 0;" declaration, several closing braces,
 * the second operand of the CONFIGURATION condition (line 855) and the
 * final "return settings;" were dropped in extraction.
 */
816 static u32 get_supported_settings(struct hci_dev *hdev)
820 settings |= MGMT_SETTING_POWERED;
821 settings |= MGMT_SETTING_BONDABLE;
822 settings |= MGMT_SETTING_DEBUG_KEYS;
823 settings |= MGMT_SETTING_CONNECTABLE;
824 settings |= MGMT_SETTING_DISCOVERABLE;
826 if (lmp_bredr_capable(hdev)) {
827 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
828 settings |= MGMT_SETTING_FAST_CONNECTABLE;
829 settings |= MGMT_SETTING_BREDR;
830 settings |= MGMT_SETTING_LINK_SECURITY;
832 if (lmp_ssp_capable(hdev)) {
833 settings |= MGMT_SETTING_SSP;
834 if (IS_ENABLED(CONFIG_BT_HS))
835 settings |= MGMT_SETTING_HS;
838 if (lmp_sc_capable(hdev))
839 settings |= MGMT_SETTING_SECURE_CONN;
841 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
843 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
846 if (lmp_le_capable(hdev)) {
847 settings |= MGMT_SETTING_LE;
848 settings |= MGMT_SETTING_SECURE_CONN;
849 settings |= MGMT_SETTING_PRIVACY;
850 settings |= MGMT_SETTING_STATIC_ADDRESS;
851 settings |= MGMT_SETTING_ADVERTISING;
854 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
856 settings |= MGMT_SETTING_CONFIGURATION;
858 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the MGMT_SETTING_* bitmask of settings currently active, mapping
 * each hdev flag to its mgmt setting bit.
 * NOTE(review): the "u32 settings = 0;" declaration and the final
 * "return settings;" were dropped in extraction.
 */
863 static u32 get_current_settings(struct hci_dev *hdev)
867 if (hdev_is_powered(hdev))
868 settings |= MGMT_SETTING_POWERED;
870 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
871 settings |= MGMT_SETTING_CONNECTABLE;
873 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
874 settings |= MGMT_SETTING_FAST_CONNECTABLE;
876 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
877 settings |= MGMT_SETTING_DISCOVERABLE;
879 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
880 settings |= MGMT_SETTING_BONDABLE;
882 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
883 settings |= MGMT_SETTING_BREDR;
885 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
886 settings |= MGMT_SETTING_LE;
888 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
889 settings |= MGMT_SETTING_LINK_SECURITY;
891 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
892 settings |= MGMT_SETTING_SSP;
894 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
895 settings |= MGMT_SETTING_HS;
897 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
898 settings |= MGMT_SETTING_ADVERTISING;
900 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
901 settings |= MGMT_SETTING_SECURE_CONN;
903 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
904 settings |= MGMT_SETTING_DEBUG_KEYS;
906 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
907 settings |= MGMT_SETTING_PRIVACY;
909 /* The current setting for static address has two purposes. The
910 * first is to indicate if the static address will be used and
911 * the second is to indicate if it is actually set.
913 * This means if the static address is not configured, this flag
914 * will never be set. If the address is configured, then if the
915 * address is actually used decides if the flag is set or not.
917 * For single mode LE only controllers and dual-mode controllers
918 * with BR/EDR disabled, the existence of the static address will
921 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
922 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
923 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
924 if (bacmp(&hdev->static_addr, BDADDR_ANY))
925 settings |= MGMT_SETTING_STATIC_ADDRESS;
928 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
929 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
/* Look up a pending mgmt command for @opcode on @hdev, restricted to the
 * control channel (thin wrapper over mgmt_pending_find()).
 */
934 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
936 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* Return the LE advertising discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED, or 0) to put in the AD Flags field. A pending
 * SET_DISCOVERABLE command takes precedence over the hdev flags since the
 * flags have not reached their final value yet.
 * NOTE(review): the "if (cmd) {" guard, the "if (cp->val == 0x01)" test,
 * closing braces and the final "return 0;" were dropped in extraction.
 */
939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
941 struct mgmt_pending_cmd *cmd;
943 /* If there's a pending mgmt command the flags will not yet have
944 * their final values, so check for this first.
946 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
948 struct mgmt_mode *cp = cmd->param;
950 return LE_AD_GENERAL;
951 else if (cp->val == 0x02)
952 return LE_AD_LIMITED;
954 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955 return LE_AD_LIMITED;
956 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957 return LE_AD_GENERAL;
/* Return whether the controller should currently be treated as
 * connectable, honoring an in-flight SET_CONNECTABLE command before the
 * hdev flag.
 * NOTE(review): the "if (cmd) {" guard and its "return cp->val;" body were
 * dropped in extraction.
 */
963 bool mgmt_get_connectable(struct hci_dev *hdev)
965 struct mgmt_pending_cmd *cmd;
967 /* If there's a pending mgmt command the flag will not yet have
968 * it's final value, so check for this first.
970 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
972 struct mgmt_mode *cp = cmd->param;
977 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* hci_cmd_sync callback: refresh EIR data and device class after the
 * service cache timeout fires.
 * NOTE(review): the trailing "return 0;" was dropped in extraction.
 */
980 static int service_cache_sync(struct hci_dev *hdev, void *data)
982 hci_update_eir_sync(hdev);
983 hci_update_class_sync(hdev);
/* Delayed-work handler for hdev->service_cache: if the SERVICE_CACHE flag
 * was set, queue service_cache_sync() on the cmd_sync context.
 * NOTE(review): the container_of() continuation line ("service_cache.work)")
 * and the early "return;" were dropped in extraction.
 */
990 static void service_cache_off(struct work_struct *work)
991 struct hci_dev *hdev = container_of(work, struct hci_dev,
993 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
996 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
/* hci_cmd_sync callback run when the RPA lifetime elapses: re-enable
 * advertising, which regenerates and programs a fresh RPA as a side
 * effect; uses the extended advertising path when available.
 */
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1001 /* The generation of a new RPA and programming it into the
1002 * controller happens in the hci_req_enable_advertising()
1005 if (ext_adv_capable(hdev))
1006 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1008 return hci_enable_advertising_sync(hdev);
/* Delayed-work handler for hdev->rpa_expired: mark the RPA stale and, if
 * advertising is active, queue rpa_expired_sync() to rotate it.
 * NOTE(review): the container_of() continuation line ("rpa_expired.work)")
 * and the early "return;" were dropped in extraction.
 */
1013 static void rpa_expired(struct work_struct *work)
1014 struct hci_dev *hdev = container_of(work, struct hci_dev,
1016 bt_dev_dbg(hdev, "");
1018 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1020 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1023 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
/* One-time per-hdev mgmt initialization: on the first mgmt access (HCI_MGMT
 * flag transition) set up the service-cache and RPA-expiry delayed work and
 * clear HCI_BONDABLE so user space must opt in explicitly.
 * NOTE(review): the early "return;" after the test_and_set and a comment
 * continuation line were dropped in extraction.
 */
1026 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1028 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
1031 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1032 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1034 /* Non-mgmt controlled devices get this bit set
1035 * implicitly so that pairing works for them, however
1036 * for mgmt we require user-space to explicitly enable
1039 hci_dev_clear_flag(hdev, HCI_BONDABLE);
/* MGMT_OP_READ_INFO handler: reply with address, version, manufacturer,
 * supported/current settings, device class and names, snapshotted under
 * hdev lock.
 * NOTE(review): the hci_dev_lock() call and the "sizeof(rp));" tail were
 * dropped in extraction.
 */
1042 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1043 void *data, u16 data_len)
1045 struct mgmt_rp_read_info rp;
1047 bt_dev_dbg(hdev, "sock %p", sk);
1051 memset(&rp, 0, sizeof(rp));
1053 bacpy(&rp.bdaddr, &hdev->bdaddr);
1055 rp.version = hdev->hci_ver;
1056 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1058 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1059 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1061 memcpy(rp.dev_class, hdev->dev_class, 3);
1063 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1064 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1066 hci_dev_unlock(hdev);
1068 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Append class-of-device (BR/EDR enabled), appearance (LE enabled) and both
 * name fields as EIR structures into @eir; returns the total length
 * written. Caller must size @eir for the worst case.
 * NOTE(review): local declarations ("u16 eir_len = 0;" etc.), the
 * appearance-argument continuation line and the final "return eir_len;"
 * were dropped in extraction.
 */
1072 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1077 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1078 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1079 hdev->dev_class, 3);
1081 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1082 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1085 name_len = strlen(hdev->dev_name);
1086 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1087 hdev->dev_name, name_len);
1089 name_len = strlen(hdev->short_name);
1090 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1091 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but with an
 * EIR-encoded trailer (class, appearance, names). Also switches this
 * socket from legacy class/name change events to EXT_INFO_CHANGED.
 * NOTE(review): the stack buffer declaration backing rp (line ~1099) and
 * the hci_dev_lock() call were dropped in extraction.
 */
1097 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1098 void *data, u16 data_len)
1100 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1103 bt_dev_dbg(hdev, "sock %p", sk);
1105 memset(&buf, 0, sizeof(buf));
1109 bacpy(&rp->bdaddr, &hdev->bdaddr);
1111 rp->version = hdev->hci_ver;
1112 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1114 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1115 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1118 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1119 rp->eir_len = cpu_to_le16(eir_len);
1121 hci_dev_unlock(hdev);
1123 /* If this command is called at least once, then the events
1124 * for class of device and local name changes are disabled
1125 * and only the new extended controller information event
1128 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1129 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1130 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1132 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1133 sizeof(*rp) + eir_len);
/* Broadcast MGMT_EV_EXT_INFO_CHANGED (EIR-encoded class/appearance/names)
 * to sockets subscribed to extended info events, skipping @skip.
 * NOTE(review): the stack buffer declaration backing ev (line ~1138) was
 * dropped in extraction.
 */
1136 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1139 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1142 memset(buf, 0, sizeof(buf));
1144 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1145 ev->eir_len = cpu_to_le16(eir_len);
1147 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1148 sizeof(*ev) + eir_len,
1149 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Complete @opcode with the current settings bitmask as payload — the
 * standard success reply for all set_* mode commands.
 * NOTE(review): the trailing "sizeof(settings));" argument line was dropped
 * in extraction.
 */
1152 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1154 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1156 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1160 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1162 struct mgmt_ev_advertising_added ev;
1164 ev.instance = instance;
1166 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Notify mgmt listeners that an advertising instance was removed, skipping
 * the originating socket @sk.
 * NOTE(review): the signature continuation line ("u8 instance)") was
 * dropped in extraction.
 */
1169 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1172 struct mgmt_ev_advertising_removed ev;
1174 ev.instance = instance;
1176 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1179 static void cancel_adv_timeout(struct hci_dev *hdev)
1181 if (hdev->adv_instance_timeout) {
1182 hdev->adv_instance_timeout = 0;
1183 cancel_delayed_work(&hdev->adv_instance_expire);
1187 /* This function requires the caller holds hdev->lock */
/* Re-queue every LE connection parameter entry onto the proper pending
 * action list (auto-connect -> pend_le_conns, report -> pend_le_reports)
 * after power-on; needed because AUTO_OFF may not have really powered off.
 * NOTE(review): the "break;" statements, the default case and closing
 * braces were dropped in extraction.
 */
1188 static void restart_le_actions(struct hci_dev *hdev)
1190 struct hci_conn_params *p;
1192 list_for_each_entry(p, &hdev->le_conn_params, list) {
1193 /* Needed for AUTO_OFF case where might not "really"
1194 * have been powered off.
1196 list_del_init(&p->action);
1198 switch (p->auto_connect) {
1199 case HCI_AUTO_CONN_DIRECT:
1200 case HCI_AUTO_CONN_ALWAYS:
1201 list_add(&p->action, &hdev->pend_le_conns);
1203 case HCI_AUTO_CONN_REPORT:
1204 list_add(&p->action, &hdev->pend_le_reports);
1212 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1214 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1216 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1217 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Completion callback for the SET_POWERED sync command: on success restart
 * LE actions / passive scan under hdev lock and answer with the settings
 * reply (plus NEW_SETTINGS on power-on); on failure answer with a status.
 * NOTE(review): the early "return;", hci_dev_lock(), the "if (!err) {" /
 * "if (err) {" branch lines and cp->val usage lines were dropped in
 * extraction.
 */
1220 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1222 struct mgmt_pending_cmd *cmd = data;
1223 struct mgmt_mode *cp;
1225 /* Make sure cmd still outstanding. */
1226 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1231 bt_dev_dbg(hdev, "err %d", err);
1236 restart_le_actions(hdev);
1237 hci_update_passive_scan(hdev);
1238 hci_dev_unlock(hdev);
1241 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1243 /* Only call new_setting for power on as power off is deferred
1244 * to hdev->power_off work which does call hci_dev_do_close.
1247 new_settings(hdev, cmd->sk);
1249 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1253 mgmt_pending_remove(cmd);
/* hci_cmd_sync worker: apply the power state requested in the pending
 * command's mgmt_mode parameter.
 */
1256 static int set_powered_sync(struct hci_dev *hdev, void *data)
1258 struct mgmt_pending_cmd *cmd = data;
1259 struct mgmt_mode *cp = cmd->param;
1261 BT_DBG("%s", hdev->name);
1263 return hci_set_powered_sync(hdev, cp->val);
/* MGMT_OP_SET_POWERED handler: validate the mode byte (0/1 only),
 * reject if the same op is already pending, short-circuit when the
 * requested state equals the current one, otherwise queue
 * set_powered_sync() with mgmt_set_powered_complete() as callback.
 * (Error/goto paths and hci_dev_lock() are elided from this listing.)
 */
1266 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1269 struct mgmt_mode *cp = data;
1270 struct mgmt_pending_cmd *cmd;
1273 bt_dev_dbg(hdev, "sock %p", sk);
1275 if (cp->val != 0x00 && cp->val != 0x01)
1276 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1277 MGMT_STATUS_INVALID_PARAMS);
1281 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1282 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* No-op request: just echo the current settings back. */
1287 if (!!cp->val == hdev_is_powered(hdev)) {
1288 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1292 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1298 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1299 mgmt_set_powered_complete);
/* Queueing failed: tear the pending entry back down. */
1302 mgmt_pending_remove(cmd);
1305 hci_dev_unlock(hdev);
/* Public wrapper: broadcast the current settings to all listeners. */
1309 int mgmt_new_settings(struct hci_dev *hdev)
1311 return new_settings(hdev, NULL);
/* NOTE(review): fragment of struct cmd_lookup (its opening lines are
 * elided); presumably paired with a struct sock *sk member used by
 * settings_rsp() below — confirm against the full source.
 */
1316 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer each pending command with the
 * current settings, record (and hold a ref on) the first socket seen
 * so the caller can skip it in a later new_settings broadcast, then
 * free the pending entry.
 */
1320 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1322 struct cmd_lookup *match = data;
1324 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1326 list_del(&cmd->list);
1328 if (match->sk == NULL) {
1329 match->sk = cmd->sk;
1330 sock_hold(match->sk);
1333 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail a pending command with the u8
 * status pointed to by @data and discard the entry.
 */
1336 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1340 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1341 mgmt_pending_remove(cmd);
/* Like cmd_status_rsp(), but prefer the command's own cmd_complete
 * handler when one was installed; otherwise fall back to a bare
 * command status.
 */
1344 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1346 if (cmd->cmd_complete) {
1349 cmd->cmd_complete(cmd, *status);
1350 mgmt_pending_remove(cmd);
1355 cmd_status_rsp(cmd, data);
/* Complete a command echoing back its full stored parameter blob. */
1358 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1360 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1361 cmd->param, cmd->param_len);
/* Complete a command echoing back only the leading mgmt_addr_info. */
1364 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1366 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1367 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR state to a mgmt status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when it is administratively
 * disabled, SUCCESS otherwise.
 */
1370 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1372 if (!lmp_bredr_capable(hdev))
1373 return MGMT_STATUS_NOT_SUPPORTED;
1374 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1375 return MGMT_STATUS_REJECTED;
1377 return MGMT_STATUS_SUCCESS;
/* Same three-way check for the LE transport. */
1380 static u8 mgmt_le_support(struct hci_dev *hdev)
1382 if (!lmp_le_capable(hdev))
1383 return MGMT_STATUS_NOT_SUPPORTED;
1384 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1385 return MGMT_STATUS_REJECTED;
1387 return MGMT_STATUS_SUCCESS;
/* hci_cmd_sync completion for Set Discoverable: on error report the
 * status and clear limited discoverability; on success re-arm the
 * discoverable timeout (if any), answer the command and broadcast the
 * new settings. (Some branches are elided from this listing.)
 */
1390 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1393 struct mgmt_pending_cmd *cmd = data;
1395 bt_dev_dbg(hdev, "err %d", err);
1397 /* Make sure cmd still outstanding. */
1398 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1404 u8 mgmt_err = mgmt_status(err);
1405 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1406 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* When discoverable with a finite timeout, (re)schedule the
 * work that turns discoverability back off.
 */
1410 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1411 hdev->discov_timeout > 0) {
1412 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1413 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1416 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1417 new_settings(hdev, cmd->sk);
1420 mgmt_pending_remove(cmd);
1421 hci_dev_unlock(hdev);
/* hci_cmd_sync worker: push the (already updated) discoverable flags
 * down to the controller.
 */
1424 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1426 BT_DBG("%s", hdev->name);
1428 return hci_update_discoverable_sync(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates val (0 = off, 1 = on,
 * 2 = limited) and the timeout pairing rules, rejects while powered
 * off with a timeout, while a conflicting op is pending, while not
 * connectable, or while advertising is paused; handles the
 * powered-off and timeout-only-change fast paths without HCI traffic,
 * and otherwise updates the flags and queues set_discoverable_sync().
 * (Error/goto paths and some closing braces are elided here.)
 */
1431 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1434 struct mgmt_cp_set_discoverable *cp = data;
1435 struct mgmt_pending_cmd *cmd;
1439 bt_dev_dbg(hdev, "sock %p", sk);
1441 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1442 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1444 MGMT_STATUS_REJECTED);
1446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1448 MGMT_STATUS_INVALID_PARAMS);
1450 timeout = __le16_to_cpu(cp->timeout);
1452 /* Disabling discoverable requires that no timeout is set,
1453 * and enabling limited discoverable requires a timeout.
1455 if ((cp->val == 0x00 && timeout > 0) ||
1456 (cp->val == 0x02 && timeout == 0))
1457 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1458 MGMT_STATUS_INVALID_PARAMS);
1462 if (!hdev_is_powered(hdev) && timeout > 0) {
1463 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1464 MGMT_STATUS_NOT_POWERED);
1468 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1469 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1470 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1475 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1476 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1477 MGMT_STATUS_REJECTED);
1481 if (hdev->advertising_paused) {
1482 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Powered off: only toggle the flag, no HCI commands needed. */
1487 if (!hdev_is_powered(hdev)) {
1488 bool changed = false;
1490 /* Setting limited discoverable when powered off is
1491 * not a valid operation since it requires a timeout
1492 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1494 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1495 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1499 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1504 err = new_settings(hdev, sk);
1509 /* If the current mode is the same, then just update the timeout
1510 * value with the new value. And if only the timeout gets updated,
1511 * then no need for any HCI transactions.
1513 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1514 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1515 HCI_LIMITED_DISCOVERABLE)) {
1516 cancel_delayed_work(&hdev->discov_off);
1517 hdev->discov_timeout = timeout;
1519 if (cp->val && hdev->discov_timeout > 0) {
1520 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1521 queue_delayed_work(hdev->req_workqueue,
1522 &hdev->discov_off, to);
1525 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1529 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1535 /* Cancel any potential discoverable timeout that might be
1536 * still active and store new timeout value. The arming of
1537 * the timeout happens in the complete handler.
1539 cancel_delayed_work(&hdev->discov_off);
1540 hdev->discov_timeout = timeout;
/* Pre-set the flags; the sync worker pushes them to the
 * controller and the complete handler arms the timeout.
 */
1543 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1545 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1547 /* Limited discoverable mode */
1548 if (cp->val == 0x02)
1549 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1551 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1553 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1554 mgmt_set_discoverable_complete);
1557 mgmt_pending_remove(cmd);
1560 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for Set Connectable: report failure as a
 * command status, otherwise answer with the settings and broadcast
 * new_settings. (Some branches are elided from this listing.)
 */
1564 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1567 struct mgmt_pending_cmd *cmd = data;
1569 bt_dev_dbg(hdev, "err %d", err);
1571 /* Make sure cmd still outstanding. */
1572 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1578 u8 mgmt_err = mgmt_status(err);
1579 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1583 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1584 new_settings(hdev, cmd->sk);
1588 mgmt_pending_remove(cmd);
1590 hci_dev_unlock(hdev);
/* Flag-only Set Connectable path (used when powered off or no HCI
 * traffic is required): toggle HCI_CONNECTABLE, drop HCI_DISCOVERABLE
 * when turning connectable off, reply, and if anything changed
 * refresh scanning state and broadcast new_settings.
 */
1593 static int set_connectable_update_settings(struct hci_dev *hdev,
1594 struct sock *sk, u8 val)
1596 bool changed = false;
1599 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1603 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1605 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Not connectable implies not discoverable either. */
1606 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1609 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1614 hci_req_update_scan(hdev);
1615 hci_update_passive_scan(hdev);
1616 return new_settings(hdev, sk);
/* hci_cmd_sync worker: push the connectable state to the controller. */
1622 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1624 BT_DBG("%s", hdev->name);
1626 return hci_update_connectable_sync(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: validate the mode byte, take the
 * flag-only path while powered off, reject when a conflicting op is
 * pending, otherwise pre-set the flags and queue
 * set_connectable_sync(). (Error/goto paths are elided here.)
 */
1629 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1632 struct mgmt_mode *cp = data;
1633 struct mgmt_pending_cmd *cmd;
1636 bt_dev_dbg(hdev, "sock %p", sk);
1638 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1639 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1641 MGMT_STATUS_REJECTED);
1643 if (cp->val != 0x00 && cp->val != 0x01)
1644 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1645 MGMT_STATUS_INVALID_PARAMS);
1649 if (!hdev_is_powered(hdev)) {
1650 err = set_connectable_update_settings(hdev, sk, cp->val);
1654 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1655 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1661 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1668 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off also ends discoverability, so stop
 * the discoverable timeout and clear both related flags.
 */
1670 if (hdev->discov_timeout > 0)
1671 cancel_delayed_work(&hdev->discov_off);
1673 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1674 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1675 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1678 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1679 mgmt_set_connectable_complete);
1682 mgmt_pending_remove(cmd);
1685 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: purely a host-side flag; toggle
 * HCI_BONDABLE, reply, and if the value changed refresh
 * discoverability (the RPA may depend on bondable in limited privacy)
 * and broadcast new_settings. (Some lines are elided here.)
 */
1689 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1692 struct mgmt_mode *cp = data;
1696 bt_dev_dbg(hdev, "sock %p", sk);
1698 if (cp->val != 0x00 && cp->val != 0x01)
1699 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1700 MGMT_STATUS_INVALID_PARAMS);
1705 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1707 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1709 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1714 /* In limited privacy mode the change of bondable mode
1715 * may affect the local advertising address.
1717 hci_update_discoverable(hdev);
1719 err = new_settings(hdev, sk);
1723 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR; while powered
 * off just toggles HCI_LINK_SECURITY, otherwise sends
 * HCI_OP_WRITE_AUTH_ENABLE (legacy request path, not hci_cmd_sync).
 * (Error/goto paths are elided from this listing.)
 */
1727 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1730 struct mgmt_mode *cp = data;
1731 struct mgmt_pending_cmd *cmd;
1735 bt_dev_dbg(hdev, "sock %p", sk);
1737 status = mgmt_bredr_support(hdev);
1739 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1742 if (cp->val != 0x00 && cp->val != 0x01)
1743 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1744 MGMT_STATUS_INVALID_PARAMS);
1748 if (!hdev_is_powered(hdev)) {
1749 bool changed = false;
1751 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1752 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1756 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1761 err = new_settings(hdev, sk);
1766 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1767 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Already in the requested state on the controller side. */
1774 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1775 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1779 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1785 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1787 mgmt_pending_remove(cmd);
1792 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for Set SSP: on error roll the SSP/HS flags
 * back and fail all pending SET_SSP commands; on success reconcile
 * the flags, answer pending commands via settings_rsp(), broadcast
 * new_settings once, and refresh the EIR. (Branches elided here.)
 */
1796 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1798 struct cmd_lookup match = { NULL, hdev };
1799 struct mgmt_pending_cmd *cmd = data;
1800 struct mgmt_mode *cp = cmd->param;
1801 u8 enable = cp->val;
1804 /* Make sure cmd still outstanding. */
1805 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1809 u8 mgmt_err = mgmt_status(err);
/* Failed enable: undo the optimistic flag set and tell
 * listeners the settings reverted.
 */
1811 if (enable && hci_dev_test_and_clear_flag(hdev,
1813 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1814 new_settings(hdev, NULL);
1817 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1823 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1825 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1828 changed = hci_dev_test_and_clear_flag(hdev,
1831 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1834 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1837 new_settings(hdev, match.sk);
1842 hci_update_eir_sync(hdev);
/* hci_cmd_sync worker: optimistically set HCI_SSP_ENABLED when
 * enabling, write the SSP mode to the controller, and undo the flag
 * if the write failed.
 */
1845 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1847 struct mgmt_pending_cmd *cmd = data;
1848 struct mgmt_mode *cp = cmd->param;
1849 bool changed = false;
1853 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1855 err = hci_write_ssp_mode_sync(hdev, cp->val);
1857 if (!err && changed)
1858 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP capability; while
 * powered off only toggles flags (disabling SSP also clears HS),
 * rejects while another SET_SSP is pending, short-circuits when the
 * state is unchanged, else queues set_ssp_sync(). (Elided branches.)
 */
1863 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1865 struct mgmt_mode *cp = data;
1866 struct mgmt_pending_cmd *cmd;
1870 bt_dev_dbg(hdev, "sock %p", sk);
1872 status = mgmt_bredr_support(hdev);
1874 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1876 if (!lmp_ssp_capable(hdev))
1877 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1878 MGMT_STATUS_NOT_SUPPORTED);
1880 if (cp->val != 0x00 && cp->val != 0x01)
1881 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1882 MGMT_STATUS_INVALID_PARAMS);
1886 if (!hdev_is_powered(hdev)) {
1890 changed = !hci_dev_test_and_set_flag(hdev,
1893 changed = hci_dev_test_and_clear_flag(hdev,
1896 changed = hci_dev_test_and_clear_flag(hdev,
1899 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1902 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1907 err = new_settings(hdev, sk);
1912 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1913 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1918 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1919 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1923 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1927 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
1931 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1932 MGMT_STATUS_FAILED);
1935 mgmt_pending_remove(cmd);
1939 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed / AMP) handler: purely a host flag.
 * Requires CONFIG_BT_HS, BR/EDR, SSP capability and SSP enabled;
 * rejects while SET_SSP is pending and rejects disabling while
 * powered. Replies and broadcasts new_settings when the flag changed.
 * (Error/goto paths are elided from this listing.)
 */
1943 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1945 struct mgmt_mode *cp = data;
1950 bt_dev_dbg(hdev, "sock %p", sk);
1952 if (!IS_ENABLED(CONFIG_BT_HS))
1953 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1954 MGMT_STATUS_NOT_SUPPORTED);
1956 status = mgmt_bredr_support(hdev);
1958 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1960 if (!lmp_ssp_capable(hdev))
1961 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1962 MGMT_STATUS_NOT_SUPPORTED);
1964 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1965 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1966 MGMT_STATUS_REJECTED);
1968 if (cp->val != 0x00 && cp->val != 0x01)
1969 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1970 MGMT_STATUS_INVALID_PARAMS);
1974 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1975 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1981 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1983 if (hdev_is_powered(hdev)) {
1984 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1985 MGMT_STATUS_REJECTED);
1989 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1992 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1997 err = new_settings(hdev, sk);
2000 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for Set LE: on error fail every pending
 * SET_LE command; on success answer them with settings_rsp() and
 * broadcast new_settings once. (Some branches are elided here.)
 */
2004 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2006 struct cmd_lookup match = { NULL, hdev };
2007 u8 status = mgmt_status(err);
2009 bt_dev_dbg(hdev, "err %d", err);
2012 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2017 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2019 new_settings(hdev, match.sk);
/* hci_cmd_sync worker for Set LE: when disabling, stop advertising
 * and remove the default ext-adv instance; write LE host support, and
 * when LE ends up enabled refresh default advertising/scan-response
 * data and passive scanning. (Some branches are elided here.)
 */
2025 static int set_le_sync(struct hci_dev *hdev, void *data)
2027 struct mgmt_pending_cmd *cmd = data;
2028 struct mgmt_mode *cp = cmd->param;
2033 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2034 hci_disable_advertising_sync(hdev);
2036 if (ext_adv_capable(hdev))
2037 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2039 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2042 err = hci_write_le_host_supported_sync(hdev, val, 0);
2044 /* Make sure the controller has a good default for
2045 * advertising data. Restrict the update to when LE
2046 * has actually been enabled. During power on, the
2047 * update in powered_update_hci will take care of it.
2049 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2050 if (ext_adv_capable(hdev)) {
2053 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2055 hci_update_scan_rsp_data_sync(hdev, 0x00);
/* Legacy advertising path: refresh both data sets. */
2057 hci_update_adv_data_sync(hdev, 0x00);
2058 hci_update_scan_rsp_data_sync(hdev, 0x00);
2061 hci_update_passive_scan(hdev);
/* MGMT_OP_SET_LE handler: LE-only controllers may not switch LE off
 * (enable is acked gracefully, disable rejected); flag-only path when
 * powered off or the host-support state already matches; otherwise
 * queues set_le_sync(). (Error/goto paths are elided here.)
 */
2067 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2069 struct mgmt_mode *cp = data;
2070 struct mgmt_pending_cmd *cmd;
2074 bt_dev_dbg(hdev, "sock %p", sk);
2076 if (!lmp_le_capable(hdev))
2077 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2078 MGMT_STATUS_NOT_SUPPORTED);
2080 if (cp->val != 0x00 && cp->val != 0x01)
2081 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2082 MGMT_STATUS_INVALID_PARAMS);
2084 /* Bluetooth single mode LE only controllers or dual-mode
2085 * controllers configured as LE only devices, do not allow
2086 * switching LE off. These have either LE enabled explicitly
2087 * or BR/EDR has been previously switched off.
2089 * When trying to enable an already enabled LE, then gracefully
2090 * send a positive response. Trying to disable it however will
2091 * result into rejection.
2093 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2094 if (cp->val == 0x01)
2095 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2097 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2098 MGMT_STATUS_REJECTED);
2104 enabled = lmp_host_le_capable(hdev);
/* Disabling LE also clears any advertising instances. */
2107 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
2109 if (!hdev_is_powered(hdev) || val == enabled) {
2110 bool changed = false;
2112 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2113 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2117 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2118 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2122 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2127 err = new_settings(hdev, sk);
2132 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2133 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2134 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2139 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2143 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2147 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2148 MGMT_STATUS_FAILED);
2151 mgmt_pending_remove(cmd);
2155 hci_dev_unlock(hdev);
2159 /* This is a helper function to test for pending mgmt commands that can
2160 * cause CoD or EIR HCI commands. We can only allow one such pending
2161 * mgmt command at a time since otherwise we cannot easily track what
2162 * the current values are, will be, and based on that calculate if a new
2163 * HCI command needs to be sent and if yes with what value.
2165 static bool pending_eir_or_class(struct hci_dev *hdev)
2167 struct mgmt_pending_cmd *cmd;
2169 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2170 switch (cmd->opcode) {
2171 case MGMT_OP_ADD_UUID:
2172 case MGMT_OP_REMOVE_UUID:
2173 case MGMT_OP_SET_DEV_CLASS:
2174 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID in little-endian byte order; 16- and 32-bit
 * UUIDs are this base with only bytes 12..15 varying.
 */
2182 static const u8 bluetooth_base_uuid[] = {
2183 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2184 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: anything not derived from the base UUID is
 * a full 128-bit UUID; otherwise inspect the 32-bit value at offset
 * 12 to distinguish 16- from 32-bit forms. (Returns elided here.)
 */
2187 static u8 get_uuid_size(const u8 *uuid)
2191 if (memcmp(uuid, bluetooth_base_uuid, 12))
2194 val = get_unaligned_le32(&uuid[12]);
/* Shared hci_cmd_sync completion for the UUID/class commands: reply
 * with the (possibly updated) 3-byte device class and free the entry.
 */
2201 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2203 struct mgmt_pending_cmd *cmd = data;
2205 bt_dev_dbg(hdev, "err %d", err);
2207 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2208 mgmt_status(err), hdev->dev_class, 3);
2210 mgmt_pending_free(cmd);
/* hci_cmd_sync worker for Add UUID: refresh class of device, then EIR. */
2213 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2217 err = hci_update_class_sync(hdev);
2221 return hci_update_eir_sync(hdev);
/* MGMT_OP_ADD_UUID handler: reject while another EIR/class-affecting
 * command is pending, append the UUID (with service hint and size) to
 * hdev->uuids, and queue add_uuid_sync(). Uses mgmt_pending_new (not
 * _add) since completion always frees. (Error paths elided here.)
 */
2224 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2226 struct mgmt_cp_add_uuid *cp = data;
2227 struct mgmt_pending_cmd *cmd;
2228 struct bt_uuid *uuid;
2231 bt_dev_dbg(hdev, "sock %p", sk);
2235 if (pending_eir_or_class(hdev)) {
2236 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2241 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2247 memcpy(uuid->uuid, cp->uuid, 16);
2248 uuid->svc_hint = cp->svc_hint;
2249 uuid->size = get_uuid_size(cp->uuid);
2251 list_add_tail(&uuid->list, &hdev->uuids);
2253 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2259 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2261 mgmt_pending_free(cmd);
2266 hci_dev_unlock(hdev);
/* Arm the service-cache (deferred EIR/class update) when powered;
 * returns whether the caller may skip the immediate update.
 * (Return statements are elided from this listing.)
 */
2270 static bool enable_service_cache(struct hci_dev *hdev)
2272 if (!hdev_is_powered(hdev))
2275 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2276 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_cmd_sync worker for Remove UUID: refresh class, then EIR. */
2284 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2288 err = hci_update_class_sync(hdev);
2292 return hci_update_eir_sync(hdev);
/* MGMT_OP_REMOVE_UUID handler: an all-zero UUID clears the whole list
 * (possibly deferring the HCI update via the service cache); else
 * remove every matching entry, failing with INVALID_PARAMS when none
 * matched, then queue remove_uuid_sync(). (Elided branches.)
 */
2295 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2298 struct mgmt_cp_remove_uuid *cp = data;
2299 struct mgmt_pending_cmd *cmd;
2300 struct bt_uuid *match, *tmp;
2301 static const u8 bt_uuid_any[] = {
2302 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2306 bt_dev_dbg(hdev, "sock %p", sk);
2310 if (pending_eir_or_class(hdev)) {
2311 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2316 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2317 hci_uuids_clear(hdev);
2319 if (enable_service_cache(hdev)) {
2320 err = mgmt_cmd_complete(sk, hdev->id,
2321 MGMT_OP_REMOVE_UUID,
2322 0, hdev->dev_class, 3);
2331 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2332 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2335 list_del(&match->list);
/* Nothing matched: the UUID was never registered. */
2341 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2342 MGMT_STATUS_INVALID_PARAMS);
2347 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2353 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2354 mgmt_class_complete);
2356 mgmt_pending_free(cmd);
2359 hci_dev_unlock(hdev);
/* hci_cmd_sync worker for Set Device Class: flush a pending service
 * cache (cancelling its work and refreshing the EIR first), then
 * write the class of device. (Early-return lines are elided.)
 */
2363 static int set_class_sync(struct hci_dev *hdev, void *data)
2367 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2368 cancel_delayed_work_sync(&hdev->service_cache);
2369 err = hci_update_eir_sync(hdev);
2375 return hci_update_class_sync(hdev);
/* MGMT_OP_SET_DEV_CLASS handler: BR/EDR only; validates the reserved
 * bits (minor low 2 bits, major high 3 bits must be zero), stores the
 * class, and either replies immediately when powered off or queues
 * set_class_sync(). (Error/goto paths are elided here.)
 */
2378 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2381 struct mgmt_cp_set_dev_class *cp = data;
2382 struct mgmt_pending_cmd *cmd;
2385 bt_dev_dbg(hdev, "sock %p", sk);
2387 if (!lmp_bredr_capable(hdev))
2388 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2389 MGMT_STATUS_NOT_SUPPORTED);
2393 if (pending_eir_or_class(hdev)) {
2394 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2399 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2400 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2401 MGMT_STATUS_INVALID_PARAMS);
2405 hdev->major_class = cp->major;
2406 hdev->minor_class = cp->minor;
2408 if (!hdev_is_powered(hdev)) {
2409 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2410 hdev->dev_class, 3);
2414 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2420 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2421 mgmt_class_complete);
2423 mgmt_pending_free(cmd);
2426 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validate key_count against both the
 * u16 overflow bound and the exact struct_size() of the payload,
 * validate each key's address type and key type, then replace the
 * stored BR/EDR link keys wholesale. Debug-combination keys are never
 * stored. (Some lines/braces are elided from this listing.)
 */
2430 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2433 struct mgmt_cp_load_link_keys *cp = data;
2434 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2435 sizeof(struct mgmt_link_key_info));
2436 u16 key_count, expected_len;
2440 bt_dev_dbg(hdev, "sock %p", sk);
2442 if (!lmp_bredr_capable(hdev))
2443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2444 MGMT_STATUS_NOT_SUPPORTED);
2446 key_count = __le16_to_cpu(cp->key_count);
2447 if (key_count > max_key_count) {
2448 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2450 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2451 MGMT_STATUS_INVALID_PARAMS);
2454 expected_len = struct_size(cp, keys, key_count);
2455 if (expected_len != len) {
2456 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2458 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2459 MGMT_STATUS_INVALID_PARAMS);
2462 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2463 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2464 MGMT_STATUS_INVALID_PARAMS);
2466 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before mutating state. */
2469 for (i = 0; i < key_count; i++) {
2470 struct mgmt_link_key_info *key = &cp->keys[i];
2472 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2473 return mgmt_cmd_status(sk, hdev->id,
2474 MGMT_OP_LOAD_LINK_KEYS,
2475 MGMT_STATUS_INVALID_PARAMS);
2480 hci_link_keys_clear(hdev);
2483 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2485 changed = hci_dev_test_and_clear_flag(hdev,
2486 HCI_KEEP_DEBUG_KEYS);
2489 new_settings(hdev, NULL);
/* Second pass: store the keys, skipping blocked and debug keys. */
2491 for (i = 0; i < key_count; i++) {
2492 struct mgmt_link_key_info *key = &cp->keys[i];
2494 if (hci_is_blocked_key(hdev,
2495 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2497 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2502 /* Always ignore debug keys and require a new pairing if
2503 * the user wants to use them.
2505 if (key->type == HCI_LK_DEBUG_COMBINATION)
2508 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2509 key->type, key->pin_len, NULL);
2512 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2514 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for the given address, skipping the
 * socket that triggered the unpair.
 */
2519 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2520 u8 addr_type, struct sock *skip_sk)
2522 struct mgmt_ev_device_unpaired ev;
2524 bacpy(&ev.addr.bdaddr, bdaddr);
2525 ev.addr.type = addr_type;
2527 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler. For BR/EDR: remove the link key; for
 * LE: cancel SMP pairing (dropping LTK/IRK), mark connection params
 * for removal on close, and disable auto-connect. If @disconnect is
 * set and a connection exists, abort it and complete asynchronously;
 * otherwise reply immediately and emit Device Unpaired.
 * (Error/goto paths and some braces are elided from this listing.)
 */
2531 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2534 struct mgmt_cp_unpair_device *cp = data;
2535 struct mgmt_rp_unpair_device rp;
2536 struct hci_conn_params *params;
2537 struct mgmt_pending_cmd *cmd;
2538 struct hci_conn *conn;
2542 memset(&rp, 0, sizeof(rp));
2543 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2544 rp.addr.type = cp->addr.type;
2546 if (!bdaddr_type_is_valid(cp->addr.type))
2547 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2548 MGMT_STATUS_INVALID_PARAMS,
2551 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2552 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2553 MGMT_STATUS_INVALID_PARAMS,
2558 if (!hdev_is_powered(hdev)) {
2559 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2560 MGMT_STATUS_NOT_POWERED, &rp,
2565 if (cp->addr.type == BDADDR_BREDR) {
2566 /* If disconnection is requested, then look up the
2567 * connection. If the remote device is connected, it
2568 * will be later used to terminate the link.
2570 * Setting it to NULL explicitly will cause no
2571 * termination of the link.
2574 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2579 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2581 err = mgmt_cmd_complete(sk, hdev->id,
2582 MGMT_OP_UNPAIR_DEVICE,
2583 MGMT_STATUS_NOT_PAIRED, &rp,
2591 /* LE address type */
2592 addr_type = le_addr_type(cp->addr.type);
2594 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2595 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2597 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2598 MGMT_STATUS_NOT_PAIRED, &rp,
2603 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* No live LE connection: the parameters can go right away. */
2605 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2610 /* Defer clearing up the connection parameters until closing to
2611 * give a chance of keeping them if a repairing happens.
2613 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2615 /* Disable auto-connection parameters if present */
2616 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2618 if (params->explicit_connect)
2619 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2621 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2624 /* If disconnection is not requested, then clear the connection
2625 * variable so that the link is not terminated.
2627 if (!cp->disconnect)
2631 /* If the connection variable is set, then termination of the
2632 * link is requested.
2635 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2637 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2641 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2648 cmd->cmd_complete = addr_cmd_complete;
2650 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2652 mgmt_pending_remove(cmd);
2655 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate the address type, require the
 * device to be up, allow only one DISCONNECT pending, look up the
 * BR/EDR or LE connection, and issue hci_disconnect(); completion is
 * asynchronous via generic_cmd_complete. (Elided goto paths.)
 */
2659 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2662 struct mgmt_cp_disconnect *cp = data;
2663 struct mgmt_rp_disconnect rp;
2664 struct mgmt_pending_cmd *cmd;
2665 struct hci_conn *conn;
2668 bt_dev_dbg(hdev, "sock %p", sk);
2670 memset(&rp, 0, sizeof(rp));
2671 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2672 rp.addr.type = cp->addr.type;
2674 if (!bdaddr_type_is_valid(cp->addr.type))
2675 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2676 MGMT_STATUS_INVALID_PARAMS,
2681 if (!test_bit(HCI_UP, &hdev->flags)) {
2682 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2683 MGMT_STATUS_NOT_POWERED, &rp,
2688 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2689 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2690 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2694 if (cp->addr.type == BDADDR_BREDR)
2695 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2698 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2699 le_addr_type(cp->addr.type));
2701 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2702 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2703 MGMT_STATUS_NOT_CONNECTED, &rp,
2708 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2714 cmd->cmd_complete = generic_cmd_complete;
2716 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2718 mgmt_pending_remove(cmd);
2721 hci_dev_unlock(hdev);
/* Translate an HCI link type plus address type into the mgmt BDADDR_*
 * address type exposed to userspace. (Case labels for LE_LINK and the
 * random-address branch are elided from this listing.)
 */
2725 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2727 switch (link_type) {
2729 switch (addr_type) {
2730 case ADDR_LE_DEV_PUBLIC:
2731 return BDADDR_LE_PUBLIC;
2734 /* Fallback to LE Random address type */
2735 return BDADDR_LE_RANDOM;
2739 /* Fallback to BR/EDR type */
2740 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: count mgmt-visible connections,
 * allocate the response with struct_size(), fill in each address
 * (skipping SCO/eSCO links in the copy pass), and reply with the
 * length recomputed from the copied count. (Elided goto paths.)
 */
2744 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2747 struct mgmt_rp_get_connections *rp;
2752 bt_dev_dbg(hdev, "sock %p", sk);
2756 if (!hdev_is_powered(hdev)) {
2757 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2758 MGMT_STATUS_NOT_POWERED);
/* First pass: count entries to size the allocation. */
2763 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2764 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2768 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: copy addresses, filtering SCO/eSCO links. */
2775 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2776 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2778 bacpy(&rp->addr[i].bdaddr, &c->dst);
2779 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2780 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2785 rp->conn_count = cpu_to_le16(i);
2787 /* Recalculate length in case of filtered SCO connections, etc */
2788 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2789 struct_size(rp, addr, i));
2794 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY and send the corresponding HCI
 * negative reply for the given address; the pending entry is removed
 * if the HCI send fails. (Some lines are elided from this listing.)
 */
2798 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2799 struct mgmt_cp_pin_code_neg_reply *cp)
2801 struct mgmt_pending_cmd *cmd;
2804 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2809 cmd->cmd_complete = addr_cmd_complete;
2811 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2812 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2814 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a userspace-supplied PIN code
 * to the controller for the ACL connection to cp->addr.bdaddr.
 *
 * Special case: if the connection's pending security level is
 * BT_SECURITY_HIGH, a 16-byte PIN is mandatory; a shorter PIN is
 * answered with an automatic negative reply instead.
 *
 * NOTE(review): lines are missing from this extract (locking, several
 * goto/unlock paths, NULL checks after lookups) -- not the full function.
 */
2819 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2822 struct hci_conn *conn;
2823 struct mgmt_cp_pin_code_reply *cp = data;
2824 struct hci_cp_pin_code_reply reply;
2825 struct mgmt_pending_cmd *cmd;
2828 bt_dev_dbg(hdev, "sock %p", sk);
2832 if (!hdev_is_powered(hdev)) {
2833 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2834 MGMT_STATUS_NOT_POWERED);
2838 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2840 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2841 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; reject shorter ones. */
2845 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2846 struct mgmt_cp_pin_code_neg_reply ncp;
2848 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2850 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2852 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2854 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2855 MGMT_STATUS_INVALID_PARAMS);
2860 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2866 cmd->cmd_complete = addr_cmd_complete;
2868 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2869 reply.pin_len = cp->pin_len;
2870 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2872 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2874 mgmt_pending_remove(cmd);
2877 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validate and store the IO
 * capability used for future pairing.  Values above
 * SMP_IO_KEYBOARD_DISPLAY are rejected as invalid parameters.
 *
 * NOTE(review): this extract is missing lines (the hci_dev_lock() call
 * and the final reply arguments are not visible).
 */
2881 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2884 struct mgmt_cp_set_io_capability *cp = data;
2886 bt_dev_dbg(hdev, "sock %p", sk);
2888 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2889 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2890 MGMT_STATUS_INVALID_PARAMS);
2894 hdev->io_capability = cp->io_capability;
2896 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2898 hci_dev_unlock(hdev);
2900 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points to
 * this connection, or NULL if none (the NULL return path is among the
 * lines missing from this extract).
 */
2904 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2906 struct hci_dev *hdev = conn->hdev;
2907 struct mgmt_pending_cmd *cmd;
2909 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2910 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2913 if (cmd->user_data != conn)
/* Complete a pending Pair Device command: send the MGMT reply with the
 * peer address and status, detach all pairing callbacks from the
 * connection so no further notifications arrive, and drop the
 * connection reference taken when pairing started.
 *
 * NOTE(review): extract is missing lines (the hci_conn_put() matching
 * cmd->user_data = hci_conn_get() in pair_device is presumably among
 * them -- confirm against full source).
 */
2922 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2924 struct mgmt_rp_pair_device rp;
2925 struct hci_conn *conn = cmd->user_data;
2928 bacpy(&rp.addr.bdaddr, &conn->dst);
2929 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2931 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2932 status, &rp, sizeof(rp));
2934 /* So we don't get further callbacks for this connection */
2935 conn->connect_cfm_cb = NULL;
2936 conn->security_cfm_cb = NULL;
2937 conn->disconn_cfm_cb = NULL;
2939 hci_conn_drop(conn);
2941 /* The device is paired so there is no need to remove
2942 * its connection parameters anymore.
2944 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over SMP finishes; resolves any
 * pending Pair Device command for this connection with SUCCESS or
 * FAILED.  (The NULL check on find_pairing()'s result is among the
 * lines missing from this extract.)
 */
2951 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2953 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2954 struct mgmt_pending_cmd *cmd;
2956 cmd = find_pairing(conn);
2958 cmd->cmd_complete(cmd, status);
2959 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback used while a Pair
 * Device command is pending: translate the HCI status and finish the
 * pending command.
 */
2963 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2965 struct mgmt_pending_cmd *cmd;
2967 BT_DBG("status %u", status);
2969 cmd = find_pairing(conn);
2971 BT_DBG("Unable to find a pending command");
2975 cmd->cmd_complete(cmd, mgmt_status(status));
2976 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb.  NOTE(review): lines are
 * missing between the debug print and find_pairing() (original lines
 * 2984-2987) -- upstream has an early return for status == 0 there,
 * since a successful LE connection alone does not mean pairing is done;
 * confirm against the full source.
 */
2979 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2981 struct mgmt_pending_cmd *cmd;
2983 BT_DBG("status %u", status);
2988 cmd = find_pairing(conn);
2990 BT_DBG("Unable to find a pending command");
2994 cmd->cmd_complete(cmd, mgmt_status(status));
2995 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: validate the request, create (or reuse)
 * a connection to the peer, install pairing callbacks and queue a
 * pending command that is completed from those callbacks.
 *
 * Flow visible in this extract:
 *  - validate address type and io_cap, reject if unpowered or already
 *    paired;
 *  - BR/EDR: hci_connect_acl() with dedicated-bonding auth;
 *  - LE: pre-register connection parameters (so the peer is remembered
 *    for future connections) and use hci_connect_le_scan();
 *  - map connect errors (-EBUSY/-EOPNOTSUPP/-ECONNREFUSED) to MGMT
 *    statuses; an existing connect_cfm_cb means pairing is already in
 *    progress (BUSY);
 *  - attach pairing_complete_cb (BR/EDR) or le_pairing_complete_cb (LE)
 *    and, if the link is already up and secure, complete immediately.
 *
 * NOTE(review): this extract is missing many lines (gotos, unlock
 * labels, IS_ERR(conn)/!p/!cmd checks); treat the visible code as an
 * outline, not the complete function.
 */
2998 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3001 struct mgmt_cp_pair_device *cp = data;
3002 struct mgmt_rp_pair_device rp;
3003 struct mgmt_pending_cmd *cmd;
3004 u8 sec_level, auth_type;
3005 struct hci_conn *conn;
3008 bt_dev_dbg(hdev, "sock %p", sk);
3010 memset(&rp, 0, sizeof(rp));
3011 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3012 rp.addr.type = cp->addr.type;
3014 if (!bdaddr_type_is_valid(cp->addr.type))
3015 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3016 MGMT_STATUS_INVALID_PARAMS,
3019 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3020 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3021 MGMT_STATUS_INVALID_PARAMS,
3026 if (!hdev_is_powered(hdev)) {
3027 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3028 MGMT_STATUS_NOT_POWERED, &rp,
3033 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3034 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3035 MGMT_STATUS_ALREADY_PAIRED, &rp,
3040 sec_level = BT_SECURITY_MEDIUM;
3041 auth_type = HCI_AT_DEDICATED_BONDING;
3043 if (cp->addr.type == BDADDR_BREDR) {
3044 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3045 auth_type, CONN_REASON_PAIR_DEVICE);
3047 u8 addr_type = le_addr_type(cp->addr.type);
3048 struct hci_conn_params *p;
3050 /* When pairing a new device, it is expected to remember
3051 * this device for future connections. Adding the connection
3052 * parameter information ahead of time allows tracking
3053 * of the peripheral preferred values and will speed up any
3054 * further connection establishment.
3056 * If connection parameters already exist, then they
3057 * will be kept and this function does nothing.
3059 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3061 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3062 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3064 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3065 sec_level, HCI_LE_CONN_TIMEOUT,
3066 CONN_REASON_PAIR_DEVICE);
/* Map connection-establishment errors to MGMT status codes. */
3072 if (PTR_ERR(conn) == -EBUSY)
3073 status = MGMT_STATUS_BUSY;
3074 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3075 status = MGMT_STATUS_NOT_SUPPORTED;
3076 else if (PTR_ERR(conn) == -ECONNREFUSED)
3077 status = MGMT_STATUS_REJECTED;
3079 status = MGMT_STATUS_CONNECT_FAILED;
3081 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3082 status, &rp, sizeof(rp));
/* A callback already installed means another pairing is in flight. */
3086 if (conn->connect_cfm_cb) {
3087 hci_conn_drop(conn);
3088 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3089 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3093 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3096 hci_conn_drop(conn);
3100 cmd->cmd_complete = pairing_complete;
3102 /* For LE, just connecting isn't a proof that the pairing finished */
3103 if (cp->addr.type == BDADDR_BREDR) {
3104 conn->connect_cfm_cb = pairing_complete_cb;
3105 conn->security_cfm_cb = pairing_complete_cb;
3106 conn->disconn_cfm_cb = pairing_complete_cb;
3108 conn->connect_cfm_cb = le_pairing_complete_cb;
3109 conn->security_cfm_cb = le_pairing_complete_cb;
3110 conn->disconn_cfm_cb = le_pairing_complete_cb;
3113 conn->io_capability = cp->io_cap;
3114 cmd->user_data = hci_conn_get(conn);
/* Link already up and secure enough: complete synchronously. */
3116 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3117 hci_conn_security(conn, sec_level, auth_type, true)) {
3118 cmd->cmd_complete(cmd, 0);
3119 mgmt_pending_remove(cmd);
3125 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the pending Pair Device
 * command for the given address.  The pending command is completed with
 * MGMT_STATUS_CANCELLED, any stored link key / SMP pairing state for
 * the peer is removed, and the link is torn down if it was only created
 * for the pairing attempt (CONN_REASON_PAIR_DEVICE).
 *
 * NOTE(review): extract is missing lines (locking, goto/unlock labels,
 * the !cmd NULL check before the INVALID_PARAMS reply).
 */
3129 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3132 struct mgmt_addr_info *addr = data;
3133 struct mgmt_pending_cmd *cmd;
3134 struct hci_conn *conn;
3137 bt_dev_dbg(hdev, "sock %p", sk);
3141 if (!hdev_is_powered(hdev)) {
3142 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3143 MGMT_STATUS_NOT_POWERED);
3147 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3149 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3150 MGMT_STATUS_INVALID_PARAMS);
3154 conn = cmd->user_data;
/* The cancel must name the same peer the pending pairing targets. */
3156 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3157 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3158 MGMT_STATUS_INVALID_PARAMS);
3162 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3163 mgmt_pending_remove(cmd);
3165 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3166 addr, sizeof(*addr));
3168 /* Since user doesn't want to proceed with the connection, abort any
3169 * ongoing pairing and then terminate the link if it was created
3170 * because of the pair device action.
3172 if (addr->type == BDADDR_BREDR)
3173 hci_remove_link_key(hdev, &addr->bdaddr);
3175 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3176 le_addr_type(addr->type));
3178 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3179 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3182 hci_dev_unlock(hdev);
/* Common helper for all user pairing responses (PIN neg reply, user
 * confirm [neg] reply, passkey [neg] reply).
 *
 * @mgmt_op: the MGMT opcode being answered (used for the reply).
 * @hci_op:  the HCI command to forward for BR/EDR links.
 * @passkey: only used when hci_op is HCI_OP_USER_PASSKEY_REPLY.
 *
 * LE pairing responses are routed to the SMP layer via
 * smp_user_confirm_reply() and answered immediately; BR/EDR responses
 * are forwarded to the controller with a pending command tracking them.
 *
 * NOTE(review): extract is missing lines (locking, conn NULL check,
 * !cmd check, goto/done labels).
 */
3186 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3187 struct mgmt_addr_info *addr, u16 mgmt_op,
3188 u16 hci_op, __le32 passkey)
3190 struct mgmt_pending_cmd *cmd;
3191 struct hci_conn *conn;
3196 if (!hdev_is_powered(hdev)) {
3197 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3198 MGMT_STATUS_NOT_POWERED, addr,
3203 if (addr->type == BDADDR_BREDR)
3204 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3206 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3207 le_addr_type(addr->type));
3210 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3211 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE: the response goes to SMP, not to the controller. */
3216 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3217 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3219 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3220 MGMT_STATUS_SUCCESS, addr,
3223 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3224 MGMT_STATUS_FAILED, addr,
3230 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3236 cmd->cmd_complete = addr_cmd_complete;
3238 /* Continue with pairing via HCI */
3239 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3240 struct hci_cp_user_passkey_reply cp;
3242 bacpy(&cp.bdaddr, &addr->bdaddr);
3243 cp.passkey = passkey;
3244 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3246 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3250 mgmt_pending_remove(cmd);
3253 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the matching HCI opcode and no passkey.
 */
3257 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3258 void *data, u16 len)
3260 struct mgmt_cp_pin_code_neg_reply *cp = data;
3262 bt_dev_dbg(hdev, "sock %p", sk);
3264 return user_pairing_resp(sk, hdev, &cp->addr,
3265 MGMT_OP_PIN_CODE_NEG_REPLY,
3266 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler.  Unlike its siblings it checks
 * the exact parameter length first (the command carries no variable
 * part), then delegates to user_pairing_resp().
 */
3269 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3272 struct mgmt_cp_user_confirm_reply *cp = data;
3274 bt_dev_dbg(hdev, "sock %p", sk);
3276 if (len != sizeof(*cp))
3277 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3278 MGMT_STATUS_INVALID_PARAMS);
3280 return user_pairing_resp(sk, hdev, &cp->addr,
3281 MGMT_OP_USER_CONFIRM_REPLY,
3282 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: wrapper over
 * user_pairing_resp() with the negative-confirm HCI opcode.
 */
3285 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3286 void *data, u16 len)
3288 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3290 bt_dev_dbg(hdev, "sock %p", sk);
3292 return user_pairing_resp(sk, hdev, &cp->addr,
3293 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3294 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: wrapper over user_pairing_resp(),
 * the only sibling that forwards a passkey value.
 */
3297 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3300 struct mgmt_cp_user_passkey_reply *cp = data;
3302 bt_dev_dbg(hdev, "sock %p", sk);
3304 return user_pairing_resp(sk, hdev, &cp->addr,
3305 MGMT_OP_USER_PASSKEY_REPLY,
3306 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: wrapper over
 * user_pairing_resp() with the negative-passkey HCI opcode.
 */
3309 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3310 void *data, u16 len)
3312 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3314 bt_dev_dbg(hdev, "sock %p", sk);
3316 return user_pairing_resp(sk, hdev, &cp->addr,
3317 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3318 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the currently active advertising instance advertises any of the
 * given MGMT_ADV_FLAG_* data (e.g. local name, appearance), expire it:
 * cancel the instance timeout and schedule the next instance so that
 * stale advertising data is refreshed.
 *
 * NOTE(review): extract is missing lines (NULL checks on both
 * hci_find_adv_instance() and hci_get_next_instance() results, and the
 * return statements).
 */
3321 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3323 struct adv_info *adv_instance;
3325 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3329 /* stop if current instance doesn't need to be changed */
3330 if (!(adv_instance->flags & flags))
3333 cancel_adv_timeout(hdev);
3335 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3339 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
/* hci_cmd_sync callback: after a local-name change, expire advertising
 * instances that include the local name in their data.
 */
3344 static int name_changed_sync(struct hci_dev *hdev, void *data)
3346 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
/* Completion callback for the Set Local Name sync request: reply to the
 * originating socket with status or success, refresh name-bearing
 * advertising if LE advertising is active, and drop the pending command.
 * Bails out early if the pending command was already superseded.
 *
 * NOTE(review): extract is missing lines (the status != 0 branch
 * structure and reply payload arguments are truncated).
 */
3349 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3351 struct mgmt_pending_cmd *cmd = data;
3352 struct mgmt_cp_set_local_name *cp = cmd->param;
3353 u8 status = mgmt_status(err);
3355 bt_dev_dbg(hdev, "err %d", err);
3357 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3361 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3364 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3367 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3368 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3371 mgmt_pending_remove(cmd);
/* hci_cmd_sync work for Set Local Name: push the new name and EIR data
 * to a BR/EDR-capable controller, and refresh the scan response data
 * when LE advertising is enabled (the name lives in the scan response,
 * so the advertising data itself needs no update).
 */
3374 static int set_name_sync(struct hci_dev *hdev, void *data)
3376 if (lmp_bredr_capable(hdev)) {
3377 hci_update_name_sync(hdev);
3378 hci_update_eir_sync(hdev);
3381 /* The name is stored in the scan response data and so
3382 * no need to update the advertising data here.
3384 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3385 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_LOCAL_NAME handler.  Short-circuits when neither name nor
 * short name changed.  The short name is stored unconditionally; when
 * the controller is powered down the full name is stored too and a
 * Local Name Changed event is emitted directly, otherwise the update is
 * queued through hci_cmd_sync with set_name_complete as callback.
 *
 * NOTE(review): extract is missing lines (locking, goto/failed labels,
 * the !cmd check, and the ordering of the trailing dev_name memcpy
 * relative to the sync queue is not fully visible).
 */
3390 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3393 struct mgmt_cp_set_local_name *cp = data;
3394 struct mgmt_pending_cmd *cmd;
3397 bt_dev_dbg(hdev, "sock %p", sk);
3401 /* If the old values are the same as the new ones just return a
3402 * direct command complete event.
3404 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3405 !memcmp(hdev->short_name, cp->short_name,
3406 sizeof(hdev->short_name))) {
3407 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3412 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3414 if (!hdev_is_powered(hdev)) {
3415 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3417 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3422 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3423 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3424 ext_info_changed(hdev, sk);
3429 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3433 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3437 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3438 MGMT_STATUS_FAILED);
3441 mgmt_pending_remove(cmd);
3446 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3449 hci_dev_unlock(hdev);
/* hci_cmd_sync callback: after an appearance change, expire advertising
 * instances that include the appearance in their data.
 */
3453 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3455 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
/* MGMT_OP_SET_APPEARANCE handler: store the LE appearance value
 * (rejected on non-LE controllers); when the value actually changes and
 * LE advertising is active, queue a sync job to refresh appearance-
 * bearing advertising, and notify other sockets of the ext-info change.
 *
 * NOTE(review): extract is missing lines (locking and the reply length
 * argument are truncated).
 */
3458 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3461 struct mgmt_cp_set_appearance *cp = data;
3465 bt_dev_dbg(hdev, "sock %p", sk);
3467 if (!lmp_le_capable(hdev))
3468 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3469 MGMT_STATUS_NOT_SUPPORTED);
3471 appearance = le16_to_cpu(cp->appearance);
3475 if (hdev->appearance != appearance) {
3476 hdev->appearance = appearance;
3478 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3479 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3482 ext_info_changed(hdev, sk);
3485 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3488 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report supported, currently
 * selected, and configurable PHYs, gathered under the device lock.
 */
3493 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3494 void *data, u16 len)
3496 struct mgmt_rp_get_phy_configuration rp;
3498 bt_dev_dbg(hdev, "sock %p", sk);
3502 memset(&rp, 0, sizeof(rp));
3504 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3505 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3506 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3508 hci_dev_unlock(hdev);
3510 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast a PHY Configuration Changed event carrying the currently
 * selected PHYs to all MGMT sockets except @skip (typically the one
 * that issued the change).
 */
3514 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3516 struct mgmt_ev_phy_configuration_changed ev;
3518 memset(&ev, 0, sizeof(ev));
3520 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3522 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion callback for the Set PHY Configuration sync request.
 * Derives the final status from err / the reply skb's first status
 * byte, replies to the issuing socket, broadcasts the change on
 * success, frees the reply skb, and removes the pending command.
 * Bails out early if the pending command was already superseded.
 *
 * NOTE(review): extract is missing lines; the condition guarding the
 * "status = MGMT_STATUS_FAILED" assignment (original line 3536) is not
 * visible.
 */
3526 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3528 struct mgmt_pending_cmd *cmd = data;
3529 struct sk_buff *skb = cmd->skb;
3530 u8 status = mgmt_status(err);
3532 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
3537 status = MGMT_STATUS_FAILED;
3538 else if (IS_ERR(skb))
3539 status = mgmt_status(PTR_ERR(skb));
3541 status = mgmt_status(skb->data[0]);
3544 bt_dev_dbg(hdev, "status %d", status);
3547 mgmt_cmd_status(cmd->sk, hdev->id,
3548 MGMT_OP_SET_PHY_CONFIGURATION, status);
3550 mgmt_cmd_complete(cmd->sk, hdev->id,
3551 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3554 mgmt_phy_configuration_changed(hdev, cmd->sk);
/* Only free a real reply buffer, not an ERR_PTR. */
3557 if (skb && !IS_ERR(skb))
3560 mgmt_pending_remove(cmd);
/* hci_cmd_sync work for Set PHY Configuration: translate the MGMT
 * selected-PHY bitmask into an HCI LE Set Default PHY command.  The
 * all_phys bits tell the controller "no TX / no RX preference" when the
 * respective mask half is empty; otherwise tx_phys/rx_phys enumerate
 * the chosen 1M/2M/Coded PHYs.  The raw reply skb is stashed in
 * cmd->skb for set_default_phy_complete to inspect.
 */
3563 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3565 struct mgmt_pending_cmd *cmd = data;
3566 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3567 struct hci_cp_le_set_default_phy cp_phy;
3568 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3570 memset(&cp_phy, 0, sizeof(cp_phy));
3572 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3573 cp_phy.all_phys |= 0x01;
3575 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3576 cp_phy.all_phys |= 0x02;
3578 if (selected_phys & MGMT_PHY_LE_1M_TX)
3579 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3581 if (selected_phys & MGMT_PHY_LE_2M_TX)
3582 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3584 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3585 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3587 if (selected_phys & MGMT_PHY_LE_1M_RX)
3588 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3590 if (selected_phys & MGMT_PHY_LE_2M_RX)
3591 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3593 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3594 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3596 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3597 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
/* MGMT_OP_SET_PHY_CONFIGURATION handler.
 *
 * Validation: the selection must be a subset of supported PHYs and must
 * keep every non-configurable (always-on) PHY selected.  A no-op
 * selection completes immediately.
 *
 * BR/EDR PHYs are applied locally by rewriting hdev->pkt_type: the
 * 1M-slot bits ADD DH/DM packet types, while the EDR 2M/3M bits work
 * inverted -- they CLEAR the corresponding "2DHx/3DHx not allowed"
 * bits (HCI encodes EDR packet types as exclusion bits).  LE PHY
 * changes are queued through hci_cmd_sync via set_default_phy_sync.
 *
 * NOTE(review): extract is missing lines (locking, goto/unlock labels,
 * the BUSY status constant at original line 3647, the `changed` flag's
 * consumers around lines 3693-3699, and the !cmd check).
 */
3602 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3603 void *data, u16 len)
3605 struct mgmt_cp_set_phy_configuration *cp = data;
3606 struct mgmt_pending_cmd *cmd;
3607 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3608 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3609 bool changed = false;
3612 bt_dev_dbg(hdev, "sock %p", sk);
3614 configurable_phys = get_configurable_phys(hdev);
3615 supported_phys = get_supported_phys(hdev);
3616 selected_phys = __le32_to_cpu(cp->selected_phys);
3618 if (selected_phys & ~supported_phys)
3619 return mgmt_cmd_status(sk, hdev->id,
3620 MGMT_OP_SET_PHY_CONFIGURATION,
3621 MGMT_STATUS_INVALID_PARAMS);
3623 unconfigure_phys = supported_phys & ~configurable_phys;
/* Non-configurable PHYs cannot be deselected. */
3625 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3626 return mgmt_cmd_status(sk, hdev->id,
3627 MGMT_OP_SET_PHY_CONFIGURATION,
3628 MGMT_STATUS_INVALID_PARAMS);
3630 if (selected_phys == get_selected_phys(hdev))
3631 return mgmt_cmd_complete(sk, hdev->id,
3632 MGMT_OP_SET_PHY_CONFIGURATION,
3637 if (!hdev_is_powered(hdev)) {
3638 err = mgmt_cmd_status(sk, hdev->id,
3639 MGMT_OP_SET_PHY_CONFIGURATION,
3640 MGMT_STATUS_REJECTED);
3644 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3645 err = mgmt_cmd_status(sk, hdev->id,
3646 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR 1M multi-slot bits add packet types ... */
3651 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3652 pkt_type |= (HCI_DH3 | HCI_DM3);
3654 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3656 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3657 pkt_type |= (HCI_DH5 | HCI_DM5);
3659 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* ... while EDR bits are exclusions: set bit = packet type DISABLED. */
3661 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3662 pkt_type &= ~HCI_2DH1;
3664 pkt_type |= HCI_2DH1;
3666 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3667 pkt_type &= ~HCI_2DH3;
3669 pkt_type |= HCI_2DH3;
3671 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3672 pkt_type &= ~HCI_2DH5;
3674 pkt_type |= HCI_2DH5;
3676 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3677 pkt_type &= ~HCI_3DH1;
3679 pkt_type |= HCI_3DH1;
3681 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3682 pkt_type &= ~HCI_3DH3;
3684 pkt_type |= HCI_3DH3;
3686 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3687 pkt_type &= ~HCI_3DH5;
3689 pkt_type |= HCI_3DH5;
3691 if (pkt_type != hdev->pkt_type) {
3692 hdev->pkt_type = pkt_type;
/* If the LE portion is unchanged, only BR/EDR was touched: done. */
3696 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3697 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3699 mgmt_phy_configuration_changed(hdev, sk);
3701 err = mgmt_cmd_complete(sk, hdev->id,
3702 MGMT_OP_SET_PHY_CONFIGURATION,
3708 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3713 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
3714 set_default_phy_complete);
3717 err = mgmt_cmd_status(sk, hdev->id,
3718 MGMT_OP_SET_PHY_CONFIGURATION,
3719 MGMT_STATUS_FAILED);
3722 mgmt_pending_remove(cmd);
3726 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replace the device's blocked-key
 * list with the one supplied by userspace.
 *
 * max_key_count bounds key_count so struct_size() cannot exceed U16_MAX;
 * the exact expected_len check rejects trailing or missing bytes.  The
 * existing list is cleared first, then each entry is copied into a
 * freshly allocated blocked_key and added RCU-safely.
 *
 * NOTE(review): extract is missing lines (locking, the kzalloc NULL
 * branch structure, and the final reply's err argument).
 */
3731 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3734 int err = MGMT_STATUS_SUCCESS;
3735 struct mgmt_cp_set_blocked_keys *keys = data;
3736 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3737 sizeof(struct mgmt_blocked_key_info));
3738 u16 key_count, expected_len;
3741 bt_dev_dbg(hdev, "sock %p", sk);
3743 key_count = __le16_to_cpu(keys->key_count);
3744 if (key_count > max_key_count) {
3745 bt_dev_err(hdev, "too big key_count value %u", key_count);
3746 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3747 MGMT_STATUS_INVALID_PARAMS);
3750 expected_len = struct_size(keys, keys, key_count);
3751 if (expected_len != len) {
3752 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3754 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3755 MGMT_STATUS_INVALID_PARAMS);
3760 hci_blocked_keys_clear(hdev);
3762 for (i = 0; i < keys->key_count; ++i) {
3763 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3766 err = MGMT_STATUS_NO_RESOURCES;
3770 b->type = keys->keys[i].type;
3771 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3772 list_add_rcu(&b->list, &hdev->blocked_keys);
3774 hci_dev_unlock(hdev);
3776 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggle the
 * HCI_WIDEBAND_SPEECH_ENABLED flag.  Only available when the driver
 * declares HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED; the value must be a
 * strict boolean; and flipping the setting while the controller is
 * powered is rejected.  New settings are broadcast only when the flag
 * actually changed.
 *
 * NOTE(review): extract is missing lines (locking, the cp->val branch
 * selecting set vs clear, goto/unlock labels, and the `changed` guard
 * before new_settings()).
 */
3780 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
3781 void *data, u16 len)
3783 struct mgmt_mode *cp = data;
3785 bool changed = false;
3787 bt_dev_dbg(hdev, "sock %p", sk);
3789 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
3790 return mgmt_cmd_status(sk, hdev->id,
3791 MGMT_OP_SET_WIDEBAND_SPEECH,
3792 MGMT_STATUS_NOT_SUPPORTED);
3794 if (cp->val != 0x00 && cp->val != 0x01)
3795 return mgmt_cmd_status(sk, hdev->id,
3796 MGMT_OP_SET_WIDEBAND_SPEECH,
3797 MGMT_STATUS_INVALID_PARAMS);
/* The setting can only change while the controller is powered off. */
3801 if (hdev_is_powered(hdev) &&
3802 !!cp->val != hci_dev_test_flag(hdev,
3803 HCI_WIDEBAND_SPEECH_ENABLED)) {
3804 err = mgmt_cmd_status(sk, hdev->id,
3805 MGMT_OP_SET_WIDEBAND_SPEECH,
3806 MGMT_STATUS_REJECTED);
3811 changed = !hci_dev_test_and_set_flag(hdev,
3812 HCI_WIDEBAND_SPEECH_ENABLED);
3814 changed = hci_dev_test_and_clear_flag(hdev,
3815 HCI_WIDEBAND_SPEECH_ENABLED);
3817 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
3822 err = new_settings(hdev, sk);
3825 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: build a capability reply using
 * EIR-style TLVs in a stack buffer.
 *
 * Contents (all appended via eir_append_*):
 *  - MGMT_CAP_SEC_FLAGS: remote public key validation (BR/EDR bit set
 *    when the Read Simple Pairing Options command (commands[41] & 0x08)
 *    or the Microsoft curve-validity extension is available; LE bit
 *    appears unconditional here), and encryption key size enforcement
 *    (BR/EDR bit when Read Encryption Key Size (commands[20] & 0x10) is
 *    supported; LE bit appears unconditional);
 *  - MGMT_CAP_MAX_ENC_KEY_SIZE when Read Simple Pairing Options exists;
 *  - MGMT_CAP_SMP_MAX_ENC_KEY_SIZE, always;
 *  - MGMT_CAP_LE_TX_PWR when LE Read Transmit Power (commands[38] &
 *    0x80) was available, copying min/max as single signed bytes.
 *
 * NOTE(review): extract is missing lines (the buf declaration/size,
 * cap_len initialization, locking, and some eir_append_data length
 * arguments) -- flag bit conditions above are hedged accordingly.
 */
3829 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
3830 void *data, u16 data_len)
3833 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
3836 u8 tx_power_range[2];
3838 bt_dev_dbg(hdev, "sock %p", sk);
3840 memset(&buf, 0, sizeof(buf));
3844 /* When the Read Simple Pairing Options command is supported, then
3845 * the remote public key validation is supported.
3847 * Alternatively, when Microsoft extensions are available, they can
3848 * indicate support for public key validation as well.
3850 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
3851 flags |= 0x01; /* Remote public key validation (BR/EDR) */
3853 flags |= 0x02; /* Remote public key validation (LE) */
3855 /* When the Read Encryption Key Size command is supported, then the
3856 * encryption key size is enforced.
3858 if (hdev->commands[20] & 0x10)
3859 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
3861 flags |= 0x08; /* Encryption key size enforcement (LE) */
3863 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
3866 /* When the Read Simple Pairing Options command is supported, then
3867 * also max encryption key size information is provided.
3869 if (hdev->commands[41] & 0x08)
3870 cap_len = eir_append_le16(rp->cap, cap_len,
3871 MGMT_CAP_MAX_ENC_KEY_SIZE,
3872 hdev->max_enc_key_size);
3874 cap_len = eir_append_le16(rp->cap, cap_len,
3875 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
3876 SMP_MAX_ENC_KEY_SIZE);
3878 /* Append the min/max LE tx power parameters if we were able to fetch
3879 * it from the controller
3881 if (hdev->commands[38] & 0x80) {
3882 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
3883 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
3884 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
3888 rp->cap_len = cpu_to_le16(cap_len);
3890 hci_dev_unlock(hdev);
3892 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
3893 rp, sizeof(*rp) + cap_len);
/* Experimental-feature UUID tables.  Each 128-bit UUID is stored in
 * reverse byte order relative to the canonical string in the comment
 * above it (Bluetooth little-endian wire order) -- e.g. d4992530-...
 * ends in bytes 0x30, 0x25, 0x99, 0xd4.  The debug UUID is only
 * compiled in with CONFIG_BT_FEATURE_DEBUG.
 */
3896 #ifdef CONFIG_BT_FEATURE_DEBUG
3897 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3898 static const u8 debug_uuid[16] = {
3899 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3900 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3904 /* 330859bc-7506-492d-9370-9a6f0614037f */
3905 static const u8 quality_report_uuid[16] = {
3906 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
3907 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
3910 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
3911 static const u8 offload_codecs_uuid[16] = {
3912 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
3913 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
3916 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3917 static const u8 le_simultaneous_roles_uuid[16] = {
3918 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3919 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3922 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3923 static const u8 rpa_resolution_uuid[16] = {
3924 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3925 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: enumerate experimental
 * features into a stack buffer (uuid + 32-bit flags per entry, BIT(0)
 * = enabled).  Features reported, each conditional on capability:
 * debug (CONFIG_BT_FEATURE_DEBUG, works without an hdev), LE
 * simultaneous roles, LL privacy / RPA resolution (flags also carry
 * BIT(1)), quality report (AOSP extension or driver hook), and offload
 * codecs (driver get_data_path_id hook).  hdev may be NULL: the command
 * is also valid on the non-controller index.  The caller's socket is
 * then subscribed to future experimental-feature change events.
 *
 * NOTE(review): extract is missing lines (flags/idx initialization and
 * the per-feature flags assignments in several branches).
 */
3928 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
3929 void *data, u16 data_len)
3931 char buf[102]; /* Enough space for 5 features: 2 + 20 * 5 */
3932 struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
3936 bt_dev_dbg(hdev, "sock %p", sk);
3938 memset(&buf, 0, sizeof(buf));
3940 #ifdef CONFIG_BT_FEATURE_DEBUG
3942 flags = bt_dbg_get() ? BIT(0) : 0;
3944 memcpy(rp->features[idx].uuid, debug_uuid, 16);
3945 rp->features[idx].flags = cpu_to_le32(flags);
3950 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
3951 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
3956 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
3957 rp->features[idx].flags = cpu_to_le32(flags);
3961 if (hdev && ll_privacy_capable(hdev)) {
3962 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
3963 flags = BIT(0) | BIT(1);
3967 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
3968 rp->features[idx].flags = cpu_to_le32(flags);
3972 if (hdev && (aosp_has_quality_report(hdev) ||
3973 hdev->set_quality_report)) {
3974 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
3979 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
3980 rp->features[idx].flags = cpu_to_le32(flags);
3984 if (hdev && hdev->get_data_path_id) {
3985 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
3990 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
3991 rp->features[idx].flags = cpu_to_le32(flags);
3995 rp->feature_count = cpu_to_le16(idx);
3997 /* After reading the experimental features information, enable
3998 * the events to update client on any future change.
4000 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4002 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4003 MGMT_OP_READ_EXP_FEATURES_INFO,
4004 0, rp, sizeof(*rp) + (20 * idx));
/* Broadcast an Experimental Feature Changed event for the LL privacy
 * (RPA resolution) feature.  BIT(1) is always set in the reported
 * flags; BIT(0) reflects the enabled state.  Also mirrors the state
 * into the DEVICE_PRIVACY connection flag when privacy mode is capable.
 */
4007 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4010 struct mgmt_ev_exp_feature_changed ev;
4012 memset(&ev, 0, sizeof(ev));
4013 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4014 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4016 if (enabled && privacy_mode_capable(hdev))
4017 set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4019 clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4021 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4023 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Broadcast a generic Experimental Feature Changed event for @uuid with
 * BIT(0) reflecting the enabled state, to all sockets subscribed to
 * experimental-feature events except @skip.
 */
4027 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4028 bool enabled, struct sock *skip)
4030 struct mgmt_ev_exp_feature_changed ev;
4032 memset(&ev, 0, sizeof(ev));
4033 memcpy(ev.uuid, uuid, 16);
4034 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4036 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4038 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Table-entry initializer pairing an experimental-feature UUID with its
 * set handler.  NOTE(review): the line assigning the _uuid member
 * (original line ~4043) is missing from this extract.
 */
4041 #define EXP_FEAT(_uuid, _set_func) \
4044 .set_func = _set_func, \
4047 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID disables experimental features in bulk:
 * the kernel debug feature (CONFIG_BT_FEATURE_DEBUG) and, on a powered-
 * down controller, LL privacy -- each emitting its own feature-changed
 * event when the state actually changed.  Replies with the zero uuid
 * and zero flags.
 *
 * NOTE(review): extract is missing lines (the bt_dbg_set() call and
 * `if (changed)` guards are truncated).
 */
4048 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4049 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4051 struct mgmt_rp_set_exp_feature rp;
4053 memset(rp.uuid, 0, 16);
4054 rp.flags = cpu_to_le32(0);
4056 #ifdef CONFIG_BT_FEATURE_DEBUG
4058 bool changed = bt_dbg_get();
4063 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4067 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4070 changed = hci_dev_test_and_clear_flag(hdev,
4071 HCI_ENABLE_LL_PRIVACY);
4073 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4077 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4079 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4080 MGMT_OP_SET_EXP_FEATURE, 0,
4084 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Set handler for the debug experimental feature.  Only valid on the
 * non-controller index (hdev must be NULL), with exactly one extra
 * parameter octet that must be 0x00 or 0x01.  Replies with the debug
 * uuid and new state, and broadcasts a feature-changed event if the
 * state changed.
 *
 * NOTE(review): extract is missing lines (the bt_dbg_set(val) call and
 * the `if (changed)` guard before exp_feature_changed).
 */
4085 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4086 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4088 struct mgmt_rp_set_exp_feature rp;
4093 /* Command requires to use the non-controller index */
4095 return mgmt_cmd_status(sk, hdev->id,
4096 MGMT_OP_SET_EXP_FEATURE,
4097 MGMT_STATUS_INVALID_INDEX);
4099 /* Parameters are limited to a single octet */
4100 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4101 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4102 MGMT_OP_SET_EXP_FEATURE,
4103 MGMT_STATUS_INVALID_PARAMS);
4105 /* Only boolean on/off is supported */
4106 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4107 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4108 MGMT_OP_SET_EXP_FEATURE,
4109 MGMT_STATUS_INVALID_PARAMS);
4111 val = !!cp->param[0];
4112 changed = val ? !bt_dbg_get() : bt_dbg_get();
4115 memcpy(rp.uuid, debug_uuid, 16);
4116 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4118 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4120 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4121 MGMT_OP_SET_EXP_FEATURE, 0,
4125 exp_feature_changed(hdev, debug_uuid, val, sk);
/* Set handler for the LL privacy (RPA resolution) experimental
 * feature.  Requires a controller index, a powered-down controller, and
 * a single boolean parameter octet.  Enabling sets
 * HCI_ENABLE_LL_PRIVACY and clears HCI_ADVERTISING (software rotation
 * and controller-based privacy are mutually exclusive); the reported
 * flags always include BIT(1) ("supported settings changed").
 *
 * NOTE(review): extract is missing lines (the val branch selecting
 * set vs clear, the disable-path flags assignment, and the `if
 * (changed)` guard before the event).
 */
4131 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4132 struct mgmt_cp_set_exp_feature *cp,
4135 struct mgmt_rp_set_exp_feature rp;
4140 /* Command requires to use the controller index */
4142 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4143 MGMT_OP_SET_EXP_FEATURE,
4144 MGMT_STATUS_INVALID_INDEX);
4146 /* Changes can only be made when controller is powered down */
4147 if (hdev_is_powered(hdev))
4148 return mgmt_cmd_status(sk, hdev->id,
4149 MGMT_OP_SET_EXP_FEATURE,
4150 MGMT_STATUS_REJECTED);
4152 /* Parameters are limited to a single octet */
4153 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4154 return mgmt_cmd_status(sk, hdev->id,
4155 MGMT_OP_SET_EXP_FEATURE,
4156 MGMT_STATUS_INVALID_PARAMS);
4158 /* Only boolean on/off is supported */
4159 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4160 return mgmt_cmd_status(sk, hdev->id,
4161 MGMT_OP_SET_EXP_FEATURE,
4162 MGMT_STATUS_INVALID_PARAMS);
4164 val = !!cp->param[0];
4167 changed = !hci_dev_test_and_set_flag(hdev,
4168 HCI_ENABLE_LL_PRIVACY);
4169 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4171 /* Enable LL privacy + supported settings changed */
4172 flags = BIT(0) | BIT(1);
4174 changed = hci_dev_test_and_clear_flag(hdev,
4175 HCI_ENABLE_LL_PRIVACY);
4177 /* Disable LL privacy + supported settings changed */
4181 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4182 rp.flags = cpu_to_le32(flags);
4184 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4186 err = mgmt_cmd_complete(sk, hdev->id,
4187 MGMT_OP_SET_EXP_FEATURE, 0,
4191 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Experimental-feature handler: toggle controller quality-report events.
 * Uses either the driver hook (hdev->set_quality_report) or the AOSP
 * vendor extension; fails with NOT_SUPPORTED when neither is available.
 * Runs under hci_req_sync_lock() because it issues synchronous HCI traffic.
 */
4196 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4197 				   struct mgmt_cp_set_exp_feature *cp,
4200 	struct mgmt_rp_set_exp_feature rp;
4204 	/* Command requires to use a valid controller index */
4206 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4207 				       MGMT_OP_SET_EXP_FEATURE,
4208 				       MGMT_STATUS_INVALID_INDEX);
4210 	/* Parameters are limited to a single octet */
4211 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4212 		return mgmt_cmd_status(sk, hdev->id,
4213 				       MGMT_OP_SET_EXP_FEATURE,
4214 				       MGMT_STATUS_INVALID_PARAMS);
4216 	/* Only boolean on/off is supported */
4217 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4218 		return mgmt_cmd_status(sk, hdev->id,
4219 				       MGMT_OP_SET_EXP_FEATURE,
4220 				       MGMT_STATUS_INVALID_PARAMS);
4222 	hci_req_sync_lock(hdev);
4224 	val = !!cp->param[0];
4225 	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4227 	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4228 		err = mgmt_cmd_status(sk, hdev->id,
4229 				      MGMT_OP_SET_EXP_FEATURE,
4230 				      MGMT_STATUS_NOT_SUPPORTED);
4231 		goto unlock_quality_report;
	/* Prefer the driver-specific hook over the AOSP fallback */
4235 		if (hdev->set_quality_report)
4236 			err = hdev->set_quality_report(hdev, val);
4238 			err = aosp_set_quality_report(hdev, val);
4241 			err = mgmt_cmd_status(sk, hdev->id,
4242 					      MGMT_OP_SET_EXP_FEATURE,
4243 					      MGMT_STATUS_FAILED);
4244 			goto unlock_quality_report;
	/* Mirror the requested state in the device flag */
4248 			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4250 			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4253 	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4255 	memcpy(rp.uuid, quality_report_uuid, 16);
4256 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4257 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4259 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
	/* Event is emitted only on an actual state change */
4263 		exp_feature_changed(hdev, quality_report_uuid, val, sk);
4265 unlock_quality_report:
4266 	hci_req_sync_unlock(hdev);
/* Experimental-feature handler: toggle use of controller-offloaded codecs.
 * Requires the driver to expose get_data_path_id; otherwise NOT_SUPPORTED.
 */
4270 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4271 				  struct mgmt_cp_set_exp_feature *cp,
4276 	struct mgmt_rp_set_exp_feature rp;
4278 	/* Command requires to use a valid controller index */
4280 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4281 				       MGMT_OP_SET_EXP_FEATURE,
4282 				       MGMT_STATUS_INVALID_INDEX);
4284 	/* Parameters are limited to a single octet */
4285 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4286 		return mgmt_cmd_status(sk, hdev->id,
4287 				       MGMT_OP_SET_EXP_FEATURE,
4288 				       MGMT_STATUS_INVALID_PARAMS);
4290 	/* Only boolean on/off is supported */
4291 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4292 		return mgmt_cmd_status(sk, hdev->id,
4293 				       MGMT_OP_SET_EXP_FEATURE,
4294 				       MGMT_STATUS_INVALID_PARAMS);
4296 	val = !!cp->param[0];
4297 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
	/* Codec offload needs a data-path id from the driver */
4299 	if (!hdev->get_data_path_id) {
4300 		return mgmt_cmd_status(sk, hdev->id,
4301 				       MGMT_OP_SET_EXP_FEATURE,
4302 				       MGMT_STATUS_NOT_SUPPORTED);
4307 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4309 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4312 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4315 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4316 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4317 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4318 	err = mgmt_cmd_complete(sk, hdev->id,
4319 				MGMT_OP_SET_EXP_FEATURE, 0,
4323 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* Experimental-feature handler: allow the controller to act as LE central
 * and peripheral simultaneously. Requires hci_dev_le_state_simultaneous().
 */
4328 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4329 					  struct mgmt_cp_set_exp_feature *cp,
4334 	struct mgmt_rp_set_exp_feature rp;
4336 	/* Command requires to use a valid controller index */
4338 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4339 				       MGMT_OP_SET_EXP_FEATURE,
4340 				       MGMT_STATUS_INVALID_INDEX);
4342 	/* Parameters are limited to a single octet */
4343 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4344 		return mgmt_cmd_status(sk, hdev->id,
4345 				       MGMT_OP_SET_EXP_FEATURE,
4346 				       MGMT_STATUS_INVALID_PARAMS);
4348 	/* Only boolean on/off is supported */
4349 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4350 		return mgmt_cmd_status(sk, hdev->id,
4351 				       MGMT_OP_SET_EXP_FEATURE,
4352 				       MGMT_STATUS_INVALID_PARAMS);
4354 	val = !!cp->param[0];
4355 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
	/* Controller must support simultaneous LE states */
4357 	if (!hci_dev_le_state_simultaneous(hdev)) {
4358 		return mgmt_cmd_status(sk, hdev->id,
4359 				       MGMT_OP_SET_EXP_FEATURE,
4360 				       MGMT_STATUS_NOT_SUPPORTED);
4365 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4367 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4370 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4373 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4374 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4375 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4376 	err = mgmt_cmd_complete(sk, hdev->id,
4377 				MGMT_OP_SET_EXP_FEATURE, 0,
4381 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* Dispatch table mapping an experimental-feature UUID to its handler.
 * set_exp_feature() walks this until the NULL sentinel entry.
 */
4386 static const struct mgmt_exp_feature {
4388 	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4389 			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4390 } exp_features[] = {
4391 	EXP_FEAT(ZERO_KEY, set_zero_key_func),
4392 #ifdef CONFIG_BT_FEATURE_DEBUG
4393 	EXP_FEAT(debug_uuid, set_debug_func),
4395 	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
4396 	EXP_FEAT(quality_report_uuid, set_quality_report_func),
4397 	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
4398 	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
4400 	/* end with a null feature */
4401 	EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE entry point: route the command to the handler
 * whose UUID matches the first 16 bytes of the payload; NOT_SUPPORTED
 * when no table entry matches. hdev may be NULL (non-controller index).
 */
4404 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4405 			   void *data, u16 data_len)
4407 	struct mgmt_cp_set_exp_feature *cp = data;
4410 	bt_dev_dbg(hdev, "sock %p", sk);
4412 	for (i = 0; exp_features[i].uuid; i++) {
4413 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4414 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4417 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4418 			       MGMT_OP_SET_EXP_FEATURE,
4419 			       MGMT_STATUS_NOT_SUPPORTED);
/* MGMT_OP_GET_DEVICE_FLAGS: report supported and current per-device
 * connection flags. BR/EDR devices are looked up in the accept list,
 * LE devices in the connection-parameter list; status stays
 * INVALID_PARAMS when the device is not found.
 */
4422 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4425 	struct mgmt_cp_get_device_flags *cp = data;
4426 	struct mgmt_rp_get_device_flags rp;
4427 	struct bdaddr_list_with_flags *br_params;
4428 	struct hci_conn_params *params;
4429 	u32 supported_flags;
4430 	u32 current_flags = 0;
4431 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4433 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4434 		   &cp->addr.bdaddr, cp->addr.type);
	/* Flags are kept as a bitmap internally; convert for the wire */
4438 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4439 			__HCI_CONN_NUM_FLAGS);
4441 	memset(&rp, 0, sizeof(rp));
4443 	if (cp->addr.type == BDADDR_BREDR) {
4444 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4450 		bitmap_to_arr32(&current_flags, br_params->flags,
4451 				__HCI_CONN_NUM_FLAGS);
4453 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4454 						le_addr_type(cp->addr.type));
4459 		bitmap_to_arr32(&current_flags, params->flags,
4460 				__HCI_CONN_NUM_FLAGS);
4463 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4464 	rp.addr.type = cp->addr.type;
4465 	rp.supported_flags = cpu_to_le32(supported_flags);
4466 	rp.current_flags = cpu_to_le32(current_flags);
4468 	status = MGMT_STATUS_SUCCESS;
4471 	hci_dev_unlock(hdev);
4473 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Emit MGMT_EV_DEVICE_FLAGS_CHANGED to all mgmt sockets except sk
 * (the originator already gets the command reply).
 */
4477 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4478 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4479 				 u32 supported_flags, u32 current_flags)
4481 	struct mgmt_ev_device_flags_changed ev;
4483 	bacpy(&ev.addr.bdaddr, bdaddr);
4484 	ev.addr.type = bdaddr_type;
4485 	ev.supported_flags = cpu_to_le32(supported_flags);
4486 	ev.current_flags = cpu_to_le32(current_flags);
4488 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS: update per-device connection flags after
 * validating them against the controller's supported set. On success an
 * MGMT_EV_DEVICE_FLAGS_CHANGED event is broadcast to other sockets.
 */
4491 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4494 	struct mgmt_cp_set_device_flags *cp = data;
4495 	struct bdaddr_list_with_flags *br_params;
4496 	struct hci_conn_params *params;
4497 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4498 	u32 supported_flags;
4499 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4501 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4502 		   &cp->addr.bdaddr, cp->addr.type,
4503 		   __le32_to_cpu(current_flags));
4505 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4506 			__HCI_CONN_NUM_FLAGS);
	/* Reject any requested flag outside the supported mask */
4508 	if ((supported_flags | current_flags) != supported_flags) {
4509 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4510 			    current_flags, supported_flags);
4516 	if (cp->addr.type == BDADDR_BREDR) {
4517 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4522 			bitmap_from_u64(br_params->flags, current_flags);
4523 			status = MGMT_STATUS_SUCCESS;
4525 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4526 				    &cp->addr.bdaddr, cp->addr.type);
4529 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4530 						le_addr_type(cp->addr.type));
4532 			DECLARE_BITMAP(flags, __HCI_CONN_NUM_FLAGS);
4534 			bitmap_from_u64(flags, current_flags);
4536 			/* Devices using RPAs can only be programmed into the
4537 			 * accept list if LL Privacy is enabled; otherwise they
4538 			 * cannot set HCI_CONN_FLAG_REMOTE_WAKEUP.
4540 			if (test_bit(HCI_CONN_FLAG_REMOTE_WAKEUP, flags) &&
4541 			    !use_ll_privacy(hdev) &&
4542 			    hci_find_irk_by_addr(hdev, &params->addr,
4543 						 params->addr_type)) {
4545 					    "Cannot set wakeable for RPA");
4549 			bitmap_from_u64(params->flags, current_flags);
4550 			status = MGMT_STATUS_SUCCESS;
4552 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4555 			if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
4557 				hci_update_passive_scan(hdev);
4559 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4561 				    le_addr_type(cp->addr.type));
4566 	hci_dev_unlock(hdev);
4569 	if (status == MGMT_STATUS_SUCCESS)
4570 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4571 				     supported_flags, current_flags);
4573 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4574 				 &cp->addr, sizeof(cp->addr));
/* Emit MGMT_EV_ADV_MONITOR_ADDED for a newly registered monitor handle,
 * skipping the socket that issued the add command.
 */
4577 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4580 	struct mgmt_ev_adv_monitor_added ev;
4582 	ev.monitor_handle = cpu_to_le16(handle);
4584 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit MGMT_EV_ADV_MONITOR_REMOVED. If a Remove Adv Monitor command for a
 * specific (non-zero) handle is pending, its originating socket is skipped
 * so it only receives the command reply, not the duplicate event.
 */
4587 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4589 	struct mgmt_ev_adv_monitor_removed ev;
4590 	struct mgmt_pending_cmd *cmd;
4591 	struct sock *sk_skip = NULL;
4592 	struct mgmt_cp_remove_adv_monitor *cp;
4594 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	/* Handle 0 means "remove all" — in that case do not skip anyone */
4598 		if (cp->monitor_handle)
4602 	ev.monitor_handle = cpu_to_le16(handle);
4604 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled monitor
 * features, limits, and the list of currently registered monitor handles.
 * The reply is heap-allocated because the handle list is variable length.
 */
4607 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
4608 				 void *data, u16 len)
4610 	struct adv_monitor *monitor = NULL;
4611 	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
4614 	__u32 supported = 0;
4616 	__u16 num_handles = 0;
4617 	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
4619 	BT_DBG("request for %s", hdev->name);
4623 	if (msft_monitor_supported(hdev))
4624 		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
	/* Snapshot all registered monitor handles while holding the lock */
4626 	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
4627 		handles[num_handles++] = monitor->handle;
4629 	hci_dev_unlock(hdev);
4631 	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
4632 	rp = kmalloc(rp_size, GFP_KERNEL);
4636 	/* All supported features are currently enabled */
4637 	enabled = supported;
4639 	rp->supported_features = cpu_to_le32(supported);
4640 	rp->enabled_features = cpu_to_le32(enabled);
4641 	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
4642 	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
4643 	rp->num_handles = cpu_to_le16(num_handles);
4645 	memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
4647 	err = mgmt_cmd_complete(sk, hdev->id,
4648 				MGMT_OP_READ_ADV_MONITOR_FEATURES,
4649 				MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for an asynchronous Add Adv Patterns Monitor
 * (plain or RSSI variant): on success register the monitor, emit the
 * added event and refresh passive scanning, then reply to the caller.
 */
4656 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4658 	struct mgmt_rp_add_adv_patterns_monitor rp;
4659 	struct mgmt_pending_cmd *cmd;
4660 	struct adv_monitor *monitor;
	/* Either opcode variant may be pending; check RSSI first */
4665 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4667 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4672 	monitor = cmd->user_data;
4673 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4676 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4677 		hdev->adv_monitors_cnt++;
4678 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4679 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4680 		hci_update_passive_scan(hdev);
4683 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4684 				mgmt_status(status), &rp, sizeof(rp));
4685 	mgmt_pending_remove(cmd);
4688 	hci_dev_unlock(hdev);
4689 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4690 		   rp.monitor_handle, status);
/* Common implementation for both Add Adv Patterns Monitor opcodes.
 * Takes ownership of monitor 'm' (freed on every error path).
 * If hci_add_adv_monitor() completes synchronously (not 'pending'),
 * register the monitor and reply immediately; otherwise the reply is
 * sent later from mgmt_add_adv_patterns_monitor_complete().
 */
4695 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4696 				      struct adv_monitor *m, u8 status,
4697 				      void *data, u16 len, u16 op)
4699 	struct mgmt_rp_add_adv_patterns_monitor rp;
4700 	struct mgmt_pending_cmd *cmd;
	/* Serialize against other monitor/LE state-changing commands */
4709 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
4710 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4711 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
4712 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
4713 		status = MGMT_STATUS_BUSY;
4717 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
4719 		status = MGMT_STATUS_NO_RESOURCES;
4724 	pending = hci_add_adv_monitor(hdev, m, &err);
	/* Map the core-layer errno to an MGMT status code */
4726 		if (err == -ENOSPC || err == -ENOMEM)
4727 			status = MGMT_STATUS_NO_RESOURCES;
4728 		else if (err == -EINVAL)
4729 			status = MGMT_STATUS_INVALID_PARAMS;
4731 			status = MGMT_STATUS_FAILED;
4733 		mgmt_pending_remove(cmd);
	/* Synchronous completion: register and answer right away */
4738 		mgmt_pending_remove(cmd);
4739 		rp.monitor_handle = cpu_to_le16(m->handle);
4740 		mgmt_adv_monitor_added(sk, hdev, m->handle);
4741 		m->state = ADV_MONITOR_STATE_REGISTERED;
4742 		hdev->adv_monitors_cnt++;
4744 		hci_dev_unlock(hdev);
4745 		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
4749 	hci_dev_unlock(hdev);
	/* Error path: drop the monitor we were handed */
4754 	hci_free_adv_monitor(hdev, m);
4755 	hci_dev_unlock(hdev);
4756 	return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy caller-supplied RSSI thresholds into the monitor, or install
 * permissive defaults when rssi is NULL (plain, non-RSSI opcode).
 */
4759 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4760 				   struct mgmt_adv_rssi_thresholds *rssi)
4763 		m->rssi.low_threshold = rssi->low_threshold;
4764 		m->rssi.low_threshold_timeout =
4765 		    __le16_to_cpu(rssi->low_threshold_timeout);
4766 		m->rssi.high_threshold = rssi->high_threshold;
4767 		m->rssi.high_threshold_timeout =
4768 		    __le16_to_cpu(rssi->high_threshold_timeout);
4769 		m->rssi.sampling_period = rssi->sampling_period;
4771 		/* Default values. These numbers are the least constricting
4772 		 * parameters for MSFT API to work, so it behaves as if there
4773 		 * are no rssi parameter to consider. May need to be changed
4774 		 * if other API are to be supported.
4776 		m->rssi.low_threshold = -127;
4777 		m->rssi.low_threshold_timeout = 60;
4778 		m->rssi.high_threshold = -127;
4779 		m->rssi.high_threshold_timeout = 0;
4780 		m->rssi.sampling_period = 0;
/* Validate and copy the caller's advertising patterns into monitor 'm'.
 * Each pattern's offset/length must fit within HCI_MAX_AD_LENGTH.
 * Returns an MGMT status code; on failure, patterns already added to
 * m->patterns remain and are presumably freed by the caller's error
 * path (hci_free_adv_monitor) — TODO confirm against upstream.
 */
4784 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4785 				    struct mgmt_adv_pattern *patterns)
4787 	u8 offset = 0, length = 0;
4788 	struct adv_pattern *p = NULL;
4791 	for (i = 0; i < pattern_count; i++) {
4792 		offset = patterns[i].offset;
4793 		length = patterns[i].length;
4794 		if (offset >= HCI_MAX_AD_LENGTH ||
4795 		    length > HCI_MAX_AD_LENGTH ||
4796 		    (offset + length) > HCI_MAX_AD_LENGTH)
4797 			return MGMT_STATUS_INVALID_PARAMS;
4799 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4801 			return MGMT_STATUS_NO_RESOURCES;
4803 		p->ad_type = patterns[i].ad_type;
4804 		p->offset = patterns[i].offset;
4805 		p->length = patterns[i].length;
4806 		memcpy(p->value, patterns[i].value, p->length);
4808 		INIT_LIST_HEAD(&p->list);
4809 		list_add(&p->list, &m->patterns);
4812 	return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR: validate the variable-length pattern
 * payload, build an adv_monitor with default RSSI parameters, and hand
 * off to the shared __add_adv_patterns_monitor() path.
 */
4815 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4816 				    void *data, u16 len)
4818 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4819 	struct adv_monitor *m = NULL;
4820 	u8 status = MGMT_STATUS_SUCCESS;
4821 	size_t expected_size = sizeof(*cp);
4823 	BT_DBG("request for %s", hdev->name);
	/* Must carry at least one pattern beyond the fixed header */
4825 	if (len <= sizeof(*cp)) {
4826 		status = MGMT_STATUS_INVALID_PARAMS;
4830 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4831 	if (len != expected_size) {
4832 		status = MGMT_STATUS_INVALID_PARAMS;
4836 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4838 		status = MGMT_STATUS_NO_RESOURCES;
4842 	INIT_LIST_HEAD(&m->patterns);
	/* NULL rssi -> install permissive defaults */
4844 	parse_adv_monitor_rssi(m, NULL);
4845 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4848 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4849 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: same as the plain variant but
 * the command also carries RSSI thresholds, copied via
 * parse_adv_monitor_rssi(&cp->rssi).
 */
4852 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4853 					 void *data, u16 len)
4855 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4856 	struct adv_monitor *m = NULL;
4857 	u8 status = MGMT_STATUS_SUCCESS;
4858 	size_t expected_size = sizeof(*cp);
4860 	BT_DBG("request for %s", hdev->name);
4862 	if (len <= sizeof(*cp)) {
4863 		status = MGMT_STATUS_INVALID_PARAMS;
4867 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4868 	if (len != expected_size) {
4869 		status = MGMT_STATUS_INVALID_PARAMS;
4873 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4875 		status = MGMT_STATUS_NO_RESOURCES;
4879 	INIT_LIST_HEAD(&m->patterns);
4881 	parse_adv_monitor_rssi(m, &cp->rssi);
4882 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4885 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4886 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for an asynchronous Remove Adv Monitor: on success
 * refresh passive scanning, then complete the pending mgmt command.
 */
4889 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4891 	struct mgmt_rp_remove_adv_monitor rp;
4892 	struct mgmt_cp_remove_adv_monitor *cp;
4893 	struct mgmt_pending_cmd *cmd;
4898 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	/* Echo back the handle (still little-endian from the request) */
4903 	rp.monitor_handle = cp->monitor_handle;
4906 		hci_update_passive_scan(hdev);
4908 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4909 				mgmt_status(status), &rp, sizeof(rp));
4910 	mgmt_pending_remove(cmd);
4913 	hci_dev_unlock(hdev);
4914 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4915 		   rp.monitor_handle, status);
/* MGMT_OP_REMOVE_ADV_MONITOR: remove one monitor (by handle) or all
 * monitors (handle 0). May complete synchronously when no controller
 * round-trip is required; otherwise the reply comes via
 * mgmt_remove_adv_monitor_complete().
 */
4920 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
4921 			      void *data, u16 len)
4923 	struct mgmt_cp_remove_adv_monitor *cp = data;
4924 	struct mgmt_rp_remove_adv_monitor rp;
4925 	struct mgmt_pending_cmd *cmd;
4926 	u16 handle = __le16_to_cpu(cp->monitor_handle);
4930 	BT_DBG("request for %s", hdev->name);
4931 	rp.monitor_handle = cp->monitor_handle;
	/* Serialize against other monitor/LE state-changing commands */
4935 	if (pending_find(MGMT_OP_SET_LE, hdev) ||
4936 	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
4937 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
4938 	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
4939 		status = MGMT_STATUS_BUSY;
4943 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
4945 		status = MGMT_STATUS_NO_RESOURCES;
	/* handle == 0 is the wildcard meaning "remove all monitors" */
4950 		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
4952 		pending = hci_remove_all_adv_monitor(hdev, &err);
4955 		mgmt_pending_remove(cmd);
4958 			status = MGMT_STATUS_INVALID_INDEX;
4960 			status = MGMT_STATUS_FAILED;
4965 	/* monitor can be removed without forwarding request to controller */
4967 		mgmt_pending_remove(cmd);
4968 		hci_dev_unlock(hdev);
4970 		return mgmt_cmd_complete(sk, hdev->id,
4971 					 MGMT_OP_REMOVE_ADV_MONITOR,
4972 					 MGMT_STATUS_SUCCESS,
4976 	hci_dev_unlock(hdev);
4980 	hci_dev_unlock(hdev);
4981 	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion callback for Read Local OOB Data: translate the HCI reply
 * (legacy P-192-only, or extended P-192+P-256 when BR/EDR Secure
 * Connections is enabled) into the mgmt reply, then free skb and cmd.
 */
4985 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
4987 	struct mgmt_rp_read_local_oob_data mgmt_rp;
4988 	size_t rp_size = sizeof(mgmt_rp);
4989 	struct mgmt_pending_cmd *cmd = data;
4990 	struct sk_buff *skb = cmd->skb;
4991 	u8 status = mgmt_status(err);
	/* Derive the final status: no skb / error-pointer skb / HCI status */
4995 			status = MGMT_STATUS_FAILED;
4996 		else if (IS_ERR(skb))
4997 			status = mgmt_status(PTR_ERR(skb));
4999 			status = mgmt_status(skb->data[0]);
5002 	bt_dev_dbg(hdev, "status %d", status);
5005 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5009 	memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5011 	if (!bredr_sc_enabled(hdev)) {
5012 		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
		/* Guard against a short controller reply */
5014 		if (skb->len < sizeof(*rp)) {
5015 			mgmt_cmd_status(cmd->sk, hdev->id,
5016 					MGMT_OP_READ_LOCAL_OOB_DATA,
5017 					MGMT_STATUS_FAILED);
5021 		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5022 		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
		/* Legacy reply carries no P-256 values — trim them off */
5024 		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5026 		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5028 		if (skb->len < sizeof(*rp)) {
5029 			mgmt_cmd_status(cmd->sk, hdev->id,
5030 					MGMT_OP_READ_LOCAL_OOB_DATA,
5031 					MGMT_STATUS_FAILED);
5035 		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5036 		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5038 		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5039 		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5042 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5043 			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5046 	if (skb && !IS_ERR(skb))
5049 	mgmt_pending_free(cmd);
/* hci_cmd_sync work function: issue the (extended when SC-capable) Read
 * Local OOB Data HCI command and stash the reply skb on the pending cmd.
 */
5052 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5054 	struct mgmt_pending_cmd *cmd = data;
5056 	if (bredr_sc_enabled(hdev))
5057 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5059 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5061 	if (IS_ERR(cmd->skb))
5062 		return PTR_ERR(cmd->skb);
/* MGMT_OP_READ_LOCAL_OOB_DATA: requires a powered, SSP-capable
 * controller. Queues read_local_oob_data_sync(); the reply is sent from
 * read_local_oob_data_complete().
 */
5067 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5068 			       void *data, u16 data_len)
5070 	struct mgmt_pending_cmd *cmd;
5073 	bt_dev_dbg(hdev, "sock %p", sk);
5077 	if (!hdev_is_powered(hdev)) {
5078 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5079 				      MGMT_STATUS_NOT_POWERED);
5083 	if (!lmp_ssp_capable(hdev)) {
5084 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5085 				      MGMT_STATUS_NOT_SUPPORTED);
	/* mgmt_pending_new (not _add): cmd is not tracked in pending list */
5089 	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5093 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5094 				 read_local_oob_data_complete);
5097 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5098 				      MGMT_STATUS_FAILED);
5101 		mgmt_pending_free(cmd);
5105 	hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA: store OOB pairing data for a remote
 * device. Two payload sizes are accepted: the legacy form with only
 * P-192 hash/rand (BR/EDR only), and the extended form that also
 * carries P-256 values. All-zero hash/rand pairs disable that key set.
 */
5109 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5110 			       void *data, u16 len)
5112 	struct mgmt_addr_info *addr = data;
5115 	bt_dev_dbg(hdev, "sock %p", sk);
5117 	if (!bdaddr_type_is_valid(addr->type))
5118 		return mgmt_cmd_complete(sk, hdev->id,
5119 					 MGMT_OP_ADD_REMOTE_OOB_DATA,
5120 					 MGMT_STATUS_INVALID_PARAMS,
5121 					 addr, sizeof(*addr));
	/* Legacy payload: P-192 hash/rand only, BR/EDR addresses only */
5125 	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5126 		struct mgmt_cp_add_remote_oob_data *cp = data;
5129 		if (cp->addr.type != BDADDR_BREDR) {
5130 			err = mgmt_cmd_complete(sk, hdev->id,
5131 						MGMT_OP_ADD_REMOTE_OOB_DATA,
5132 						MGMT_STATUS_INVALID_PARAMS,
5133 						&cp->addr, sizeof(cp->addr));
5137 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5138 					      cp->addr.type, cp->hash,
5139 					      cp->rand, NULL, NULL);
5141 			status = MGMT_STATUS_FAILED;
5143 			status = MGMT_STATUS_SUCCESS;
5145 		err = mgmt_cmd_complete(sk, hdev->id,
5146 					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5147 					&cp->addr, sizeof(cp->addr));
5148 	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5149 		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5150 		u8 *rand192, *hash192, *rand256, *hash256;
5153 		if (bdaddr_type_is_le(cp->addr.type)) {
5154 			/* Enforce zero-valued 192-bit parameters as
5155 			 * long as legacy SMP OOB isn't implemented.
5157 			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5158 			    memcmp(cp->hash192, ZERO_KEY, 16)) {
5159 				err = mgmt_cmd_complete(sk, hdev->id,
5160 						MGMT_OP_ADD_REMOTE_OOB_DATA,
5161 						MGMT_STATUS_INVALID_PARAMS,
5162 						addr, sizeof(*addr));
5169 			/* In case one of the P-192 values is set to zero,
5170 			 * then just disable OOB data for P-192.
5172 			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5173 			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
5177 				rand192 = cp->rand192;
5178 				hash192 = cp->hash192;
5182 		/* In case one of the P-256 values is set to zero, then just
5183 		 * disable OOB data for P-256.
5185 		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5186 		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
5190 			rand256 = cp->rand256;
5191 			hash256 = cp->hash256;
5194 		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5195 					      cp->addr.type, hash192, rand192,
5198 			status = MGMT_STATUS_FAILED;
5200 			status = MGMT_STATUS_SUCCESS;
5202 		err = mgmt_cmd_complete(sk, hdev->id,
5203 					MGMT_OP_ADD_REMOTE_OOB_DATA,
5204 					status, &cp->addr, sizeof(cp->addr));
	/* Any other length is a malformed command */
5206 		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5208 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5209 				      MGMT_STATUS_INVALID_PARAMS);
5213 	hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete stored OOB data for one BR/EDR
 * device, or clear all stored OOB data when the address is BDADDR_ANY.
 */
5217 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5218 				  void *data, u16 len)
5220 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5224 	bt_dev_dbg(hdev, "sock %p", sk);
5226 	if (cp->addr.type != BDADDR_BREDR)
5227 		return mgmt_cmd_complete(sk, hdev->id,
5228 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5229 					 MGMT_STATUS_INVALID_PARAMS,
5230 					 &cp->addr, sizeof(cp->addr));
	/* BDADDR_ANY acts as a wildcard: drop every stored entry */
5234 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5235 		hci_remote_oob_data_clear(hdev);
5236 		status = MGMT_STATUS_SUCCESS;
5240 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5242 		status = MGMT_STATUS_INVALID_PARAMS;
5244 		status = MGMT_STATUS_SUCCESS;
5247 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5248 				status, &cp->addr, sizeof(cp->addr));
5250 	hci_dev_unlock(hdev);
/* Complete whichever Start Discovery variant is pending (plain, service,
 * or limited) with the given HCI status.
 */
5254 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5256 	struct mgmt_pending_cmd *cmd;
5258 	bt_dev_dbg(hdev, "status %u", status);
5262 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5264 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5267 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5270 		cmd->cmd_complete(cmd, mgmt_status(status));
5271 		mgmt_pending_remove(cmd);
5274 	hci_dev_unlock(hdev);
/* Check that the requested discovery type is supported by this
 * controller. On failure *mgmt_status holds the status to return
 * (LE support, BR/EDR support, or INVALID_PARAMS for unknown types).
 */
5277 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5278 				    uint8_t *mgmt_status)
5281 	case DISCOV_TYPE_LE:
5282 		*mgmt_status = mgmt_le_support(hdev);
5286 	case DISCOV_TYPE_INTERLEAVED:
5287 		*mgmt_status = mgmt_le_support(hdev);
5291 	case DISCOV_TYPE_BREDR:
5292 		*mgmt_status = mgmt_bredr_support(hdev);
5297 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* hci_cmd_sync completion for Start Discovery: reply to the pending
 * command and move discovery state to FINDING (or back to STOPPED on
 * error). Bails out if the pending cmd is no longer ours.
 */
5304 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5306 	struct mgmt_pending_cmd *cmd = data;
5308 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5309 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5310 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5313 	bt_dev_dbg(hdev, "err %d", err);
5315 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5317 	mgmt_pending_remove(cmd);
5319 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* hci_cmd_sync work function: thin wrapper around
 * hci_start_discovery_sync() (the 'data' argument is unused).
 */
5323 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5325 	return hci_start_discovery_sync(hdev);
/* Shared implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY ('op' selects which). Validates power,
 * idle state, type, and pause status, then queues the async discovery
 * start and moves discovery state to STARTING.
 */
5328 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5329 				    u16 op, void *data, u16 len)
5331 	struct mgmt_cp_start_discovery *cp = data;
5332 	struct mgmt_pending_cmd *cmd;
5336 	bt_dev_dbg(hdev, "sock %p", sk);
5340 	if (!hdev_is_powered(hdev)) {
5341 		err = mgmt_cmd_complete(sk, hdev->id, op,
5342 					MGMT_STATUS_NOT_POWERED,
5343 					&cp->type, sizeof(cp->type));
5347 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
5348 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5349 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5350 					&cp->type, sizeof(cp->type));
5354 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5355 		err = mgmt_cmd_complete(sk, hdev->id, op, status,
5356 					&cp->type, sizeof(cp->type));
5360 	/* Can't start discovery when it is paused */
5361 	if (hdev->discovery_paused) {
5362 		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5363 					&cp->type, sizeof(cp->type));
5367 	/* Clear the discovery filter first to free any previously
5368 	 * allocated memory for the UUID list.
5370 	hci_discovery_filter_clear(hdev);
5372 	hdev->discovery.type = cp->type;
5373 	hdev->discovery.report_invalid_rssi = false;
5374 	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5375 		hdev->discovery.limited = true;
5377 		hdev->discovery.limited = false;
5379 	cmd = mgmt_pending_add(sk, op, hdev, data, len);
5385 	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5386 				 start_discovery_complete);
5388 		mgmt_pending_remove(cmd);
5392 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5395 	hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: thin wrapper over the shared internal path */
5399 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5400 			   void *data, u16 len)
5402 	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: thin wrapper over the shared path */
5406 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5407 				   void *data, u16 len)
5409 	return start_discovery_internal(sk, hdev,
5410 					MGMT_OP_START_LIMITED_DISCOVERY,
/* MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but with an RSSI
 * threshold and an optional UUID filter list appended to the command.
 * Validates the variable-length payload (uuid_count * 16 bytes) before
 * copying the UUIDs and queuing the asynchronous discovery start.
 */
5414 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5415 				   void *data, u16 len)
5417 	struct mgmt_cp_start_service_discovery *cp = data;
5418 	struct mgmt_pending_cmd *cmd;
	/* Upper bound on uuid_count so expected_len cannot overflow u16 */
5419 	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5420 	u16 uuid_count, expected_len;
5424 	bt_dev_dbg(hdev, "sock %p", sk);
5428 	if (!hdev_is_powered(hdev)) {
5429 		err = mgmt_cmd_complete(sk, hdev->id,
5430 					MGMT_OP_START_SERVICE_DISCOVERY,
5431 					MGMT_STATUS_NOT_POWERED,
5432 					&cp->type, sizeof(cp->type));
5436 	if (hdev->discovery.state != DISCOVERY_STOPPED ||
5437 	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5438 		err = mgmt_cmd_complete(sk, hdev->id,
5439 					MGMT_OP_START_SERVICE_DISCOVERY,
5440 					MGMT_STATUS_BUSY, &cp->type,
5445 	if (hdev->discovery_paused) {
5446 		err = mgmt_cmd_complete(sk, hdev->id,
5447 					MGMT_OP_START_SERVICE_DISCOVERY,
5448 					MGMT_STATUS_BUSY, &cp->type,
5453 	uuid_count = __le16_to_cpu(cp->uuid_count);
5454 	if (uuid_count > max_uuid_count) {
5455 		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
5457 		err = mgmt_cmd_complete(sk, hdev->id,
5458 					MGMT_OP_START_SERVICE_DISCOVERY,
5459 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
5464 	expected_len = sizeof(*cp) + uuid_count * 16;
5465 	if (expected_len != len) {
5466 		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
5468 		err = mgmt_cmd_complete(sk, hdev->id,
5469 					MGMT_OP_START_SERVICE_DISCOVERY,
5470 					MGMT_STATUS_INVALID_PARAMS, &cp->type,
5475 	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5476 		err = mgmt_cmd_complete(sk, hdev->id,
5477 					MGMT_OP_START_SERVICE_DISCOVERY,
5478 					status, &cp->type, sizeof(cp->type));
5482 	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
5489 	/* Clear the discovery filter first to free any previously
5490 	 * allocated memory for the UUID list.
5492 	hci_discovery_filter_clear(hdev);
5494 	hdev->discovery.result_filtering = true;
5495 	hdev->discovery.type = cp->type;
5496 	hdev->discovery.rssi = cp->rssi;
5497 	hdev->discovery.uuid_count = uuid_count;
5499 	if (uuid_count > 0) {
5500 		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
5502 		if (!hdev->discovery.uuids) {
5503 			err = mgmt_cmd_complete(sk, hdev->id,
5504 						MGMT_OP_START_SERVICE_DISCOVERY,
5506 						&cp->type, sizeof(cp->type));
5507 			mgmt_pending_remove(cmd);
5512 	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5513 				 start_discovery_complete);
5515 		mgmt_pending_remove(cmd);
5519 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5522 	hci_dev_unlock(hdev);
/* Resolve a pending MGMT_OP_STOP_DISCOVERY command once the HCI layer
 * reports completion; status is translated via mgmt_status().
 * NOTE(review): listing is elided — guard checks and hci_dev_lock()
 * are presumably present in the omitted lines; confirm against full file.
 */
5526 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5528 struct mgmt_pending_cmd *cmd;
5530 bt_dev_dbg(hdev, "status %u", status);
/* Look up the pending Stop Discovery command for this controller. */
5534 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5536 cmd->cmd_complete(cmd, mgmt_status(status));
5537 mgmt_pending_remove(cmd);
5540 hci_dev_unlock(hdev);
/* hci_cmd_sync completion callback for stop_discovery_sync(): reply to
 * the originating socket and mark discovery stopped.
 */
5543 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
5545 struct mgmt_pending_cmd *cmd = data;
/* Bail out if this command is no longer the pending one (raced away). */
5547 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
5550 bt_dev_dbg(hdev, "err %d", err);
5552 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5554 mgmt_pending_remove(cmd);
/* Discovery state machine returns to STOPPED on completion. */
5557 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* hci_cmd_sync work function: delegate to hci_stop_discovery_sync(). */
5560 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
5562 return hci_stop_discovery_sync(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: validate that discovery is active and
 * of the requested type, then queue the synchronous stop operation.
 * NOTE(review): elided listing — early-return/goto paths after the
 * mgmt_cmd_complete() error replies are not visible here.
 */
5565 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
5568 struct mgmt_cp_stop_discovery *mgmt_cp = data;
5569 struct mgmt_pending_cmd *cmd;
5572 bt_dev_dbg(hdev, "sock %p", sk);
/* Reject if no discovery is currently running. */
5576 if (!hci_discovery_active(hdev)) {
5577 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5578 MGMT_STATUS_REJECTED, &mgmt_cp->type,
5579 sizeof(mgmt_cp->type));
/* The requested type must match the type that was started. */
5583 if (hdev->discovery.type != mgmt_cp->type) {
5584 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
5585 MGMT_STATUS_INVALID_PARAMS,
5586 &mgmt_cp->type, sizeof(mgmt_cp->type));
5590 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
/* Queue the stop; on queueing failure the pending cmd is removed. */
5596 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
5597 stop_discovery_complete);
5599 mgmt_pending_remove(cmd);
5603 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5606 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: user space confirms whether a device
 * name from discovery is already known, steering name resolution.
 */
5610 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5613 struct mgmt_cp_confirm_name *cp = data;
5614 struct inquiry_entry *e;
5617 bt_dev_dbg(hdev, "sock %p", sk);
/* Only meaningful while discovery is running. */
5621 if (!hci_discovery_active(hdev)) {
5622 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5623 MGMT_STATUS_FAILED, &cp->addr,
/* Find the inquiry-cache entry still awaiting name resolution. */
5628 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5630 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5631 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
/* name_known short-circuits resolution; otherwise re-queue a lookup. */
5636 if (cp->name_known) {
5637 e->name_state = NAME_KNOWN;
5640 e->name_state = NAME_NEEDED;
5641 hci_inquiry_cache_update_resolve(hdev, e);
5644 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5645 &cp->addr, sizeof(cp->addr));
5648 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the reject list and
 * emit MGMT_EV_DEVICE_BLOCKED on success.
 */
5652 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5655 struct mgmt_cp_block_device *cp = data;
5659 bt_dev_dbg(hdev, "sock %p", sk);
/* Address type must be a valid BR/EDR or LE type. */
5661 if (!bdaddr_type_is_valid(cp->addr.type))
5662 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5663 MGMT_STATUS_INVALID_PARAMS,
5664 &cp->addr, sizeof(cp->addr));
5668 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5671 status = MGMT_STATUS_FAILED;
/* Broadcast the blocked event to other mgmt sockets (skipping sender). */
5675 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5677 status = MGMT_STATUS_SUCCESS;
5680 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5681 &cp->addr, sizeof(cp->addr));
5683 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: mirror image of block_device() —
 * remove the address from the reject list and emit DEVICE_UNBLOCKED.
 */
5688 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5691 struct mgmt_cp_unblock_device *cp = data;
5695 bt_dev_dbg(hdev, "sock %p", sk);
5697 if (!bdaddr_type_is_valid(cp->addr.type))
5698 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5699 MGMT_STATUS_INVALID_PARAMS,
5700 &cp->addr, sizeof(cp->addr));
/* Deleting an entry that is not present yields INVALID_PARAMS. */
5704 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5707 status = MGMT_STATUS_INVALID_PARAMS;
5711 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5713 status = MGMT_STATUS_SUCCESS;
5716 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5717 &cp->addr, sizeof(cp->addr));
5719 hci_dev_unlock(hdev);
/* hci_cmd_sync work function: refresh the EIR data after a Device ID
 * change (device ID is advertised via EIR).
 */
5724 static int set_device_id_sync(struct hci_dev *hdev, void *data)
5726 return hci_update_eir_sync(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the DI source/vendor/product/
 * version and schedule an EIR update.
 */
5729 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
5732 struct mgmt_cp_set_device_id *cp = data;
5736 bt_dev_dbg(hdev, "sock %p", sk);
5738 source = __le16_to_cpu(cp->source);
/* Valid DI sources: 0x0000 (disabled), 0x0001 (SIG), 0x0002 (USB IF). */
5740 if (source > 0x0002)
5741 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
5742 MGMT_STATUS_INVALID_PARAMS);
5746 hdev->devid_source = source;
5747 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
5748 hdev->devid_product = __le16_to_cpu(cp->product);
5749 hdev->devid_version = __le16_to_cpu(cp->version);
5751 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Fire-and-forget EIR refresh; no completion callback needed. */
5754 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
5756 hci_dev_unlock(hdev);
/* Log-only helper: report the result of re-enabling an advertising
 * instance (error vs. debug severity depending on err).
 */
5761 static void enable_advertising_instance(struct hci_dev *hdev, int err)
5764 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
5766 bt_dev_dbg(hdev, "status %d", err);
/* Completion callback for MGMT_OP_SET_ADVERTISING: sync the
 * HCI_ADVERTISING flag with the controller's HCI_LE_ADV state, answer
 * all pending commands, and re-arm instance advertising if "Set
 * Advertising" was just turned off while instances exist.
 */
5769 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
5771 struct cmd_lookup match = { NULL, hdev };
5773 struct adv_info *adv_instance;
5774 u8 status = mgmt_status(err);
/* On failure, fail every pending SET_ADVERTISING command. */
5777 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
5778 cmd_status_rsp, &status);
/* Mirror the live LE advertising state into the mgmt-visible flag. */
5782 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
5783 hci_dev_set_flag(hdev, HCI_ADVERTISING)
5785 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
5787 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
5790 new_settings(hdev, match.sk);
5795 /* If "Set Advertising" was just disabled and instance advertising was
5796 * set up earlier, then re-enable multi-instance advertising.
5798 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5799 list_empty(&hdev->adv_instances))
5802 instance = hdev->cur_adv_instance;
/* Fall back to the first configured instance if none is current. */
5804 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
5805 struct adv_info, list);
5809 instance = adv_instance->instance;
5812 err = hci_schedule_adv_instance_sync(hdev, instance, true);
5814 enable_advertising_instance(hdev, err);
/* hci_cmd_sync work function for SET_ADVERTISING: apply connectable
 * mode (val == 0x02), reset to instance 0 and start/stop advertising
 * using extended or legacy commands as the controller supports.
 */
5817 static int set_adv_sync(struct hci_dev *hdev, void *data)
5819 struct mgmt_pending_cmd *cmd = data;
5820 struct mgmt_mode *cp = cmd->param;
/* val 0x02 means "advertising, connectable". */
5823 if (cp->val == 0x02)
5824 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5826 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5828 cancel_adv_timeout(hdev);
5831 /* Switch to instance "0" for the Set Advertising setting.
5832 * We cannot use update_[adv|scan_rsp]_data() here as the
5833 * HCI_ADVERTISING flag is not yet set.
5835 hdev->cur_adv_instance = 0x00;
5837 if (ext_adv_capable(hdev)) {
5838 hci_start_ext_adv_sync(hdev, 0x00);
/* Legacy path: refresh adv + scan-response data, then enable. */
5840 hci_update_adv_data_sync(hdev, 0x00);
5841 hci_update_scan_rsp_data_sync(hdev, 0x00);
5842 hci_enable_advertising_sync(hdev);
5845 hci_disable_advertising_sync(hdev);
5851 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
5854 struct mgmt_mode *cp = data;
5855 struct mgmt_pending_cmd *cmd;
5859 bt_dev_dbg(hdev, "sock %p", sk);
5861 status = mgmt_le_support(hdev);
5863 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5866 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
5867 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5868 MGMT_STATUS_INVALID_PARAMS);
5870 if (hdev->advertising_paused)
5871 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5878 /* The following conditions are ones which mean that we should
5879 * not do any HCI communication but directly send a mgmt
5880 * response to user space (after toggling the flag if
5883 if (!hdev_is_powered(hdev) ||
5884 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5885 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
5886 hci_conn_num(hdev, LE_LINK) > 0 ||
5887 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
5888 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
5892 hdev->cur_adv_instance = 0x00;
5893 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
5894 if (cp->val == 0x02)
5895 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5897 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5899 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
5900 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
5903 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
5908 err = new_settings(hdev, sk);
5913 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
5914 pending_find(MGMT_OP_SET_LE, hdev)) {
5915 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
5920 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
5924 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
5925 set_advertising_complete);
5928 mgmt_pending_remove(cmd);
5931 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: configure the LE static random
 * address. Only allowed while powered off; address must be BDADDR_ANY
 * (clear) or a valid static random address per the Core spec.
 */
5935 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5936 void *data, u16 len)
5938 struct mgmt_cp_set_static_address *cp = data;
5941 bt_dev_dbg(hdev, "sock %p", sk);
5943 if (!lmp_le_capable(hdev))
5944 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5945 MGMT_STATUS_NOT_SUPPORTED);
/* Changing the identity address while powered on is not allowed. */
5947 if (hdev_is_powered(hdev))
5948 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5949 MGMT_STATUS_REJECTED);
5951 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
/* BDADDR_NONE (ff:..:ff) is never a valid static address. */
5952 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5953 return mgmt_cmd_status(sk, hdev->id,
5954 MGMT_OP_SET_STATIC_ADDRESS,
5955 MGMT_STATUS_INVALID_PARAMS);
5957 /* Two most significant bits shall be set */
5958 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5959 return mgmt_cmd_status(sk, hdev->id,
5960 MGMT_OP_SET_STATIC_ADDRESS,
5961 MGMT_STATUS_INVALID_PARAMS);
5966 bacpy(&hdev->static_addr, &cp->bdaddr);
5968 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5972 err = new_settings(hdev, sk);
5975 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set LE scan interval/window.
 * Both must be in the HCI-valid range 0x0004-0x4000 and the window
 * must not exceed the interval.
 */
5979 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5980 void *data, u16 len)
5982 struct mgmt_cp_set_scan_params *cp = data;
5983 __u16 interval, window;
5986 bt_dev_dbg(hdev, "sock %p", sk);
5988 if (!lmp_le_capable(hdev))
5989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5990 MGMT_STATUS_NOT_SUPPORTED);
5992 interval = __le16_to_cpu(cp->interval);
5994 if (interval < 0x0004 || interval > 0x4000)
5995 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5996 MGMT_STATUS_INVALID_PARAMS);
5998 window = __le16_to_cpu(cp->window);
6000 if (window < 0x0004 || window > 0x4000)
6001 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6002 MGMT_STATUS_INVALID_PARAMS);
6004 if (window > interval)
6005 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6006 MGMT_STATUS_INVALID_PARAMS);
6010 hdev->le_scan_interval = interval;
6011 hdev->le_scan_window = window;
6013 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6016 /* If background scan is running, restart it so new parameters are
6019 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6020 hdev->discovery.state == DISCOVERY_STOPPED)
6021 hci_update_passive_scan(hdev);
6023 hci_dev_unlock(hdev);
/* Completion callback for SET_FAST_CONNECTABLE: report failure, or
 * commit the HCI_FAST_CONNECTABLE flag and notify user space.
 */
6028 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6030 struct mgmt_pending_cmd *cmd = data;
6032 bt_dev_dbg(hdev, "err %d", err);
6035 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6038 struct mgmt_mode *cp = cmd->param;
6041 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6043 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6045 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6046 new_settings(hdev, cmd->sk);
/* cmd was created with mgmt_pending_new(), so free (not remove) it. */
6049 mgmt_pending_free(cmd);
/* hci_cmd_sync work function: write the fast-connectable page scan
 * parameters with the requested on/off value.
 */
6052 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6054 struct mgmt_pending_cmd *cmd = data;
6055 struct mgmt_mode *cp = cmd->param;
6057 return hci_write_fast_connectable_sync(hdev, cp->val);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: BR/EDR-only (>= BT 1.2).
 * No-op reply when the flag is already in the requested state; flag
 * toggle only when powered off; otherwise queue the HCI write.
 */
6060 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6061 void *data, u16 len)
6063 struct mgmt_mode *cp = data;
6064 struct mgmt_pending_cmd *cmd;
6067 bt_dev_dbg(hdev, "sock %p", sk);
6069 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6070 hdev->hci_ver < BLUETOOTH_VER_1_2)
6071 return mgmt_cmd_status(sk, hdev->id,
6072 MGMT_OP_SET_FAST_CONNECTABLE,
6073 MGMT_STATUS_NOT_SUPPORTED);
6075 if (cp->val != 0x00 && cp->val != 0x01)
6076 return mgmt_cmd_status(sk, hdev->id,
6077 MGMT_OP_SET_FAST_CONNECTABLE,
6078 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state — just echo the settings back. */
6082 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6083 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
/* Powered off: flip the flag, no HCI traffic needed. */
6087 if (!hdev_is_powered(hdev)) {
6088 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6089 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6090 new_settings(hdev, sk);
6094 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6099 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6100 fast_connectable_complete);
6103 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6104 MGMT_STATUS_FAILED);
6107 mgmt_pending_free(cmd);
6111 hci_dev_unlock(hdev);
/* Completion callback for SET_BREDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag; on success report the new
 * settings to the requester.
 */
6116 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6118 struct mgmt_pending_cmd *cmd = data;
6120 bt_dev_dbg(hdev, "err %d", err);
6123 u8 mgmt_err = mgmt_status(err);
6125 /* We need to restore the flag if related HCI commands
6128 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6130 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6132 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6133 new_settings(hdev, cmd->sk);
6136 mgmt_pending_free(cmd);
/* hci_cmd_sync work function for SET_BREDR: disable fast connectable,
 * refresh page/inquiry scan, then update advertising data (the BR/EDR
 * support bit in the adv flags changed).
 */
6139 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6143 status = hci_write_fast_connectable_sync(hdev, false);
6146 status = hci_update_scan_sync(hdev);
6148 /* Since only the advertising data flags will change, there
6149 * is no need to update the scan response data.
6152 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_BREDR handler: enable/disable BR/EDR on a dual-mode
 * controller. Disabling while powered is rejected; re-enabling is
 * rejected when a static address or Secure Connections is in use.
 * The HCI_BREDR_ENABLED flag is flipped optimistically before queueing
 * set_bredr_sync() (rolled back in set_bredr_complete() on failure).
 */
6157 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6159 struct mgmt_mode *cp = data;
6160 struct mgmt_pending_cmd *cmd;
6163 bt_dev_dbg(hdev, "sock %p", sk);
/* Only meaningful on dual-mode (BR/EDR + LE) controllers. */
6165 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6167 MGMT_STATUS_NOT_SUPPORTED);
6169 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6170 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6171 MGMT_STATUS_REJECTED);
6173 if (cp->val != 0x00 && cp->val != 0x01)
6174 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6175 MGMT_STATUS_INVALID_PARAMS);
6179 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6180 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6184 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears all BR/EDR-only settings as well. */
6186 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6187 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6188 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6189 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6190 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6193 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6195 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6199 err = new_settings(hdev, sk);
6203 /* Reject disabling when powered on */
6205 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6206 MGMT_STATUS_REJECTED);
6209 /* When configuring a dual-mode controller to operate
6210 * with LE only and using a static address, then switching
6211 * BR/EDR back on is not allowed.
6213 * Dual-mode controllers shall operate with the public
6214 * address as its identity address for BR/EDR and LE. So
6215 * reject the attempt to create an invalid configuration.
6217 * The same restrictions applies when secure connections
6218 * has been enabled. For BR/EDR this is a controller feature
6219 * while for LE it is a host stack feature. This means that
6220 * switching BR/EDR back on when secure connections has been
6221 * enabled is not a supported transaction.
6223 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6224 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6225 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6226 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6227 MGMT_STATUS_REJECTED);
6232 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6236 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6237 set_bredr_complete);
6240 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6241 MGMT_STATUS_FAILED);
6243 mgmt_pending_free(cmd);
6248 /* We need to flip the bit already here so that
6249 * hci_req_update_adv_data generates the correct flags.
6251 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6254 hci_dev_unlock(hdev);
/* Completion callback for SET_SECURE_CONN: commit the SC flags
 * according to the requested value (0x00 off, 0x01 SC enabled,
 * 0x02 SC-only mode) and notify the requester.
 * NOTE(review): elided listing — the switch/if dispatch on cp->val
 * between lines 6277-6285 is not fully visible here.
 */
6258 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6260 struct mgmt_pending_cmd *cmd = data;
6261 struct mgmt_mode *cp;
6263 bt_dev_dbg(hdev, "err %d", err);
6266 u8 mgmt_err = mgmt_status(err);
6268 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6276 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6277 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6280 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6281 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6284 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6285 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6289 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6290 new_settings(hdev, cmd->sk);
6293 mgmt_pending_free(cmd);
/* hci_cmd_sync work function for SET_SECURE_CONN: force the write of
 * the SC support value to the controller.
 */
6296 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6298 struct mgmt_pending_cmd *cmd = data;
6299 struct mgmt_mode *cp = cmd->param;
6302 /* Force write of val */
6303 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6305 return hci_write_sc_support_sync(hdev, val);
/* MGMT_OP_SET_SECURE_CONN handler: 0x00 off, 0x01 on, 0x02 SC-only.
 * Flag-only path when powered off, SC not controller-supported, or
 * BR/EDR disabled (LE SC is a host feature); otherwise queue the
 * controller write.
 */
6308 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6309 void *data, u16 len)
6311 struct mgmt_mode *cp = data;
6312 struct mgmt_pending_cmd *cmd;
6316 bt_dev_dbg(hdev, "sock %p", sk);
/* Need either controller SC support or LE enabled (host-side SC). */
6318 if (!lmp_sc_capable(hdev) &&
6319 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6320 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6321 MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC requires SSP to be enabled first. */
6323 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6324 lmp_sc_capable(hdev) &&
6325 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6326 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6327 MGMT_STATUS_REJECTED);
6329 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6331 MGMT_STATUS_INVALID_PARAMS);
6335 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6336 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6340 changed = !hci_dev_test_and_set_flag(hdev,
6342 if (cp->val == 0x02)
6343 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6345 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6347 changed = hci_dev_test_and_clear_flag(hdev,
6349 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6352 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6357 err = new_settings(hdev, sk);
/* Already in the requested SC/SC-only state — just reply. */
6364 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6365 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6366 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6370 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6374 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6375 set_secure_conn_complete);
6378 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6379 MGMT_STATUS_FAILED);
6381 mgmt_pending_free(cmd);
6385 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler: 0x00 discard debug keys, 0x01 keep
 * them, 0x02 keep and also generate/use debug keys (writes SSP debug
 * mode to the controller when it changes while powered + SSP on).
 */
6389 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6390 void *data, u16 len)
6392 struct mgmt_mode *cp = data;
6393 bool changed, use_changed;
6396 bt_dev_dbg(hdev, "sock %p", sk);
6398 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6399 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6400 MGMT_STATUS_INVALID_PARAMS);
/* KEEP_DEBUG_KEYS is set for both 0x01 and 0x02. */
6405 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6407 changed = hci_dev_test_and_clear_flag(hdev,
6408 HCI_KEEP_DEBUG_KEYS);
/* USE_DEBUG_KEYS only for 0x02 (actively use debug keys). */
6410 if (cp->val == 0x02)
6411 use_changed = !hci_dev_test_and_set_flag(hdev,
6412 HCI_USE_DEBUG_KEYS);
6414 use_changed = hci_dev_test_and_clear_flag(hdev,
6415 HCI_USE_DEBUG_KEYS);
6417 if (hdev_is_powered(hdev) && use_changed &&
6418 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6419 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6420 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
6421 sizeof(mode), &mode);
6424 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
6429 err = new_settings(hdev, sk);
6432 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler: enable/disable LE privacy (RPA usage).
 * 0x00 off, 0x01 on, 0x02 limited privacy. Only allowed while powered
 * off; stores the IRK and marks the RPA expired so a fresh one is
 * generated on next use.
 */
6436 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6439 struct mgmt_cp_set_privacy *cp = cp_data;
6443 bt_dev_dbg(hdev, "sock %p", sk);
6445 if (!lmp_le_capable(hdev))
6446 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6447 MGMT_STATUS_NOT_SUPPORTED);
6449 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
6450 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6451 MGMT_STATUS_INVALID_PARAMS);
6453 if (hdev_is_powered(hdev))
6454 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
6455 MGMT_STATUS_REJECTED);
6459 /* If user space supports this command it is also expected to
6460 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
6462 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6465 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
6466 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
/* Force generation of a new RPA (for all adv instances too). */
6467 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
6468 hci_adv_instances_set_rpa_expired(hdev, true);
6469 if (cp->privacy == 0x02)
6470 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
6472 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe the stored IRK and clear all privacy flags. */
6474 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
6475 memset(hdev->irk, 0, sizeof(hdev->irk));
6476 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
6477 hci_adv_instances_set_rpa_expired(hdev, false);
6478 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
6481 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
6486 err = new_settings(hdev, sk);
6489 hci_dev_unlock(hdev);
/* Validate an IRK entry's address: LE public is always acceptable;
 * LE random must be a static random address (top two bits set).
 */
6493 static bool irk_is_valid(struct mgmt_irk_info *irk)
6495 switch (irk->addr.type) {
6496 case BDADDR_LE_PUBLIC:
6499 case BDADDR_LE_RANDOM:
6500 /* Two most significant bits shall be set */
6501 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler: replace the whole IRK store with the
 * supplied list after validating count, total length, and each entry;
 * blocked keys are skipped with a warning.
 */
6509 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
6512 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound that keeps sizeof(*cp) + count * entry within u16. */
6513 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
6514 sizeof(struct mgmt_irk_info));
6515 u16 irk_count, expected_len;
6518 bt_dev_dbg(hdev, "sock %p", sk);
6520 if (!lmp_le_capable(hdev))
6521 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6522 MGMT_STATUS_NOT_SUPPORTED);
6524 irk_count = __le16_to_cpu(cp->irk_count);
6525 if (irk_count > max_irk_count) {
6526 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
6528 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6529 MGMT_STATUS_INVALID_PARAMS);
/* The declared count must match the actual payload length exactly. */
6532 expected_len = struct_size(cp, irks, irk_count);
6533 if (expected_len != len) {
6534 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
6536 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
6537 MGMT_STATUS_INVALID_PARAMS);
6540 bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate all entries before mutating any state. */
6542 for (i = 0; i < irk_count; i++) {
6543 struct mgmt_irk_info *key = &cp->irks[i];
6545 if (!irk_is_valid(key))
6546 return mgmt_cmd_status(sk, hdev->id,
6548 MGMT_STATUS_INVALID_PARAMS);
/* Full replace: drop existing IRKs, then add the new set. */
6553 hci_smp_irks_clear(hdev);
6555 for (i = 0; i < irk_count; i++) {
6556 struct mgmt_irk_info *irk = &cp->irks[i];
6558 if (hci_is_blocked_key(hdev,
6559 HCI_BLOCKED_KEY_TYPE_IRK,
6561 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
6566 hci_add_irk(hdev, &irk->addr.bdaddr,
6567 le_addr_type(irk->addr.type), irk->val,
6571 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
6573 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
6575 hci_dev_unlock(hdev);
/* Validate an LTK entry: initiator must be 0/1 and the address must be
 * LE public or a static random address (top two bits set).
 */
6580 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6582 if (key->initiator != 0x00 && key->initiator != 0x01)
6585 switch (key->addr.type) {
6586 case BDADDR_LE_PUBLIC:
6589 case BDADDR_LE_RANDOM:
6590 /* Two most significant bits shall be set */
6591 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: replace the SMP LTK store with
 * the supplied list. Validates count/length/entries up front, skips
 * blocked keys, and maps the mgmt key type to SMP type + authenticated
 * flag before adding each key.
 */
6599 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
6600 void *cp_data, u16 len)
6602 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound that keeps sizeof(*cp) + count * entry within u16. */
6603 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
6604 sizeof(struct mgmt_ltk_info));
6605 u16 key_count, expected_len;
6608 bt_dev_dbg(hdev, "sock %p", sk);
6610 if (!lmp_le_capable(hdev))
6611 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6612 MGMT_STATUS_NOT_SUPPORTED);
6614 key_count = __le16_to_cpu(cp->key_count);
6615 if (key_count > max_key_count) {
6616 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
6618 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6619 MGMT_STATUS_INVALID_PARAMS);
6622 expected_len = struct_size(cp, keys, key_count);
6623 if (expected_len != len) {
6624 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
6626 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
6627 MGMT_STATUS_INVALID_PARAMS);
6630 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate all entries before mutating any state. */
6632 for (i = 0; i < key_count; i++) {
6633 struct mgmt_ltk_info *key = &cp->keys[i];
6635 if (!ltk_is_valid(key))
6636 return mgmt_cmd_status(sk, hdev->id,
6637 MGMT_OP_LOAD_LONG_TERM_KEYS,
6638 MGMT_STATUS_INVALID_PARAMS);
/* Full replace: drop existing LTKs, then add the new set. */
6643 hci_smp_ltks_clear(hdev);
6645 for (i = 0; i < key_count; i++) {
6646 struct mgmt_ltk_info *key = &cp->keys[i];
6647 u8 type, authenticated;
6649 if (hci_is_blocked_key(hdev,
6650 HCI_BLOCKED_KEY_TYPE_LTK,
6652 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map mgmt key type -> SMP key type + authenticated flag. */
6657 switch (key->type) {
6658 case MGMT_LTK_UNAUTHENTICATED:
6659 authenticated = 0x00;
6660 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6662 case MGMT_LTK_AUTHENTICATED:
6663 authenticated = 0x01;
6664 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
6666 case MGMT_LTK_P256_UNAUTH:
6667 authenticated = 0x00;
6668 type = SMP_LTK_P256;
6670 case MGMT_LTK_P256_AUTH:
6671 authenticated = 0x01;
6672 type = SMP_LTK_P256;
6674 case MGMT_LTK_P256_DEBUG:
6675 authenticated = 0x00;
6676 type = SMP_LTK_P256_DEBUG;
6682 hci_add_ltk(hdev, &key->addr.bdaddr,
6683 le_addr_type(key->addr.type), type, authenticated,
6684 key->val, key->enc_size, key->ediv, key->rand);
6687 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
6690 hci_dev_unlock(hdev);
/* Completion callback for GET_CONN_INFO: reply with the connection's
 * RSSI / TX power (or INVALID sentinels on failure) and release the
 * connection reference taken when the command was queued.
 */
6695 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
6697 struct mgmt_pending_cmd *cmd = data;
6698 struct hci_conn *conn = cmd->user_data;
6699 struct mgmt_cp_get_conn_info *cp = cmd->param;
6700 struct mgmt_rp_get_conn_info rp;
6703 bt_dev_dbg(hdev, "err %d", err);
6705 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
6707 status = mgmt_status(err);
6708 if (status == MGMT_STATUS_SUCCESS) {
6709 rp.rssi = conn->rssi;
6710 rp.tx_power = conn->tx_power;
6711 rp.max_tx_power = conn->max_tx_power;
/* Failure path: report explicit "invalid" sentinel values. */
6713 rp.rssi = HCI_RSSI_INVALID;
6714 rp.tx_power = HCI_TX_POWER_INVALID;
6715 rp.max_tx_power = HCI_TX_POWER_INVALID;
6718 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
/* Drop the hold taken in get_conn_info() before freeing the cmd. */
6722 hci_conn_drop(conn);
6726 mgmt_pending_free(cmd);
/* hci_cmd_sync work function for GET_CONN_INFO: re-check the
 * connection still exists, then refresh RSSI and (lazily) TX power /
 * max TX power via synchronous HCI reads.
 */
6729 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
6731 struct mgmt_pending_cmd *cmd = data;
6732 struct mgmt_cp_get_conn_info *cp = cmd->param;
6733 struct hci_conn *conn;
6737 /* Make sure we are still connected */
6738 if (cp->addr.type == BDADDR_BREDR)
6739 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6742 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* Connection gone or replaced: drop our reference and bail. */
6744 if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
6745 if (cmd->user_data) {
6746 hci_conn_drop(cmd->user_data);
6747 hci_conn_put(cmd->user_data);
6748 cmd->user_data = NULL;
6750 return MGMT_STATUS_NOT_CONNECTED;
6753 handle = cpu_to_le16(conn->handle);
6755 /* Refresh RSSI each time */
6756 err = hci_read_rssi_sync(hdev, handle);
6758 /* For LE links TX power does not change thus we don't need to
6759 * query for it once value is known.
6761 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
6762 conn->tx_power == HCI_TX_POWER_INVALID))
6763 err = hci_read_tx_power_sync(hdev, handle, 0x00);
6765 /* Max TX power needs to be read only once per connection */
6766 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
6767 err = hci_read_tx_power_sync(hdev, handle, 0x01);
/* MGMT_OP_GET_CONN_INFO handler: return RSSI/TX power for an active
 * connection. Serves cached values when fresh; otherwise queues
 * get_conn_info_sync() to re-query the controller, holding a
 * connection reference until completion.
 */
6772 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
6775 struct mgmt_cp_get_conn_info *cp = data;
6776 struct mgmt_rp_get_conn_info rp;
6777 struct hci_conn *conn;
6778 unsigned long conn_info_age;
6781 bt_dev_dbg(hdev, "sock %p", sk);
6783 memset(&rp, 0, sizeof(rp));
6784 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6785 rp.addr.type = cp->addr.type;
6787 if (!bdaddr_type_is_valid(cp->addr.type))
6788 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6789 MGMT_STATUS_INVALID_PARAMS,
6794 if (!hdev_is_powered(hdev)) {
6795 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6796 MGMT_STATUS_NOT_POWERED, &rp,
/* Resolve the live connection: ACL for BR/EDR, otherwise LE. */
6801 if (cp->addr.type == BDADDR_BREDR)
6802 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6805 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
6807 if (!conn || conn->state != BT_CONNECTED) {
6808 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6809 MGMT_STATUS_NOT_CONNECTED, &rp,
6814 /* To avoid client trying to guess when to poll again for information we
6815 * calculate conn info age as random value between min/max set in hdev.
6817 conn_info_age = hdev->conn_info_min_age +
6818 prandom_u32_max(hdev->conn_info_max_age -
6819 hdev->conn_info_min_age);
6821 /* Query controller to refresh cached values if they are too old or were
6824 if (time_after(jiffies, conn->conn_info_timestamp +
6825 msecs_to_jiffies(conn_info_age)) ||
6826 !conn->conn_info_timestamp) {
6827 struct mgmt_pending_cmd *cmd;
6829 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
6834 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
6835 cmd, get_conn_info_complete);
6838 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6839 MGMT_STATUS_FAILED, &rp, sizeof(rp));
6842 mgmt_pending_free(cmd);
/* Keep the connection alive while the sync query is in flight. */
6847 hci_conn_hold(conn);
6848 cmd->user_data = hci_conn_get(conn);
6850 conn->conn_info_timestamp = jiffies;
6852 /* Cache is valid, just reply with values cached in hci_conn */
6853 rp.rssi = conn->rssi;
6854 rp.tx_power = conn->tx_power;
6855 rp.max_tx_power = conn->max_tx_power;
6857 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
6858 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6862 hci_dev_unlock(hdev);
/* Completion callback for GET_CLOCK_INFO: reply with the local clock
 * and, when a connection was involved, the piconet clock and accuracy;
 * releases the connection reference taken when queueing.
 */
6866 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
6868 struct mgmt_pending_cmd *cmd = data;
6869 struct mgmt_cp_get_clock_info *cp = cmd->param;
6870 struct mgmt_rp_get_clock_info rp;
6871 struct hci_conn *conn = cmd->user_data;
6872 u8 status = mgmt_status(err);
6874 bt_dev_dbg(hdev, "err %d", err);
6876 memset(&rp, 0, sizeof(rp));
6877 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6878 rp.addr.type = cp->addr.type;
6883 rp.local_clock = cpu_to_le32(hdev->clock);
/* Per-connection fields only when a piconet clock was queried. */
6886 rp.piconet_clock = cpu_to_le32(conn->clock);
6887 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
6888 hci_conn_drop(conn);
6893 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
6896 mgmt_pending_free(cmd);
/* hci_cmd_sync work function for GET_CLOCK_INFO: read the local clock
 * (which = 0 implied by the zeroed hci_cp), then — if the target
 * connection still exists — the piconet clock for its handle.
 */
6899 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
6901 struct mgmt_pending_cmd *cmd = data;
6902 struct mgmt_cp_get_clock_info *cp = cmd->param;
6903 struct hci_cp_read_clock hci_cp;
6904 struct hci_conn *conn = cmd->user_data;
6907 memset(&hci_cp, 0, sizeof(hci_cp));
6908 err = hci_read_clock_sync(hdev, &hci_cp);
6911 /* Make sure connection still exists */
6912 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6915 if (conn && conn == cmd->user_data &&
6916 conn->state == BT_CONNECTED) {
6917 hci_cp.handle = cpu_to_le16(conn->handle);
6918 hci_cp.which = 0x01; /* Piconet clock */
6919 err = hci_read_clock_sync(hdev, &hci_cp);
/* Connection gone: release the stale reference we were holding. */
6920 } else if (cmd->user_data) {
6921 hci_conn_drop(cmd->user_data);
6922 hci_conn_put(cmd->user_data);
6923 cmd->user_data = NULL;
/* MGMT_OP_GET_CLOCK_INFO handler (BR/EDR only): queue a synchronous
 * clock read for the local clock and, if a peer address is given, the
 * piconet clock of that connection.
 */
6930 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
6933 struct mgmt_cp_get_clock_info *cp = data;
6934 struct mgmt_rp_get_clock_info rp;
6935 struct mgmt_pending_cmd *cmd;
6936 struct hci_conn *conn;
6939 bt_dev_dbg(hdev, "sock %p", sk);
6941 memset(&rp, 0, sizeof(rp));
6942 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6943 rp.addr.type = cp->addr.type;
/* Piconet clocks only exist for BR/EDR links. */
6945 if (cp->addr.type != BDADDR_BREDR)
6946 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6947 MGMT_STATUS_INVALID_PARAMS,
6952 if (!hdev_is_powered(hdev)) {
6953 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6954 MGMT_STATUS_NOT_POWERED, &rp,
/* Non-ANY address: require a live ACL connection to that peer. */
6959 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
6960 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
6962 if (!conn || conn->state != BT_CONNECTED) {
6963 err = mgmt_cmd_complete(sk, hdev->id,
6964 MGMT_OP_GET_CLOCK_INFO,
6965 MGMT_STATUS_NOT_CONNECTED,
6973 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
6977 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
6978 get_clock_info_complete);
6981 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
6982 MGMT_STATUS_FAILED, &rp, sizeof(rp));
6985 mgmt_pending_free(cmd);
/* Keep the connection alive while the sync query is in flight. */
6988 hci_conn_hold(conn);
6989 cmd->user_data = hci_conn_get(conn);
6994 hci_dev_unlock(hdev);
/* Return true only when an LE connection to @addr of @type exists and is in
 * BT_CONNECTED state.
 */
6998 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
7000 struct hci_conn *conn;
7002 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
7006 if (conn->dst_type != type)
7009 if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Set the auto-connect policy for @addr/@addr_type, creating the conn_params
 * entry if needed, and (re)link params->action onto the matching pending
 * list (pend_le_conns / pend_le_reports) for the new policy.
 *
 * Fix: the `&params`/`&param` expressions below had been corrupted by an
 * HTML-entity mis-decoding (`&para;` rendered as `¶`); restored to valid C.
 */
7016 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
7017 u8 addr_type, u8 auto_connect)
7019 struct hci_conn_params *params;
7021 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
7025 if (params->auto_connect == auto_connect)
/* Detach from any pending list before re-filing under the new policy. */
7028 list_del_init(&params->action);
7030 switch (auto_connect) {
7031 case HCI_AUTO_CONN_DISABLED:
7032 case HCI_AUTO_CONN_LINK_LOSS:
7033 /* If auto connect is being disabled when we're trying to
7034 * connect to device, keep connecting.
 */
7036 if (params->explicit_connect)
7037 list_add(&params->action, &hdev->pend_le_conns);
7039 case HCI_AUTO_CONN_REPORT:
7040 if (params->explicit_connect)
7041 list_add(&params->action, &hdev->pend_le_conns);
7043 list_add(&params->action, &hdev->pend_le_reports);
7045 case HCI_AUTO_CONN_DIRECT:
7046 case HCI_AUTO_CONN_ALWAYS:
/* Only schedule a connection attempt if not already connected. */
7047 if (!is_connected(hdev, addr, addr_type))
7048 list_add(&params->action, &hdev->pend_le_conns);
7052 params->auto_connect = auto_connect;
7054 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
7055 addr, addr_type, auto_connect);
/* Emit the MGMT Device Added event to all mgmt sockets except @sk (the
 * command originator, which already gets the command reply).
 */
7060 static void device_added(struct sock *sk, struct hci_dev *hdev,
7061 bdaddr_t *bdaddr, u8 type, u8 action)
7063 struct mgmt_ev_device_added ev;
7065 bacpy(&ev.addr.bdaddr, bdaddr);
7066 ev.addr.type = type;
7069 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync worker: refresh passive scanning after a device was added. */
7072 static int add_device_sync(struct hci_dev *hdev, void *data)
7074 return hci_update_passive_scan_sync(hdev);
/* MGMT_OP_ADD_DEVICE handler: whitelist a BR/EDR peer for incoming
 * connections, or register LE connection parameters with the auto-connect
 * policy selected by cp->action (0x00 report, 0x01 direct, 0x02 always).
 * Finishes by queueing a passive-scan update and emitting Device Added plus
 * Device Flags Changed events.
 *
 * Fix: `&current_flags` had been corrupted by an HTML-entity mis-decoding
 * (`&curren;` rendered as `¤`); restored to valid C.
 */
7077 static int add_device(struct sock *sk, struct hci_dev *hdev,
7078 void *data, u16 len)
7080 struct mgmt_cp_add_device *cp = data;
7081 u8 auto_conn, addr_type;
7082 struct hci_conn_params *params;
7084 u32 current_flags = 0;
7085 u32 supported_flags;
7087 bt_dev_dbg(hdev, "sock %p", sk);
/* Reject unknown address types and the wildcard address. */
7089 if (!bdaddr_type_is_valid(cp->addr.type) ||
7090 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
7091 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7092 MGMT_STATUS_INVALID_PARAMS,
7093 &cp->addr, sizeof(cp->addr));
7095 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
7096 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7097 MGMT_STATUS_INVALID_PARAMS,
7098 &cp->addr, sizeof(cp->addr));
7102 if (cp->addr.type == BDADDR_BREDR) {
7103 /* Only incoming connections action is supported for now */
7104 if (cp->action != 0x01) {
7105 err = mgmt_cmd_complete(sk, hdev->id,
7107 MGMT_STATUS_INVALID_PARAMS,
7108 &cp->addr, sizeof(cp->addr));
7112 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
/* Accept-list change affects page scanning. */
7118 hci_req_update_scan(hdev);
7123 addr_type = le_addr_type(cp->addr.type);
/* Map the mgmt action onto the internal auto-connect policy. */
7125 if (cp->action == 0x02)
7126 auto_conn = HCI_AUTO_CONN_ALWAYS;
7127 else if (cp->action == 0x01)
7128 auto_conn = HCI_AUTO_CONN_DIRECT;
7130 auto_conn = HCI_AUTO_CONN_REPORT;
7132 /* Kernel internally uses conn_params with resolvable private
7133 * address, but Add Device allows only identity addresses.
7134 * Make sure it is enforced before calling
7135 * hci_conn_params_lookup.
 */
7137 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7138 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7139 MGMT_STATUS_INVALID_PARAMS,
7140 &cp->addr, sizeof(cp->addr));
7144 /* If the connection parameters don't exist for this device,
7145 * they will be created and configured with defaults.
 */
7147 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
7149 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7150 MGMT_STATUS_FAILED, &cp->addr,
7154 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7157 bitmap_to_arr32(&current_flags, params->flags,
7158 __HCI_CONN_NUM_FLAGS);
7161 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
7166 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
7167 bitmap_to_arr32(&supported_flags, hdev->conn_flags,
7168 __HCI_CONN_NUM_FLAGS);
/* NULL skip-socket: the flags event goes to every mgmt socket. */
7169 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
7170 supported_flags, current_flags);
7172 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
7173 MGMT_STATUS_SUCCESS, &cp->addr,
7177 hci_dev_unlock(hdev);
/* Emit the MGMT Device Removed event to all mgmt sockets except @sk. */
7181 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7182 bdaddr_t *bdaddr, u8 type)
7184 struct mgmt_ev_device_removed ev;
7186 bacpy(&ev.addr.bdaddr, bdaddr);
7187 ev.addr.type = type;
7189 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync worker: refresh passive scanning after device removal. */
7192 static int remove_device_sync(struct hci_dev *hdev, void *data)
7194 return hci_update_passive_scan_sync(hdev);
/* MGMT_OP_REMOVE_DEVICE handler. With a specific address: remove it from
 * the BR/EDR accept list or delete its LE connection parameters. With
 * BDADDR_ANY: clear the whole accept list and all removable LE conn_params.
 * Either way a passive-scan refresh is queued at the end.
 *
 * Fix: the `&params` expressions below had been corrupted by an HTML-entity
 * mis-decoding (`&para;` rendered as `¶`); restored to valid C.
 */
7197 static int remove_device(struct sock *sk, struct hci_dev *hdev,
7198 void *data, u16 len)
7200 struct mgmt_cp_remove_device *cp = data;
7203 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-wildcard address: remove one specific device. */
7207 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
7208 struct hci_conn_params *params;
7211 if (!bdaddr_type_is_valid(cp->addr.type)) {
7212 err = mgmt_cmd_complete(sk, hdev->id,
7213 MGMT_OP_REMOVE_DEVICE,
7214 MGMT_STATUS_INVALID_PARAMS,
7215 &cp->addr, sizeof(cp->addr));
7219 if (cp->addr.type == BDADDR_BREDR) {
7220 err = hci_bdaddr_list_del(&hdev->accept_list,
7224 err = mgmt_cmd_complete(sk, hdev->id,
7225 MGMT_OP_REMOVE_DEVICE,
7226 MGMT_STATUS_INVALID_PARAMS,
7232 hci_req_update_scan(hdev);
7234 device_removed(sk, hdev, &cp->addr.bdaddr,
7239 addr_type = le_addr_type(cp->addr.type);
7241 /* Kernel internally uses conn_params with resolvable private
7242 * address, but Remove Device allows only identity addresses.
7243 * Make sure it is enforced before calling
7244 * hci_conn_params_lookup.
 */
7246 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
7247 err = mgmt_cmd_complete(sk, hdev->id,
7248 MGMT_OP_REMOVE_DEVICE,
7249 MGMT_STATUS_INVALID_PARAMS,
7250 &cp->addr, sizeof(cp->addr));
7254 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
7257 err = mgmt_cmd_complete(sk, hdev->id,
7258 MGMT_OP_REMOVE_DEVICE,
7259 MGMT_STATUS_INVALID_PARAMS,
7260 &cp->addr, sizeof(cp->addr));
/* Disabled/explicit entries are kernel-internal and cannot be
 * removed through the mgmt interface.
 */
7264 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
7265 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
7266 err = mgmt_cmd_complete(sk, hdev->id,
7267 MGMT_OP_REMOVE_DEVICE,
7268 MGMT_STATUS_INVALID_PARAMS,
7269 &cp->addr, sizeof(cp->addr));
7273 list_del(&params->action);
7274 list_del(&params->list);
7277 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* Wildcard address: remove everything. */
7279 struct hci_conn_params *p, *tmp;
7280 struct bdaddr_list *b, *btmp;
/* Wildcard removal is only defined for address type 0. */
7282 if (cp->addr.type) {
7283 err = mgmt_cmd_complete(sk, hdev->id,
7284 MGMT_OP_REMOVE_DEVICE,
7285 MGMT_STATUS_INVALID_PARAMS,
7286 &cp->addr, sizeof(cp->addr));
7290 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
7291 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
7296 hci_req_update_scan(hdev);
7298 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
7299 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
7301 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep entries with an in-flight explicit connect; just demote
 * them to the explicit policy.
 */
7302 if (p->explicit_connect) {
7303 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
7306 list_del(&p->action);
7311 bt_dev_dbg(hdev, "All LE connection parameters were removed");
7314 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
7317 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
7318 MGMT_STATUS_SUCCESS, &cp->addr,
7321 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: validate the packed parameter array
 * (count bound, exact length via struct_size()), clear previously disabled
 * entries, then install each valid LE connection-parameter set. Invalid
 * entries are logged and skipped rather than failing the whole load.
 *
 * Fix: the `&param->addr.bdaddr` expressions below had been corrupted by an
 * HTML-entity mis-decoding (`&para;` rendered as `¶`); restored to valid C.
 */
7325 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
7328 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on param_count so expected_len cannot overflow u16. */
7329 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
7330 sizeof(struct mgmt_conn_param));
7331 u16 param_count, expected_len;
7334 if (!lmp_le_capable(hdev))
7335 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7336 MGMT_STATUS_NOT_SUPPORTED);
7338 param_count = __le16_to_cpu(cp->param_count);
7339 if (param_count > max_param_count) {
7340 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
7342 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7343 MGMT_STATUS_INVALID_PARAMS);
7346 expected_len = struct_size(cp, params, param_count);
7347 if (expected_len != len) {
7348 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
7350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
7351 MGMT_STATUS_INVALID_PARAMS);
7354 bt_dev_dbg(hdev, "param_count %u", param_count);
/* Fresh load replaces any previously disabled entries. */
7358 hci_conn_params_clear_disabled(hdev);
7360 for (i = 0; i < param_count; i++) {
7361 struct mgmt_conn_param *param = &cp->params[i];
7362 struct hci_conn_params *hci_param;
7363 u16 min, max, latency, timeout;
7366 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
/* Only LE public/random identity address types are accepted. */
7369 if (param->addr.type == BDADDR_LE_PUBLIC) {
7370 addr_type = ADDR_LE_DEV_PUBLIC;
7371 } else if (param->addr.type == BDADDR_LE_RANDOM) {
7372 addr_type = ADDR_LE_DEV_RANDOM;
7374 bt_dev_err(hdev, "ignoring invalid connection parameters");
7378 min = le16_to_cpu(param->min_interval);
7379 max = le16_to_cpu(param->max_interval);
7380 latency = le16_to_cpu(param->latency);
7381 timeout = le16_to_cpu(param->timeout);
7383 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
7384 min, max, latency, timeout);
/* Range-check against the Bluetooth spec limits. */
7386 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
7387 bt_dev_err(hdev, "ignoring invalid connection parameters");
7391 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
7394 bt_dev_err(hdev, "failed to add connection parameters");
7398 hci_param->conn_min_interval = min;
7399 hci_param->conn_max_interval = max;
7400 hci_param->conn_latency = latency;
7401 hci_param->supervision_timeout = timeout;
7404 hci_dev_unlock(hdev);
7406 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle the HCI_EXT_CONFIGURED flag
 * on an unpowered controller that declares HCI_QUIRK_EXTERNAL_CONFIG, and
 * move the controller between the configured and unconfigured index lists
 * when the effective configuration state changes.
 */
7410 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7411 void *data, u16 len)
7413 struct mgmt_cp_set_external_config *cp = data;
7417 bt_dev_dbg(hdev, "sock %p", sk);
/* Configuration may only change while powered off. */
7419 if (hdev_is_powered(hdev))
7420 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7421 MGMT_STATUS_REJECTED);
7423 if (cp->config != 0x00 && cp->config != 0x01)
7424 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7425 MGMT_STATUS_INVALID_PARAMS);
7427 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7428 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7429 MGMT_STATUS_NOT_SUPPORTED);
/* test-and-set/clear report whether the flag actually flipped. */
7434 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7436 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7438 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7445 err = new_options(hdev, sk);
/* If the unconfigured flag disagrees with the actual configuration
 * state, re-announce the index under its new identity.
 */
7447 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7448 mgmt_index_removed(hdev);
7450 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7451 hci_dev_set_flag(hdev, HCI_CONFIG);
7452 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7454 queue_work(hdev->req_workqueue, &hdev->power_on);
7456 set_bit(HCI_RAW, &hdev->flags);
7457 mgmt_index_added(hdev);
7462 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a new public address for an
 * unpowered controller whose driver provides a set_bdaddr callback. When
 * this completes the configuration, re-register the index as configured
 * and kick off power-on.
 */
7466 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
7467 void *data, u16 len)
7469 struct mgmt_cp_set_public_address *cp = data;
7473 bt_dev_dbg(hdev, "sock %p", sk);
7475 if (hdev_is_powered(hdev))
7476 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7477 MGMT_STATUS_REJECTED);
/* BDADDR_ANY is not a valid public address. */
7479 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
7480 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7481 MGMT_STATUS_INVALID_PARAMS);
/* Requires driver support for programming the address. */
7483 if (!hdev->set_bdaddr)
7484 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
7485 MGMT_STATUS_NOT_SUPPORTED);
7489 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
7490 bacpy(&hdev->public_addr, &cp->bdaddr);
7492 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
7499 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
7500 err = new_options(hdev, sk);
/* Address was the missing piece: flip to configured and power on. */
7502 if (is_configured(hdev)) {
7503 mgmt_index_removed(hdev);
7505 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
7507 hci_dev_set_flag(hdev, HCI_CONFIG);
7508 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7510 queue_work(hdev->req_workqueue, &hdev->power_on);
7514 hci_dev_unlock(hdev);
/* Completion handler for the BR/EDR leg of Read Local OOB Extended Data:
 * parse the controller's OOB reply skb (legacy C192/R192 only, or the
 * extended form with C256/R256 as well), build an EIR-encoded mgmt reply,
 * send it to the requester and broadcast it to sockets that subscribed to
 * OOB data events.
 */
7518 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
7521 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
7522 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
7523 u8 *h192, *r192, *h256, *r256;
7524 struct mgmt_pending_cmd *cmd = data;
7525 struct sk_buff *skb = cmd->skb;
7526 u8 status = mgmt_status(err);
/* Bail out if this command is no longer the pending one. */
7529 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
7534 status = MGMT_STATUS_FAILED;
7535 else if (IS_ERR(skb))
7536 status = mgmt_status(PTR_ERR(skb));
/* First byte of the HCI reply is the controller status. */
7538 status = mgmt_status(skb->data[0]);
7541 bt_dev_dbg(hdev, "status %u", status);
7543 mgmt_cp = cmd->param;
7546 status = mgmt_status(status);
/* Legacy SSP controller: only the P-192 hash/randomizer pair. */
7553 } else if (!bredr_sc_enabled(hdev)) {
7554 struct hci_rp_read_local_oob_data *rp;
7556 if (skb->len != sizeof(*rp)) {
7557 status = MGMT_STATUS_FAILED;
7560 status = MGMT_STATUS_SUCCESS;
7561 rp = (void *)skb->data;
/* 5 bytes class-of-device TLV + 18 bytes each for hash/rand TLVs. */
7563 eir_len = 5 + 18 + 18;
/* Secure Connections capable: extended reply with P-256 values. */
7570 struct hci_rp_read_local_oob_ext_data *rp;
7572 if (skb->len != sizeof(*rp)) {
7573 status = MGMT_STATUS_FAILED;
7576 status = MGMT_STATUS_SUCCESS;
7577 rp = (void *)skb->data;
/* SC-only mode omits the legacy P-192 values. */
7579 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
7580 eir_len = 5 + 18 + 18;
7584 eir_len = 5 + 18 + 18 + 18 + 18;
7594 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
7601 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
7602 hdev->dev_class, 3);
7605 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7606 EIR_SSP_HASH_C192, h192, 16);
7607 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7608 EIR_SSP_RAND_R192, r192, 16);
7612 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7613 EIR_SSP_HASH_C256, h256, 16);
7614 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
7615 EIR_SSP_RAND_R256, r256, 16);
7619 mgmt_rp->type = mgmt_cp->type;
7620 mgmt_rp->eir_len = cpu_to_le16(eir_len);
7622 err = mgmt_cmd_complete(cmd->sk, hdev->id,
7623 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
7624 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
7625 if (err < 0 || status)
/* Subscribe the requester to future OOB updates, then broadcast. */
7628 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
7630 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7631 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
7632 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
7634 if (skb && !IS_ERR(skb))
7638 mgmt_pending_remove(cmd);
/* Queue the HCI read of local SSP OOB data for the extended-data command;
 * read_local_oob_ext_data_complete() consumes the result.
 */
7641 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7642 struct mgmt_cp_read_local_oob_ext_data *cp)
7644 struct mgmt_pending_cmd *cmd;
7647 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7652 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7653 read_local_oob_ext_data_complete);
7656 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler. For BR/EDR with SSP enabled the
 * controller is queried asynchronously (read_local_ssp_oob_req); for LE the
 * reply is built synchronously: address + role TLVs, SC confirm/random
 * values when Secure Connections is enabled, and advertising flags.
 */
7663 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
7664 void *data, u16 data_len)
7666 struct mgmt_cp_read_local_oob_ext_data *cp = data;
7667 struct mgmt_rp_read_local_oob_ext_data *rp;
7670 u8 status, flags, role, addr[7], hash[16], rand[16];
7673 bt_dev_dbg(hdev, "sock %p", sk);
/* cp->type is a bitmask of address types; only the two exact
 * combinations below are supported.
 */
7675 if (hdev_is_powered(hdev)) {
7677 case BIT(BDADDR_BREDR):
7678 status = mgmt_bredr_support(hdev);
7684 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7685 status = mgmt_le_support(hdev);
/* Worst-case LE EIR: addr + role + confirm + random + flags TLVs. */
7689 eir_len = 9 + 3 + 18 + 18 + 3;
7692 status = MGMT_STATUS_INVALID_PARAMS;
7697 status = MGMT_STATUS_NOT_POWERED;
7701 rp_len = sizeof(*rp) + eir_len;
7702 rp = kmalloc(rp_len, GFP_ATOMIC);
7706 if (!status && !lmp_ssp_capable(hdev)) {
7707 status = MGMT_STATUS_NOT_SUPPORTED;
7718 case BIT(BDADDR_BREDR):
/* SSP enabled: defer to the async controller query. */
7719 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7720 err = read_local_ssp_oob_req(hdev, sk, cp);
7721 hci_dev_unlock(hdev);
7725 status = MGMT_STATUS_FAILED;
7728 eir_len = eir_append_data(rp->eir, eir_len,
7730 hdev->dev_class, 3);
7733 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
7734 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
7735 smp_generate_oob(hdev, hash, rand) < 0) {
7736 hci_dev_unlock(hdev);
7737 status = MGMT_STATUS_FAILED;
7741 /* This should return the active RPA, but since the RPA
7742 * is only programmed on demand, it is really hard to fill
7743 * this in at the moment. For now disallow retrieving
7744 * local out-of-band data when privacy is in use.
7746 * Returning the identity address will not help here since
7747 * pairing happens before the identity resolving key is
7748 * known and thus the connection establishment happens
7749 * based on the RPA and not the identity address.
 */
7751 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
7752 hci_dev_unlock(hdev);
7753 status = MGMT_STATUS_REJECTED;
/* Choose static vs public address with the same rules used for
 * actual LE connection establishment.
 */
7757 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
7758 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
7759 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
7760 bacmp(&hdev->static_addr, BDADDR_ANY))) {
7761 memcpy(addr, &hdev->static_addr, 6);
7764 memcpy(addr, &hdev->bdaddr, 6);
7768 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
7769 addr, sizeof(addr));
7771 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7776 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
7777 &role, sizeof(role));
7779 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
7780 eir_len = eir_append_data(rp->eir, eir_len,
7782 hash, sizeof(hash));
7784 eir_len = eir_append_data(rp->eir, eir_len,
7786 rand, sizeof(rand));
7789 flags = mgmt_get_adv_discov_flags(hdev);
7791 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
7792 flags |= LE_AD_NO_BREDR;
7794 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
7795 &flags, sizeof(flags));
7799 hci_dev_unlock(hdev);
/* Requester gets subscribed to future OOB data updates. */
7801 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
7803 status = MGMT_STATUS_SUCCESS;
7806 rp->type = cp->type;
7807 rp->eir_len = cpu_to_le16(eir_len);
7809 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
7810 status, rp, sizeof(*rp) + eir_len);
7811 if (err < 0 || status)
7814 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
7815 rp, sizeof(*rp) + eir_len,
7816 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Compute the advertising-flags bitmask advertised to userspace in Read
 * Advertising Features, based on the controller's capabilities.
 */
7824 static u32 get_supported_adv_flags(struct hci_dev *hdev)
/* Baseline flags supported on every LE controller. */
7828 flags |= MGMT_ADV_FLAG_CONNECTABLE;
7829 flags |= MGMT_ADV_FLAG_DISCOV;
7830 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7831 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7832 flags |= MGMT_ADV_FLAG_APPEARANCE;
7833 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7834 flags |= MGMT_ADV_PARAM_DURATION;
7835 flags |= MGMT_ADV_PARAM_TIMEOUT;
7836 flags |= MGMT_ADV_PARAM_INTERVALS;
7837 flags |= MGMT_ADV_PARAM_TX_POWER;
7838 flags |= MGMT_ADV_PARAM_SCAN_RSP;
7840 /* In extended adv TX_POWER returned from Set Adv Param
7841 * will be always valid.
 */
7843 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7844 ext_adv_capable(hdev))
7845 flags |= MGMT_ADV_FLAG_TX_POWER;
/* Extended-advertising extras, including optional 2M/Coded PHYs. */
7847 if (ext_adv_capable(hdev)) {
7848 flags |= MGMT_ADV_FLAG_SEC_1M;
7849 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7850 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7852 if (hdev->le_features[1] & HCI_LE_PHY_2M)
7853 flags |= MGMT_ADV_FLAG_SEC_2M;
7855 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7856 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising flags,
 * data-length limits, the instance limit, and the list of currently
 * registered advertising instance identifiers.
 */
7862 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
7863 void *data, u16 data_len)
7865 struct mgmt_rp_read_adv_features *rp;
7868 struct adv_info *adv_instance;
7869 u32 supported_flags;
7872 bt_dev_dbg(hdev, "sock %p", sk);
7874 if (!lmp_le_capable(hdev))
7875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7876 MGMT_STATUS_REJECTED);
/* One trailing byte per registered instance for the id list. */
7880 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
7881 rp = kmalloc(rp_len, GFP_ATOMIC);
7883 hci_dev_unlock(hdev);
7887 supported_flags = get_supported_adv_flags(hdev);
7889 rp->supported_flags = cpu_to_le32(supported_flags);
7890 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
7891 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
7892 rp->max_instances = hdev->le_num_of_adv_sets;
7893 rp->num_instances = hdev->adv_instance_cnt;
/* Copy out each registered instance id. */
7895 instance = rp->instance;
7896 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
7897 *instance = adv_instance->instance;
7901 hci_dev_unlock(hdev);
7903 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
7904 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Length in bytes that the local-name EIR field would occupy, computed by
 * encoding it into a scratch buffer.
 */
7911 static u8 calculate_name_len(struct hci_dev *hdev)
7913 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
7915 return eir_append_local_name(hdev, buf, 0);
/* Maximum user-suppliable TLV payload for an instance's advertising or
 * scan-response data, after subtracting space for fields the kernel
 * appends itself (flags, TX power, local name, appearance).
 */
7918 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7921 u8 max_len = HCI_MAX_AD_LENGTH;
7924 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7925 MGMT_ADV_FLAG_LIMITED_DISCOV |
7926 MGMT_ADV_FLAG_MANAGED_FLAGS))
7929 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7932 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7933 max_len -= calculate_name_len(hdev)
7935 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True when the kernel manages the Flags AD field for these adv_flags. */
7942 static bool flags_managed(u32 adv_flags)
7944 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7945 MGMT_ADV_FLAG_LIMITED_DISCOV |
7946 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True when the kernel manages the TX Power AD field. */
7949 static bool tx_power_managed(u32 adv_flags)
7951 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True when the kernel manages the Local Name AD field. */
7954 static bool name_managed(u32 adv_flags)
7956 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True when the kernel manages the Appearance AD field. */
7959 static bool appearance_managed(u32 adv_flags)
7961 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising/scan-response TLV data: total length
 * within the computed maximum, well-formed length-type-value structure, and
 * no fields that conflict with kernel-managed AD types.
 */
7964 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7965 u8 len, bool is_adv_data)
7970 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7975 /* Make sure that the data is correctly formatted. */
7976 for (i = 0; i < len; i += (cur_len + 1)) {
/* Reject AD types the kernel itself generates for this config;
 * data[i + 1] is the type byte of the current TLV.
 */
7982 if (data[i + 1] == EIR_FLAGS &&
7983 (!is_adv_data || flags_managed(adv_flags)))
7986 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7989 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7992 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7995 if (data[i + 1] == EIR_APPEARANCE &&
7996 appearance_managed(adv_flags))
7999 /* If the current field length would exceed the total data
8000 * length, then it's invalid.
 */
8002 if (i + cur_len >= len)
/* Check that the requested advertising flags are a subset of what the
 * controller supports and that at most one secondary-PHY flag is set.
 */
8009 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
8011 u32 supported_flags, phy_flags;
8013 /* The current implementation only supports a subset of the specified
8014 * flags. Also need to check mutual exclusiveness of sec flags.
 */
8016 supported_flags = get_supported_adv_flags(hdev);
8017 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero iff more than one
 * PHY bit is set (clears the lowest set bit and checks the remainder).
 */
8018 if (adv_flags & ~supported_flags ||
8019 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True while a Set LE command is pending, i.e. advertising state may be
 * about to change and new advertising commands must be rejected as busy.
 */
8025 static bool adv_busy(struct hci_dev *hdev)
8027 return pending_find(MGMT_OP_SET_LE, hdev);
/* Shared failure cleanup for Add Advertising / Add Ext Adv Data: on error,
 * drop any still-pending advertising instances, cancelling the rotation
 * timer when the current instance is removed, and notify userspace of each
 * removal.
 */
8030 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8033 struct adv_info *adv, *n;
8035 bt_dev_dbg(hdev, "err %d", err);
8039 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8046 adv->pending = false;
8050 instance = adv->instance;
8052 if (hdev->cur_adv_instance == instance)
8053 cancel_adv_timeout(hdev);
8055 hci_remove_adv_instance(hdev, instance);
8056 mgmt_advertising_removed(sk, hdev, instance);
8059 hci_dev_unlock(hdev);
/* Completion callback for Add Advertising: report status (or the instance
 * number on success) to the requester, run the shared add_adv_complete()
 * cleanup, and free the pending command.
 */
8062 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8064 struct mgmt_pending_cmd *cmd = data;
8065 struct mgmt_cp_add_advertising *cp = cmd->param;
8066 struct mgmt_rp_add_advertising rp;
8068 memset(&rp, 0, sizeof(rp));
8070 rp.instance = cp->instance;
8073 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8076 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8077 mgmt_status(err), &rp, sizeof(rp));
8079 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8081 mgmt_pending_free(cmd);
/* hci_cmd_sync worker: schedule the newly added advertising instance. */
8084 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8086 struct mgmt_pending_cmd *cmd = data;
8087 struct mgmt_cp_add_advertising *cp = cmd->param;
8089 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* MGMT_OP_ADD_ADVERTISING handler: validate the request (LE support,
 * instance range, payload length, flags, TLV contents), register or replace
 * the advertising instance, and schedule it on the controller when needed.
 */
8092 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
8093 void *data, u16 data_len)
8095 struct mgmt_cp_add_advertising *cp = data;
8096 struct mgmt_rp_add_advertising rp;
8099 u16 timeout, duration;
8100 unsigned int prev_instance_cnt;
8101 u8 schedule_instance = 0;
8102 struct adv_info *next_instance;
8104 struct mgmt_pending_cmd *cmd;
8106 bt_dev_dbg(hdev, "sock %p", sk);
8108 status = mgmt_le_support(hdev);
8110 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Instance ids are 1..le_num_of_adv_sets. */
8113 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8115 MGMT_STATUS_INVALID_PARAMS);
/* Packet length must exactly match header plus both data blobs. */
8117 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
8118 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8119 MGMT_STATUS_INVALID_PARAMS);
8121 flags = __le32_to_cpu(cp->flags);
8122 timeout = __le16_to_cpu(cp->timeout);
8123 duration = __le16_to_cpu(cp->duration);
8125 if (!requested_adv_flags_are_valid(hdev, flags))
8126 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8127 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs a running clock, i.e. a powered adapter. */
8131 if (timeout && !hdev_is_powered(hdev)) {
8132 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8133 MGMT_STATUS_REJECTED);
8137 if (adv_busy(hdev)) {
8138 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* adv data and scan_rsp are packed back to back in cp->data. */
8143 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
8144 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
8145 cp->scan_rsp_len, false)) {
8146 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8147 MGMT_STATUS_INVALID_PARAMS);
8151 prev_instance_cnt = hdev->adv_instance_cnt;
8153 err = hci_add_adv_instance(hdev, cp->instance, flags,
8154 cp->adv_data_len, cp->data,
8156 cp->data + cp->adv_data_len,
8158 HCI_ADV_TX_POWER_NO_PREFERENCE,
8159 hdev->le_adv_min_interval,
8160 hdev->le_adv_max_interval);
8162 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8163 MGMT_STATUS_FAILED);
8167 /* Only trigger an advertising added event if a new instance was
 * actually added (replacing an existing one does not count).
 */
8170 if (hdev->adv_instance_cnt > prev_instance_cnt)
8171 mgmt_advertising_added(sk, hdev, cp->instance)
8173 if (hdev->cur_adv_instance == cp->instance) {
8174 /* If the currently advertised instance is being changed then
8175 * cancel the current advertising and schedule the next
8176 * instance. If there is only one instance then the overridden
8177 * advertising data will be visible right away.
 */
8179 cancel_adv_timeout(hdev);
8181 next_instance = hci_get_next_instance(hdev, cp->instance);
8183 schedule_instance = next_instance->instance;
8184 } else if (!hdev->adv_instance_timeout) {
8185 /* Immediately advertise the new instance if no other
8186 * instance is currently being advertised.
 */
8188 schedule_instance = cp->instance;
8191 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
8192 * there is no instance to be advertised then we have no HCI
8193 * communication to make. Simply return.
 */
8195 if (!hdev_is_powered(hdev) ||
8196 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
8197 !schedule_instance) {
8198 rp.instance = cp->instance;
8199 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8200 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8204 /* We're good to go, update advertising data, parameters, and start
 * advertising.
 */
8207 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
/* Re-point the stored request at whichever instance got scheduled. */
8214 cp->instance = schedule_instance;
8216 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
8217 add_advertising_complete);
8219 mgmt_pending_free(cmd);
8222 hci_dev_unlock(hdev);
/* Completion callback for Add Extended Adv Parameters: on success report
 * the chosen TX power and remaining data-space limits; on failure tear down
 * the instance (announcing its removal) and report the error status.
 */
8227 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8230 struct mgmt_pending_cmd *cmd = data;
8231 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8232 struct mgmt_rp_add_ext_adv_params rp;
8233 struct adv_info *adv;
8236 BT_DBG("%s", hdev->name);
8240 adv = hci_find_adv_instance(hdev, cp->instance);
8244 rp.instance = cp->instance;
8245 rp.tx_power = adv->tx_power;
8247 /* While we're at it, inform userspace of the available space for this
8248 * advertisement, given the flags that will be used.
 */
8250 flags = __le32_to_cpu(cp->flags);
8251 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8252 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8255 /* If this advertisement was previously advertising and we
8256 * failed to update it, we signal that it has been removed and
8257 * delete its structure
 */
8260 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8262 hci_remove_adv_instance(hdev, cp->instance);
8264 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8267 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8268 mgmt_status(err), &rp, sizeof(rp));
8273 mgmt_pending_free(cmd);
8275 hci_dev_unlock(hdev);
/* hci_cmd_sync worker: program the extended adv parameters for an instance. */
8278 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8280 struct mgmt_pending_cmd *cmd = data;
8281 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8283 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: first half of the two-step extended
 * advertising setup. Creates a data-less advertising instance from the
 * (partially optional) parameters; with extended-adv controllers the
 * parameters are pushed to hardware asynchronously, otherwise a synthetic
 * success reply is returned immediately.
 */
8286 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8287 void *data, u16 data_len)
8289 struct mgmt_cp_add_ext_adv_params *cp = data;
8290 struct mgmt_rp_add_ext_adv_params rp;
8291 struct mgmt_pending_cmd *cmd = NULL;
8292 u32 flags, min_interval, max_interval;
8293 u16 timeout, duration;
8298 BT_DBG("%s", hdev->name);
8300 status = mgmt_le_support(hdev);
8302 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8305 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8306 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8307 MGMT_STATUS_INVALID_PARAMS);
8309 /* The purpose of breaking add_advertising into two separate MGMT calls
8310 * for params and data is to allow more parameters to be added to this
8311 * structure in the future. For this reason, we verify that we have the
8312 * bare minimum structure we know of when the interface was defined. Any
8313 * extra parameters we don't know about will be ignored in this request.
 */
8315 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8316 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8317 MGMT_STATUS_INVALID_PARAMS);
8319 flags = __le32_to_cpu(cp->flags);
8321 if (!requested_adv_flags_are_valid(hdev, flags))
8322 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8323 MGMT_STATUS_INVALID_PARAMS);
8327 /* In new interface, we require that we are powered to register */
8328 if (!hdev_is_powered(hdev)) {
8329 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8330 MGMT_STATUS_REJECTED);
8334 if (adv_busy(hdev)) {
8335 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8340 /* Parse defined parameters from request, use defaults otherwise */
8341 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8342 __le16_to_cpu(cp->timeout) : 0;
8344 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8345 __le16_to_cpu(cp->duration) :
8346 hdev->def_multi_adv_rotation_duration;
8348 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8349 __le32_to_cpu(cp->min_interval) :
8350 hdev->le_adv_min_interval;
8352 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8353 __le32_to_cpu(cp->max_interval) :
8354 hdev->le_adv_max_interval;
8356 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8358 HCI_ADV_TX_POWER_NO_PREFERENCE;
8360 /* Create advertising instance with no advertising or response data */
8361 err = hci_add_adv_instance(hdev, cp->instance, flags,
8362 0, NULL, 0, NULL, timeout, duration,
8363 tx_power, min_interval, max_interval);
8366 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8367 MGMT_STATUS_FAILED);
8371 /* Submit request for advertising params if ext adv available */
8372 if (ext_adv_capable(hdev)) {
8373 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
/* Allocation failed: undo the instance created above. */
8377 hci_remove_adv_instance(hdev, cp->instance);
8381 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8382 add_ext_adv_params_complete);
8384 mgmt_pending_free(cmd);
/* Legacy advertising: answer immediately with defaults. */
8386 rp.instance = cp->instance;
8387 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8388 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8389 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8390 err = mgmt_cmd_complete(sk, hdev->id,
8391 MGMT_OP_ADD_EXT_ADV_PARAMS,
8392 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8396 hci_dev_unlock(hdev);
/* Completion callback for the Add Extended Advertising Data command.
 * Answers the socket that issued the command and releases the pending
 * command record allocated in add_ext_adv_data().
 */
8401 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8403 struct mgmt_pending_cmd *cmd = data;
8404 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8405 struct mgmt_rp_add_advertising rp;
/* Common add-advertising bookkeeping (instance cleanup / event emit). */
8407 add_adv_complete(hdev, cmd->sk, cp->instance, err);
8409 memset(&rp, 0, sizeof(rp));
8411 rp.instance = cp->instance;
/* NOTE(review): the branch selecting between the status (error) reply
 * and the complete (success) reply is not visible in this extract —
 * confirm against the full source.
 */
8414 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8417 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8418 mgmt_status(err), &rp, sizeof(rp));
8420 mgmt_pending_free(cmd);
/* hci_cmd_sync worker for Add Extended Advertising Data.
 * Controllers with extended advertising: push the new advertising and
 * scan-response data, then enable the extended advertising set.
 * Legacy controllers: hand the instance to the software-rotation
 * scheduler instead.
 */
8423 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8425 struct mgmt_pending_cmd *cmd = data;
8426 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8429 if (ext_adv_capable(hdev)) {
8430 err = hci_update_adv_data_sync(hdev, cp->instance);
8434 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8438 return hci_enable_ext_advertising_sync(hdev, cp->instance);
8441 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* MGMT_OP_ADD_EXT_ADV_DATA handler: second half of the two-call
 * extended advertising interface. The instance must already exist
 * (created by MGMT_OP_ADD_EXT_ADV_PARAMS); this call validates and
 * installs the advertising / scan-response payloads and, if needed,
 * queues the HCI work to start advertising.
 */
8444 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
8447 struct mgmt_cp_add_ext_adv_data *cp = data;
8448 struct mgmt_rp_add_ext_adv_data rp;
8449 u8 schedule_instance = 0;
8450 struct adv_info *next_instance;
8451 struct adv_info *adv_instance;
8453 struct mgmt_pending_cmd *cmd;
8455 BT_DBG("%s", hdev->name);
/* The instance must have been registered by a prior params call. */
8459 adv_instance = hci_find_adv_instance(hdev, cp->instance);
8461 if (!adv_instance) {
8462 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8463 MGMT_STATUS_INVALID_PARAMS);
8467 /* In new interface, we require that we are powered to register */
8468 if (!hdev_is_powered(hdev)) {
8469 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8470 MGMT_STATUS_REJECTED);
8471 goto clear_new_instance;
8474 if (adv_busy(hdev)) {
8475 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8477 goto clear_new_instance;
8480 /* Validate new data */
/* cp->data holds adv data followed immediately by scan-rsp data. */
8481 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
8482 cp->adv_data_len, true) ||
8483 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
8484 cp->adv_data_len, cp->scan_rsp_len, false)) {
8485 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8486 MGMT_STATUS_INVALID_PARAMS);
8487 goto clear_new_instance;
8490 /* Set the data in the advertising instance */
8491 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
8492 cp->data, cp->scan_rsp_len,
8493 cp->data + cp->adv_data_len);
8495 /* If using software rotation, determine next instance to use */
8496 if (hdev->cur_adv_instance == cp->instance) {
8497 /* If the currently advertised instance is being changed
8498 * then cancel the current advertising and schedule the
8499 * next instance. If there is only one instance then the
8500 * overridden advertising data will be visible right
8503 cancel_adv_timeout(hdev);
8505 next_instance = hci_get_next_instance(hdev, cp->instance);
8507 schedule_instance = next_instance->instance;
8508 } else if (!hdev->adv_instance_timeout) {
8509 /* Immediately advertise the new instance if no other
8510 * instance is currently being advertised.
8512 schedule_instance = cp->instance;
8515 /* If the HCI_ADVERTISING flag is set or there is no instance to
8516 * be advertised then we have no HCI communication to make.
8519 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
8520 if (adv_instance->pending) {
8521 mgmt_advertising_added(sk, hdev, cp->instance);
8522 adv_instance->pending = false;
8524 rp.instance = cp->instance;
8525 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
8526 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* From here on we need HCI traffic: track the command and queue work. */
8530 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
8534 goto clear_new_instance;
8537 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
8538 add_ext_adv_data_complete);
8540 mgmt_pending_free(cmd);
8541 goto clear_new_instance;
8544 /* We were successful in updating data, so trigger advertising_added
8545 * event if this is an instance that wasn't previously advertising. If
8546 * a failure occurs in the requests we initiated, we will remove the
8547 * instance again in add_advertising_complete
8549 if (adv_instance->pending)
8550 mgmt_advertising_added(sk, hdev, cp->instance);
/* clear_new_instance: error path — drop the instance registered by the
 * earlier ADD_EXT_ADV_PARAMS call so state does not leak.
 */
8555 hci_remove_adv_instance(hdev, cp->instance);
8558 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_REMOVE_ADVERTISING: reply to the
 * issuing socket with the removed instance (or an error status) and
 * free the pending command.
 */
8563 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8566 struct mgmt_pending_cmd *cmd = data;
8567 struct mgmt_cp_remove_advertising *cp = cmd->param;
8568 struct mgmt_rp_remove_advertising rp;
8570 bt_dev_dbg(hdev, "err %d", err);
8572 memset(&rp, 0, sizeof(rp));
8573 rp.instance = cp->instance;
/* Error path replies with a bare status; success echoes the instance. */
8576 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8579 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8580 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8582 mgmt_pending_free(cmd);
/* hci_cmd_sync worker for MGMT_OP_REMOVE_ADVERTISING: tear down the
 * requested instance and, when no instances remain, disable
 * advertising on the controller entirely.
 */
8585 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8587 struct mgmt_pending_cmd *cmd = data;
8588 struct mgmt_cp_remove_advertising *cp = cmd->param;
8591 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8595 if (list_empty(&hdev->adv_instances))
8596 err = hci_disable_advertising_sync(hdev);
/* MGMT_OP_REMOVE_ADVERTISING handler. Instance 0 means "remove all";
 * a non-zero instance must exist. Rejected while a SET_LE operation
 * is pending or when there is nothing to remove.
 */
8601 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
8602 void *data, u16 data_len)
8604 struct mgmt_cp_remove_advertising *cp = data;
8605 struct mgmt_pending_cmd *cmd;
8608 bt_dev_dbg(hdev, "sock %p", sk);
/* instance == 0 is the wildcard; otherwise it must be registered. */
8612 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
8613 err = mgmt_cmd_status(sk, hdev->id,
8614 MGMT_OP_REMOVE_ADVERTISING,
8615 MGMT_STATUS_INVALID_PARAMS);
8619 if (pending_find(MGMT_OP_SET_LE, hdev)) {
8620 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8625 if (list_empty(&hdev->adv_instances)) {
8626 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
8627 MGMT_STATUS_INVALID_PARAMS);
8631 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
8638 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
8639 remove_advertising_complete);
/* On queueing failure free the just-allocated pending command. */
8641 mgmt_pending_free(cmd);
8644 hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_SIZE_INFO handler: purely informational — reports,
 * for a given instance and flag set, how many bytes of advertising
 * and scan-response data the controller can accept. No state change.
 */
8649 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8650 void *data, u16 data_len)
8652 struct mgmt_cp_get_adv_size_info *cp = data;
8653 struct mgmt_rp_get_adv_size_info rp;
8654 u32 flags, supported_flags;
8656 bt_dev_dbg(hdev, "sock %p", sk);
8658 if (!lmp_le_capable(hdev))
8659 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8660 MGMT_STATUS_REJECTED);
/* Valid instances are 1..le_num_of_adv_sets. */
8662 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8663 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8664 MGMT_STATUS_INVALID_PARAMS);
8666 flags = __le32_to_cpu(cp->flags);
8668 /* The current implementation only supports a subset of the specified
8671 supported_flags = get_supported_adv_flags(hdev);
8672 if (flags & ~supported_flags)
8673 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8674 MGMT_STATUS_INVALID_PARAMS);
8676 rp.instance = cp->instance;
8677 rp.flags = cp->flags;
8678 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8679 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8681 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8682 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for MGMT commands: indexed by opcode (entry 0 is the
 * unused "no command" slot). Each entry names the handler, the fixed
 * or minimum parameter size used for request validation, and optional
 * HCI_MGMT_* flags (UNTRUSTED = allowed on untrusted sockets,
 * UNCONFIGURED = allowed on unconfigured controllers, HDEV_OPTIONAL =
 * may be sent without a controller index).
 */
8685 static const struct hci_mgmt_handler mgmt_handlers[] = {
8686 { NULL }, /* 0x0000 (no command) */
8687 { read_version, MGMT_READ_VERSION_SIZE,
8689 HCI_MGMT_UNTRUSTED },
8690 { read_commands, MGMT_READ_COMMANDS_SIZE,
8692 HCI_MGMT_UNTRUSTED },
8693 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
8695 HCI_MGMT_UNTRUSTED },
8696 { read_controller_info, MGMT_READ_INFO_SIZE,
8697 HCI_MGMT_UNTRUSTED },
8698 { set_powered, MGMT_SETTING_SIZE },
8699 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
8700 { set_connectable, MGMT_SETTING_SIZE },
8701 { set_fast_connectable, MGMT_SETTING_SIZE },
8702 { set_bondable, MGMT_SETTING_SIZE },
8703 { set_link_security, MGMT_SETTING_SIZE },
8704 { set_ssp, MGMT_SETTING_SIZE },
8705 { set_hs, MGMT_SETTING_SIZE },
8706 { set_le, MGMT_SETTING_SIZE },
8707 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
8708 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
8709 { add_uuid, MGMT_ADD_UUID_SIZE },
8710 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
8711 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
8713 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
8715 { disconnect, MGMT_DISCONNECT_SIZE },
8716 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
8717 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
8718 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
8719 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
8720 { pair_device, MGMT_PAIR_DEVICE_SIZE },
8721 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
8722 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
8723 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
8724 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
8725 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
8726 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
8727 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
8728 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
8730 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
8731 { start_discovery, MGMT_START_DISCOVERY_SIZE },
8732 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
8733 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
8734 { block_device, MGMT_BLOCK_DEVICE_SIZE },
8735 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
8736 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
8737 { set_advertising, MGMT_SETTING_SIZE },
8738 { set_bredr, MGMT_SETTING_SIZE },
8739 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
8740 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
8741 { set_secure_conn, MGMT_SETTING_SIZE },
8742 { set_debug_keys, MGMT_SETTING_SIZE },
8743 { set_privacy, MGMT_SET_PRIVACY_SIZE },
8744 { load_irks, MGMT_LOAD_IRKS_SIZE,
8746 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
8747 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
8748 { add_device, MGMT_ADD_DEVICE_SIZE },
8749 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
8750 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
8752 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
8754 HCI_MGMT_UNTRUSTED },
8755 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
8756 HCI_MGMT_UNCONFIGURED |
8757 HCI_MGMT_UNTRUSTED },
8758 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
8759 HCI_MGMT_UNCONFIGURED },
8760 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
8761 HCI_MGMT_UNCONFIGURED },
8762 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
8764 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
8765 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
8767 HCI_MGMT_UNTRUSTED },
8768 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
8769 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
8771 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
8772 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
8773 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
8774 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
8775 HCI_MGMT_UNTRUSTED },
8776 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
8777 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
8778 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
8779 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
8781 { set_wideband_speech, MGMT_SETTING_SIZE },
8782 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
8783 HCI_MGMT_UNTRUSTED },
8784 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
8785 HCI_MGMT_UNTRUSTED |
8786 HCI_MGMT_HDEV_OPTIONAL },
8787 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
8789 HCI_MGMT_HDEV_OPTIONAL },
8790 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
8791 HCI_MGMT_UNTRUSTED },
8792 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
8794 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
8795 HCI_MGMT_UNTRUSTED },
8796 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
8798 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
8799 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
8800 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
8801 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
8803 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
8804 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
8806 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
8808 { add_adv_patterns_monitor_rssi,
8809 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
/* Announce a newly registered controller to MGMT listeners. Raw
 * (HCI_QUIRK_RAW_DEVICE) devices are invisible to MGMT. Unconfigured
 * controllers get UNCONF_INDEX_ADDED, configured ones INDEX_ADDED,
 * and an EXT_INDEX_ADDED event is always emitted for ext listeners.
 */
8813 void mgmt_index_added(struct hci_dev *hdev)
8815 struct mgmt_ev_ext_index ev;
8817 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* Event choice depends on the device type and configured state. */
8820 switch (hdev->dev_type) {
8822 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8823 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
8824 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8827 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
8828 HCI_MGMT_INDEX_EVENTS);
8841 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
8842 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce controller removal: fail all pending MGMT commands with
 * INVALID_INDEX, then mirror mgmt_index_added()'s event selection
 * (UNCONF_INDEX_REMOVED / INDEX_REMOVED plus EXT_INDEX_REMOVED).
 */
8845 void mgmt_index_removed(struct hci_dev *hdev)
8847 struct mgmt_ev_ext_index ev;
8848 u8 status = MGMT_STATUS_INVALID_INDEX;
8850 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
8853 switch (hdev->dev_type) {
/* Opcode 0 matches every pending command for this controller. */
8855 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
8857 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
8858 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
8859 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
8862 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
8863 HCI_MGMT_INDEX_EVENTS);
8876 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
8877 HCI_MGMT_EXT_INDEX_EVENTS);
/* Called when the power-on sequence finishes. On success, restart LE
 * background actions and passive scanning; in all cases answer any
 * pending SET_POWERED commands and emit New Settings if needed.
 */
8880 void mgmt_power_on(struct hci_dev *hdev, int err)
8882 struct cmd_lookup match = { NULL, hdev };
8884 bt_dev_dbg(hdev, "err %d", err);
8889 restart_le_actions(hdev);
8890 hci_update_passive_scan(hdev);
8893 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
/* match.sk was captured by settings_rsp; skip it in the broadcast. */
8895 new_settings(hdev, match.sk);
8900 hci_dev_unlock(hdev);
/* Power-off counterpart: answer pending SET_POWERED commands, fail all
 * other pending commands with a status that reflects why power is
 * going away, broadcast a zero class-of-device if one was set, and
 * emit New Settings.
 */
8903 void __mgmt_power_off(struct hci_dev *hdev)
8905 struct cmd_lookup match = { NULL, hdev };
8906 u8 status, zero_cod[] = { 0, 0, 0 };
8908 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
8910 /* If the power off is because of hdev unregistration let
8911 * use the appropriate INVALID_INDEX status. Otherwise use
8912 * NOT_POWERED. We cover both scenarios here since later in
8913 * mgmt_index_removed() any hci_conn callbacks will have already
8914 * been triggered, potentially causing misleading DISCONNECTED
8917 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
8918 status = MGMT_STATUS_INVALID_INDEX;
8920 status = MGMT_STATUS_NOT_POWERED;
8922 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Only broadcast the class change if it was actually non-zero. */
8924 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
8925 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
8926 zero_cod, sizeof(zero_cod),
8927 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
8928 ext_info_changed(hdev, NULL);
8931 new_settings(hdev, match.sk);
/* Report failure of a pending SET_POWERED command. -ERFKILL maps to
 * the dedicated RFKILLED status; everything else becomes FAILED.
 */
8937 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8939 struct mgmt_pending_cmd *cmd;
8942 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8946 if (err == -ERFKILL)
8947 status = MGMT_STATUS_RFKILLED;
8949 status = MGMT_STATUS_FAILED;
8951 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8953 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key.
 * store_hint tells userspace whether the key should be persisted.
 */
8956 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8959 struct mgmt_ev_new_link_key ev;
8961 memset(&ev, 0, sizeof(ev));
8963 ev.store_hint = persistent;
8964 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
/* Link keys are BR/EDR-only, so the address type is fixed. */
8965 ev.key.addr.type = BDADDR_BREDR;
8966 ev.key.type = key->type;
8967 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8968 ev.key.pin_len = key->pin_len;
8970 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated flag onto the
 * MGMT_LTK_* type reported to userspace. Unknown types fall through
 * to UNAUTHENTICATED.
 */
8973 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8975 switch (ltk->type) {
8977 case SMP_LTK_RESPONDER:
8978 if (ltk->authenticated)
8979 return MGMT_LTK_AUTHENTICATED;
8980 return MGMT_LTK_UNAUTHENTICATED;
/* NOTE(review): the case label for the P256 variant is not visible in
 * this extract — confirm against the full source.
 */
8982 if (ltk->authenticated)
8983 return MGMT_LTK_P256_AUTH;
8984 return MGMT_LTK_P256_UNAUTH;
8985 case SMP_LTK_P256_DEBUG:
8986 return MGMT_LTK_P256_DEBUG;
8989 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY. store_hint is forced to 0 for
 * non-identity random addresses (RPA/NRPA without IRK) since those
 * keys are useless after the peer's address changes.
 */
8992 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
8994 struct mgmt_ev_new_long_term_key ev;
8996 memset(&ev, 0, sizeof(ev));
8998 /* Devices using resolvable or non-resolvable random addresses
8999 * without providing an identity resolving key don't require
9000 * to store long term keys. Their addresses will change the
9003 * Only when a remote device provides an identity address
9004 * make sure the long term key is stored. If the remote
9005 * identity is known, the long term keys are internally
9006 * mapped to the identity address. So allow static random
9007 * and public addresses here.
/* Top two address bits 0b11 mark a static random (identity) address. */
9009 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9010 (key->bdaddr.b[5] & 0xc0) != 0xc0)
9011 ev.store_hint = 0x00;
9013 ev.store_hint = persistent;
9015 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
9016 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
9017 ev.key.type = mgmt_ltk_type(key);
9018 ev.key.enc_size = key->enc_size;
9019 ev.key.ediv = key->ediv;
9020 ev.key.rand = key->rand;
9022 if (key->type == SMP_LTK)
9023 ev.key.initiator = 1;
9025 /* Make sure we copy only the significant bytes based on the
9026 * encryption key size, and set the rest of the value to zeroes.
9028 memcpy(ev.key.val, key->val, key->enc_size);
9029 memset(ev.key.val + key->enc_size, 0,
9030 sizeof(ev.key.val) - key->enc_size);
9032 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK carrying the peer's identity resolving key and
 * the resolvable private address it was seen under.
 */
9035 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9037 struct mgmt_ev_new_irk ev;
9039 memset(&ev, 0, sizeof(ev));
9041 ev.store_hint = persistent;
9043 bacpy(&ev.rpa, &irk->rpa);
9044 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9045 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9046 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9048 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK (signature resolving key). Same identity-
 * address rule as mgmt_new_ltk(): don't hint storage for keys tied to
 * non-identity random addresses.
 */
9051 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
9054 struct mgmt_ev_new_csrk ev;
9056 memset(&ev, 0, sizeof(ev));
9058 /* Devices using resolvable or non-resolvable random addresses
9059 * without providing an identity resolving key don't require
9060 * to store signature resolving keys. Their addresses will change
9061 * the next time around.
9063 * Only when a remote device provides an identity address
9064 * make sure the signature resolving key is stored. So allow
9065 * static random and public addresses here.
9067 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
9068 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
9069 ev.store_hint = 0x00;
9071 ev.store_hint = persistent;
9073 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
9074 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
9075 ev.key.type = csrk->type;
9076 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
9078 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM for connection parameters suggested by
 * the remote. Suppressed unless the address is an identity address,
 * since parameters for transient addresses cannot be stored usefully.
 */
9081 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9082 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9083 u16 max_interval, u16 latency, u16 timeout)
9085 struct mgmt_ev_new_conn_param ev;
9087 if (!hci_is_identity_address(bdaddr, bdaddr_type))
9090 memset(&ev, 0, sizeof(ev));
9091 bacpy(&ev.addr.bdaddr, bdaddr);
9092 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9093 ev.store_hint = store_hint;
/* Wire format is little-endian throughout the MGMT protocol. */
9094 ev.min_interval = cpu_to_le16(min_interval);
9095 ev.max_interval = cpu_to_le16(max_interval);
9096 ev.latency = cpu_to_le16(latency);
9097 ev.timeout = cpu_to_le16(timeout);
9099 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED. The event carries EIR data: for LE
 * connections the raw advertising data seen before connecting, for
 * BR/EDR a synthesized name + class-of-device record.
 */
9102 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9103 u8 *name, u8 name_len)
9105 struct sk_buff *skb;
9106 struct mgmt_ev_device_connected *ev;
9110 /* allocate buff for LE or BR/EDR adv */
9111 if (conn->le_adv_data_len > 0)
9112 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9113 sizeof(*ev) + conn->le_adv_data_len)
9115 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9116 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9117 eir_precalc_len(sizeof(conn->dev_class)));
9119 ev = skb_put(skb, sizeof(*ev));
9120 bacpy(&ev->addr.bdaddr, &conn->dst);
9121 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
/* Flag connections that the local host initiated. */
9124 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9126 ev->flags = __cpu_to_le32(flags);
9128 /* We must ensure that the EIR Data fields are ordered and
9129 * unique. Keep it simple for now and avoid the problem by not
9130 * adding any BR/EDR data to the LE adv.
9132 if (conn->le_adv_data_len > 0) {
9133 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9134 eir_len = conn->le_adv_data_len;
9137 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
/* Only include class-of-device when it is non-zero. */
9139 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9140 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9141 conn->dev_class, sizeof(conn->dev_class));
9144 ev->eir_len = cpu_to_le16(eir_len);
9146 mgmt_event_skb(skb, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command and hand its socket back to the caller via *data so the
 * subsequent DEVICE_DISCONNECTED event can skip that socket.
 */
9149 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9151 struct sock **sk = data;
9153 cmd->cmd_complete(cmd, 0);
9158 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: finish a pending UNPAIR_DEVICE
 * command — notify listeners the device is unpaired, then complete
 * and remove the command.
 */
9161 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9163 struct hci_dev *hdev = data;
9164 struct mgmt_cp_unpair_device *cp = cmd->param;
9166 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9168 cmd->cmd_complete(cmd, 0);
9169 mgmt_pending_remove(cmd);
/* Return whether a power-down is in progress, judged by an in-flight
 * SET_POWERED command. NOTE(review): the lines inspecting cp->val and
 * the return statements are not visible in this extract — confirm the
 * exact predicate against the full source.
 */
9172 bool mgmt_powering_down(struct hci_dev *hdev)
9174 struct mgmt_pending_cmd *cmd;
9175 struct mgmt_mode *cp;
9177 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED and settle related pending
 * commands. If this was the last connection during a power-down,
 * expedite the queued power-off work.
 */
9188 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
9189 u8 link_type, u8 addr_type, u8 reason,
9190 bool mgmt_connected)
9192 struct mgmt_ev_device_disconnected ev;
9193 struct sock *sk = NULL;
9195 /* The connection is still in hci_conn_hash so test for 1
9196 * instead of 0 to know if this is the last one.
9198 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9199 cancel_delayed_work(&hdev->power_off);
9200 queue_work(hdev->req_workqueue, &hdev->power_off.work);
/* Never announced as connected to MGMT -> nothing to report. */
9203 if (!mgmt_connected)
9206 if (link_type != ACL_LINK && link_type != LE_LINK)
/* Capture the disconnect requester's socket so it is skipped below. */
9209 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
9211 bacpy(&ev.addr.bdaddr, bdaddr);
9212 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9215 /* Report disconnects due to suspend */
9216 if (hdev->suspended)
9217 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
9219 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
9224 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: settle pending UNPAIR_DEVICE
 * commands, then complete the matching pending DISCONNECT command
 * (matched on address and address type) with the mapped status.
 */
9228 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9229 u8 link_type, u8 addr_type, u8 status)
9231 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9232 struct mgmt_cp_disconnect *cp;
9233 struct mgmt_pending_cmd *cmd;
9235 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9238 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only respond if the pending command targets this exact address. */
9244 if (bacmp(bdaddr, &cp->addr.bdaddr))
9247 if (cp->addr.type != bdaddr_type)
9250 cmd->cmd_complete(cmd, mgmt_status(status));
9251 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED. Shares the power-down fast-path logic
 * with mgmt_device_disconnected(): if this was the last tracked
 * connection during power-down, run the power-off work immediately.
 */
9254 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9255 u8 addr_type, u8 status)
9257 struct mgmt_ev_connect_failed ev;
9259 /* The connection is still in hci_conn_hash so test for 1
9260 * instead of 0 to know if this is the last one.
9262 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9263 cancel_delayed_work(&hdev->power_off);
9264 queue_work(hdev->req_workqueue, &hdev->power_off.work);
9267 bacpy(&ev.addr.bdaddr, bdaddr);
9268 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9269 ev.status = mgmt_status(status);
9271 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST asking userspace for a BR/EDR PIN;
 * 'secure' indicates a 16-digit PIN is required.
 */
9274 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9276 struct mgmt_ev_pin_code_request ev;
9278 bacpy(&ev.addr.bdaddr, bdaddr);
9279 ev.addr.type = BDADDR_BREDR;
9282 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* HCI confirmed (or rejected) our PIN code reply: complete the pending
 * PIN_CODE_REPLY command with the mapped status.
 */
9285 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9288 struct mgmt_pending_cmd *cmd;
9290 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9294 cmd->cmd_complete(cmd, mgmt_status(status));
9295 mgmt_pending_remove(cmd);
/* Same as mgmt_pin_code_reply_complete() but for the negative reply
 * (MGMT_OP_PIN_CODE_NEG_REPLY).
 */
9298 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9301 struct mgmt_pending_cmd *cmd;
9303 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9307 cmd->cmd_complete(cmd, mgmt_status(status));
9308 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST: ask userspace to confirm a
 * numeric-comparison value during pairing. confirm_hint distinguishes
 * "show and confirm" from "just confirm".
 */
9311 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9312 u8 link_type, u8 addr_type, u32 value,
9315 struct mgmt_ev_user_confirm_request ev;
9317 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9319 bacpy(&ev.addr.bdaddr, bdaddr);
9320 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9321 ev.confirm_hint = confirm_hint;
9322 ev.value = cpu_to_le32(value);
9324 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST: ask userspace to enter the
 * passkey displayed on the remote device.
 */
9328 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9329 u8 link_type, u8 addr_type)
9331 struct mgmt_ev_user_passkey_request ev;
9333 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9335 bacpy(&ev.addr.bdaddr, bdaddr);
9336 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9338 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey reply
 * opcodes: find the pending command for 'opcode' and finish it with
 * the mapped HCI status.
 */
9342 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9343 u8 link_type, u8 addr_type, u8 status,
9346 struct mgmt_pending_cmd *cmd;
9348 cmd = pending_find(opcode, hdev);
9352 cmd->cmd_complete(cmd, mgmt_status(status));
9353 mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
9358 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9359 u8 link_type, u8 addr_type, u8 status)
9361 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9362 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
9365 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9366 u8 link_type, u8 addr_type, u8 status)
9368 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9370 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
9373 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9374 u8 link_type, u8 addr_type, u8 status)
9376 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9377 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
9380 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9381 u8 link_type, u8 addr_type, u8 status)
9383 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
9385 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY: tell userspace which passkey to display
 * and how many digits the remote side has entered so far.
 */
9388 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9389 u8 link_type, u8 addr_type, u32 passkey,
9392 struct mgmt_ev_passkey_notify ev;
9394 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9396 bacpy(&ev.addr.bdaddr, bdaddr);
9397 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9398 ev.passkey = __cpu_to_le32(passkey);
9399 ev.entered = entered;
9401 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Authentication failed: emit MGMT_EV_AUTH_FAILED (skipping the
 * pairing initiator's socket if one is pending) and complete any
 * pending PAIR_DEVICE command for this connection.
 */
9404 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
9406 struct mgmt_ev_auth_failed ev;
9407 struct mgmt_pending_cmd *cmd;
9408 u8 status = mgmt_status(hci_status);
9410 bacpy(&ev.addr.bdaddr, &conn->dst);
9411 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9414 cmd = find_pairing(conn);
9416 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
9417 cmd ? cmd->sk : NULL);
9420 cmd->cmd_complete(cmd, status);
9421 mgmt_pending_remove(cmd);
/* Write Authentication Enable finished. On error, fail all pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY to
 * the controller's HCI_AUTH flag and, if it changed, answer pending
 * commands and emit New Settings.
 */
9425 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
9427 struct cmd_lookup match = { NULL, hdev };
9431 u8 mgmt_err = mgmt_status(status);
9432 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
9433 cmd_status_rsp, &mgmt_err);
9437 if (test_bit(HCI_AUTH, &hdev->flags))
9438 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
9440 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
9442 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
9446 new_settings(hdev, match.sk);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup, taking a reference that the caller must
 * later drop with sock_put().
 */
9452 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9454 struct cmd_lookup *match = data;
9456 if (match->sk == NULL) {
9457 match->sk = cmd->sk;
9458 sock_hold(match->sk);
/* Class-of-device update finished: find the socket that triggered it
 * (via SET_DEV_CLASS, ADD_UUID or REMOVE_UUID) and broadcast the new
 * 3-byte class to everyone else, plus an ext-info-changed event.
 */
9462 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
9465 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
9467 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
9468 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
9469 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
9472 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
9473 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
9474 ext_info_changed(hdev, NULL);
/* Local name update finished: refresh hdev->dev_name, and broadcast
 * LOCAL_NAME_CHANGED unless the write was part of the power-on
 * sequence (in which case listeners will get the full info anyway).
 */
9481 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9483 struct mgmt_cp_set_local_name ev;
9484 struct mgmt_pending_cmd *cmd;
9489 memset(&ev, 0, sizeof(ev));
9490 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9491 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9493 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9495 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9497 /* If this is a HCI command related to powering on the
9498 * HCI dev don't send any mgmt signals.
9500 if (pending_find(MGMT_OP_SET_POWERED, hdev))
/* Skip the requesting socket (if any) in the broadcast. */
9504 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9505 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9506 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Linear search: does the 128-bit 'uuid' appear in the 'uuids' array
 * of 'uuid_count' entries?
 */
9509 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9513 for (i = 0; i < uuid_count; i++) {
9514 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/AD structure in 'eir' and report whether any contained
 * service UUID (16-, 32- or 128-bit, expanded against the Bluetooth
 * base UUID where needed) matches the caller's filter list.
 */
9521 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9525 while (parsed < eir_len) {
9526 u8 field_len = eir[0];
/* Guard against a field length running past the buffer. */
9533 if (eir_len - parsed < field_len + 1)
9537 case EIR_UUID16_ALL:
9538 case EIR_UUID16_SOME:
/* 16-bit UUIDs: splice bytes into the base UUID at offsets 12/13. */
9539 for (i = 0; i + 3 <= field_len; i += 2) {
9540 memcpy(uuid, bluetooth_base_uuid, 16);
9541 uuid[13] = eir[i + 3];
9542 uuid[12] = eir[i + 2];
9543 if (has_uuid(uuid, uuid_count, uuids))
9547 case EIR_UUID32_ALL:
9548 case EIR_UUID32_SOME:
/* 32-bit UUIDs occupy offsets 12..15 of the base UUID. */
9549 for (i = 0; i + 5 <= field_len; i += 4) {
9550 memcpy(uuid, bluetooth_base_uuid, 16);
9551 uuid[15] = eir[i + 5];
9552 uuid[14] = eir[i + 4];
9553 uuid[13] = eir[i + 3];
9554 uuid[12] = eir[i + 2];
9555 if (has_uuid(uuid, uuid_count, uuids))
9559 case EIR_UUID128_ALL:
9560 case EIR_UUID128_SOME:
/* 128-bit UUIDs are copied verbatim. */
9561 for (i = 0; i + 17 <= field_len; i += 16) {
9562 memcpy(uuid, eir + i + 2, 16);
9563 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: length byte plus field_len payload. */
9569 parsed += field_len + 1;
9570 eir += field_len + 1;
/* Schedule an LE scan restart so that strict-duplicate-filter
 * controllers report fresh RSSI values. Skipped when not scanning or
 * when the current scan window is about to expire anyway.
 */
9576 static void restart_le_scan(struct hci_dev *hdev)
9578 /* If controller is not scanning we are done. */
9579 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* Not worth restarting if the scan period ends within the delay. */
9582 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9583 hdev->discovery.scan_start +
9584 hdev->discovery.scan_duration))
9587 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9588 DISCOV_LE_RESTART_DELAY);
9591 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9592 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9594 /* If a RSSI threshold has been specified, and
9595 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9596 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9597 * is set, let it through for further processing, as we might need to
9600 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9601 * the results are also dropped.
9603 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9604 (rssi == HCI_RSSI_INVALID ||
9605 (rssi < hdev->discovery.rssi &&
9606 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9609 if (hdev->discovery.uuid_count != 0) {
9610 /* If a list of UUIDs is provided in filter, results with no
9611 * matching UUID should be dropped.
9613 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9614 hdev->discovery.uuids) &&
9615 !eir_has_uuids(scan_rsp, scan_rsp_len,
9616 hdev->discovery.uuid_count,
9617 hdev->discovery.uuids))
9621 /* If duplicate filtering does not report RSSI changes, then restart
9622 * scanning to ensure updated result with updated RSSI values.
9624 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9625 restart_le_scan(hdev);
9627 /* Validate RSSI value against the RSSI threshold once more. */
9628 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9629 rssi < hdev->discovery.rssi)
9636 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9637 bdaddr_t *bdaddr, u8 addr_type)
9639 struct mgmt_ev_adv_monitor_device_lost ev;
9641 ev.monitor_handle = cpu_to_le16(handle);
9642 bacpy(&ev.addr.bdaddr, bdaddr);
9643 ev.addr.type = addr_type;
9645 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9649 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
9650 struct sk_buff *skb,
9651 struct sock *skip_sk,
9654 struct sk_buff *advmon_skb;
9655 size_t advmon_skb_len;
9656 __le16 *monitor_handle;
9661 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
9662 sizeof(struct mgmt_ev_device_found)) + skb->len;
9663 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
9668 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
9669 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
9670 * store monitor_handle of the matched monitor.
9672 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
9673 *monitor_handle = cpu_to_le16(handle);
9674 skb_put_data(advmon_skb, skb->data, skb->len);
9676 mgmt_event_skb(advmon_skb, skip_sk);
9679 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
9680 bdaddr_t *bdaddr, bool report_device,
9681 struct sk_buff *skb,
9682 struct sock *skip_sk)
9684 struct monitored_device *dev, *tmp;
9685 bool matched = false;
9686 bool notified = false;
9688 /* We have received the Advertisement Report because:
9689 * 1. the kernel has initiated active discovery
9690 * 2. if not, we have pend_le_reports > 0 in which case we are doing
9692 * 3. if none of the above is true, we have one or more active
9693 * Advertisement Monitor
9695 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
9696 * and report ONLY one advertisement per device for the matched Monitor
9697 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
9699 * For case 3, since we are not active scanning and all advertisements
9700 * received are due to a matched Advertisement Monitor, report all
9701 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
9703 if (report_device && !hdev->advmon_pend_notify) {
9704 mgmt_event_skb(skb, skip_sk);
9708 hdev->advmon_pend_notify = false;
9710 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
9711 if (!bacmp(&dev->bdaddr, bdaddr)) {
9714 if (!dev->notified) {
9715 mgmt_send_adv_monitor_device_found(hdev, skb,
9719 dev->notified = true;
9724 hdev->advmon_pend_notify = true;
9727 if (!report_device &&
9728 ((matched && !notified) || !msft_monitor_supported(hdev))) {
9729 /* Handle 0 indicates that we are not active scanning and this
9730 * is a subsequent advertisement report for an already matched
9731 * Advertisement Monitor or the controller offloading support
9734 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
9738 mgmt_event_skb(skb, skip_sk);
9743 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9744 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
9745 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9747 struct sk_buff *skb;
9748 struct mgmt_ev_device_found *ev;
9749 bool report_device = hci_discovery_active(hdev);
9751 /* Don't send events for a non-kernel initiated discovery. With
9752 * LE one exception is if we have pend_le_reports > 0 in which
9753 * case we're doing passive scanning and want these events.
9755 if (!hci_discovery_active(hdev)) {
9756 if (link_type == ACL_LINK)
9758 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
9759 report_device = true;
9760 else if (!hci_is_adv_monitoring(hdev))
9764 if (hdev->discovery.result_filtering) {
9765 /* We are using service discovery */
9766 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
9771 if (hdev->discovery.limited) {
9772 /* Check for limited discoverable bit */
9774 if (!(dev_class[1] & 0x20))
9777 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
9778 if (!flags || !(flags[0] & LE_AD_LIMITED))
9783 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
9784 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9785 sizeof(*ev) + eir_len + scan_rsp_len + 5);
9789 ev = skb_put(skb, sizeof(*ev));
9791 /* In case of device discovery with BR/EDR devices (pre 1.2), the
9792 * RSSI value was reported as 0 when not available. This behavior
9793 * is kept when using device discovery. This is required for full
9794 * backwards compatibility with the API.
9796 * However when using service discovery, the value 127 will be
9797 * returned when the RSSI is not available.
9799 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
9800 link_type == ACL_LINK)
9803 bacpy(&ev->addr.bdaddr, bdaddr);
9804 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9806 ev->flags = cpu_to_le32(flags);
9809 /* Copy EIR or advertising data into event */
9810 skb_put_data(skb, eir, eir_len);
9812 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
9815 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
9817 skb_put_data(skb, eir_cod, sizeof(eir_cod));
9820 if (scan_rsp_len > 0)
9821 /* Append scan response data to event */
9822 skb_put_data(skb, scan_rsp, scan_rsp_len);
9824 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9826 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
9829 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9830 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9832 struct sk_buff *skb;
9833 struct mgmt_ev_device_found *ev;
9837 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9838 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
9840 ev = skb_put(skb, sizeof(*ev));
9841 bacpy(&ev->addr.bdaddr, bdaddr);
9842 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9846 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9848 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9850 ev->eir_len = cpu_to_le16(eir_len);
9851 ev->flags = cpu_to_le32(flags);
9853 mgmt_event_skb(skb, NULL);
9856 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9858 struct mgmt_ev_discovering ev;
9860 bt_dev_dbg(hdev, "discovering %u", discovering);
9862 memset(&ev, 0, sizeof(ev));
9863 ev.type = hdev->discovery.type;
9864 ev.discovering = discovering;
9866 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9869 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9871 struct mgmt_ev_controller_suspend ev;
9873 ev.suspend_state = state;
9874 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9877 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9880 struct mgmt_ev_controller_resume ev;
9882 ev.wake_reason = reason;
9884 bacpy(&ev.addr.bdaddr, bdaddr);
9885 ev.addr.type = addr_type;
9887 memset(&ev.addr, 0, sizeof(ev.addr));
9890 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9893 static struct hci_mgmt_chan chan = {
9894 .channel = HCI_CHANNEL_CONTROL,
9895 .handler_count = ARRAY_SIZE(mgmt_handlers),
9896 .handlers = mgmt_handlers,
9897 .hdev_init = mgmt_init_hdev,
9902 return hci_mgmt_chan_register(&chan);
9905 void mgmt_exit(void)
9907 hci_mgmt_chan_unregister(&chan);