2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
48 #define MGMT_VERSION 1
49 #define MGMT_REVISION 22
51 static const u16 mgmt_commands[] = {
52 MGMT_OP_READ_INDEX_LIST,
55 MGMT_OP_SET_DISCOVERABLE,
56 MGMT_OP_SET_CONNECTABLE,
57 MGMT_OP_SET_FAST_CONNECTABLE,
59 MGMT_OP_SET_LINK_SECURITY,
63 MGMT_OP_SET_DEV_CLASS,
64 MGMT_OP_SET_LOCAL_NAME,
67 MGMT_OP_LOAD_LINK_KEYS,
68 MGMT_OP_LOAD_LONG_TERM_KEYS,
70 MGMT_OP_GET_CONNECTIONS,
71 MGMT_OP_PIN_CODE_REPLY,
72 MGMT_OP_PIN_CODE_NEG_REPLY,
73 MGMT_OP_SET_IO_CAPABILITY,
75 MGMT_OP_CANCEL_PAIR_DEVICE,
76 MGMT_OP_UNPAIR_DEVICE,
77 MGMT_OP_USER_CONFIRM_REPLY,
78 MGMT_OP_USER_CONFIRM_NEG_REPLY,
79 MGMT_OP_USER_PASSKEY_REPLY,
80 MGMT_OP_USER_PASSKEY_NEG_REPLY,
81 MGMT_OP_READ_LOCAL_OOB_DATA,
82 MGMT_OP_ADD_REMOTE_OOB_DATA,
83 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
84 MGMT_OP_START_DISCOVERY,
85 MGMT_OP_STOP_DISCOVERY,
88 MGMT_OP_UNBLOCK_DEVICE,
89 MGMT_OP_SET_DEVICE_ID,
90 MGMT_OP_SET_ADVERTISING,
92 MGMT_OP_SET_STATIC_ADDRESS,
93 MGMT_OP_SET_SCAN_PARAMS,
94 MGMT_OP_SET_SECURE_CONN,
95 MGMT_OP_SET_DEBUG_KEYS,
98 MGMT_OP_GET_CONN_INFO,
99 MGMT_OP_GET_CLOCK_INFO,
101 MGMT_OP_REMOVE_DEVICE,
102 MGMT_OP_LOAD_CONN_PARAM,
103 MGMT_OP_READ_UNCONF_INDEX_LIST,
104 MGMT_OP_READ_CONFIG_INFO,
105 MGMT_OP_SET_EXTERNAL_CONFIG,
106 MGMT_OP_SET_PUBLIC_ADDRESS,
107 MGMT_OP_START_SERVICE_DISCOVERY,
108 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
109 MGMT_OP_READ_EXT_INDEX_LIST,
110 MGMT_OP_READ_ADV_FEATURES,
111 MGMT_OP_ADD_ADVERTISING,
112 MGMT_OP_REMOVE_ADVERTISING,
113 MGMT_OP_GET_ADV_SIZE_INFO,
114 MGMT_OP_START_LIMITED_DISCOVERY,
115 MGMT_OP_READ_EXT_INFO,
116 MGMT_OP_SET_APPEARANCE,
117 MGMT_OP_GET_PHY_CONFIGURATION,
118 MGMT_OP_SET_PHY_CONFIGURATION,
119 MGMT_OP_SET_BLOCKED_KEYS,
120 MGMT_OP_SET_WIDEBAND_SPEECH,
121 MGMT_OP_READ_CONTROLLER_CAP,
122 MGMT_OP_READ_EXP_FEATURES_INFO,
123 MGMT_OP_SET_EXP_FEATURE,
124 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
125 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
126 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
127 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
128 MGMT_OP_GET_DEVICE_FLAGS,
129 MGMT_OP_SET_DEVICE_FLAGS,
130 MGMT_OP_READ_ADV_MONITOR_FEATURES,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
132 MGMT_OP_REMOVE_ADV_MONITOR,
133 MGMT_OP_ADD_EXT_ADV_PARAMS,
134 MGMT_OP_ADD_EXT_ADV_DATA,
135 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 MGMT_OP_SET_MESH_RECEIVER,
137 MGMT_OP_MESH_READ_FEATURES,
139 MGMT_OP_MESH_SEND_CANCEL,
142 static const u16 mgmt_events[] = {
143 MGMT_EV_CONTROLLER_ERROR,
145 MGMT_EV_INDEX_REMOVED,
146 MGMT_EV_NEW_SETTINGS,
147 MGMT_EV_CLASS_OF_DEV_CHANGED,
148 MGMT_EV_LOCAL_NAME_CHANGED,
149 MGMT_EV_NEW_LINK_KEY,
150 MGMT_EV_NEW_LONG_TERM_KEY,
151 MGMT_EV_DEVICE_CONNECTED,
152 MGMT_EV_DEVICE_DISCONNECTED,
153 MGMT_EV_CONNECT_FAILED,
154 MGMT_EV_PIN_CODE_REQUEST,
155 MGMT_EV_USER_CONFIRM_REQUEST,
156 MGMT_EV_USER_PASSKEY_REQUEST,
158 MGMT_EV_DEVICE_FOUND,
160 MGMT_EV_DEVICE_BLOCKED,
161 MGMT_EV_DEVICE_UNBLOCKED,
162 MGMT_EV_DEVICE_UNPAIRED,
163 MGMT_EV_PASSKEY_NOTIFY,
166 MGMT_EV_DEVICE_ADDED,
167 MGMT_EV_DEVICE_REMOVED,
168 MGMT_EV_NEW_CONN_PARAM,
169 MGMT_EV_UNCONF_INDEX_ADDED,
170 MGMT_EV_UNCONF_INDEX_REMOVED,
171 MGMT_EV_NEW_CONFIG_OPTIONS,
172 MGMT_EV_EXT_INDEX_ADDED,
173 MGMT_EV_EXT_INDEX_REMOVED,
174 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
175 MGMT_EV_ADVERTISING_ADDED,
176 MGMT_EV_ADVERTISING_REMOVED,
177 MGMT_EV_EXT_INFO_CHANGED,
178 MGMT_EV_PHY_CONFIGURATION_CHANGED,
179 MGMT_EV_EXP_FEATURE_CHANGED,
180 MGMT_EV_DEVICE_FLAGS_CHANGED,
181 MGMT_EV_ADV_MONITOR_ADDED,
182 MGMT_EV_ADV_MONITOR_REMOVED,
183 MGMT_EV_CONTROLLER_SUSPEND,
184 MGMT_EV_CONTROLLER_RESUME,
185 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
186 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
189 static const u16 mgmt_untrusted_commands[] = {
190 MGMT_OP_READ_INDEX_LIST,
192 MGMT_OP_READ_UNCONF_INDEX_LIST,
193 MGMT_OP_READ_CONFIG_INFO,
194 MGMT_OP_READ_EXT_INDEX_LIST,
195 MGMT_OP_READ_EXT_INFO,
196 MGMT_OP_READ_CONTROLLER_CAP,
197 MGMT_OP_READ_EXP_FEATURES_INFO,
198 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
199 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
202 static const u16 mgmt_untrusted_events[] = {
204 MGMT_EV_INDEX_REMOVED,
205 MGMT_EV_NEW_SETTINGS,
206 MGMT_EV_CLASS_OF_DEV_CHANGED,
207 MGMT_EV_LOCAL_NAME_CHANGED,
208 MGMT_EV_UNCONF_INDEX_ADDED,
209 MGMT_EV_UNCONF_INDEX_REMOVED,
210 MGMT_EV_NEW_CONFIG_OPTIONS,
211 MGMT_EV_EXT_INDEX_ADDED,
212 MGMT_EV_EXT_INDEX_REMOVED,
213 MGMT_EV_EXT_INFO_CHANGED,
214 MGMT_EV_EXP_FEATURE_CHANGED,
217 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
219 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
220 "\x00\x00\x00\x00\x00\x00\x00\x00"
222 /* HCI to MGMT error code conversion table */
223 static const u8 mgmt_status_table[] = {
225 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
226 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
227 MGMT_STATUS_FAILED, /* Hardware Failure */
228 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
229 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
230 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
231 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
232 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
233 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
234 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
235 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
236 MGMT_STATUS_BUSY, /* Command Disallowed */
237 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
238 MGMT_STATUS_REJECTED, /* Rejected Security */
239 MGMT_STATUS_REJECTED, /* Rejected Personal */
240 MGMT_STATUS_TIMEOUT, /* Host Timeout */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
242 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
243 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
244 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
245 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
246 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
247 MGMT_STATUS_BUSY, /* Repeated Attempts */
248 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
249 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
250 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
251 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
252 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
253 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
254 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
255 MGMT_STATUS_FAILED, /* Unspecified Error */
256 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
257 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
258 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
259 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
260 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
261 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
262 MGMT_STATUS_FAILED, /* Unit Link Key Used */
263 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
264 MGMT_STATUS_TIMEOUT, /* Instant Passed */
265 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
266 MGMT_STATUS_FAILED, /* Transaction Collision */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
269 MGMT_STATUS_REJECTED, /* QoS Rejected */
270 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
271 MGMT_STATUS_REJECTED, /* Insufficient Security */
272 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
273 MGMT_STATUS_FAILED, /* Reserved for future use */
274 MGMT_STATUS_BUSY, /* Role Switch Pending */
275 MGMT_STATUS_FAILED, /* Reserved for future use */
276 MGMT_STATUS_FAILED, /* Slot Violation */
277 MGMT_STATUS_FAILED, /* Role Switch Failed */
278 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
279 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
280 MGMT_STATUS_BUSY, /* Host Busy Pairing */
281 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
282 MGMT_STATUS_BUSY, /* Controller Busy */
283 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
284 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
285 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
286 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
287 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
290 static u8 mgmt_errno_status(int err)
294 return MGMT_STATUS_SUCCESS;
296 return MGMT_STATUS_REJECTED;
298 return MGMT_STATUS_INVALID_PARAMS;
300 return MGMT_STATUS_NOT_SUPPORTED;
302 return MGMT_STATUS_BUSY;
304 return MGMT_STATUS_AUTH_FAILED;
306 return MGMT_STATUS_NO_RESOURCES;
308 return MGMT_STATUS_ALREADY_CONNECTED;
310 return MGMT_STATUS_DISCONNECTED;
313 return MGMT_STATUS_FAILED;
316 static u8 mgmt_status(int err)
319 return mgmt_errno_status(err);
321 if (err < ARRAY_SIZE(mgmt_status_table))
322 return mgmt_status_table[err];
324 return MGMT_STATUS_FAILED;
327 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
330 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
335 u16 len, int flag, struct sock *skip_sk)
337 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
342 struct sock *skip_sk)
344 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
345 HCI_SOCK_TRUSTED, skip_sk);
348 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
350 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
354 static u8 le_addr_type(u8 mgmt_addr_type)
356 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
357 return ADDR_LE_DEV_PUBLIC;
359 return ADDR_LE_DEV_RANDOM;
362 void mgmt_fill_version_info(void *ver)
364 struct mgmt_rp_read_version *rp = ver;
366 rp->version = MGMT_VERSION;
367 rp->revision = cpu_to_le16(MGMT_REVISION);
370 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
373 struct mgmt_rp_read_version rp;
375 bt_dev_dbg(hdev, "sock %p", sk);
377 mgmt_fill_version_info(&rp);
379 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
383 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
386 struct mgmt_rp_read_commands *rp;
387 u16 num_commands, num_events;
391 bt_dev_dbg(hdev, "sock %p", sk);
393 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
394 num_commands = ARRAY_SIZE(mgmt_commands);
395 num_events = ARRAY_SIZE(mgmt_events);
397 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
398 num_events = ARRAY_SIZE(mgmt_untrusted_events);
401 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
403 rp = kmalloc(rp_size, GFP_KERNEL);
407 rp->num_commands = cpu_to_le16(num_commands);
408 rp->num_events = cpu_to_le16(num_events);
410 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
411 __le16 *opcode = rp->opcodes;
413 for (i = 0; i < num_commands; i++, opcode++)
414 put_unaligned_le16(mgmt_commands[i], opcode);
416 for (i = 0; i < num_events; i++, opcode++)
417 put_unaligned_le16(mgmt_events[i], opcode);
419 __le16 *opcode = rp->opcodes;
421 for (i = 0; i < num_commands; i++, opcode++)
422 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
424 for (i = 0; i < num_events; i++, opcode++)
425 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
428 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
435 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
438 struct mgmt_rp_read_index_list *rp;
444 bt_dev_dbg(hdev, "sock %p", sk);
446 read_lock(&hci_dev_list_lock);
449 list_for_each_entry(d, &hci_dev_list, list) {
450 if (d->dev_type == HCI_PRIMARY &&
451 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
455 rp_len = sizeof(*rp) + (2 * count);
456 rp = kmalloc(rp_len, GFP_ATOMIC);
458 read_unlock(&hci_dev_list_lock);
463 list_for_each_entry(d, &hci_dev_list, list) {
464 if (hci_dev_test_flag(d, HCI_SETUP) ||
465 hci_dev_test_flag(d, HCI_CONFIG) ||
466 hci_dev_test_flag(d, HCI_USER_CHANNEL))
469 /* Devices marked as raw-only are neither configured
470 * nor unconfigured controllers.
472 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
475 if (d->dev_type == HCI_PRIMARY &&
476 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
477 rp->index[count++] = cpu_to_le16(d->id);
478 bt_dev_dbg(hdev, "Added hci%u", d->id);
482 rp->num_controllers = cpu_to_le16(count);
483 rp_len = sizeof(*rp) + (2 * count);
485 read_unlock(&hci_dev_list_lock);
487 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
495 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
496 void *data, u16 data_len)
498 struct mgmt_rp_read_unconf_index_list *rp;
504 bt_dev_dbg(hdev, "sock %p", sk);
506 read_lock(&hci_dev_list_lock);
509 list_for_each_entry(d, &hci_dev_list, list) {
510 if (d->dev_type == HCI_PRIMARY &&
511 hci_dev_test_flag(d, HCI_UNCONFIGURED))
515 rp_len = sizeof(*rp) + (2 * count);
516 rp = kmalloc(rp_len, GFP_ATOMIC);
518 read_unlock(&hci_dev_list_lock);
523 list_for_each_entry(d, &hci_dev_list, list) {
524 if (hci_dev_test_flag(d, HCI_SETUP) ||
525 hci_dev_test_flag(d, HCI_CONFIG) ||
526 hci_dev_test_flag(d, HCI_USER_CHANNEL))
529 /* Devices marked as raw-only are neither configured
530 * nor unconfigured controllers.
532 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
535 if (d->dev_type == HCI_PRIMARY &&
536 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
537 rp->index[count++] = cpu_to_le16(d->id);
538 bt_dev_dbg(hdev, "Added hci%u", d->id);
542 rp->num_controllers = cpu_to_le16(count);
543 rp_len = sizeof(*rp) + (2 * count);
545 read_unlock(&hci_dev_list_lock);
547 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
548 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
555 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
556 void *data, u16 data_len)
558 struct mgmt_rp_read_ext_index_list *rp;
563 bt_dev_dbg(hdev, "sock %p", sk);
565 read_lock(&hci_dev_list_lock);
568 list_for_each_entry(d, &hci_dev_list, list) {
569 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
573 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
575 read_unlock(&hci_dev_list_lock);
580 list_for_each_entry(d, &hci_dev_list, list) {
581 if (hci_dev_test_flag(d, HCI_SETUP) ||
582 hci_dev_test_flag(d, HCI_CONFIG) ||
583 hci_dev_test_flag(d, HCI_USER_CHANNEL))
586 /* Devices marked as raw-only are neither configured
587 * nor unconfigured controllers.
589 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
592 if (d->dev_type == HCI_PRIMARY) {
593 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
594 rp->entry[count].type = 0x01;
596 rp->entry[count].type = 0x00;
597 } else if (d->dev_type == HCI_AMP) {
598 rp->entry[count].type = 0x02;
603 rp->entry[count].bus = d->bus;
604 rp->entry[count++].index = cpu_to_le16(d->id);
605 bt_dev_dbg(hdev, "Added hci%u", d->id);
608 rp->num_controllers = cpu_to_le16(count);
610 read_unlock(&hci_dev_list_lock);
612 /* If this command is called at least once, then all the
613 * default index and unconfigured index events are disabled
614 * and from now on only extended index events are used.
616 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
617 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
618 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
620 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
621 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
622 struct_size(rp, entry, count));
629 static bool is_configured(struct hci_dev *hdev)
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
643 static __le32 get_missing_options(struct hci_dev *hdev)
647 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
648 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
649 options |= MGMT_OPTION_EXTERNAL_CONFIG;
651 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
652 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
653 !bacmp(&hdev->public_addr, BDADDR_ANY))
654 options |= MGMT_OPTION_PUBLIC_ADDRESS;
656 return cpu_to_le32(options);
659 static int new_options(struct hci_dev *hdev, struct sock *skip)
661 __le32 options = get_missing_options(hdev);
663 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
664 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
667 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
669 __le32 options = get_missing_options(hdev);
671 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
675 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
676 void *data, u16 data_len)
678 struct mgmt_rp_read_config_info rp;
681 bt_dev_dbg(hdev, "sock %p", sk);
685 memset(&rp, 0, sizeof(rp));
686 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
688 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
689 options |= MGMT_OPTION_EXTERNAL_CONFIG;
691 if (hdev->set_bdaddr)
692 options |= MGMT_OPTION_PUBLIC_ADDRESS;
694 rp.supported_options = cpu_to_le32(options);
695 rp.missing_options = get_missing_options(hdev);
697 hci_dev_unlock(hdev);
699 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
703 static u32 get_supported_phys(struct hci_dev *hdev)
705 u32 supported_phys = 0;
707 if (lmp_bredr_capable(hdev)) {
708 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
710 if (hdev->features[0][0] & LMP_3SLOT)
711 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
713 if (hdev->features[0][0] & LMP_5SLOT)
714 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
716 if (lmp_edr_2m_capable(hdev)) {
717 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
719 if (lmp_edr_3slot_capable(hdev))
720 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
722 if (lmp_edr_5slot_capable(hdev))
723 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
725 if (lmp_edr_3m_capable(hdev)) {
726 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev))
729 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
731 if (lmp_edr_5slot_capable(hdev))
732 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
737 if (lmp_le_capable(hdev)) {
738 supported_phys |= MGMT_PHY_LE_1M_TX;
739 supported_phys |= MGMT_PHY_LE_1M_RX;
741 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
742 supported_phys |= MGMT_PHY_LE_2M_TX;
743 supported_phys |= MGMT_PHY_LE_2M_RX;
746 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
747 supported_phys |= MGMT_PHY_LE_CODED_TX;
748 supported_phys |= MGMT_PHY_LE_CODED_RX;
752 return supported_phys;
755 static u32 get_selected_phys(struct hci_dev *hdev)
757 u32 selected_phys = 0;
759 if (lmp_bredr_capable(hdev)) {
760 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
762 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
763 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
765 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
766 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
768 if (lmp_edr_2m_capable(hdev)) {
769 if (!(hdev->pkt_type & HCI_2DH1))
770 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
772 if (lmp_edr_3slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH3))
774 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
776 if (lmp_edr_5slot_capable(hdev) &&
777 !(hdev->pkt_type & HCI_2DH5))
778 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
780 if (lmp_edr_3m_capable(hdev)) {
781 if (!(hdev->pkt_type & HCI_3DH1))
782 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
784 if (lmp_edr_3slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH3))
786 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
788 if (lmp_edr_5slot_capable(hdev) &&
789 !(hdev->pkt_type & HCI_3DH5))
790 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
795 if (lmp_le_capable(hdev)) {
796 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
797 selected_phys |= MGMT_PHY_LE_1M_TX;
799 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
800 selected_phys |= MGMT_PHY_LE_1M_RX;
802 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
803 selected_phys |= MGMT_PHY_LE_2M_TX;
805 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
806 selected_phys |= MGMT_PHY_LE_2M_RX;
808 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
809 selected_phys |= MGMT_PHY_LE_CODED_TX;
811 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
812 selected_phys |= MGMT_PHY_LE_CODED_RX;
815 return selected_phys;
818 static u32 get_configurable_phys(struct hci_dev *hdev)
820 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
821 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
824 static u32 get_supported_settings(struct hci_dev *hdev)
828 settings |= MGMT_SETTING_POWERED;
829 settings |= MGMT_SETTING_BONDABLE;
830 settings |= MGMT_SETTING_DEBUG_KEYS;
831 settings |= MGMT_SETTING_CONNECTABLE;
832 settings |= MGMT_SETTING_DISCOVERABLE;
834 if (lmp_bredr_capable(hdev)) {
835 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
836 settings |= MGMT_SETTING_FAST_CONNECTABLE;
837 settings |= MGMT_SETTING_BREDR;
838 settings |= MGMT_SETTING_LINK_SECURITY;
840 if (lmp_ssp_capable(hdev)) {
841 settings |= MGMT_SETTING_SSP;
842 if (IS_ENABLED(CONFIG_BT_HS))
843 settings |= MGMT_SETTING_HS;
846 if (lmp_sc_capable(hdev))
847 settings |= MGMT_SETTING_SECURE_CONN;
849 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
851 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
854 if (lmp_le_capable(hdev)) {
855 settings |= MGMT_SETTING_LE;
856 settings |= MGMT_SETTING_SECURE_CONN;
857 settings |= MGMT_SETTING_PRIVACY;
858 settings |= MGMT_SETTING_STATIC_ADDRESS;
859 settings |= MGMT_SETTING_ADVERTISING;
862 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
864 settings |= MGMT_SETTING_CONFIGURATION;
866 if (cis_central_capable(hdev))
867 settings |= MGMT_SETTING_CIS_CENTRAL;
869 if (cis_peripheral_capable(hdev))
870 settings |= MGMT_SETTING_CIS_PERIPHERAL;
872 settings |= MGMT_SETTING_PHY_CONFIGURATION;
877 static u32 get_current_settings(struct hci_dev *hdev)
881 if (hdev_is_powered(hdev))
882 settings |= MGMT_SETTING_POWERED;
884 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
885 settings |= MGMT_SETTING_CONNECTABLE;
887 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
888 settings |= MGMT_SETTING_FAST_CONNECTABLE;
890 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
891 settings |= MGMT_SETTING_DISCOVERABLE;
893 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
894 settings |= MGMT_SETTING_BONDABLE;
896 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
897 settings |= MGMT_SETTING_BREDR;
899 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
900 settings |= MGMT_SETTING_LE;
902 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
903 settings |= MGMT_SETTING_LINK_SECURITY;
905 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
906 settings |= MGMT_SETTING_SSP;
908 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
909 settings |= MGMT_SETTING_HS;
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
912 settings |= MGMT_SETTING_ADVERTISING;
914 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
915 settings |= MGMT_SETTING_SECURE_CONN;
917 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
918 settings |= MGMT_SETTING_DEBUG_KEYS;
920 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
921 settings |= MGMT_SETTING_PRIVACY;
923 /* The current setting for static address has two purposes. The
924 * first is to indicate if the static address will be used and
925 * the second is to indicate if it is actually set.
927 * This means if the static address is not configured, this flag
928 * will never be set. If the address is configured, then if the
929 * address is actually used decides if the flag is set or not.
931 * For single mode LE only controllers and dual-mode controllers
932 * with BR/EDR disabled, the existence of the static address will
935 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
936 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
937 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
938 if (bacmp(&hdev->static_addr, BDADDR_ANY))
939 settings |= MGMT_SETTING_STATIC_ADDRESS;
942 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
943 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
945 if (cis_central_capable(hdev))
946 settings |= MGMT_SETTING_CIS_CENTRAL;
948 if (cis_peripheral_capable(hdev))
949 settings |= MGMT_SETTING_CIS_PERIPHERAL;
951 if (bis_capable(hdev))
952 settings |= MGMT_SETTING_ISO_BROADCASTER;
954 if (sync_recv_capable(hdev))
955 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
960 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
962 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
965 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
967 struct mgmt_pending_cmd *cmd;
969 /* If there's a pending mgmt command the flags will not yet have
970 * their final values, so check for this first.
972 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
974 struct mgmt_mode *cp = cmd->param;
976 return LE_AD_GENERAL;
977 else if (cp->val == 0x02)
978 return LE_AD_LIMITED;
980 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
981 return LE_AD_LIMITED;
982 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
983 return LE_AD_GENERAL;
989 bool mgmt_get_connectable(struct hci_dev *hdev)
991 struct mgmt_pending_cmd *cmd;
993 /* If there's a pending mgmt command the flag will not yet have
994 * it's final value, so check for this first.
996 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
998 struct mgmt_mode *cp = cmd->param;
1003 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1006 static int service_cache_sync(struct hci_dev *hdev, void *data)
1008 hci_update_eir_sync(hdev);
1009 hci_update_class_sync(hdev);
1014 static void service_cache_off(struct work_struct *work)
1016 struct hci_dev *hdev = container_of(work, struct hci_dev,
1017 service_cache.work);
1019 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1022 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1025 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1027 /* The generation of a new RPA and programming it into the
1028 * controller happens in the hci_req_enable_advertising()
1031 if (ext_adv_capable(hdev))
1032 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1034 return hci_enable_advertising_sync(hdev);
1037 static void rpa_expired(struct work_struct *work)
1039 struct hci_dev *hdev = container_of(work, struct hci_dev,
1042 bt_dev_dbg(hdev, "");
1044 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1046 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1049 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1052 static void discov_off(struct work_struct *work)
1054 struct hci_dev *hdev = container_of(work, struct hci_dev,
1057 bt_dev_dbg(hdev, "");
1061 /* When discoverable timeout triggers, then just make sure
1062 * the limited discoverable flag is cleared. Even in the case
1063 * of a timeout triggered from general discoverable, it is
1064 * safe to unconditionally clear the flag.
1066 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1067 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1068 hdev->discov_timeout = 0;
1070 hci_update_discoverable(hdev);
1072 mgmt_new_settings(hdev);
1074 hci_dev_unlock(hdev);
1077 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1079 static void mesh_send_complete(struct hci_dev *hdev,
1080 struct mgmt_mesh_tx *mesh_tx, bool silent)
1082 u8 handle = mesh_tx->handle;
1085 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1086 sizeof(handle), NULL);
1088 mgmt_mesh_remove(mesh_tx);
1091 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1093 struct mgmt_mesh_tx *mesh_tx;
1095 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1096 hci_disable_advertising_sync(hdev);
1097 mesh_tx = mgmt_mesh_next(hdev, NULL);
1100 mesh_send_complete(hdev, mesh_tx, false);
1105 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1106 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1107 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1109 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1114 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1115 mesh_send_start_complete);
1118 mesh_send_complete(hdev, mesh_tx, false);
1120 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1123 static void mesh_send_done(struct work_struct *work)
1125 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 mesh_send_done.work);
1128 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1131 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1134 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1136 if (hci_dev_test_flag(hdev, HCI_MGMT))
1139 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1141 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1142 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1143 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1144 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1146 /* Non-mgmt controlled devices get this bit set
1147 * implicitly so that pairing works for them, however
1148 * for mgmt we require user-space to explicitly enable
1151 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1153 hci_dev_set_flag(hdev, HCI_MGMT);
1156 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1157 void *data, u16 data_len)
1159 struct mgmt_rp_read_info rp;
1161 bt_dev_dbg(hdev, "sock %p", sk);
1165 memset(&rp, 0, sizeof(rp));
1167 bacpy(&rp.bdaddr, &hdev->bdaddr);
1169 rp.version = hdev->hci_ver;
1170 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1172 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1173 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1175 memcpy(rp.dev_class, hdev->dev_class, 3);
1177 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1178 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1180 hci_dev_unlock(hdev);
1182 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1186 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1191 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1192 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1193 hdev->dev_class, 3);
1195 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1196 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1199 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1200 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1201 hdev->dev_name, name_len);
1203 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1204 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1205 hdev->short_name, name_len);
1210 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1211 void *data, u16 data_len)
1214 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1217 bt_dev_dbg(hdev, "sock %p", sk);
1219 memset(&buf, 0, sizeof(buf));
1223 bacpy(&rp->bdaddr, &hdev->bdaddr);
1225 rp->version = hdev->hci_ver;
1226 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1228 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1229 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1232 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1233 rp->eir_len = cpu_to_le16(eir_len);
1235 hci_dev_unlock(hdev);
1237 /* If this command is called at least once, then the events
1238 * for class of device and local name changes are disabled
1239 * and only the new extended controller information event
1242 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1243 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1244 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1247 sizeof(*rp) + eir_len);
1250 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1253 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1256 memset(buf, 0, sizeof(buf));
1258 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1259 ev->eir_len = cpu_to_le16(eir_len);
1261 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1262 sizeof(*ev) + eir_len,
1263 HCI_MGMT_EXT_INFO_EVENTS, skip);
1266 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1268 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1270 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1274 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1276 struct mgmt_ev_advertising_added ev;
1278 ev.instance = instance;
1280 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1283 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1286 struct mgmt_ev_advertising_removed ev;
1288 ev.instance = instance;
1290 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1293 static void cancel_adv_timeout(struct hci_dev *hdev)
1295 if (hdev->adv_instance_timeout) {
1296 hdev->adv_instance_timeout = 0;
1297 cancel_delayed_work(&hdev->adv_instance_expire);
1301 /* This function requires the caller holds hdev->lock */
1302 static void restart_le_actions(struct hci_dev *hdev)
1304 struct hci_conn_params *p;
1306 list_for_each_entry(p, &hdev->le_conn_params, list) {
1307 /* Needed for AUTO_OFF case where might not "really"
1308 * have been powered off.
1310 hci_pend_le_list_del_init(p);
1312 switch (p->auto_connect) {
1313 case HCI_AUTO_CONN_DIRECT:
1314 case HCI_AUTO_CONN_ALWAYS:
1315 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1317 case HCI_AUTO_CONN_REPORT:
1318 hci_pend_le_list_add(p, &hdev->pend_le_reports);
1326 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1328 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1330 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1331 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1334 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1336 struct mgmt_pending_cmd *cmd = data;
1337 struct mgmt_mode *cp;
1339 /* Make sure cmd still outstanding. */
1340 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1345 bt_dev_dbg(hdev, "err %d", err);
1350 restart_le_actions(hdev);
1351 hci_update_passive_scan(hdev);
1352 hci_dev_unlock(hdev);
1355 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1357 /* Only call new_setting for power on as power off is deferred
1358 * to hdev->power_off work which does call hci_dev_do_close.
1361 new_settings(hdev, cmd->sk);
1363 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1367 mgmt_pending_remove(cmd);
1370 static int set_powered_sync(struct hci_dev *hdev, void *data)
1372 struct mgmt_pending_cmd *cmd = data;
1373 struct mgmt_mode *cp = cmd->param;
1375 BT_DBG("%s", hdev->name);
1377 return hci_set_powered_sync(hdev, cp->val);
1380 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1383 struct mgmt_mode *cp = data;
1384 struct mgmt_pending_cmd *cmd;
1387 bt_dev_dbg(hdev, "sock %p", sk);
1389 if (cp->val != 0x00 && cp->val != 0x01)
1390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1391 MGMT_STATUS_INVALID_PARAMS);
1395 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1396 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1401 if (!!cp->val == hdev_is_powered(hdev)) {
1402 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1406 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1412 /* Cancel potentially blocking sync operation before power off */
1413 if (cp->val == 0x00) {
1414 __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
1415 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1416 mgmt_set_powered_complete);
1418 /* Use hci_cmd_sync_submit since hdev might not be running */
1419 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1420 mgmt_set_powered_complete);
1424 mgmt_pending_remove(cmd);
1427 hci_dev_unlock(hdev);
1431 int mgmt_new_settings(struct hci_dev *hdev)
1433 return new_settings(hdev, NULL);
1438 struct hci_dev *hdev;
1442 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1444 struct cmd_lookup *match = data;
1446 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1448 list_del(&cmd->list);
1450 if (match->sk == NULL) {
1451 match->sk = cmd->sk;
1452 sock_hold(match->sk);
1455 mgmt_pending_free(cmd);
1458 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1462 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1463 mgmt_pending_remove(cmd);
1466 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1468 if (cmd->cmd_complete) {
1471 cmd->cmd_complete(cmd, *status);
1472 mgmt_pending_remove(cmd);
1477 cmd_status_rsp(cmd, data);
1480 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1482 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1483 cmd->param, cmd->param_len);
1486 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1488 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1489 cmd->param, sizeof(struct mgmt_addr_info));
1492 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1494 if (!lmp_bredr_capable(hdev))
1495 return MGMT_STATUS_NOT_SUPPORTED;
1496 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1497 return MGMT_STATUS_REJECTED;
1499 return MGMT_STATUS_SUCCESS;
1502 static u8 mgmt_le_support(struct hci_dev *hdev)
1504 if (!lmp_le_capable(hdev))
1505 return MGMT_STATUS_NOT_SUPPORTED;
1506 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1507 return MGMT_STATUS_REJECTED;
1509 return MGMT_STATUS_SUCCESS;
1512 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1515 struct mgmt_pending_cmd *cmd = data;
1517 bt_dev_dbg(hdev, "err %d", err);
1519 /* Make sure cmd still outstanding. */
1520 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1526 u8 mgmt_err = mgmt_status(err);
1527 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1528 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1532 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1533 hdev->discov_timeout > 0) {
1534 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1535 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1538 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1539 new_settings(hdev, cmd->sk);
1542 mgmt_pending_remove(cmd);
1543 hci_dev_unlock(hdev);
1546 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1548 BT_DBG("%s", hdev->name);
1550 return hci_update_discoverable_sync(hdev);
1553 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1556 struct mgmt_cp_set_discoverable *cp = data;
1557 struct mgmt_pending_cmd *cmd;
1561 bt_dev_dbg(hdev, "sock %p", sk);
1563 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1564 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_REJECTED);
1568 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1569 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1570 MGMT_STATUS_INVALID_PARAMS);
1572 timeout = __le16_to_cpu(cp->timeout);
1574 /* Disabling discoverable requires that no timeout is set,
1575 * and enabling limited discoverable requires a timeout.
1577 if ((cp->val == 0x00 && timeout > 0) ||
1578 (cp->val == 0x02 && timeout == 0))
1579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580 MGMT_STATUS_INVALID_PARAMS);
1584 if (!hdev_is_powered(hdev) && timeout > 0) {
1585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1586 MGMT_STATUS_NOT_POWERED);
1590 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1597 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_REJECTED);
1603 if (hdev->advertising_paused) {
1604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1609 if (!hdev_is_powered(hdev)) {
1610 bool changed = false;
1612 /* Setting limited discoverable when powered off is
1613 * not a valid operation since it requires a timeout
1614 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1616 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1617 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1621 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1626 err = new_settings(hdev, sk);
1631 /* If the current mode is the same, then just update the timeout
1632 * value with the new value. And if only the timeout gets updated,
1633 * then no need for any HCI transactions.
1635 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1636 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1637 HCI_LIMITED_DISCOVERABLE)) {
1638 cancel_delayed_work(&hdev->discov_off);
1639 hdev->discov_timeout = timeout;
1641 if (cp->val && hdev->discov_timeout > 0) {
1642 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1643 queue_delayed_work(hdev->req_workqueue,
1644 &hdev->discov_off, to);
1647 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1651 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1657 /* Cancel any potential discoverable timeout that might be
1658 * still active and store new timeout value. The arming of
1659 * the timeout happens in the complete handler.
1661 cancel_delayed_work(&hdev->discov_off);
1662 hdev->discov_timeout = timeout;
1665 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1667 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1669 /* Limited discoverable mode */
1670 if (cp->val == 0x02)
1671 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1673 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1675 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1676 mgmt_set_discoverable_complete);
1679 mgmt_pending_remove(cmd);
1682 hci_dev_unlock(hdev);
1686 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1689 struct mgmt_pending_cmd *cmd = data;
1691 bt_dev_dbg(hdev, "err %d", err);
1693 /* Make sure cmd still outstanding. */
1694 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1700 u8 mgmt_err = mgmt_status(err);
1701 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1705 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1706 new_settings(hdev, cmd->sk);
1710 mgmt_pending_remove(cmd);
1712 hci_dev_unlock(hdev);
1715 static int set_connectable_update_settings(struct hci_dev *hdev,
1716 struct sock *sk, u8 val)
1718 bool changed = false;
1721 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1725 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1727 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1728 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1731 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1736 hci_update_scan(hdev);
1737 hci_update_passive_scan(hdev);
1738 return new_settings(hdev, sk);
1744 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1746 BT_DBG("%s", hdev->name);
1748 return hci_update_connectable_sync(hdev);
1751 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1754 struct mgmt_mode *cp = data;
1755 struct mgmt_pending_cmd *cmd;
1758 bt_dev_dbg(hdev, "sock %p", sk);
1760 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1761 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1763 MGMT_STATUS_REJECTED);
1765 if (cp->val != 0x00 && cp->val != 0x01)
1766 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1767 MGMT_STATUS_INVALID_PARAMS);
1771 if (!hdev_is_powered(hdev)) {
1772 err = set_connectable_update_settings(hdev, sk, cp->val);
1776 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1777 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1783 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1790 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1792 if (hdev->discov_timeout > 0)
1793 cancel_delayed_work(&hdev->discov_off);
1795 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1796 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1797 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1800 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1801 mgmt_set_connectable_complete);
1804 mgmt_pending_remove(cmd);
1807 hci_dev_unlock(hdev);
1811 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1814 struct mgmt_mode *cp = data;
1818 bt_dev_dbg(hdev, "sock %p", sk);
1820 if (cp->val != 0x00 && cp->val != 0x01)
1821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1822 MGMT_STATUS_INVALID_PARAMS);
1827 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1829 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1831 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1836 /* In limited privacy mode the change of bondable mode
1837 * may affect the local advertising address.
1839 hci_update_discoverable(hdev);
1841 err = new_settings(hdev, sk);
1845 hci_dev_unlock(hdev);
1849 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1852 struct mgmt_mode *cp = data;
1853 struct mgmt_pending_cmd *cmd;
1857 bt_dev_dbg(hdev, "sock %p", sk);
1859 status = mgmt_bredr_support(hdev);
1861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1864 if (cp->val != 0x00 && cp->val != 0x01)
1865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1866 MGMT_STATUS_INVALID_PARAMS);
1870 if (!hdev_is_powered(hdev)) {
1871 bool changed = false;
1873 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1874 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1878 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1883 err = new_settings(hdev, sk);
1888 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1889 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1896 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1897 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1901 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1907 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1909 mgmt_pending_remove(cmd);
1914 hci_dev_unlock(hdev);
1918 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1920 struct cmd_lookup match = { NULL, hdev };
1921 struct mgmt_pending_cmd *cmd = data;
1922 struct mgmt_mode *cp = cmd->param;
1923 u8 enable = cp->val;
1926 /* Make sure cmd still outstanding. */
1927 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1931 u8 mgmt_err = mgmt_status(err);
1933 if (enable && hci_dev_test_and_clear_flag(hdev,
1935 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1936 new_settings(hdev, NULL);
1939 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1945 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1947 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1950 changed = hci_dev_test_and_clear_flag(hdev,
1953 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1956 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1959 new_settings(hdev, match.sk);
1964 hci_update_eir_sync(hdev);
1967 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1969 struct mgmt_pending_cmd *cmd = data;
1970 struct mgmt_mode *cp = cmd->param;
1971 bool changed = false;
1975 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1977 err = hci_write_ssp_mode_sync(hdev, cp->val);
1979 if (!err && changed)
1980 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1985 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1987 struct mgmt_mode *cp = data;
1988 struct mgmt_pending_cmd *cmd;
1992 bt_dev_dbg(hdev, "sock %p", sk);
1994 status = mgmt_bredr_support(hdev);
1996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1998 if (!lmp_ssp_capable(hdev))
1999 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2000 MGMT_STATUS_NOT_SUPPORTED);
2002 if (cp->val != 0x00 && cp->val != 0x01)
2003 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2004 MGMT_STATUS_INVALID_PARAMS);
2008 if (!hdev_is_powered(hdev)) {
2012 changed = !hci_dev_test_and_set_flag(hdev,
2015 changed = hci_dev_test_and_clear_flag(hdev,
2018 changed = hci_dev_test_and_clear_flag(hdev,
2021 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2024 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2029 err = new_settings(hdev, sk);
2034 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2035 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2040 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2041 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2045 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2049 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2053 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2054 MGMT_STATUS_FAILED);
2057 mgmt_pending_remove(cmd);
2061 hci_dev_unlock(hdev);
2065 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2067 struct mgmt_mode *cp = data;
2072 bt_dev_dbg(hdev, "sock %p", sk);
2074 if (!IS_ENABLED(CONFIG_BT_HS))
2075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2076 MGMT_STATUS_NOT_SUPPORTED);
2078 status = mgmt_bredr_support(hdev);
2080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2082 if (!lmp_ssp_capable(hdev))
2083 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2084 MGMT_STATUS_NOT_SUPPORTED);
2086 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2087 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2088 MGMT_STATUS_REJECTED);
2090 if (cp->val != 0x00 && cp->val != 0x01)
2091 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2092 MGMT_STATUS_INVALID_PARAMS);
2096 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2097 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2103 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2105 if (hdev_is_powered(hdev)) {
2106 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2107 MGMT_STATUS_REJECTED);
2111 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2114 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2119 err = new_settings(hdev, sk);
2122 hci_dev_unlock(hdev);
2126 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2128 struct cmd_lookup match = { NULL, hdev };
2129 u8 status = mgmt_status(err);
2131 bt_dev_dbg(hdev, "err %d", err);
2134 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2139 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2141 new_settings(hdev, match.sk);
2147 static int set_le_sync(struct hci_dev *hdev, void *data)
2149 struct mgmt_pending_cmd *cmd = data;
2150 struct mgmt_mode *cp = cmd->param;
2155 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2157 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2158 hci_disable_advertising_sync(hdev);
2160 if (ext_adv_capable(hdev))
2161 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2163 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2166 err = hci_write_le_host_supported_sync(hdev, val, 0);
2168 /* Make sure the controller has a good default for
2169 * advertising data. Restrict the update to when LE
2170 * has actually been enabled. During power on, the
2171 * update in powered_update_hci will take care of it.
2173 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2174 if (ext_adv_capable(hdev)) {
2177 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2179 hci_update_scan_rsp_data_sync(hdev, 0x00);
2181 hci_update_adv_data_sync(hdev, 0x00);
2182 hci_update_scan_rsp_data_sync(hdev, 0x00);
2185 hci_update_passive_scan(hdev);
2191 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2193 struct mgmt_pending_cmd *cmd = data;
2194 u8 status = mgmt_status(err);
2195 struct sock *sk = cmd->sk;
2198 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2199 cmd_status_rsp, &status);
2203 mgmt_pending_remove(cmd);
2204 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2207 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2209 struct mgmt_pending_cmd *cmd = data;
2210 struct mgmt_cp_set_mesh *cp = cmd->param;
2211 size_t len = cmd->param_len;
2213 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2216 hci_dev_set_flag(hdev, HCI_MESH);
2218 hci_dev_clear_flag(hdev, HCI_MESH);
2222 /* If filters don't fit, forward all adv pkts */
2223 if (len <= sizeof(hdev->mesh_ad_types))
2224 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2226 hci_update_passive_scan_sync(hdev);
2230 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2232 struct mgmt_cp_set_mesh *cp = data;
2233 struct mgmt_pending_cmd *cmd;
2236 bt_dev_dbg(hdev, "sock %p", sk);
2238 if (!lmp_le_capable(hdev) ||
2239 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2240 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2241 MGMT_STATUS_NOT_SUPPORTED);
2243 if (cp->enable != 0x00 && cp->enable != 0x01)
2244 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2245 MGMT_STATUS_INVALID_PARAMS);
2249 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2253 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2257 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2258 MGMT_STATUS_FAILED);
2261 mgmt_pending_remove(cmd);
2264 hci_dev_unlock(hdev);
2268 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2270 struct mgmt_mesh_tx *mesh_tx = data;
2271 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2272 unsigned long mesh_send_interval;
2273 u8 mgmt_err = mgmt_status(err);
2275 /* Report any errors here, but don't report completion */
2278 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2279 /* Send Complete Error Code for handle */
2280 mesh_send_complete(hdev, mesh_tx, false);
2284 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2285 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2286 mesh_send_interval);
2289 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2291 struct mgmt_mesh_tx *mesh_tx = data;
2292 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2293 struct adv_info *adv, *next_instance;
2294 u8 instance = hdev->le_num_of_adv_sets + 1;
2295 u16 timeout, duration;
2298 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2299 return MGMT_STATUS_BUSY;
2302 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2303 adv = hci_add_adv_instance(hdev, instance, 0,
2304 send->adv_data_len, send->adv_data,
2307 HCI_ADV_TX_POWER_NO_PREFERENCE,
2308 hdev->le_adv_min_interval,
2309 hdev->le_adv_max_interval,
2313 mesh_tx->instance = instance;
2317 if (hdev->cur_adv_instance == instance) {
2318 /* If the currently advertised instance is being changed then
2319 * cancel the current advertising and schedule the next
2320 * instance. If there is only one instance then the overridden
2321 * advertising data will be visible right away.
2323 cancel_adv_timeout(hdev);
2325 next_instance = hci_get_next_instance(hdev, instance);
2327 instance = next_instance->instance;
2330 } else if (hdev->adv_instance_timeout) {
2331 /* Immediately advertise the new instance if no other, or
2332 * let it go naturally from queue if ADV is already happening
2338 return hci_schedule_adv_instance_sync(hdev, instance, true);
2343 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2345 struct mgmt_rp_mesh_read_features *rp = data;
2347 if (rp->used_handles >= rp->max_handles)
2350 rp->handles[rp->used_handles++] = mesh_tx->handle;
2353 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2354 void *data, u16 len)
2356 struct mgmt_rp_mesh_read_features rp;
2358 if (!lmp_le_capable(hdev) ||
2359 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2360 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2361 MGMT_STATUS_NOT_SUPPORTED);
2363 memset(&rp, 0, sizeof(rp));
2364 rp.index = cpu_to_le16(hdev->id);
2365 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2366 rp.max_handles = MESH_HANDLES_MAX;
2371 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2373 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2374 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2376 hci_dev_unlock(hdev);
2380 static int send_cancel(struct hci_dev *hdev, void *data)
2382 struct mgmt_pending_cmd *cmd = data;
2383 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2384 struct mgmt_mesh_tx *mesh_tx;
2386 if (!cancel->handle) {
2388 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2391 mesh_send_complete(hdev, mesh_tx, false);
2394 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2396 if (mesh_tx && mesh_tx->sk == cmd->sk)
2397 mesh_send_complete(hdev, mesh_tx, false);
2400 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2402 mgmt_pending_free(cmd);
2407 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2408 void *data, u16 len)
2410 struct mgmt_pending_cmd *cmd;
2413 if (!lmp_le_capable(hdev) ||
2414 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2416 MGMT_STATUS_NOT_SUPPORTED);
2418 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2419 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2420 MGMT_STATUS_REJECTED);
2423 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2427 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2430 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2431 MGMT_STATUS_FAILED);
2434 mgmt_pending_free(cmd);
2437 hci_dev_unlock(hdev);
2441 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2443 struct mgmt_mesh_tx *mesh_tx;
2444 struct mgmt_cp_mesh_send *send = data;
2445 struct mgmt_rp_mesh_read_features rp;
2449 if (!lmp_le_capable(hdev) ||
2450 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2452 MGMT_STATUS_NOT_SUPPORTED);
2453 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2454 len <= MGMT_MESH_SEND_SIZE ||
2455 len > (MGMT_MESH_SEND_SIZE + 31))
2456 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2457 MGMT_STATUS_REJECTED);
2461 memset(&rp, 0, sizeof(rp));
2462 rp.max_handles = MESH_HANDLES_MAX;
2464 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2466 if (rp.max_handles <= rp.used_handles) {
2467 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2472 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2473 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2478 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2479 mesh_send_start_complete);
2482 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2483 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2484 MGMT_STATUS_FAILED);
2488 mgmt_mesh_remove(mesh_tx);
2491 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2493 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2494 &mesh_tx->handle, 1);
2498 hci_dev_unlock(hdev);
2502 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2504 struct mgmt_mode *cp = data;
2505 struct mgmt_pending_cmd *cmd;
2509 bt_dev_dbg(hdev, "sock %p", sk);
2511 if (!lmp_le_capable(hdev))
2512 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2513 MGMT_STATUS_NOT_SUPPORTED);
2515 if (cp->val != 0x00 && cp->val != 0x01)
2516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2517 MGMT_STATUS_INVALID_PARAMS);
2519 /* Bluetooth single mode LE only controllers or dual-mode
2520 * controllers configured as LE only devices, do not allow
2521 * switching LE off. These have either LE enabled explicitly
2522 * or BR/EDR has been previously switched off.
2524 * When trying to enable an already enabled LE, then gracefully
2525 * send a positive response. Trying to disable it however will
2526 * result into rejection.
2528 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2529 if (cp->val == 0x01)
2530 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2532 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2533 MGMT_STATUS_REJECTED);
2539 enabled = lmp_host_le_capable(hdev);
2541 if (!hdev_is_powered(hdev) || val == enabled) {
2542 bool changed = false;
2544 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2545 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2549 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2550 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2554 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2559 err = new_settings(hdev, sk);
2564 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2565 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2566 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2571 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2575 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2580 MGMT_STATUS_FAILED);
2583 mgmt_pending_remove(cmd);
2587 hci_dev_unlock(hdev);
2591 /* This is a helper function to test for pending mgmt commands that can
2592 * cause CoD or EIR HCI commands. We can only allow one such pending
2593 * mgmt command at a time since otherwise we cannot easily track what
2594 * the current values are, will be, and based on that calculate if a new
2595 * HCI command needs to be sent and if yes with what value.
2597 static bool pending_eir_or_class(struct hci_dev *hdev)
2599 struct mgmt_pending_cmd *cmd;
2601 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2602 switch (cmd->opcode) {
2603 case MGMT_OP_ADD_UUID:
2604 case MGMT_OP_REMOVE_UUID:
2605 case MGMT_OP_SET_DEV_CLASS:
2606 case MGMT_OP_SET_POWERED:
2614 static const u8 bluetooth_base_uuid[] = {
2615 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2616 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2619 static u8 get_uuid_size(const u8 *uuid)
2623 if (memcmp(uuid, bluetooth_base_uuid, 12))
2626 val = get_unaligned_le32(&uuid[12]);
2633 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2635 struct mgmt_pending_cmd *cmd = data;
2637 bt_dev_dbg(hdev, "err %d", err);
2639 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2640 mgmt_status(err), hdev->dev_class, 3);
2642 mgmt_pending_free(cmd);
2645 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2649 err = hci_update_class_sync(hdev);
2653 return hci_update_eir_sync(hdev);
2656 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2658 struct mgmt_cp_add_uuid *cp = data;
2659 struct mgmt_pending_cmd *cmd;
2660 struct bt_uuid *uuid;
2663 bt_dev_dbg(hdev, "sock %p", sk);
2667 if (pending_eir_or_class(hdev)) {
2668 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2673 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2679 memcpy(uuid->uuid, cp->uuid, 16);
2680 uuid->svc_hint = cp->svc_hint;
2681 uuid->size = get_uuid_size(cp->uuid);
2683 list_add_tail(&uuid->list, &hdev->uuids);
2685 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2691 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2693 mgmt_pending_free(cmd);
2698 hci_dev_unlock(hdev);
2702 static bool enable_service_cache(struct hci_dev *hdev)
2704 if (!hdev_is_powered(hdev))
2707 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2708 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2716 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2720 err = hci_update_class_sync(hdev);
2724 return hci_update_eir_sync(hdev);
2727 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2730 struct mgmt_cp_remove_uuid *cp = data;
2731 struct mgmt_pending_cmd *cmd;
2732 struct bt_uuid *match, *tmp;
2733 static const u8 bt_uuid_any[] = {
2734 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2738 bt_dev_dbg(hdev, "sock %p", sk);
2742 if (pending_eir_or_class(hdev)) {
2743 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2748 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2749 hci_uuids_clear(hdev);
2751 if (enable_service_cache(hdev)) {
2752 err = mgmt_cmd_complete(sk, hdev->id,
2753 MGMT_OP_REMOVE_UUID,
2754 0, hdev->dev_class, 3);
2763 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2764 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2767 list_del(&match->list);
2773 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2774 MGMT_STATUS_INVALID_PARAMS);
2779 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2785 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2786 mgmt_class_complete);
2788 mgmt_pending_free(cmd);
2791 hci_dev_unlock(hdev);
2795 static int set_class_sync(struct hci_dev *hdev, void *data)
2799 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2800 cancel_delayed_work_sync(&hdev->service_cache);
2801 err = hci_update_eir_sync(hdev);
2807 return hci_update_class_sync(hdev);
2810 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2813 struct mgmt_cp_set_dev_class *cp = data;
2814 struct mgmt_pending_cmd *cmd;
2817 bt_dev_dbg(hdev, "sock %p", sk);
2819 if (!lmp_bredr_capable(hdev))
2820 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2821 MGMT_STATUS_NOT_SUPPORTED);
2825 if (pending_eir_or_class(hdev)) {
2826 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2831 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2832 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2833 MGMT_STATUS_INVALID_PARAMS);
2837 hdev->major_class = cp->major;
2838 hdev->minor_class = cp->minor;
2840 if (!hdev_is_powered(hdev)) {
2841 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2842 hdev->dev_class, 3);
2846 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2852 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2853 mgmt_class_complete);
2855 mgmt_pending_free(cmd);
2858 hci_dev_unlock(hdev);
2862 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2865 struct mgmt_cp_load_link_keys *cp = data;
2866 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2867 sizeof(struct mgmt_link_key_info));
2868 u16 key_count, expected_len;
2872 bt_dev_dbg(hdev, "sock %p", sk);
2874 if (!lmp_bredr_capable(hdev))
2875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2876 MGMT_STATUS_NOT_SUPPORTED);
2878 key_count = __le16_to_cpu(cp->key_count);
2879 if (key_count > max_key_count) {
2880 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2882 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2883 MGMT_STATUS_INVALID_PARAMS);
2886 expected_len = struct_size(cp, keys, key_count);
2887 if (expected_len != len) {
2888 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2890 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2891 MGMT_STATUS_INVALID_PARAMS);
2894 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2896 MGMT_STATUS_INVALID_PARAMS);
2898 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2901 for (i = 0; i < key_count; i++) {
2902 struct mgmt_link_key_info *key = &cp->keys[i];
2904 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2905 if (key->type > 0x08)
2906 return mgmt_cmd_status(sk, hdev->id,
2907 MGMT_OP_LOAD_LINK_KEYS,
2908 MGMT_STATUS_INVALID_PARAMS);
2913 hci_link_keys_clear(hdev);
2916 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2918 changed = hci_dev_test_and_clear_flag(hdev,
2919 HCI_KEEP_DEBUG_KEYS);
2922 new_settings(hdev, NULL);
2924 for (i = 0; i < key_count; i++) {
2925 struct mgmt_link_key_info *key = &cp->keys[i];
2927 if (hci_is_blocked_key(hdev,
2928 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2930 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2935 /* Always ignore debug keys and require a new pairing if
2936 * the user wants to use them.
2938 if (key->type == HCI_LK_DEBUG_COMBINATION)
2941 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2942 key->type, key->pin_len, NULL);
2945 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2947 hci_dev_unlock(hdev);
2952 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2953 u8 addr_type, struct sock *skip_sk)
2955 struct mgmt_ev_device_unpaired ev;
2957 bacpy(&ev.addr.bdaddr, bdaddr);
2958 ev.addr.type = addr_type;
2960 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2964 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2966 struct mgmt_pending_cmd *cmd = data;
2967 struct mgmt_cp_unpair_device *cp = cmd->param;
2970 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2972 cmd->cmd_complete(cmd, err);
2973 mgmt_pending_free(cmd);
2976 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2978 struct mgmt_pending_cmd *cmd = data;
2979 struct mgmt_cp_unpair_device *cp = cmd->param;
2980 struct hci_conn *conn;
2982 if (cp->addr.type == BDADDR_BREDR)
2983 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2986 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2987 le_addr_type(cp->addr.type));
2992 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2995 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2998 struct mgmt_cp_unpair_device *cp = data;
2999 struct mgmt_rp_unpair_device rp;
3000 struct hci_conn_params *params;
3001 struct mgmt_pending_cmd *cmd;
3002 struct hci_conn *conn;
3006 memset(&rp, 0, sizeof(rp));
3007 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3008 rp.addr.type = cp->addr.type;
3010 if (!bdaddr_type_is_valid(cp->addr.type))
3011 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3012 MGMT_STATUS_INVALID_PARAMS,
3015 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3016 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3017 MGMT_STATUS_INVALID_PARAMS,
3022 if (!hdev_is_powered(hdev)) {
3023 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3024 MGMT_STATUS_NOT_POWERED, &rp,
3029 if (cp->addr.type == BDADDR_BREDR) {
3030 /* If disconnection is requested, then look up the
3031 * connection. If the remote device is connected, it
3032 * will be later used to terminate the link.
3034 * Setting it to NULL explicitly will cause no
3035 * termination of the link.
3038 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3043 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3045 err = mgmt_cmd_complete(sk, hdev->id,
3046 MGMT_OP_UNPAIR_DEVICE,
3047 MGMT_STATUS_NOT_PAIRED, &rp,
3055 /* LE address type */
3056 addr_type = le_addr_type(cp->addr.type);
3058 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3059 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3061 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3062 MGMT_STATUS_NOT_PAIRED, &rp,
3067 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3069 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3074 /* Defer clearing up the connection parameters until closing to
3075 * give a chance of keeping them if a repairing happens.
3077 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3079 /* Disable auto-connection parameters if present */
3080 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3082 if (params->explicit_connect)
3083 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3085 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3088 /* If disconnection is not requested, then clear the connection
3089 * variable so that the link is not terminated.
3091 if (!cp->disconnect)
3095 /* If the connection variable is set, then termination of the
3096 * link is requested.
3099 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3101 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3105 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3112 cmd->cmd_complete = addr_cmd_complete;
3114 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3115 unpair_device_complete);
3117 mgmt_pending_free(cmd);
3120 hci_dev_unlock(hdev);
3124 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3127 struct mgmt_cp_disconnect *cp = data;
3128 struct mgmt_rp_disconnect rp;
3129 struct mgmt_pending_cmd *cmd;
3130 struct hci_conn *conn;
3133 bt_dev_dbg(hdev, "sock %p", sk);
3135 memset(&rp, 0, sizeof(rp));
3136 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3137 rp.addr.type = cp->addr.type;
3139 if (!bdaddr_type_is_valid(cp->addr.type))
3140 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3141 MGMT_STATUS_INVALID_PARAMS,
3146 if (!test_bit(HCI_UP, &hdev->flags)) {
3147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3148 MGMT_STATUS_NOT_POWERED, &rp,
3153 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3154 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3155 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3159 if (cp->addr.type == BDADDR_BREDR)
3160 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3163 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3164 le_addr_type(cp->addr.type));
3166 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3167 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3168 MGMT_STATUS_NOT_CONNECTED, &rp,
3173 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3179 cmd->cmd_complete = generic_cmd_complete;
3181 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3183 mgmt_pending_remove(cmd);
3186 hci_dev_unlock(hdev);
3190 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3192 switch (link_type) {
3194 switch (addr_type) {
3195 case ADDR_LE_DEV_PUBLIC:
3196 return BDADDR_LE_PUBLIC;
3199 /* Fallback to LE Random address type */
3200 return BDADDR_LE_RANDOM;
3204 /* Fallback to BR/EDR type */
3205 return BDADDR_BREDR;
3209 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3212 struct mgmt_rp_get_connections *rp;
3217 bt_dev_dbg(hdev, "sock %p", sk);
3221 if (!hdev_is_powered(hdev)) {
3222 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3223 MGMT_STATUS_NOT_POWERED);
3228 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3229 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3233 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3240 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3241 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3243 bacpy(&rp->addr[i].bdaddr, &c->dst);
3244 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3245 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3250 rp->conn_count = cpu_to_le16(i);
3252 /* Recalculate length in case of filtered SCO connections, etc */
3253 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3254 struct_size(rp, addr, i));
3259 hci_dev_unlock(hdev);
3263 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3264 struct mgmt_cp_pin_code_neg_reply *cp)
3266 struct mgmt_pending_cmd *cmd;
3269 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3274 cmd->cmd_complete = addr_cmd_complete;
3276 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3277 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3279 mgmt_pending_remove(cmd);
3284 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3287 struct hci_conn *conn;
3288 struct mgmt_cp_pin_code_reply *cp = data;
3289 struct hci_cp_pin_code_reply reply;
3290 struct mgmt_pending_cmd *cmd;
3293 bt_dev_dbg(hdev, "sock %p", sk);
3297 if (!hdev_is_powered(hdev)) {
3298 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3299 MGMT_STATUS_NOT_POWERED);
3303 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3305 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3306 MGMT_STATUS_NOT_CONNECTED);
3310 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3311 struct mgmt_cp_pin_code_neg_reply ncp;
3313 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3315 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3317 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3319 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3320 MGMT_STATUS_INVALID_PARAMS);
3325 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3331 cmd->cmd_complete = addr_cmd_complete;
3333 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3334 reply.pin_len = cp->pin_len;
3335 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3337 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3339 mgmt_pending_remove(cmd);
3342 hci_dev_unlock(hdev);
3346 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3349 struct mgmt_cp_set_io_capability *cp = data;
3351 bt_dev_dbg(hdev, "sock %p", sk);
3353 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3355 MGMT_STATUS_INVALID_PARAMS);
3359 hdev->io_capability = cp->io_capability;
3361 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3363 hci_dev_unlock(hdev);
3365 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3369 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3371 struct hci_dev *hdev = conn->hdev;
3372 struct mgmt_pending_cmd *cmd;
3374 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3375 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3378 if (cmd->user_data != conn)
3387 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3389 struct mgmt_rp_pair_device rp;
3390 struct hci_conn *conn = cmd->user_data;
3393 bacpy(&rp.addr.bdaddr, &conn->dst);
3394 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3396 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3397 status, &rp, sizeof(rp));
3399 /* So we don't get further callbacks for this connection */
3400 conn->connect_cfm_cb = NULL;
3401 conn->security_cfm_cb = NULL;
3402 conn->disconn_cfm_cb = NULL;
3404 hci_conn_drop(conn);
3406 /* The device is paired so there is no need to remove
3407 * its connection parameters anymore.
3409 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3416 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3418 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3419 struct mgmt_pending_cmd *cmd;
3421 cmd = find_pairing(conn);
3423 cmd->cmd_complete(cmd, status);
3424 mgmt_pending_remove(cmd);
3428 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3430 struct mgmt_pending_cmd *cmd;
3432 BT_DBG("status %u", status);
3434 cmd = find_pairing(conn);
3436 BT_DBG("Unable to find a pending command");
3440 cmd->cmd_complete(cmd, mgmt_status(status));
3441 mgmt_pending_remove(cmd);
3444 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3446 struct mgmt_pending_cmd *cmd;
3448 BT_DBG("status %u", status);
3453 cmd = find_pairing(conn);
3455 BT_DBG("Unable to find a pending command");
3459 cmd->cmd_complete(cmd, mgmt_status(status));
3460 mgmt_pending_remove(cmd);
3463 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3466 struct mgmt_cp_pair_device *cp = data;
3467 struct mgmt_rp_pair_device rp;
3468 struct mgmt_pending_cmd *cmd;
3469 u8 sec_level, auth_type;
3470 struct hci_conn *conn;
3473 bt_dev_dbg(hdev, "sock %p", sk);
3475 memset(&rp, 0, sizeof(rp));
3476 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3477 rp.addr.type = cp->addr.type;
3479 if (!bdaddr_type_is_valid(cp->addr.type))
3480 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3481 MGMT_STATUS_INVALID_PARAMS,
3484 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3485 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3486 MGMT_STATUS_INVALID_PARAMS,
3491 if (!hdev_is_powered(hdev)) {
3492 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3493 MGMT_STATUS_NOT_POWERED, &rp,
3498 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3499 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3500 MGMT_STATUS_ALREADY_PAIRED, &rp,
3505 sec_level = BT_SECURITY_MEDIUM;
3506 auth_type = HCI_AT_DEDICATED_BONDING;
3508 if (cp->addr.type == BDADDR_BREDR) {
3509 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3510 auth_type, CONN_REASON_PAIR_DEVICE);
3512 u8 addr_type = le_addr_type(cp->addr.type);
3513 struct hci_conn_params *p;
3515 /* When pairing a new device, it is expected to remember
3516 * this device for future connections. Adding the connection
3517 * parameter information ahead of time allows tracking
3518 * of the peripheral preferred values and will speed up any
3519 * further connection establishment.
3521 * If connection parameters already exist, then they
3522 * will be kept and this function does nothing.
3524 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3526 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3527 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3529 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3530 sec_level, HCI_LE_CONN_TIMEOUT,
3531 CONN_REASON_PAIR_DEVICE);
3537 if (PTR_ERR(conn) == -EBUSY)
3538 status = MGMT_STATUS_BUSY;
3539 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3540 status = MGMT_STATUS_NOT_SUPPORTED;
3541 else if (PTR_ERR(conn) == -ECONNREFUSED)
3542 status = MGMT_STATUS_REJECTED;
3544 status = MGMT_STATUS_CONNECT_FAILED;
3546 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3547 status, &rp, sizeof(rp));
3551 if (conn->connect_cfm_cb) {
3552 hci_conn_drop(conn);
3553 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3554 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3558 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3561 hci_conn_drop(conn);
3565 cmd->cmd_complete = pairing_complete;
3567 /* For LE, just connecting isn't a proof that the pairing finished */
3568 if (cp->addr.type == BDADDR_BREDR) {
3569 conn->connect_cfm_cb = pairing_complete_cb;
3570 conn->security_cfm_cb = pairing_complete_cb;
3571 conn->disconn_cfm_cb = pairing_complete_cb;
3573 conn->connect_cfm_cb = le_pairing_complete_cb;
3574 conn->security_cfm_cb = le_pairing_complete_cb;
3575 conn->disconn_cfm_cb = le_pairing_complete_cb;
3578 conn->io_capability = cp->io_cap;
3579 cmd->user_data = hci_conn_get(conn);
3581 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3582 hci_conn_security(conn, sec_level, auth_type, true)) {
3583 cmd->cmd_complete(cmd, 0);
3584 mgmt_pending_remove(cmd);
3590 hci_dev_unlock(hdev);
3594 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3597 struct mgmt_addr_info *addr = data;
3598 struct mgmt_pending_cmd *cmd;
3599 struct hci_conn *conn;
3602 bt_dev_dbg(hdev, "sock %p", sk);
3606 if (!hdev_is_powered(hdev)) {
3607 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3608 MGMT_STATUS_NOT_POWERED);
3612 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3614 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3615 MGMT_STATUS_INVALID_PARAMS);
3619 conn = cmd->user_data;
3621 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3622 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3623 MGMT_STATUS_INVALID_PARAMS);
3627 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3628 mgmt_pending_remove(cmd);
3630 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3631 addr, sizeof(*addr));
3633 /* Since user doesn't want to proceed with the connection, abort any
3634 * ongoing pairing and then terminate the link if it was created
3635 * because of the pair device action.
3637 if (addr->type == BDADDR_BREDR)
3638 hci_remove_link_key(hdev, &addr->bdaddr);
3640 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3641 le_addr_type(addr->type));
3643 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3644 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3647 hci_dev_unlock(hdev);
3651 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3652 struct mgmt_addr_info *addr, u16 mgmt_op,
3653 u16 hci_op, __le32 passkey)
3655 struct mgmt_pending_cmd *cmd;
3656 struct hci_conn *conn;
3661 if (!hdev_is_powered(hdev)) {
3662 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3663 MGMT_STATUS_NOT_POWERED, addr,
3668 if (addr->type == BDADDR_BREDR)
3669 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3671 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3672 le_addr_type(addr->type));
3675 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3676 MGMT_STATUS_NOT_CONNECTED, addr,
3681 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3682 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3684 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3685 MGMT_STATUS_SUCCESS, addr,
3688 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3689 MGMT_STATUS_FAILED, addr,
3695 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3701 cmd->cmd_complete = addr_cmd_complete;
3703 /* Continue with pairing via HCI */
3704 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3705 struct hci_cp_user_passkey_reply cp;
3707 bacpy(&cp.bdaddr, &addr->bdaddr);
3708 cp.passkey = passkey;
3709 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3711 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3715 mgmt_pending_remove(cmd);
3718 hci_dev_unlock(hdev);
3722 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3723 void *data, u16 len)
3725 struct mgmt_cp_pin_code_neg_reply *cp = data;
3727 bt_dev_dbg(hdev, "sock %p", sk);
3729 return user_pairing_resp(sk, hdev, &cp->addr,
3730 MGMT_OP_PIN_CODE_NEG_REPLY,
3731 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3734 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3737 struct mgmt_cp_user_confirm_reply *cp = data;
3739 bt_dev_dbg(hdev, "sock %p", sk);
3741 if (len != sizeof(*cp))
3742 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3743 MGMT_STATUS_INVALID_PARAMS);
3745 return user_pairing_resp(sk, hdev, &cp->addr,
3746 MGMT_OP_USER_CONFIRM_REPLY,
3747 HCI_OP_USER_CONFIRM_REPLY, 0);
3750 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3751 void *data, u16 len)
3753 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3755 bt_dev_dbg(hdev, "sock %p", sk);
3757 return user_pairing_resp(sk, hdev, &cp->addr,
3758 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3759 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3762 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3765 struct mgmt_cp_user_passkey_reply *cp = data;
3767 bt_dev_dbg(hdev, "sock %p", sk);
3769 return user_pairing_resp(sk, hdev, &cp->addr,
3770 MGMT_OP_USER_PASSKEY_REPLY,
3771 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3774 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3775 void *data, u16 len)
3777 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3779 bt_dev_dbg(hdev, "sock %p", sk);
3781 return user_pairing_resp(sk, hdev, &cp->addr,
3782 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3783 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3786 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3788 struct adv_info *adv_instance;
3790 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3794 /* stop if current instance doesn't need to be changed */
3795 if (!(adv_instance->flags & flags))
3798 cancel_adv_timeout(hdev);
3800 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3804 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3809 static int name_changed_sync(struct hci_dev *hdev, void *data)
3811 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3814 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3816 struct mgmt_pending_cmd *cmd = data;
3817 struct mgmt_cp_set_local_name *cp = cmd->param;
3818 u8 status = mgmt_status(err);
3820 bt_dev_dbg(hdev, "err %d", err);
3822 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3826 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3829 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3832 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3833 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3836 mgmt_pending_remove(cmd);
3839 static int set_name_sync(struct hci_dev *hdev, void *data)
3841 if (lmp_bredr_capable(hdev)) {
3842 hci_update_name_sync(hdev);
3843 hci_update_eir_sync(hdev);
3846 /* The name is stored in the scan response data and so
3847 * no need to update the advertising data here.
3849 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3850 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3855 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3858 struct mgmt_cp_set_local_name *cp = data;
3859 struct mgmt_pending_cmd *cmd;
3862 bt_dev_dbg(hdev, "sock %p", sk);
3866 /* If the old values are the same as the new ones just return a
3867 * direct command complete event.
3869 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3870 !memcmp(hdev->short_name, cp->short_name,
3871 sizeof(hdev->short_name))) {
3872 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3877 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3879 if (!hdev_is_powered(hdev)) {
3880 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3882 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3887 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3888 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3889 ext_info_changed(hdev, sk);
3894 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3898 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3902 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3903 MGMT_STATUS_FAILED);
3906 mgmt_pending_remove(cmd);
3911 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3914 hci_dev_unlock(hdev);
3918 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3920 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3923 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3926 struct mgmt_cp_set_appearance *cp = data;
3930 bt_dev_dbg(hdev, "sock %p", sk);
3932 if (!lmp_le_capable(hdev))
3933 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3934 MGMT_STATUS_NOT_SUPPORTED);
3936 appearance = le16_to_cpu(cp->appearance);
3940 if (hdev->appearance != appearance) {
3941 hdev->appearance = appearance;
3943 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3944 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3947 ext_info_changed(hdev, sk);
3950 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3953 hci_dev_unlock(hdev);
3958 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3959 void *data, u16 len)
3961 struct mgmt_rp_get_phy_configuration rp;
3963 bt_dev_dbg(hdev, "sock %p", sk);
3967 memset(&rp, 0, sizeof(rp));
3969 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3970 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3971 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3973 hci_dev_unlock(hdev);
3975 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3979 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3981 struct mgmt_ev_phy_configuration_changed ev;
3983 memset(&ev, 0, sizeof(ev));
3985 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3987 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3991 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3993 struct mgmt_pending_cmd *cmd = data;
3994 struct sk_buff *skb = cmd->skb;
3995 u8 status = mgmt_status(err);
3997 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4002 status = MGMT_STATUS_FAILED;
4003 else if (IS_ERR(skb))
4004 status = mgmt_status(PTR_ERR(skb));
4006 status = mgmt_status(skb->data[0]);
4009 bt_dev_dbg(hdev, "status %d", status);
4012 mgmt_cmd_status(cmd->sk, hdev->id,
4013 MGMT_OP_SET_PHY_CONFIGURATION, status);
4015 mgmt_cmd_complete(cmd->sk, hdev->id,
4016 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4019 mgmt_phy_configuration_changed(hdev, cmd->sk);
4022 if (skb && !IS_ERR(skb))
4025 mgmt_pending_remove(cmd);
4028 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4030 struct mgmt_pending_cmd *cmd = data;
4031 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4032 struct hci_cp_le_set_default_phy cp_phy;
4033 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4035 memset(&cp_phy, 0, sizeof(cp_phy));
4037 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4038 cp_phy.all_phys |= 0x01;
4040 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4041 cp_phy.all_phys |= 0x02;
4043 if (selected_phys & MGMT_PHY_LE_1M_TX)
4044 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4046 if (selected_phys & MGMT_PHY_LE_2M_TX)
4047 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4049 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4050 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4052 if (selected_phys & MGMT_PHY_LE_1M_RX)
4053 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4055 if (selected_phys & MGMT_PHY_LE_2M_RX)
4056 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4058 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4059 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4061 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4062 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4067 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4068 void *data, u16 len)
4070 struct mgmt_cp_set_phy_configuration *cp = data;
4071 struct mgmt_pending_cmd *cmd;
4072 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4073 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4074 bool changed = false;
4077 bt_dev_dbg(hdev, "sock %p", sk);
4079 configurable_phys = get_configurable_phys(hdev);
4080 supported_phys = get_supported_phys(hdev);
4081 selected_phys = __le32_to_cpu(cp->selected_phys);
4083 if (selected_phys & ~supported_phys)
4084 return mgmt_cmd_status(sk, hdev->id,
4085 MGMT_OP_SET_PHY_CONFIGURATION,
4086 MGMT_STATUS_INVALID_PARAMS);
4088 unconfigure_phys = supported_phys & ~configurable_phys;
4090 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4091 return mgmt_cmd_status(sk, hdev->id,
4092 MGMT_OP_SET_PHY_CONFIGURATION,
4093 MGMT_STATUS_INVALID_PARAMS);
4095 if (selected_phys == get_selected_phys(hdev))
4096 return mgmt_cmd_complete(sk, hdev->id,
4097 MGMT_OP_SET_PHY_CONFIGURATION,
4102 if (!hdev_is_powered(hdev)) {
4103 err = mgmt_cmd_status(sk, hdev->id,
4104 MGMT_OP_SET_PHY_CONFIGURATION,
4105 MGMT_STATUS_REJECTED);
4109 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4110 err = mgmt_cmd_status(sk, hdev->id,
4111 MGMT_OP_SET_PHY_CONFIGURATION,
4116 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4117 pkt_type |= (HCI_DH3 | HCI_DM3);
4119 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4121 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4122 pkt_type |= (HCI_DH5 | HCI_DM5);
4124 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4126 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4127 pkt_type &= ~HCI_2DH1;
4129 pkt_type |= HCI_2DH1;
4131 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4132 pkt_type &= ~HCI_2DH3;
4134 pkt_type |= HCI_2DH3;
4136 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4137 pkt_type &= ~HCI_2DH5;
4139 pkt_type |= HCI_2DH5;
4141 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4142 pkt_type &= ~HCI_3DH1;
4144 pkt_type |= HCI_3DH1;
4146 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4147 pkt_type &= ~HCI_3DH3;
4149 pkt_type |= HCI_3DH3;
4151 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4152 pkt_type &= ~HCI_3DH5;
4154 pkt_type |= HCI_3DH5;
4156 if (pkt_type != hdev->pkt_type) {
4157 hdev->pkt_type = pkt_type;
4161 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4162 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4164 mgmt_phy_configuration_changed(hdev, sk);
4166 err = mgmt_cmd_complete(sk, hdev->id,
4167 MGMT_OP_SET_PHY_CONFIGURATION,
4173 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4178 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4179 set_default_phy_complete);
4182 err = mgmt_cmd_status(sk, hdev->id,
4183 MGMT_OP_SET_PHY_CONFIGURATION,
4184 MGMT_STATUS_FAILED);
4187 mgmt_pending_remove(cmd);
4191 hci_dev_unlock(hdev);
4196 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4199 int err = MGMT_STATUS_SUCCESS;
4200 struct mgmt_cp_set_blocked_keys *keys = data;
4201 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4202 sizeof(struct mgmt_blocked_key_info));
4203 u16 key_count, expected_len;
4206 bt_dev_dbg(hdev, "sock %p", sk);
4208 key_count = __le16_to_cpu(keys->key_count);
4209 if (key_count > max_key_count) {
4210 bt_dev_err(hdev, "too big key_count value %u", key_count);
4211 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4212 MGMT_STATUS_INVALID_PARAMS);
4215 expected_len = struct_size(keys, keys, key_count);
4216 if (expected_len != len) {
4217 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4219 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4220 MGMT_STATUS_INVALID_PARAMS);
4225 hci_blocked_keys_clear(hdev);
4227 for (i = 0; i < key_count; ++i) {
4228 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4231 err = MGMT_STATUS_NO_RESOURCES;
4235 b->type = keys->keys[i].type;
4236 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4237 list_add_rcu(&b->list, &hdev->blocked_keys);
4239 hci_dev_unlock(hdev);
4241 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4245 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4246 void *data, u16 len)
4248 struct mgmt_mode *cp = data;
4250 bool changed = false;
4252 bt_dev_dbg(hdev, "sock %p", sk);
4254 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4255 return mgmt_cmd_status(sk, hdev->id,
4256 MGMT_OP_SET_WIDEBAND_SPEECH,
4257 MGMT_STATUS_NOT_SUPPORTED);
4259 if (cp->val != 0x00 && cp->val != 0x01)
4260 return mgmt_cmd_status(sk, hdev->id,
4261 MGMT_OP_SET_WIDEBAND_SPEECH,
4262 MGMT_STATUS_INVALID_PARAMS);
4266 if (hdev_is_powered(hdev) &&
4267 !!cp->val != hci_dev_test_flag(hdev,
4268 HCI_WIDEBAND_SPEECH_ENABLED)) {
4269 err = mgmt_cmd_status(sk, hdev->id,
4270 MGMT_OP_SET_WIDEBAND_SPEECH,
4271 MGMT_STATUS_REJECTED);
4276 changed = !hci_dev_test_and_set_flag(hdev,
4277 HCI_WIDEBAND_SPEECH_ENABLED);
4279 changed = hci_dev_test_and_clear_flag(hdev,
4280 HCI_WIDEBAND_SPEECH_ENABLED);
4282 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4287 err = new_settings(hdev, sk);
4290 hci_dev_unlock(hdev);
4294 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4295 void *data, u16 data_len)
4298 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4301 u8 tx_power_range[2];
4303 bt_dev_dbg(hdev, "sock %p", sk);
4305 memset(&buf, 0, sizeof(buf));
4309 /* When the Read Simple Pairing Options command is supported, then
4310 * the remote public key validation is supported.
4312 * Alternatively, when Microsoft extensions are available, they can
4313 * indicate support for public key validation as well.
4315 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4316 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4318 flags |= 0x02; /* Remote public key validation (LE) */
4320 /* When the Read Encryption Key Size command is supported, then the
4321 * encryption key size is enforced.
4323 if (hdev->commands[20] & 0x10)
4324 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4326 flags |= 0x08; /* Encryption key size enforcement (LE) */
4328 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4331 /* When the Read Simple Pairing Options command is supported, then
4332 * also max encryption key size information is provided.
4334 if (hdev->commands[41] & 0x08)
4335 cap_len = eir_append_le16(rp->cap, cap_len,
4336 MGMT_CAP_MAX_ENC_KEY_SIZE,
4337 hdev->max_enc_key_size);
4339 cap_len = eir_append_le16(rp->cap, cap_len,
4340 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4341 SMP_MAX_ENC_KEY_SIZE);
4343 /* Append the min/max LE tx power parameters if we were able to fetch
4344 * it from the controller
4346 if (hdev->commands[38] & 0x80) {
4347 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4348 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4349 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4353 rp->cap_len = cpu_to_le16(cap_len);
4355 hci_dev_unlock(hdev);
4357 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4358 rp, sizeof(*rp) + cap_len);
4361 #ifdef CONFIG_BT_FEATURE_DEBUG
4362 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4363 static const u8 debug_uuid[16] = {
4364 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4365 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4369 /* 330859bc-7506-492d-9370-9a6f0614037f */
4370 static const u8 quality_report_uuid[16] = {
4371 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4372 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4375 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4376 static const u8 offload_codecs_uuid[16] = {
4377 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4378 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4381 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4382 static const u8 le_simultaneous_roles_uuid[16] = {
4383 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4384 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4387 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4388 static const u8 rpa_resolution_uuid[16] = {
4389 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4390 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4393 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4394 static const u8 iso_socket_uuid[16] = {
4395 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4396 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4399 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4400 static const u8 mgmt_mesh_uuid[16] = {
4401 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4402 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4405 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4406 void *data, u16 data_len)
4408 struct mgmt_rp_read_exp_features_info *rp;
4414 bt_dev_dbg(hdev, "sock %p", sk);
4416 /* Enough space for 7 features */
4417 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4418 rp = kzalloc(len, GFP_KERNEL);
4422 #ifdef CONFIG_BT_FEATURE_DEBUG
4424 flags = bt_dbg_get() ? BIT(0) : 0;
4426 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4427 rp->features[idx].flags = cpu_to_le32(flags);
4432 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4433 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4438 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4439 rp->features[idx].flags = cpu_to_le32(flags);
4443 if (hdev && ll_privacy_capable(hdev)) {
4444 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4445 flags = BIT(0) | BIT(1);
4449 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4450 rp->features[idx].flags = cpu_to_le32(flags);
4454 if (hdev && (aosp_has_quality_report(hdev) ||
4455 hdev->set_quality_report)) {
4456 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4461 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4462 rp->features[idx].flags = cpu_to_le32(flags);
4466 if (hdev && hdev->get_data_path_id) {
4467 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4472 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4473 rp->features[idx].flags = cpu_to_le32(flags);
4477 if (IS_ENABLED(CONFIG_BT_LE)) {
4478 flags = iso_enabled() ? BIT(0) : 0;
4479 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4480 rp->features[idx].flags = cpu_to_le32(flags);
4484 if (hdev && lmp_le_capable(hdev)) {
4485 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4490 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4491 rp->features[idx].flags = cpu_to_le32(flags);
4495 rp->feature_count = cpu_to_le16(idx);
4497 /* After reading the experimental features information, enable
4498 * the events to update client on any future change.
4500 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4502 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4503 MGMT_OP_READ_EXP_FEATURES_INFO,
4504 0, rp, sizeof(*rp) + (20 * idx));
4510 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4513 struct mgmt_ev_exp_feature_changed ev;
4515 memset(&ev, 0, sizeof(ev));
4516 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4517 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4519 // Do we need to be atomic with the conn_flags?
4520 if (enabled && privacy_mode_capable(hdev))
4521 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4523 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4525 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4527 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4531 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4532 bool enabled, struct sock *skip)
4534 struct mgmt_ev_exp_feature_changed ev;
4536 memset(&ev, 0, sizeof(ev));
4537 memcpy(ev.uuid, uuid, 16);
4538 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4540 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4542 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4545 #define EXP_FEAT(_uuid, _set_func) \
4548 .set_func = _set_func, \
4551 /* The zero key uuid is special. Multiple exp features are set through it. */
4552 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4553 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4555 struct mgmt_rp_set_exp_feature rp;
4557 memset(rp.uuid, 0, 16);
4558 rp.flags = cpu_to_le32(0);
4560 #ifdef CONFIG_BT_FEATURE_DEBUG
4562 bool changed = bt_dbg_get();
4567 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4571 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4574 changed = hci_dev_test_and_clear_flag(hdev,
4575 HCI_ENABLE_LL_PRIVACY);
4577 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4581 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4583 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4584 MGMT_OP_SET_EXP_FEATURE, 0,
4588 #ifdef CONFIG_BT_FEATURE_DEBUG
4589 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4590 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4592 struct mgmt_rp_set_exp_feature rp;
4597 /* Command requires to use the non-controller index */
4599 return mgmt_cmd_status(sk, hdev->id,
4600 MGMT_OP_SET_EXP_FEATURE,
4601 MGMT_STATUS_INVALID_INDEX);
4603 /* Parameters are limited to a single octet */
4604 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4605 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4606 MGMT_OP_SET_EXP_FEATURE,
4607 MGMT_STATUS_INVALID_PARAMS);
4609 /* Only boolean on/off is supported */
4610 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4611 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4612 MGMT_OP_SET_EXP_FEATURE,
4613 MGMT_STATUS_INVALID_PARAMS);
4615 val = !!cp->param[0];
4616 changed = val ? !bt_dbg_get() : bt_dbg_get();
4619 memcpy(rp.uuid, debug_uuid, 16);
4620 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4622 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4624 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4625 MGMT_OP_SET_EXP_FEATURE, 0,
4629 exp_feature_changed(hdev, debug_uuid, val, sk);
4635 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4636 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4638 struct mgmt_rp_set_exp_feature rp;
4642 /* Command requires to use the controller index */
4644 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4645 MGMT_OP_SET_EXP_FEATURE,
4646 MGMT_STATUS_INVALID_INDEX);
4648 /* Parameters are limited to a single octet */
4649 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4650 return mgmt_cmd_status(sk, hdev->id,
4651 MGMT_OP_SET_EXP_FEATURE,
4652 MGMT_STATUS_INVALID_PARAMS);
4654 /* Only boolean on/off is supported */
4655 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4656 return mgmt_cmd_status(sk, hdev->id,
4657 MGMT_OP_SET_EXP_FEATURE,
4658 MGMT_STATUS_INVALID_PARAMS);
4660 val = !!cp->param[0];
4663 changed = !hci_dev_test_and_set_flag(hdev,
4664 HCI_MESH_EXPERIMENTAL);
4666 hci_dev_clear_flag(hdev, HCI_MESH);
4667 changed = hci_dev_test_and_clear_flag(hdev,
4668 HCI_MESH_EXPERIMENTAL);
4671 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4672 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4674 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4676 err = mgmt_cmd_complete(sk, hdev->id,
4677 MGMT_OP_SET_EXP_FEATURE, 0,
4681 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4686 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4687 struct mgmt_cp_set_exp_feature *cp,
4690 struct mgmt_rp_set_exp_feature rp;
4695 /* Command requires to use the controller index */
4697 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4698 MGMT_OP_SET_EXP_FEATURE,
4699 MGMT_STATUS_INVALID_INDEX);
4701 /* Changes can only be made when controller is powered down */
4702 if (hdev_is_powered(hdev))
4703 return mgmt_cmd_status(sk, hdev->id,
4704 MGMT_OP_SET_EXP_FEATURE,
4705 MGMT_STATUS_REJECTED);
4707 /* Parameters are limited to a single octet */
4708 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4709 return mgmt_cmd_status(sk, hdev->id,
4710 MGMT_OP_SET_EXP_FEATURE,
4711 MGMT_STATUS_INVALID_PARAMS);
4713 /* Only boolean on/off is supported */
4714 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4715 return mgmt_cmd_status(sk, hdev->id,
4716 MGMT_OP_SET_EXP_FEATURE,
4717 MGMT_STATUS_INVALID_PARAMS);
4719 val = !!cp->param[0];
4722 changed = !hci_dev_test_and_set_flag(hdev,
4723 HCI_ENABLE_LL_PRIVACY);
4724 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4726 /* Enable LL privacy + supported settings changed */
4727 flags = BIT(0) | BIT(1);
4729 changed = hci_dev_test_and_clear_flag(hdev,
4730 HCI_ENABLE_LL_PRIVACY);
4732 /* Disable LL privacy + supported settings changed */
4736 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4737 rp.flags = cpu_to_le32(flags);
4739 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4741 err = mgmt_cmd_complete(sk, hdev->id,
4742 MGMT_OP_SET_EXP_FEATURE, 0,
4746 exp_ll_privacy_feature_changed(val, hdev, sk);
4751 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4752 struct mgmt_cp_set_exp_feature *cp,
4755 struct mgmt_rp_set_exp_feature rp;
4759 /* Command requires to use a valid controller index */
4761 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4762 MGMT_OP_SET_EXP_FEATURE,
4763 MGMT_STATUS_INVALID_INDEX);
4765 /* Parameters are limited to a single octet */
4766 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4767 return mgmt_cmd_status(sk, hdev->id,
4768 MGMT_OP_SET_EXP_FEATURE,
4769 MGMT_STATUS_INVALID_PARAMS);
4771 /* Only boolean on/off is supported */
4772 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4773 return mgmt_cmd_status(sk, hdev->id,
4774 MGMT_OP_SET_EXP_FEATURE,
4775 MGMT_STATUS_INVALID_PARAMS);
4777 hci_req_sync_lock(hdev);
4779 val = !!cp->param[0];
4780 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4782 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4783 err = mgmt_cmd_status(sk, hdev->id,
4784 MGMT_OP_SET_EXP_FEATURE,
4785 MGMT_STATUS_NOT_SUPPORTED);
4786 goto unlock_quality_report;
4790 if (hdev->set_quality_report)
4791 err = hdev->set_quality_report(hdev, val);
4793 err = aosp_set_quality_report(hdev, val);
4796 err = mgmt_cmd_status(sk, hdev->id,
4797 MGMT_OP_SET_EXP_FEATURE,
4798 MGMT_STATUS_FAILED);
4799 goto unlock_quality_report;
4803 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4805 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4808 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4810 memcpy(rp.uuid, quality_report_uuid, 16);
4811 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4812 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4814 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4818 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4820 unlock_quality_report:
4821 hci_req_sync_unlock(hdev);
4825 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4826 struct mgmt_cp_set_exp_feature *cp,
4831 struct mgmt_rp_set_exp_feature rp;
4833 /* Command requires to use a valid controller index */
4835 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4836 MGMT_OP_SET_EXP_FEATURE,
4837 MGMT_STATUS_INVALID_INDEX);
4839 /* Parameters are limited to a single octet */
4840 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4841 return mgmt_cmd_status(sk, hdev->id,
4842 MGMT_OP_SET_EXP_FEATURE,
4843 MGMT_STATUS_INVALID_PARAMS);
4845 /* Only boolean on/off is supported */
4846 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4847 return mgmt_cmd_status(sk, hdev->id,
4848 MGMT_OP_SET_EXP_FEATURE,
4849 MGMT_STATUS_INVALID_PARAMS);
4851 val = !!cp->param[0];
4852 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4854 if (!hdev->get_data_path_id) {
4855 return mgmt_cmd_status(sk, hdev->id,
4856 MGMT_OP_SET_EXP_FEATURE,
4857 MGMT_STATUS_NOT_SUPPORTED);
4862 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4864 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4867 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4870 memcpy(rp.uuid, offload_codecs_uuid, 16);
4871 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4872 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4873 err = mgmt_cmd_complete(sk, hdev->id,
4874 MGMT_OP_SET_EXP_FEATURE, 0,
4878 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4883 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4884 struct mgmt_cp_set_exp_feature *cp,
4889 struct mgmt_rp_set_exp_feature rp;
4891 /* Command requires to use a valid controller index */
4893 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4894 MGMT_OP_SET_EXP_FEATURE,
4895 MGMT_STATUS_INVALID_INDEX);
4897 /* Parameters are limited to a single octet */
4898 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4899 return mgmt_cmd_status(sk, hdev->id,
4900 MGMT_OP_SET_EXP_FEATURE,
4901 MGMT_STATUS_INVALID_PARAMS);
4903 /* Only boolean on/off is supported */
4904 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4905 return mgmt_cmd_status(sk, hdev->id,
4906 MGMT_OP_SET_EXP_FEATURE,
4907 MGMT_STATUS_INVALID_PARAMS);
4909 val = !!cp->param[0];
4910 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4912 if (!hci_dev_le_state_simultaneous(hdev)) {
4913 return mgmt_cmd_status(sk, hdev->id,
4914 MGMT_OP_SET_EXP_FEATURE,
4915 MGMT_STATUS_NOT_SUPPORTED);
4920 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4922 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4925 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4928 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4929 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4930 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4931 err = mgmt_cmd_complete(sk, hdev->id,
4932 MGMT_OP_SET_EXP_FEATURE, 0,
4936 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4942 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4943 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4945 struct mgmt_rp_set_exp_feature rp;
4946 bool val, changed = false;
4949 /* Command requires to use the non-controller index */
4951 return mgmt_cmd_status(sk, hdev->id,
4952 MGMT_OP_SET_EXP_FEATURE,
4953 MGMT_STATUS_INVALID_INDEX);
4955 /* Parameters are limited to a single octet */
4956 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4957 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4958 MGMT_OP_SET_EXP_FEATURE,
4959 MGMT_STATUS_INVALID_PARAMS);
4961 /* Only boolean on/off is supported */
4962 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4963 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4964 MGMT_OP_SET_EXP_FEATURE,
4965 MGMT_STATUS_INVALID_PARAMS);
4967 val = cp->param[0] ? true : false;
4976 memcpy(rp.uuid, iso_socket_uuid, 16);
4977 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4979 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4981 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4982 MGMT_OP_SET_EXP_FEATURE, 0,
4986 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4992 static const struct mgmt_exp_feature {
4994 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4995 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4996 } exp_features[] = {
4997 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4998 #ifdef CONFIG_BT_FEATURE_DEBUG
4999 EXP_FEAT(debug_uuid, set_debug_func),
5001 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5002 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5003 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5004 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5005 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5007 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5010 /* end with a null feature */
5011 EXP_FEAT(NULL, NULL)
5014 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5015 void *data, u16 data_len)
5017 struct mgmt_cp_set_exp_feature *cp = data;
5020 bt_dev_dbg(hdev, "sock %p", sk);
5022 for (i = 0; exp_features[i].uuid; i++) {
5023 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5024 return exp_features[i].set_func(sk, hdev, cp, data_len);
5027 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5028 MGMT_OP_SET_EXP_FEATURE,
5029 MGMT_STATUS_NOT_SUPPORTED);
5032 static u32 get_params_flags(struct hci_dev *hdev,
5033 struct hci_conn_params *params)
5035 u32 flags = hdev->conn_flags;
5037 /* Devices using RPAs can only be programmed in the acceptlist if
5038 * LL Privacy has been enable otherwise they cannot mark
5039 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5041 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5042 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type))
5043 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5048 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5051 struct mgmt_cp_get_device_flags *cp = data;
5052 struct mgmt_rp_get_device_flags rp;
5053 struct bdaddr_list_with_flags *br_params;
5054 struct hci_conn_params *params;
5055 u32 supported_flags;
5056 u32 current_flags = 0;
5057 u8 status = MGMT_STATUS_INVALID_PARAMS;
5059 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5060 &cp->addr.bdaddr, cp->addr.type);
5064 supported_flags = hdev->conn_flags;
5066 memset(&rp, 0, sizeof(rp));
5068 if (cp->addr.type == BDADDR_BREDR) {
5069 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5075 current_flags = br_params->flags;
5077 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5078 le_addr_type(cp->addr.type));
5082 supported_flags = get_params_flags(hdev, params);
5083 current_flags = params->flags;
5086 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5087 rp.addr.type = cp->addr.type;
5088 rp.supported_flags = cpu_to_le32(supported_flags);
5089 rp.current_flags = cpu_to_le32(current_flags);
5091 status = MGMT_STATUS_SUCCESS;
5094 hci_dev_unlock(hdev);
5096 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5100 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5101 bdaddr_t *bdaddr, u8 bdaddr_type,
5102 u32 supported_flags, u32 current_flags)
5104 struct mgmt_ev_device_flags_changed ev;
5106 bacpy(&ev.addr.bdaddr, bdaddr);
5107 ev.addr.type = bdaddr_type;
5108 ev.supported_flags = cpu_to_le32(supported_flags);
5109 ev.current_flags = cpu_to_le32(current_flags);
5111 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5114 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5117 struct mgmt_cp_set_device_flags *cp = data;
5118 struct bdaddr_list_with_flags *br_params;
5119 struct hci_conn_params *params;
5120 u8 status = MGMT_STATUS_INVALID_PARAMS;
5121 u32 supported_flags;
5122 u32 current_flags = __le32_to_cpu(cp->current_flags);
5124 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5125 &cp->addr.bdaddr, cp->addr.type, current_flags);
5127 // We should take hci_dev_lock() early, I think.. conn_flags can change
5128 supported_flags = hdev->conn_flags;
5130 if ((supported_flags | current_flags) != supported_flags) {
5131 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5132 current_flags, supported_flags);
5138 if (cp->addr.type == BDADDR_BREDR) {
5139 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5144 br_params->flags = current_flags;
5145 status = MGMT_STATUS_SUCCESS;
5147 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5148 &cp->addr.bdaddr, cp->addr.type);
5154 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5155 le_addr_type(cp->addr.type));
5157 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5158 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5162 supported_flags = get_params_flags(hdev, params);
5164 if ((supported_flags | current_flags) != supported_flags) {
5165 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5166 current_flags, supported_flags);
5170 WRITE_ONCE(params->flags, current_flags);
5171 status = MGMT_STATUS_SUCCESS;
5173 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5176 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5177 hci_update_passive_scan(hdev);
5180 hci_dev_unlock(hdev);
5183 if (status == MGMT_STATUS_SUCCESS)
5184 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5185 supported_flags, current_flags);
5187 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5188 &cp->addr, sizeof(cp->addr));
5191 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5194 struct mgmt_ev_adv_monitor_added ev;
5196 ev.monitor_handle = cpu_to_le16(handle);
5198 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5201 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5203 struct mgmt_ev_adv_monitor_removed ev;
5204 struct mgmt_pending_cmd *cmd;
5205 struct sock *sk_skip = NULL;
5206 struct mgmt_cp_remove_adv_monitor *cp;
5208 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5212 if (cp->monitor_handle)
5216 ev.monitor_handle = cpu_to_le16(handle);
5218 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5221 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5222 void *data, u16 len)
5224 struct adv_monitor *monitor = NULL;
5225 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5228 __u32 supported = 0;
5230 __u16 num_handles = 0;
5231 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5233 BT_DBG("request for %s", hdev->name);
5237 if (msft_monitor_supported(hdev))
5238 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5240 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5241 handles[num_handles++] = monitor->handle;
5243 hci_dev_unlock(hdev);
5245 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5246 rp = kmalloc(rp_size, GFP_KERNEL);
5250 /* All supported features are currently enabled */
5251 enabled = supported;
5253 rp->supported_features = cpu_to_le32(supported);
5254 rp->enabled_features = cpu_to_le32(enabled);
5255 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5256 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5257 rp->num_handles = cpu_to_le16(num_handles);
5259 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5261 err = mgmt_cmd_complete(sk, hdev->id,
5262 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5263 MGMT_STATUS_SUCCESS, rp, rp_size);
5270 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5271 void *data, int status)
5273 struct mgmt_rp_add_adv_patterns_monitor rp;
5274 struct mgmt_pending_cmd *cmd = data;
5275 struct adv_monitor *monitor = cmd->user_data;
5279 rp.monitor_handle = cpu_to_le16(monitor->handle);
5282 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5283 hdev->adv_monitors_cnt++;
5284 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5285 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5286 hci_update_passive_scan(hdev);
5289 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5290 mgmt_status(status), &rp, sizeof(rp));
5291 mgmt_pending_remove(cmd);
5293 hci_dev_unlock(hdev);
5294 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5295 rp.monitor_handle, status);
5298 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5300 struct mgmt_pending_cmd *cmd = data;
5301 struct adv_monitor *monitor = cmd->user_data;
5303 return hci_add_adv_monitor(hdev, monitor);
5306 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5307 struct adv_monitor *m, u8 status,
5308 void *data, u16 len, u16 op)
5310 struct mgmt_pending_cmd *cmd;
5318 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5319 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5320 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5321 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5322 status = MGMT_STATUS_BUSY;
5326 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5328 status = MGMT_STATUS_NO_RESOURCES;
5333 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5334 mgmt_add_adv_patterns_monitor_complete);
5337 status = MGMT_STATUS_NO_RESOURCES;
5339 status = MGMT_STATUS_FAILED;
5344 hci_dev_unlock(hdev);
5349 hci_free_adv_monitor(hdev, m);
5350 hci_dev_unlock(hdev);
5351 return mgmt_cmd_status(sk, hdev->id, op, status);
5354 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5355 struct mgmt_adv_rssi_thresholds *rssi)
5358 m->rssi.low_threshold = rssi->low_threshold;
5359 m->rssi.low_threshold_timeout =
5360 __le16_to_cpu(rssi->low_threshold_timeout);
5361 m->rssi.high_threshold = rssi->high_threshold;
5362 m->rssi.high_threshold_timeout =
5363 __le16_to_cpu(rssi->high_threshold_timeout);
5364 m->rssi.sampling_period = rssi->sampling_period;
5366 /* Default values. These numbers are the least constricting
5367 * parameters for MSFT API to work, so it behaves as if there
5368 * are no rssi parameter to consider. May need to be changed
5369 * if other API are to be supported.
5371 m->rssi.low_threshold = -127;
5372 m->rssi.low_threshold_timeout = 60;
5373 m->rssi.high_threshold = -127;
5374 m->rssi.high_threshold_timeout = 0;
5375 m->rssi.sampling_period = 0;
5379 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5380 struct mgmt_adv_pattern *patterns)
5382 u8 offset = 0, length = 0;
5383 struct adv_pattern *p = NULL;
5386 for (i = 0; i < pattern_count; i++) {
5387 offset = patterns[i].offset;
5388 length = patterns[i].length;
5389 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5390 length > HCI_MAX_EXT_AD_LENGTH ||
5391 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5392 return MGMT_STATUS_INVALID_PARAMS;
5394 p = kmalloc(sizeof(*p), GFP_KERNEL);
5396 return MGMT_STATUS_NO_RESOURCES;
5398 p->ad_type = patterns[i].ad_type;
5399 p->offset = patterns[i].offset;
5400 p->length = patterns[i].length;
5401 memcpy(p->value, patterns[i].value, p->length);
5403 INIT_LIST_HEAD(&p->list);
5404 list_add(&p->list, &m->patterns);
5407 return MGMT_STATUS_SUCCESS;
5410 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5411 void *data, u16 len)
5413 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5414 struct adv_monitor *m = NULL;
5415 u8 status = MGMT_STATUS_SUCCESS;
5416 size_t expected_size = sizeof(*cp);
5418 BT_DBG("request for %s", hdev->name);
5420 if (len <= sizeof(*cp)) {
5421 status = MGMT_STATUS_INVALID_PARAMS;
5425 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5426 if (len != expected_size) {
5427 status = MGMT_STATUS_INVALID_PARAMS;
5431 m = kzalloc(sizeof(*m), GFP_KERNEL);
5433 status = MGMT_STATUS_NO_RESOURCES;
5437 INIT_LIST_HEAD(&m->patterns);
5439 parse_adv_monitor_rssi(m, NULL);
5440 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5443 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5444 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5447 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5448 void *data, u16 len)
5450 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5451 struct adv_monitor *m = NULL;
5452 u8 status = MGMT_STATUS_SUCCESS;
5453 size_t expected_size = sizeof(*cp);
5455 BT_DBG("request for %s", hdev->name);
5457 if (len <= sizeof(*cp)) {
5458 status = MGMT_STATUS_INVALID_PARAMS;
5462 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5463 if (len != expected_size) {
5464 status = MGMT_STATUS_INVALID_PARAMS;
5468 m = kzalloc(sizeof(*m), GFP_KERNEL);
5470 status = MGMT_STATUS_NO_RESOURCES;
5474 INIT_LIST_HEAD(&m->patterns);
5476 parse_adv_monitor_rssi(m, &cp->rssi);
5477 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5480 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5481 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5484 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5485 void *data, int status)
5487 struct mgmt_rp_remove_adv_monitor rp;
5488 struct mgmt_pending_cmd *cmd = data;
5489 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5493 rp.monitor_handle = cp->monitor_handle;
5496 hci_update_passive_scan(hdev);
5498 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5499 mgmt_status(status), &rp, sizeof(rp));
5500 mgmt_pending_remove(cmd);
5502 hci_dev_unlock(hdev);
5503 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5504 rp.monitor_handle, status);
5507 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5509 struct mgmt_pending_cmd *cmd = data;
5510 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5511 u16 handle = __le16_to_cpu(cp->monitor_handle);
5514 return hci_remove_all_adv_monitor(hdev);
5516 return hci_remove_single_adv_monitor(hdev, handle);
5519 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5520 void *data, u16 len)
5522 struct mgmt_pending_cmd *cmd;
5527 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5528 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5529 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5530 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5531 status = MGMT_STATUS_BUSY;
5535 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5537 status = MGMT_STATUS_NO_RESOURCES;
5541 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5542 mgmt_remove_adv_monitor_complete);
5545 mgmt_pending_remove(cmd);
5548 status = MGMT_STATUS_NO_RESOURCES;
5550 status = MGMT_STATUS_FAILED;
5555 hci_dev_unlock(hdev);
5560 hci_dev_unlock(hdev);
5561 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5565 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5567 struct mgmt_rp_read_local_oob_data mgmt_rp;
5568 size_t rp_size = sizeof(mgmt_rp);
5569 struct mgmt_pending_cmd *cmd = data;
5570 struct sk_buff *skb = cmd->skb;
5571 u8 status = mgmt_status(err);
5575 status = MGMT_STATUS_FAILED;
5576 else if (IS_ERR(skb))
5577 status = mgmt_status(PTR_ERR(skb));
5579 status = mgmt_status(skb->data[0]);
5582 bt_dev_dbg(hdev, "status %d", status);
5585 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5589 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5591 if (!bredr_sc_enabled(hdev)) {
5592 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5594 if (skb->len < sizeof(*rp)) {
5595 mgmt_cmd_status(cmd->sk, hdev->id,
5596 MGMT_OP_READ_LOCAL_OOB_DATA,
5597 MGMT_STATUS_FAILED);
5601 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5602 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5604 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5606 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5608 if (skb->len < sizeof(*rp)) {
5609 mgmt_cmd_status(cmd->sk, hdev->id,
5610 MGMT_OP_READ_LOCAL_OOB_DATA,
5611 MGMT_STATUS_FAILED);
5615 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5616 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5618 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5619 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5622 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5623 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5626 if (skb && !IS_ERR(skb))
5629 mgmt_pending_free(cmd);
5632 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5634 struct mgmt_pending_cmd *cmd = data;
5636 if (bredr_sc_enabled(hdev))
5637 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5639 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5641 if (IS_ERR(cmd->skb))
5642 return PTR_ERR(cmd->skb);
5647 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5648 void *data, u16 data_len)
5650 struct mgmt_pending_cmd *cmd;
5653 bt_dev_dbg(hdev, "sock %p", sk);
5657 if (!hdev_is_powered(hdev)) {
5658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5659 MGMT_STATUS_NOT_POWERED);
5663 if (!lmp_ssp_capable(hdev)) {
5664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5665 MGMT_STATUS_NOT_SUPPORTED);
5669 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5673 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5674 read_local_oob_data_complete);
5677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5678 MGMT_STATUS_FAILED);
5681 mgmt_pending_free(cmd);
5685 hci_dev_unlock(hdev);
5689 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5690 void *data, u16 len)
5692 struct mgmt_addr_info *addr = data;
5695 bt_dev_dbg(hdev, "sock %p", sk);
5697 if (!bdaddr_type_is_valid(addr->type))
5698 return mgmt_cmd_complete(sk, hdev->id,
5699 MGMT_OP_ADD_REMOTE_OOB_DATA,
5700 MGMT_STATUS_INVALID_PARAMS,
5701 addr, sizeof(*addr));
5705 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5706 struct mgmt_cp_add_remote_oob_data *cp = data;
5709 if (cp->addr.type != BDADDR_BREDR) {
5710 err = mgmt_cmd_complete(sk, hdev->id,
5711 MGMT_OP_ADD_REMOTE_OOB_DATA,
5712 MGMT_STATUS_INVALID_PARAMS,
5713 &cp->addr, sizeof(cp->addr));
5717 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5718 cp->addr.type, cp->hash,
5719 cp->rand, NULL, NULL);
5721 status = MGMT_STATUS_FAILED;
5723 status = MGMT_STATUS_SUCCESS;
5725 err = mgmt_cmd_complete(sk, hdev->id,
5726 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5727 &cp->addr, sizeof(cp->addr));
5728 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5729 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5730 u8 *rand192, *hash192, *rand256, *hash256;
5733 if (bdaddr_type_is_le(cp->addr.type)) {
5734 /* Enforce zero-valued 192-bit parameters as
5735 * long as legacy SMP OOB isn't implemented.
5737 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5738 memcmp(cp->hash192, ZERO_KEY, 16)) {
5739 err = mgmt_cmd_complete(sk, hdev->id,
5740 MGMT_OP_ADD_REMOTE_OOB_DATA,
5741 MGMT_STATUS_INVALID_PARAMS,
5742 addr, sizeof(*addr));
5749 /* In case one of the P-192 values is set to zero,
5750 * then just disable OOB data for P-192.
5752 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5753 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5757 rand192 = cp->rand192;
5758 hash192 = cp->hash192;
5762 /* In case one of the P-256 values is set to zero, then just
5763 * disable OOB data for P-256.
5765 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5766 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5770 rand256 = cp->rand256;
5771 hash256 = cp->hash256;
5774 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5775 cp->addr.type, hash192, rand192,
5778 status = MGMT_STATUS_FAILED;
5780 status = MGMT_STATUS_SUCCESS;
5782 err = mgmt_cmd_complete(sk, hdev->id,
5783 MGMT_OP_ADD_REMOTE_OOB_DATA,
5784 status, &cp->addr, sizeof(cp->addr));
5786 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5788 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5789 MGMT_STATUS_INVALID_PARAMS);
5793 hci_dev_unlock(hdev);
5797 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5798 void *data, u16 len)
5800 struct mgmt_cp_remove_remote_oob_data *cp = data;
5804 bt_dev_dbg(hdev, "sock %p", sk);
5806 if (cp->addr.type != BDADDR_BREDR)
5807 return mgmt_cmd_complete(sk, hdev->id,
5808 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5809 MGMT_STATUS_INVALID_PARAMS,
5810 &cp->addr, sizeof(cp->addr));
5814 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5815 hci_remote_oob_data_clear(hdev);
5816 status = MGMT_STATUS_SUCCESS;
5820 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5822 status = MGMT_STATUS_INVALID_PARAMS;
5824 status = MGMT_STATUS_SUCCESS;
5827 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5828 status, &cp->addr, sizeof(cp->addr));
5830 hci_dev_unlock(hdev);
5834 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5836 struct mgmt_pending_cmd *cmd;
5838 bt_dev_dbg(hdev, "status %u", status);
5842 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5844 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5847 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5850 cmd->cmd_complete(cmd, mgmt_status(status));
5851 mgmt_pending_remove(cmd);
5854 hci_dev_unlock(hdev);
5857 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5858 uint8_t *mgmt_status)
5861 case DISCOV_TYPE_LE:
5862 *mgmt_status = mgmt_le_support(hdev);
5866 case DISCOV_TYPE_INTERLEAVED:
5867 *mgmt_status = mgmt_le_support(hdev);
5871 case DISCOV_TYPE_BREDR:
5872 *mgmt_status = mgmt_bredr_support(hdev);
5877 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5884 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5886 struct mgmt_pending_cmd *cmd = data;
5888 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5889 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5890 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5893 bt_dev_dbg(hdev, "err %d", err);
5895 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5897 mgmt_pending_remove(cmd);
5899 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5903 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5905 return hci_start_discovery_sync(hdev);
5908 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5909 u16 op, void *data, u16 len)
5911 struct mgmt_cp_start_discovery *cp = data;
5912 struct mgmt_pending_cmd *cmd;
5916 bt_dev_dbg(hdev, "sock %p", sk);
5920 if (!hdev_is_powered(hdev)) {
5921 err = mgmt_cmd_complete(sk, hdev->id, op,
5922 MGMT_STATUS_NOT_POWERED,
5923 &cp->type, sizeof(cp->type));
5927 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5928 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5929 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5930 &cp->type, sizeof(cp->type));
5934 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5935 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5936 &cp->type, sizeof(cp->type));
5940 /* Can't start discovery when it is paused */
5941 if (hdev->discovery_paused) {
5942 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5943 &cp->type, sizeof(cp->type));
5947 /* Clear the discovery filter first to free any previously
5948 * allocated memory for the UUID list.
5950 hci_discovery_filter_clear(hdev);
5952 hdev->discovery.type = cp->type;
5953 hdev->discovery.report_invalid_rssi = false;
5954 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5955 hdev->discovery.limited = true;
5957 hdev->discovery.limited = false;
5959 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5965 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5966 start_discovery_complete);
5968 mgmt_pending_remove(cmd);
5972 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5975 hci_dev_unlock(hdev);
5979 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5980 void *data, u16 len)
5982 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5986 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5987 void *data, u16 len)
5989 return start_discovery_internal(sk, hdev,
5990 MGMT_OP_START_LIMITED_DISCOVERY,
5994 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5995 void *data, u16 len)
5997 struct mgmt_cp_start_service_discovery *cp = data;
5998 struct mgmt_pending_cmd *cmd;
5999 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6000 u16 uuid_count, expected_len;
6004 bt_dev_dbg(hdev, "sock %p", sk);
6008 if (!hdev_is_powered(hdev)) {
6009 err = mgmt_cmd_complete(sk, hdev->id,
6010 MGMT_OP_START_SERVICE_DISCOVERY,
6011 MGMT_STATUS_NOT_POWERED,
6012 &cp->type, sizeof(cp->type));
6016 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6017 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6018 err = mgmt_cmd_complete(sk, hdev->id,
6019 MGMT_OP_START_SERVICE_DISCOVERY,
6020 MGMT_STATUS_BUSY, &cp->type,
6025 if (hdev->discovery_paused) {
6026 err = mgmt_cmd_complete(sk, hdev->id,
6027 MGMT_OP_START_SERVICE_DISCOVERY,
6028 MGMT_STATUS_BUSY, &cp->type,
6033 uuid_count = __le16_to_cpu(cp->uuid_count);
6034 if (uuid_count > max_uuid_count) {
6035 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6037 err = mgmt_cmd_complete(sk, hdev->id,
6038 MGMT_OP_START_SERVICE_DISCOVERY,
6039 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6044 expected_len = sizeof(*cp) + uuid_count * 16;
6045 if (expected_len != len) {
6046 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6048 err = mgmt_cmd_complete(sk, hdev->id,
6049 MGMT_OP_START_SERVICE_DISCOVERY,
6050 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6055 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6056 err = mgmt_cmd_complete(sk, hdev->id,
6057 MGMT_OP_START_SERVICE_DISCOVERY,
6058 status, &cp->type, sizeof(cp->type));
6062 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6069 /* Clear the discovery filter first to free any previously
6070 * allocated memory for the UUID list.
6072 hci_discovery_filter_clear(hdev);
6074 hdev->discovery.result_filtering = true;
6075 hdev->discovery.type = cp->type;
6076 hdev->discovery.rssi = cp->rssi;
6077 hdev->discovery.uuid_count = uuid_count;
6079 if (uuid_count > 0) {
6080 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6082 if (!hdev->discovery.uuids) {
6083 err = mgmt_cmd_complete(sk, hdev->id,
6084 MGMT_OP_START_SERVICE_DISCOVERY,
6086 &cp->type, sizeof(cp->type));
6087 mgmt_pending_remove(cmd);
6092 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6093 start_discovery_complete);
6095 mgmt_pending_remove(cmd);
6099 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6102 hci_dev_unlock(hdev);
6106 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6108 struct mgmt_pending_cmd *cmd;
6110 bt_dev_dbg(hdev, "status %u", status);
6114 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6116 cmd->cmd_complete(cmd, mgmt_status(status));
6117 mgmt_pending_remove(cmd);
6120 hci_dev_unlock(hdev);
6123 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6125 struct mgmt_pending_cmd *cmd = data;
6127 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6130 bt_dev_dbg(hdev, "err %d", err);
6132 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6134 mgmt_pending_remove(cmd);
6137 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6140 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6142 return hci_stop_discovery_sync(hdev);
6145 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6148 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6149 struct mgmt_pending_cmd *cmd;
6152 bt_dev_dbg(hdev, "sock %p", sk);
6156 if (!hci_discovery_active(hdev)) {
6157 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6158 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6159 sizeof(mgmt_cp->type));
6163 if (hdev->discovery.type != mgmt_cp->type) {
6164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6165 MGMT_STATUS_INVALID_PARAMS,
6166 &mgmt_cp->type, sizeof(mgmt_cp->type));
6170 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6176 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6177 stop_discovery_complete);
6179 mgmt_pending_remove(cmd);
6183 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6186 hci_dev_unlock(hdev);
6190 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6193 struct mgmt_cp_confirm_name *cp = data;
6194 struct inquiry_entry *e;
6197 bt_dev_dbg(hdev, "sock %p", sk);
6201 if (!hci_discovery_active(hdev)) {
6202 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6203 MGMT_STATUS_FAILED, &cp->addr,
6208 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6210 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6211 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6216 if (cp->name_known) {
6217 e->name_state = NAME_KNOWN;
6220 e->name_state = NAME_NEEDED;
6221 hci_inquiry_cache_update_resolve(hdev, e);
6224 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6225 &cp->addr, sizeof(cp->addr));
6228 hci_dev_unlock(hdev);
6232 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6235 struct mgmt_cp_block_device *cp = data;
6239 bt_dev_dbg(hdev, "sock %p", sk);
6241 if (!bdaddr_type_is_valid(cp->addr.type))
6242 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6243 MGMT_STATUS_INVALID_PARAMS,
6244 &cp->addr, sizeof(cp->addr));
6248 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6251 status = MGMT_STATUS_FAILED;
6255 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6257 status = MGMT_STATUS_SUCCESS;
6260 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6261 &cp->addr, sizeof(cp->addr));
6263 hci_dev_unlock(hdev);
6268 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6271 struct mgmt_cp_unblock_device *cp = data;
6275 bt_dev_dbg(hdev, "sock %p", sk);
6277 if (!bdaddr_type_is_valid(cp->addr.type))
6278 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6279 MGMT_STATUS_INVALID_PARAMS,
6280 &cp->addr, sizeof(cp->addr));
6284 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6287 status = MGMT_STATUS_INVALID_PARAMS;
6291 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6293 status = MGMT_STATUS_SUCCESS;
6296 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6297 &cp->addr, sizeof(cp->addr));
6299 hci_dev_unlock(hdev);
6304 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6306 return hci_update_eir_sync(hdev);
6309 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6312 struct mgmt_cp_set_device_id *cp = data;
6316 bt_dev_dbg(hdev, "sock %p", sk);
6318 source = __le16_to_cpu(cp->source);
6320 if (source > 0x0002)
6321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6322 MGMT_STATUS_INVALID_PARAMS);
6326 hdev->devid_source = source;
6327 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6328 hdev->devid_product = __le16_to_cpu(cp->product);
6329 hdev->devid_version = __le16_to_cpu(cp->version);
6331 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6334 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6336 hci_dev_unlock(hdev);
6341 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6344 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6346 bt_dev_dbg(hdev, "status %d", err);
6349 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6351 struct cmd_lookup match = { NULL, hdev };
6353 struct adv_info *adv_instance;
6354 u8 status = mgmt_status(err);
6357 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6358 cmd_status_rsp, &status);
6362 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6363 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6365 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6367 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6370 new_settings(hdev, match.sk);
6375 /* If "Set Advertising" was just disabled and instance advertising was
6376 * set up earlier, then re-enable multi-instance advertising.
6378 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6379 list_empty(&hdev->adv_instances))
6382 instance = hdev->cur_adv_instance;
6384 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6385 struct adv_info, list);
6389 instance = adv_instance->instance;
6392 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6394 enable_advertising_instance(hdev, err);
6397 static int set_adv_sync(struct hci_dev *hdev, void *data)
6399 struct mgmt_pending_cmd *cmd = data;
6400 struct mgmt_mode *cp = cmd->param;
6403 if (cp->val == 0x02)
6404 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6406 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6408 cancel_adv_timeout(hdev);
6411 /* Switch to instance "0" for the Set Advertising setting.
6412 * We cannot use update_[adv|scan_rsp]_data() here as the
6413 * HCI_ADVERTISING flag is not yet set.
6415 hdev->cur_adv_instance = 0x00;
6417 if (ext_adv_capable(hdev)) {
6418 hci_start_ext_adv_sync(hdev, 0x00);
6420 hci_update_adv_data_sync(hdev, 0x00);
6421 hci_update_scan_rsp_data_sync(hdev, 0x00);
6422 hci_enable_advertising_sync(hdev);
6425 hci_disable_advertising_sync(hdev);
6431 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6434 struct mgmt_mode *cp = data;
6435 struct mgmt_pending_cmd *cmd;
6439 bt_dev_dbg(hdev, "sock %p", sk);
6441 status = mgmt_le_support(hdev);
6443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6448 MGMT_STATUS_INVALID_PARAMS);
6450 if (hdev->advertising_paused)
6451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6458 /* The following conditions are ones which mean that we should
6459 * not do any HCI communication but directly send a mgmt
6460 * response to user space (after toggling the flag if
6463 if (!hdev_is_powered(hdev) ||
6464 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6465 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6466 hci_dev_test_flag(hdev, HCI_MESH) ||
6467 hci_conn_num(hdev, LE_LINK) > 0 ||
6468 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6469 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6473 hdev->cur_adv_instance = 0x00;
6474 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6475 if (cp->val == 0x02)
6476 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6478 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6480 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6481 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6484 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6489 err = new_settings(hdev, sk);
6494 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6495 pending_find(MGMT_OP_SET_LE, hdev)) {
6496 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6501 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6505 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6506 set_advertising_complete);
6509 mgmt_pending_remove(cmd);
6512 hci_dev_unlock(hdev);
6516 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6517 void *data, u16 len)
6519 struct mgmt_cp_set_static_address *cp = data;
6522 bt_dev_dbg(hdev, "sock %p", sk);
6524 if (!lmp_le_capable(hdev))
6525 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6526 MGMT_STATUS_NOT_SUPPORTED);
6528 if (hdev_is_powered(hdev))
6529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6530 MGMT_STATUS_REJECTED);
6532 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6533 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6534 return mgmt_cmd_status(sk, hdev->id,
6535 MGMT_OP_SET_STATIC_ADDRESS,
6536 MGMT_STATUS_INVALID_PARAMS);
6538 /* Two most significant bits shall be set */
6539 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6540 return mgmt_cmd_status(sk, hdev->id,
6541 MGMT_OP_SET_STATIC_ADDRESS,
6542 MGMT_STATUS_INVALID_PARAMS);
6547 bacpy(&hdev->static_addr, &cp->bdaddr);
6549 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6553 err = new_settings(hdev, sk);
6556 hci_dev_unlock(hdev);
6560 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6561 void *data, u16 len)
6563 struct mgmt_cp_set_scan_params *cp = data;
6564 __u16 interval, window;
6567 bt_dev_dbg(hdev, "sock %p", sk);
6569 if (!lmp_le_capable(hdev))
6570 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6571 MGMT_STATUS_NOT_SUPPORTED);
6573 interval = __le16_to_cpu(cp->interval);
6575 if (interval < 0x0004 || interval > 0x4000)
6576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6577 MGMT_STATUS_INVALID_PARAMS);
6579 window = __le16_to_cpu(cp->window);
6581 if (window < 0x0004 || window > 0x4000)
6582 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6583 MGMT_STATUS_INVALID_PARAMS);
6585 if (window > interval)
6586 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6587 MGMT_STATUS_INVALID_PARAMS);
6591 hdev->le_scan_interval = interval;
6592 hdev->le_scan_window = window;
6594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6597 /* If background scan is running, restart it so new parameters are
6600 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6601 hdev->discovery.state == DISCOVERY_STOPPED)
6602 hci_update_passive_scan(hdev);
6604 hci_dev_unlock(hdev);
6609 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6611 struct mgmt_pending_cmd *cmd = data;
6613 bt_dev_dbg(hdev, "err %d", err);
6616 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6619 struct mgmt_mode *cp = cmd->param;
6622 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6624 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6626 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6627 new_settings(hdev, cmd->sk);
6630 mgmt_pending_free(cmd);
6633 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6635 struct mgmt_pending_cmd *cmd = data;
6636 struct mgmt_mode *cp = cmd->param;
6638 return hci_write_fast_connectable_sync(hdev, cp->val);
6641 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6642 void *data, u16 len)
6644 struct mgmt_mode *cp = data;
6645 struct mgmt_pending_cmd *cmd;
6648 bt_dev_dbg(hdev, "sock %p", sk);
6650 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6651 hdev->hci_ver < BLUETOOTH_VER_1_2)
6652 return mgmt_cmd_status(sk, hdev->id,
6653 MGMT_OP_SET_FAST_CONNECTABLE,
6654 MGMT_STATUS_NOT_SUPPORTED);
6656 if (cp->val != 0x00 && cp->val != 0x01)
6657 return mgmt_cmd_status(sk, hdev->id,
6658 MGMT_OP_SET_FAST_CONNECTABLE,
6659 MGMT_STATUS_INVALID_PARAMS);
6663 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6664 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6668 if (!hdev_is_powered(hdev)) {
6669 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6670 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6671 new_settings(hdev, sk);
6675 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6680 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6681 fast_connectable_complete);
6684 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6685 MGMT_STATUS_FAILED);
6688 mgmt_pending_free(cmd);
6692 hci_dev_unlock(hdev);
6697 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6699 struct mgmt_pending_cmd *cmd = data;
6701 bt_dev_dbg(hdev, "err %d", err);
6704 u8 mgmt_err = mgmt_status(err);
6706 /* We need to restore the flag if related HCI commands
6709 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6711 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6713 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6714 new_settings(hdev, cmd->sk);
6717 mgmt_pending_free(cmd);
6720 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6724 status = hci_write_fast_connectable_sync(hdev, false);
6727 status = hci_update_scan_sync(hdev);
6729 /* Since only the advertising data flags will change, there
6730 * is no need to update the scan response data.
6733 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6738 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6740 struct mgmt_mode *cp = data;
6741 struct mgmt_pending_cmd *cmd;
6744 bt_dev_dbg(hdev, "sock %p", sk);
6746 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6748 MGMT_STATUS_NOT_SUPPORTED);
6750 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6752 MGMT_STATUS_REJECTED);
6754 if (cp->val != 0x00 && cp->val != 0x01)
6755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6756 MGMT_STATUS_INVALID_PARAMS);
6760 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6761 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6765 if (!hdev_is_powered(hdev)) {
6767 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6768 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6769 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6770 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6771 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6774 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6776 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6780 err = new_settings(hdev, sk);
6784 /* Reject disabling when powered on */
6786 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6787 MGMT_STATUS_REJECTED);
6790 /* When configuring a dual-mode controller to operate
6791 * with LE only and using a static address, then switching
6792 * BR/EDR back on is not allowed.
6794 * Dual-mode controllers shall operate with the public
6795 * address as its identity address for BR/EDR and LE. So
6796 * reject the attempt to create an invalid configuration.
6798 * The same restrictions applies when secure connections
6799 * has been enabled. For BR/EDR this is a controller feature
6800 * while for LE it is a host stack feature. This means that
6801 * switching BR/EDR back on when secure connections has been
6802 * enabled is not a supported transaction.
6804 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6805 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6806 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6807 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6808 MGMT_STATUS_REJECTED);
6813 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6817 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6818 set_bredr_complete);
6821 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6822 MGMT_STATUS_FAILED);
6824 mgmt_pending_free(cmd);
6829 /* We need to flip the bit already here so that
6830 * hci_req_update_adv_data generates the correct flags.
6832 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6835 hci_dev_unlock(hdev);
6839 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6841 struct mgmt_pending_cmd *cmd = data;
6842 struct mgmt_mode *cp;
6844 bt_dev_dbg(hdev, "err %d", err);
6847 u8 mgmt_err = mgmt_status(err);
6849 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6857 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6858 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6861 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6862 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6865 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6866 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6870 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6871 new_settings(hdev, cmd->sk);
6874 mgmt_pending_free(cmd);
6877 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6879 struct mgmt_pending_cmd *cmd = data;
6880 struct mgmt_mode *cp = cmd->param;
6883 /* Force write of val */
6884 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6886 return hci_write_sc_support_sync(hdev, val);
6889 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6890 void *data, u16 len)
6892 struct mgmt_mode *cp = data;
6893 struct mgmt_pending_cmd *cmd;
6897 bt_dev_dbg(hdev, "sock %p", sk);
6899 if (!lmp_sc_capable(hdev) &&
6900 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6901 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6902 MGMT_STATUS_NOT_SUPPORTED);
6904 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6905 lmp_sc_capable(hdev) &&
6906 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6908 MGMT_STATUS_REJECTED);
6910 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6911 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6912 MGMT_STATUS_INVALID_PARAMS);
6916 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6917 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6921 changed = !hci_dev_test_and_set_flag(hdev,
6923 if (cp->val == 0x02)
6924 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6926 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6928 changed = hci_dev_test_and_clear_flag(hdev,
6930 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6933 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6938 err = new_settings(hdev, sk);
6945 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6946 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6947 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6951 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6955 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6956 set_secure_conn_complete);
6959 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6960 MGMT_STATUS_FAILED);
6962 mgmt_pending_free(cmd);
6966 hci_dev_unlock(hdev);
6970 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6971 void *data, u16 len)
6973 struct mgmt_mode *cp = data;
6974 bool changed, use_changed;
6977 bt_dev_dbg(hdev, "sock %p", sk);
6979 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6980 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6981 MGMT_STATUS_INVALID_PARAMS);
6986 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6988 changed = hci_dev_test_and_clear_flag(hdev,
6989 HCI_KEEP_DEBUG_KEYS);
6991 if (cp->val == 0x02)
6992 use_changed = !hci_dev_test_and_set_flag(hdev,
6993 HCI_USE_DEBUG_KEYS);
6995 use_changed = hci_dev_test_and_clear_flag(hdev,
6996 HCI_USE_DEBUG_KEYS);
6998 if (hdev_is_powered(hdev) && use_changed &&
6999 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7000 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7001 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7002 sizeof(mode), &mode);
7005 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7010 err = new_settings(hdev, sk);
7013 hci_dev_unlock(hdev);
7017 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7020 struct mgmt_cp_set_privacy *cp = cp_data;
7024 bt_dev_dbg(hdev, "sock %p", sk);
7026 if (!lmp_le_capable(hdev))
7027 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7028 MGMT_STATUS_NOT_SUPPORTED);
7030 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7031 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7032 MGMT_STATUS_INVALID_PARAMS);
7034 if (hdev_is_powered(hdev))
7035 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7036 MGMT_STATUS_REJECTED);
7040 /* If user space supports this command it is also expected to
7041 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7043 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7046 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7047 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7048 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7049 hci_adv_instances_set_rpa_expired(hdev, true);
7050 if (cp->privacy == 0x02)
7051 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7053 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7055 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7056 memset(hdev->irk, 0, sizeof(hdev->irk));
7057 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7058 hci_adv_instances_set_rpa_expired(hdev, false);
7059 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7062 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7067 err = new_settings(hdev, sk);
7070 hci_dev_unlock(hdev);
7074 static bool irk_is_valid(struct mgmt_irk_info *irk)
7076 switch (irk->addr.type) {
7077 case BDADDR_LE_PUBLIC:
7080 case BDADDR_LE_RANDOM:
7081 /* Two most significant bits shall be set */
7082 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7090 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7093 struct mgmt_cp_load_irks *cp = cp_data;
7094 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7095 sizeof(struct mgmt_irk_info));
7096 u16 irk_count, expected_len;
7099 bt_dev_dbg(hdev, "sock %p", sk);
7101 if (!lmp_le_capable(hdev))
7102 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7103 MGMT_STATUS_NOT_SUPPORTED);
7105 irk_count = __le16_to_cpu(cp->irk_count);
7106 if (irk_count > max_irk_count) {
7107 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7109 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7110 MGMT_STATUS_INVALID_PARAMS);
7113 expected_len = struct_size(cp, irks, irk_count);
7114 if (expected_len != len) {
7115 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7117 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7118 MGMT_STATUS_INVALID_PARAMS);
7121 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7123 for (i = 0; i < irk_count; i++) {
7124 struct mgmt_irk_info *key = &cp->irks[i];
7126 if (!irk_is_valid(key))
7127 return mgmt_cmd_status(sk, hdev->id,
7129 MGMT_STATUS_INVALID_PARAMS);
7134 hci_smp_irks_clear(hdev);
7136 for (i = 0; i < irk_count; i++) {
7137 struct mgmt_irk_info *irk = &cp->irks[i];
7138 u8 addr_type = le_addr_type(irk->addr.type);
7140 if (hci_is_blocked_key(hdev,
7141 HCI_BLOCKED_KEY_TYPE_IRK,
7143 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7148 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7149 if (irk->addr.type == BDADDR_BREDR)
7150 addr_type = BDADDR_BREDR;
7152 hci_add_irk(hdev, &irk->addr.bdaddr,
7153 addr_type, irk->val,
7157 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7159 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7161 hci_dev_unlock(hdev);
7167 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7168 void *data, u16 len)
7170 struct mgmt_cp_set_advertising_params *cp = data;
7175 BT_DBG("%s", hdev->name);
7177 if (!lmp_le_capable(hdev))
7178 return mgmt_cmd_status(sk, hdev->id,
7179 MGMT_OP_SET_ADVERTISING_PARAMS,
7180 MGMT_STATUS_NOT_SUPPORTED);
7182 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7183 return mgmt_cmd_status(sk, hdev->id,
7184 MGMT_OP_SET_ADVERTISING_PARAMS,
7187 min_interval = __le16_to_cpu(cp->interval_min);
7188 max_interval = __le16_to_cpu(cp->interval_max);
7190 if (min_interval > max_interval ||
7191 min_interval < 0x0020 || max_interval > 0x4000)
7192 return mgmt_cmd_status(sk, hdev->id,
7193 MGMT_OP_SET_ADVERTISING_PARAMS,
7194 MGMT_STATUS_INVALID_PARAMS);
7198 hdev->le_adv_min_interval = min_interval;
7199 hdev->le_adv_max_interval = max_interval;
7200 hdev->adv_filter_policy = cp->filter_policy;
7201 hdev->adv_type = cp->type;
7203 err = mgmt_cmd_complete(sk, hdev->id,
7204 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7206 hci_dev_unlock(hdev);
7211 static void set_advertising_data_complete(struct hci_dev *hdev,
7212 u8 status, u16 opcode)
7214 struct mgmt_cp_set_advertising_data *cp;
7215 struct mgmt_pending_cmd *cmd;
7217 BT_DBG("status 0x%02x", status);
7221 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7228 mgmt_cmd_status(cmd->sk, hdev->id,
7229 MGMT_OP_SET_ADVERTISING_DATA,
7230 mgmt_status(status));
7232 mgmt_cmd_complete(cmd->sk, hdev->id,
7233 MGMT_OP_SET_ADVERTISING_DATA, 0,
7236 mgmt_pending_remove(cmd);
7239 hci_dev_unlock(hdev);
7242 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7243 void *data, u16 len)
7245 struct mgmt_pending_cmd *cmd;
7246 struct hci_request req;
7247 struct mgmt_cp_set_advertising_data *cp = data;
7248 struct hci_cp_le_set_adv_data adv;
7251 BT_DBG("%s", hdev->name);
7253 if (!lmp_le_capable(hdev)) {
7254 return mgmt_cmd_status(sk, hdev->id,
7255 MGMT_OP_SET_ADVERTISING_DATA,
7256 MGMT_STATUS_NOT_SUPPORTED);
7261 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7262 err = mgmt_cmd_status(sk, hdev->id,
7263 MGMT_OP_SET_ADVERTISING_DATA,
7268 if (len > HCI_MAX_AD_LENGTH) {
7269 err = mgmt_cmd_status(sk, hdev->id,
7270 MGMT_OP_SET_ADVERTISING_DATA,
7271 MGMT_STATUS_INVALID_PARAMS);
7275 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7282 hci_req_init(&req, hdev);
7284 memset(&adv, 0, sizeof(adv));
7285 memcpy(adv.data, cp->data, len);
7288 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7290 err = hci_req_run(&req, set_advertising_data_complete);
7292 mgmt_pending_remove(cmd);
7295 hci_dev_unlock(hdev);
7300 /* Adv White List feature */
7301 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7303 struct mgmt_cp_add_dev_white_list *cp;
7304 struct mgmt_pending_cmd *cmd;
7306 BT_DBG("status 0x%02x", status);
7310 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7317 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7318 mgmt_status(status));
7320 mgmt_cmd_complete(cmd->sk, hdev->id,
7321 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7323 mgmt_pending_remove(cmd);
7326 hci_dev_unlock(hdev);
7329 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7330 void *data, u16 len)
7332 struct mgmt_pending_cmd *cmd;
7333 struct mgmt_cp_add_dev_white_list *cp = data;
7334 struct hci_request req;
7337 BT_DBG("%s", hdev->name);
7339 if (!lmp_le_capable(hdev))
7340 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7341 MGMT_STATUS_NOT_SUPPORTED);
7343 if (!hdev_is_powered(hdev))
7344 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7345 MGMT_STATUS_REJECTED);
7349 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7350 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7355 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7361 hci_req_init(&req, hdev);
7363 hci_req_add(&req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(*cp), cp);
7365 err = hci_req_run(&req, add_white_list_complete);
7367 mgmt_pending_remove(cmd);
7372 hci_dev_unlock(hdev);
7377 static void remove_from_white_list_complete(struct hci_dev *hdev,
7378 u8 status, u16 opcode)
7380 struct mgmt_cp_remove_dev_from_white_list *cp;
7381 struct mgmt_pending_cmd *cmd;
7383 BT_DBG("status 0x%02x", status);
7387 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7394 mgmt_cmd_status(cmd->sk, hdev->id,
7395 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7396 mgmt_status(status));
7398 mgmt_cmd_complete(cmd->sk, hdev->id,
7399 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7402 mgmt_pending_remove(cmd);
7405 hci_dev_unlock(hdev);
7408 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7409 void *data, u16 len)
7411 struct mgmt_pending_cmd *cmd;
7412 struct mgmt_cp_remove_dev_from_white_list *cp = data;
7413 struct hci_request req;
7416 BT_DBG("%s", hdev->name);
7418 if (!lmp_le_capable(hdev))
7419 return mgmt_cmd_status(sk, hdev->id,
7420 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7421 MGMT_STATUS_NOT_SUPPORTED);
7423 if (!hdev_is_powered(hdev))
7424 return mgmt_cmd_status(sk, hdev->id,
7425 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7426 MGMT_STATUS_REJECTED);
7430 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7431 err = mgmt_cmd_status(sk, hdev->id,
7432 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7437 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7444 hci_req_init(&req, hdev);
7446 hci_req_add(&req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(*cp), cp);
7448 err = hci_req_run(&req, remove_from_white_list_complete);
7450 mgmt_pending_remove(cmd);
7455 hci_dev_unlock(hdev);
7460 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7463 struct mgmt_pending_cmd *cmd;
7465 BT_DBG("status 0x%02x", status);
7469 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7474 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7475 mgmt_status(status));
7477 mgmt_cmd_complete(cmd->sk, hdev->id,
7478 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7481 mgmt_pending_remove(cmd);
7484 hci_dev_unlock(hdev);
7487 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7488 void *data, u16 len)
7490 struct mgmt_pending_cmd *cmd;
7491 struct hci_request req;
7494 BT_DBG("%s", hdev->name);
7496 if (!lmp_le_capable(hdev))
7497 return mgmt_cmd_status(sk, hdev->id,
7498 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7499 MGMT_STATUS_NOT_SUPPORTED);
7501 if (!hdev_is_powered(hdev))
7502 return mgmt_cmd_status(sk, hdev->id,
7503 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7504 MGMT_STATUS_REJECTED);
7508 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7509 err = mgmt_cmd_status(sk, hdev->id,
7510 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7515 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7522 hci_req_init(&req, hdev);
7524 hci_req_add(&req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
7526 err = hci_req_run(&req, clear_white_list_complete);
7528 mgmt_pending_remove(cmd);
7533 hci_dev_unlock(hdev);
7538 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7541 struct mgmt_cp_set_scan_rsp_data *cp;
7542 struct mgmt_pending_cmd *cmd;
7544 BT_DBG("status 0x%02x", status);
7548 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7555 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7556 mgmt_status(status));
7558 mgmt_cmd_complete(cmd->sk, hdev->id,
7559 MGMT_OP_SET_SCAN_RSP_DATA, 0,
7562 mgmt_pending_remove(cmd);
7565 hci_dev_unlock(hdev);
7568 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7571 struct mgmt_pending_cmd *cmd;
7572 struct hci_request req;
7573 struct mgmt_cp_set_scan_rsp_data *cp = data;
7574 struct hci_cp_le_set_scan_rsp_data rsp;
7577 BT_DBG("%s", hdev->name);
7579 if (!lmp_le_capable(hdev))
7580 return mgmt_cmd_status(sk, hdev->id,
7581 MGMT_OP_SET_SCAN_RSP_DATA,
7582 MGMT_STATUS_NOT_SUPPORTED);
7586 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7587 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7592 if (len > HCI_MAX_AD_LENGTH) {
7593 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7594 MGMT_STATUS_INVALID_PARAMS);
7598 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7604 hci_req_init(&req, hdev);
7606 memset(&rsp, 0, sizeof(rsp));
7607 memcpy(rsp.data, cp->data, len);
7610 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7612 err = hci_req_run(&req, set_scan_rsp_data_complete);
7614 mgmt_pending_remove(cmd);
7617 hci_dev_unlock(hdev);
7622 static void set_rssi_threshold_complete(struct hci_dev *hdev,
7623 u8 status, u16 opcode)
7625 struct mgmt_pending_cmd *cmd;
7627 BT_DBG("status 0x%02x", status);
7631 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7636 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7637 mgmt_status(status));
7639 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7642 mgmt_pending_remove(cmd);
7645 hci_dev_unlock(hdev);
7648 static void set_rssi_disable_complete(struct hci_dev *hdev,
7649 u8 status, u16 opcode)
7651 struct mgmt_pending_cmd *cmd;
7653 BT_DBG("status 0x%02x", status);
7657 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7662 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7663 mgmt_status(status));
7665 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7668 mgmt_pending_remove(cmd);
7671 hci_dev_unlock(hdev);
7674 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
7675 void *data, u16 len)
7678 struct hci_cp_set_rssi_threshold th = { 0, };
7679 struct mgmt_cp_set_enable_rssi *cp = data;
7680 struct hci_conn *conn;
7681 struct mgmt_pending_cmd *cmd;
7682 struct hci_request req;
7687 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7689 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7690 MGMT_STATUS_FAILED);
7694 if (!lmp_le_capable(hdev)) {
7695 mgmt_pending_remove(cmd);
7696 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7697 MGMT_STATUS_NOT_SUPPORTED);
7701 if (!hdev_is_powered(hdev)) {
7702 BT_DBG("%s", hdev->name);
7703 mgmt_pending_remove(cmd);
7704 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7705 MGMT_STATUS_NOT_POWERED);
7709 if (cp->link_type == 0x01)
7710 dest_type = LE_LINK;
7712 dest_type = ACL_LINK;
7714 /* Get LE/ACL link handle info */
7715 conn = hci_conn_hash_lookup_ba(hdev,
7716 dest_type, &cp->bdaddr);
7719 err = mgmt_cmd_complete(sk, hdev->id,
7720 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
7721 mgmt_pending_remove(cmd);
7725 hci_req_init(&req, hdev);
7727 th.hci_le_ext_opcode = 0x0B;
7729 th.conn_handle = conn->handle;
7730 th.alert_mask = 0x07;
7731 th.low_th = cp->low_th;
7732 th.in_range_th = cp->in_range_th;
7733 th.high_th = cp->high_th;
7735 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7736 err = hci_req_run(&req, set_rssi_threshold_complete);
7739 mgmt_pending_remove(cmd);
7740 BT_ERR("Error in requesting hci_req_run");
7745 hci_dev_unlock(hdev);
7749 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
7750 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7752 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
7753 struct mgmt_cp_set_enable_rssi *cp = data;
7754 struct mgmt_pending_cmd *cmd;
7759 mgmt_rp.status = rp->status;
7760 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7761 mgmt_rp.bt_address = cp->bdaddr;
7762 mgmt_rp.link_type = cp->link_type;
7764 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7765 MGMT_STATUS_SUCCESS, &mgmt_rp,
7766 sizeof(struct mgmt_cc_rsp_enable_rssi));
7768 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
7769 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7771 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
7772 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7773 &mgmt_rp.bt_address, true);
7777 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7779 mgmt_pending_remove(cmd);
7781 hci_dev_unlock(hdev);
7784 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
7785 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7787 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
7788 struct mgmt_cp_disable_rssi *cp = data;
7789 struct mgmt_pending_cmd *cmd;
7794 mgmt_rp.status = rp->status;
7795 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7796 mgmt_rp.bt_address = cp->bdaddr;
7797 mgmt_rp.link_type = cp->link_type;
7799 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7800 MGMT_STATUS_SUCCESS, &mgmt_rp,
7801 sizeof(struct mgmt_cc_rsp_enable_rssi));
7803 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
7804 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7806 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7807 &mgmt_rp.bt_address, false);
7811 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7813 mgmt_pending_remove(cmd);
7815 hci_dev_unlock(hdev);
7818 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
7819 void *data, u16 len)
7821 struct mgmt_pending_cmd *cmd;
7822 struct hci_request req;
7823 struct hci_cp_set_enable_rssi cp_en = { 0, };
7826 BT_DBG("Set Disable RSSI.");
7828 cp_en.hci_le_ext_opcode = 0x01;
7829 cp_en.le_enable_cs_Features = 0x00;
7830 cp_en.data[0] = 0x00;
7831 cp_en.data[1] = 0x00;
7832 cp_en.data[2] = 0x00;
7836 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7838 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7839 MGMT_STATUS_FAILED);
7843 if (!lmp_le_capable(hdev)) {
7844 mgmt_pending_remove(cmd);
7845 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7846 MGMT_STATUS_NOT_SUPPORTED);
7850 if (!hdev_is_powered(hdev)) {
7851 BT_DBG("%s", hdev->name);
7852 mgmt_pending_remove(cmd);
7853 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7854 MGMT_STATUS_NOT_POWERED);
7858 hci_req_init(&req, hdev);
7860 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7861 sizeof(struct hci_cp_set_enable_rssi),
7862 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7863 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7865 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7866 err = hci_req_run(&req, set_rssi_disable_complete);
7869 mgmt_pending_remove(cmd);
7870 BT_ERR("Error in requesting hci_req_run");
7875 hci_dev_unlock(hdev);
7879 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
7881 struct hci_cc_rsp_enable_rssi *rp = response;
7882 struct mgmt_pending_cmd *cmd_enable = NULL;
7883 struct mgmt_pending_cmd *cmd_disable = NULL;
7884 struct mgmt_cp_set_enable_rssi *cp_en;
7885 struct mgmt_cp_disable_rssi *cp_dis;
7888 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7889 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7890 hci_dev_unlock(hdev);
7893 BT_DBG("Enable Request");
7896 BT_DBG("Disable Request");
7899 cp_en = cmd_enable->param;
7904 switch (rp->le_ext_opcode) {
7906 BT_DBG("RSSI enabled.. Setting Threshold...");
7907 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
7908 cp_en, sizeof(*cp_en));
7912 BT_DBG("Sending RSSI enable success");
7913 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
7914 cp_en, rp, rp->status);
7918 } else if (cmd_disable) {
7919 cp_dis = cmd_disable->param;
7924 switch (rp->le_ext_opcode) {
7926 BT_DBG("Sending RSSI disable success");
7927 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7928 cp_dis, rp, rp->status);
7933 * Only unset RSSI Threshold values for the Link if
7934 * RSSI is monitored for other BREDR or LE Links
7936 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
7937 BT_DBG("Unset Threshold. Other links being monitored");
7938 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7939 cp_dis, rp, rp->status);
7941 BT_DBG("Unset Threshold. Disabling...");
7942 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
7943 cp_dis, sizeof(*cp_dis));
7950 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7953 struct mgmt_pending_cmd *cmd;
7955 BT_DBG("status 0x%02x", status);
7959 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7964 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7965 mgmt_status(status));
7967 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7970 mgmt_pending_remove(cmd);
7973 hci_dev_unlock(hdev);
7976 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7977 void *data, u16 len)
7979 struct mgmt_pending_cmd *cmd;
7980 struct hci_request req;
7981 struct mgmt_cp_set_enable_rssi *cp = data;
7982 struct hci_cp_set_enable_rssi cp_en = { 0, };
7985 BT_DBG("Set Enable RSSI.");
7987 cp_en.hci_le_ext_opcode = 0x01;
7988 cp_en.le_enable_cs_Features = 0x04;
7989 cp_en.data[0] = 0x00;
7990 cp_en.data[1] = 0x00;
7991 cp_en.data[2] = 0x00;
7995 if (!lmp_le_capable(hdev)) {
7996 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7997 MGMT_STATUS_NOT_SUPPORTED);
8001 if (!hdev_is_powered(hdev)) {
8002 BT_DBG("%s", hdev->name);
8003 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8004 MGMT_STATUS_NOT_POWERED);
8008 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
8009 BT_DBG("%s", hdev->name);
8010 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8015 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
8018 BT_DBG("%s", hdev->name);
8023 /* If RSSI is already enabled directly set Threshold values */
8024 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
8025 hci_dev_unlock(hdev);
8026 BT_DBG("RSSI Enabled. Directly set Threshold");
8027 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
8031 hci_req_init(&req, hdev);
8033 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
8034 sizeof(struct hci_cp_set_enable_rssi),
8035 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
8036 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
8038 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
8039 err = hci_req_run(&req, set_rssi_enable_complete);
8042 mgmt_pending_remove(cmd);
8043 BT_ERR("Error in requesting hci_req_run");
8048 hci_dev_unlock(hdev);
8053 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8055 struct mgmt_pending_cmd *cmd;
8057 BT_DBG("status 0x%02x", status);
8061 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
8065 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8066 MGMT_STATUS_SUCCESS, &status, 1);
8068 mgmt_pending_remove(cmd);
8071 hci_dev_unlock(hdev);
8074 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
8077 struct mgmt_pending_cmd *cmd;
8078 struct hci_request req;
8079 struct mgmt_cp_get_raw_rssi *cp = data;
8080 struct hci_cp_get_raw_rssi hci_cp;
8082 struct hci_conn *conn;
8086 BT_DBG("Get Raw RSSI.");
8090 if (!lmp_le_capable(hdev)) {
8091 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8092 MGMT_STATUS_NOT_SUPPORTED);
8096 if (cp->link_type == 0x01)
8097 dest_type = LE_LINK;
8099 dest_type = ACL_LINK;
8101 /* Get LE/BREDR link handle info */
8102 conn = hci_conn_hash_lookup_ba(hdev,
8103 dest_type, &cp->bt_address);
8105 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8106 MGMT_STATUS_NOT_CONNECTED);
8109 hci_cp.conn_handle = conn->handle;
8111 if (!hdev_is_powered(hdev)) {
8112 BT_DBG("%s", hdev->name);
8113 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8114 MGMT_STATUS_NOT_POWERED);
8118 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
8119 BT_DBG("%s", hdev->name);
8120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8125 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
8127 BT_DBG("%s", hdev->name);
8132 hci_req_init(&req, hdev);
8134 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
8135 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
8136 err = hci_req_run(&req, get_raw_rssi_complete);
8139 mgmt_pending_remove(cmd);
8140 BT_ERR("Error in requesting hci_req_run");
8144 hci_dev_unlock(hdev);
8149 void mgmt_raw_rssi_response(struct hci_dev *hdev,
8150 struct hci_cc_rp_get_raw_rssi *rp, int success)
8152 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
8153 struct hci_conn *conn;
8155 mgmt_rp.status = rp->status;
8156 mgmt_rp.rssi_dbm = rp->rssi_dbm;
8158 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
8162 bacpy(&mgmt_rp.bt_address, &conn->dst);
8163 if (conn->type == LE_LINK)
8164 mgmt_rp.link_type = 0x01;
8166 mgmt_rp.link_type = 0x00;
8168 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
8169 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
8172 static void set_disable_threshold_complete(struct hci_dev *hdev,
8173 u8 status, u16 opcode)
8175 struct mgmt_pending_cmd *cmd;
8177 BT_DBG("status 0x%02x", status);
8181 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
8185 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8186 MGMT_STATUS_SUCCESS, &status, 1);
8188 mgmt_pending_remove(cmd);
8191 hci_dev_unlock(hdev);
8194 /** Removes monitoring for a link*/
8195 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
8196 void *data, u16 len)
8199 struct hci_cp_set_rssi_threshold th = { 0, };
8200 struct mgmt_cp_disable_rssi *cp = data;
8201 struct hci_conn *conn;
8202 struct mgmt_pending_cmd *cmd;
8203 struct hci_request req;
8206 BT_DBG("Set Disable RSSI.");
8210 if (!lmp_le_capable(hdev)) {
8211 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8212 MGMT_STATUS_NOT_SUPPORTED);
8216 /* Get LE/ACL link handle info*/
8217 if (cp->link_type == 0x01)
8218 dest_type = LE_LINK;
8220 dest_type = ACL_LINK;
8222 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
8224 err = mgmt_cmd_complete(sk, hdev->id,
8225 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
8229 th.hci_le_ext_opcode = 0x0B;
8231 th.conn_handle = conn->handle;
8232 th.alert_mask = 0x00;
8234 th.in_range_th = 0x00;
8237 if (!hdev_is_powered(hdev)) {
8238 BT_DBG("%s", hdev->name);
8239 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8244 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
8245 BT_DBG("%s", hdev->name);
8246 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8251 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
8254 BT_DBG("%s", hdev->name);
8259 hci_req_init(&req, hdev);
8261 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
8262 err = hci_req_run(&req, set_disable_threshold_complete);
8264 mgmt_pending_remove(cmd);
8265 BT_ERR("Error in requesting hci_req_run");
8270 hci_dev_unlock(hdev);
8275 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
8276 s8 alert_type, s8 rssi_dbm)
8278 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
8279 struct hci_conn *conn;
8281 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
8282 conn_handle, alert_type, rssi_dbm);
8284 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
8287 BT_ERR("RSSI alert Error: Device not found for handle");
8290 bacpy(&mgmt_ev.bdaddr, &conn->dst);
8292 if (conn->type == LE_LINK)
8293 mgmt_ev.link_type = 0x01;
8295 mgmt_ev.link_type = 0x00;
8297 mgmt_ev.alert_type = alert_type;
8298 mgmt_ev.rssi_dbm = rssi_dbm;
8300 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
8301 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
8305 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
8307 struct mgmt_pending_cmd *cmd;
8311 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8313 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8317 type = hdev->le_discovery.type;
8319 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8320 mgmt_status(status), &type, sizeof(type));
8321 mgmt_pending_remove(cmd);
8326 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
8329 unsigned long timeout = 0;
8331 BT_DBG("status %d", status);
8335 mgmt_start_le_discovery_failed(hdev, status);
8336 hci_dev_unlock(hdev);
8341 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
8342 hci_dev_unlock(hdev);
8344 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
8345 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
8350 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
8353 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
8354 void *data, u16 len)
8356 struct mgmt_cp_start_le_discovery *cp = data;
8357 struct mgmt_pending_cmd *cmd;
8358 struct hci_cp_le_set_scan_param param_cp;
8359 struct hci_cp_le_set_scan_enable enable_cp;
8360 struct hci_request req;
8361 u8 status, own_addr_type;
8364 BT_DBG("%s", hdev->name);
8366 if (!hdev_is_powered(hdev)) {
8367 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8368 MGMT_STATUS_NOT_POWERED);
8372 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
8373 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8378 if (cp->type != DISCOV_TYPE_LE) {
8379 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8380 MGMT_STATUS_INVALID_PARAMS);
8384 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
8390 hdev->le_discovery.type = cp->type;
8392 hci_req_init(&req, hdev);
8394 status = mgmt_le_support(hdev);
8396 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8398 mgmt_pending_remove(cmd);
8402 /* If controller is scanning, it means the background scanning
8403 * is running. Thus, we should temporarily stop it in order to
8404 * set the discovery scanning parameters.
8406 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
8407 hci_req_add_le_scan_disable(&req, false);
8409 memset(¶m_cp, 0, sizeof(param_cp));
8411 /* All active scans will be done with either a resolvable
8412 * private address (when privacy feature has been enabled)
8413 * or unresolvable private address.
8415 err = hci_update_random_address_sync(hdev, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
8417 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8418 MGMT_STATUS_FAILED);
8419 mgmt_pending_remove(cmd);
8423 param_cp.type = hdev->le_scan_type;
8424 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
8425 param_cp.window = cpu_to_le16(hdev->le_scan_window);
8426 param_cp.own_address_type = own_addr_type;
8427 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
8430 memset(&enable_cp, 0, sizeof(enable_cp));
8431 enable_cp.enable = LE_SCAN_ENABLE;
8432 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
8434 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
8437 err = hci_req_run(&req, start_le_discovery_complete);
8439 mgmt_pending_remove(cmd);
8441 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
8447 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
8449 struct mgmt_pending_cmd *cmd;
8452 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8456 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8457 mgmt_status(status), &hdev->le_discovery.type,
8458 sizeof(hdev->le_discovery.type));
8459 mgmt_pending_remove(cmd);
8464 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
8467 BT_DBG("status %d", status);
8472 mgmt_stop_le_discovery_failed(hdev, status);
8476 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8479 hci_dev_unlock(hdev);
8482 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
8483 void *data, u16 len)
8485 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
8486 struct mgmt_pending_cmd *cmd;
8487 struct hci_request req;
8490 BT_DBG("%s", hdev->name);
8494 if (!hci_le_discovery_active(hdev)) {
8495 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8496 MGMT_STATUS_REJECTED, &mgmt_cp->type,
8497 sizeof(mgmt_cp->type));
8501 if (hdev->le_discovery.type != mgmt_cp->type) {
8502 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8503 MGMT_STATUS_INVALID_PARAMS,
8504 &mgmt_cp->type, sizeof(mgmt_cp->type));
8508 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
8514 hci_req_init(&req, hdev);
8516 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
8517 BT_DBG("unknown le discovery state %u",
8518 hdev->le_discovery.state);
8520 mgmt_pending_remove(cmd);
8521 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8522 MGMT_STATUS_FAILED, &mgmt_cp->type,
8523 sizeof(mgmt_cp->type));
8527 cancel_delayed_work(&hdev->le_scan_disable);
8528 hci_req_add_le_scan_disable(&req, false);
8530 err = hci_req_run(&req, stop_le_discovery_complete);
8532 mgmt_pending_remove(cmd);
8534 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
8537 hci_dev_unlock(hdev);
8541 /* Separate LE discovery */
8542 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
8544 struct mgmt_ev_discovering ev;
8545 struct mgmt_pending_cmd *cmd;
8547 BT_DBG("%s le discovering %u", hdev->name, discovering);
8550 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8552 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8555 u8 type = hdev->le_discovery.type;
8557 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
8559 mgmt_pending_remove(cmd);
8562 memset(&ev, 0, sizeof(ev));
8563 ev.type = hdev->le_discovery.type;
8564 ev.discovering = discovering;
8566 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8569 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
8570 void *data, u16 len)
8574 BT_DBG("%s", hdev->name);
8578 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
8580 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
8582 hci_dev_unlock(hdev);
8587 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
8592 if (min > max || min < 6 || max > 3200)
8595 if (to_multiplier < 10 || to_multiplier > 3200)
8598 if (max >= to_multiplier * 8)
8601 max_latency = (to_multiplier * 8 / max) - 1;
8603 if (latency > 499 || latency > max_latency)
8609 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
8612 struct mgmt_cp_le_conn_update *cp = data;
8614 struct hci_conn *conn;
8615 u16 min, max, latency, supervision_timeout;
8618 if (!hdev_is_powered(hdev))
8619 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8620 MGMT_STATUS_NOT_POWERED);
8622 min = __le16_to_cpu(cp->conn_interval_min);
8623 max = __le16_to_cpu(cp->conn_interval_max);
8624 latency = __le16_to_cpu(cp->conn_latency);
8625 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
8627 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
8628 min, max, latency, supervision_timeout);
8630 err = check_le_conn_update_param(min, max, latency,
8631 supervision_timeout);
8634 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8635 MGMT_STATUS_INVALID_PARAMS);
8639 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
8641 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8642 MGMT_STATUS_NOT_CONNECTED);
8643 hci_dev_unlock(hdev);
8647 hci_dev_unlock(hdev);
8649 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
8651 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
8655 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
8658 struct mgmt_cp_set_manufacturer_data *cp;
8659 struct mgmt_pending_cmd *cmd;
8661 BT_DBG("status 0x%02x", status);
8665 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
8672 mgmt_cmd_status(cmd->sk, hdev->id,
8673 MGMT_OP_SET_MANUFACTURER_DATA,
8674 mgmt_status(status));
8676 mgmt_cmd_complete(cmd->sk, hdev->id,
8677 MGMT_OP_SET_MANUFACTURER_DATA, 0,
8680 mgmt_pending_remove(cmd);
8683 hci_dev_unlock(hdev);
8686 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
8687 void *data, u16 len)
8689 struct mgmt_pending_cmd *cmd;
8690 struct hci_request req;
8691 struct mgmt_cp_set_manufacturer_data *cp = data;
8692 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
8696 BT_DBG("%s", hdev->name);
8698 if (!lmp_bredr_capable(hdev))
8699 return mgmt_cmd_status(sk, hdev->id,
8700 MGMT_OP_SET_MANUFACTURER_DATA,
8701 MGMT_STATUS_NOT_SUPPORTED);
8703 if (cp->data[0] == 0 ||
8704 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
8705 return mgmt_cmd_status(sk, hdev->id,
8706 MGMT_OP_SET_MANUFACTURER_DATA,
8707 MGMT_STATUS_INVALID_PARAMS);
8709 if (cp->data[1] != 0xFF)
8710 return mgmt_cmd_status(sk, hdev->id,
8711 MGMT_OP_SET_MANUFACTURER_DATA,
8712 MGMT_STATUS_NOT_SUPPORTED);
8716 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
8717 err = mgmt_cmd_status(sk, hdev->id,
8718 MGMT_OP_SET_MANUFACTURER_DATA,
8723 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
8730 hci_req_init(&req, hdev);
8732 /* if new data is same as previous data then return command
8735 if (hdev->manufacturer_len == cp->data[0] - 1 &&
8736 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
8737 mgmt_pending_remove(cmd);
8738 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
8739 0, cp, sizeof(*cp));
8744 old_len = hdev->manufacturer_len;
8746 memcpy(old_data, hdev->manufacturer_data, old_len);
8748 hdev->manufacturer_len = cp->data[0] - 1;
8749 if (hdev->manufacturer_len > 0)
8750 memcpy(hdev->manufacturer_data, cp->data + 2,
8751 hdev->manufacturer_len);
8753 hci_update_eir_sync(hdev);
8755 err = hci_req_run(&req, set_manufacturer_data_complete);
8757 mgmt_pending_remove(cmd);
8762 hci_dev_unlock(hdev);
8767 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
8768 hdev->manufacturer_len = old_len;
8769 if (hdev->manufacturer_len > 0)
8770 memcpy(hdev->manufacturer_data, old_data,
8771 hdev->manufacturer_len);
8772 hci_dev_unlock(hdev);
8776 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
8777 void *data, u16 len)
8779 struct mgmt_cp_le_set_scan_params *cp = data;
8780 __u16 interval, window;
8783 BT_DBG("%s", hdev->name);
8785 if (!lmp_le_capable(hdev))
8786 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8787 MGMT_STATUS_NOT_SUPPORTED);
8789 interval = __le16_to_cpu(cp->interval);
8791 if (interval < 0x0004 || interval > 0x4000)
8792 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8793 MGMT_STATUS_INVALID_PARAMS);
8795 window = __le16_to_cpu(cp->window);
8797 if (window < 0x0004 || window > 0x4000)
8798 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8799 MGMT_STATUS_INVALID_PARAMS);
8801 if (window > interval)
8802 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8803 MGMT_STATUS_INVALID_PARAMS);
8807 hdev->le_scan_type = cp->type;
8808 hdev->le_scan_interval = interval;
8809 hdev->le_scan_window = window;
8811 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
8814 /* If background scan is running, restart it so new parameters are
8817 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
8818 hdev->discovery.state == DISCOVERY_STOPPED) {
8819 struct hci_request req;
8821 hci_req_init(&req, hdev);
8823 hci_req_add_le_scan_disable(&req, false);
8824 hci_req_add_le_passive_scan(&req);
8826 hci_req_run(&req, NULL);
8829 hci_dev_unlock(hdev);
8834 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
8835 void *data, u16 len)
8837 struct mgmt_cp_set_voice_setting *cp = data;
8838 struct hci_conn *conn;
8839 struct hci_conn *sco_conn;
8843 BT_DBG("%s", hdev->name);
8845 if (!lmp_bredr_capable(hdev)) {
8846 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
8847 MGMT_STATUS_NOT_SUPPORTED);
8852 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
8854 err = mgmt_cmd_complete(sk, hdev->id,
8855 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
8859 conn->voice_setting = cp->voice_setting;
8860 conn->sco_role = cp->sco_role;
8862 sco_conn = hci_conn_hash_lookup_sco(hdev);
8863 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
8864 BT_ERR("There is other SCO connection.");
8868 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
8869 if (conn->voice_setting == 0x0063)
8870 sco_connect_set_wbc(hdev);
8872 sco_connect_set_nbc(hdev);
8874 if (conn->voice_setting == 0x0063)
8875 sco_connect_set_gw_wbc(hdev);
8877 sco_connect_set_gw_nbc(hdev);
8881 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
8885 hci_dev_unlock(hdev);
8889 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
8890 void *data, u16 len)
8892 struct mgmt_rp_get_adv_tx_power *rp;
8896 BT_DBG("%s", hdev->name);
8900 rp_len = sizeof(*rp);
8901 rp = kmalloc(rp_len, GFP_KERNEL);
8907 rp->adv_tx_power = hdev->adv_tx_power;
8909 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
8915 hci_dev_unlock(hdev);
8920 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
8922 struct mgmt_ev_hardware_error ev;
8924 ev.error_code = err_code;
8925 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
8928 void mgmt_tx_timeout_error(struct hci_dev *hdev)
8930 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
8933 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
8934 u8 state_change_reason, u16 connection_handle)
8936 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
8938 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
8939 adv_instance, state_change_reason, connection_handle);
8941 mgmt_ev.adv_instance = adv_instance;
8942 mgmt_ev.state_change_reason = state_change_reason;
8943 mgmt_ev.connection_handle = connection_handle;
8945 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
8946 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
8949 #endif /* TIZEN_BT */
8951 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8953 if (key->initiator != 0x00 && key->initiator != 0x01)
8956 switch (key->addr.type) {
8957 case BDADDR_LE_PUBLIC:
8960 case BDADDR_LE_RANDOM:
8961 /* Two most significant bits shall be set */
8962 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
8970 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
8971 void *cp_data, u16 len)
8973 struct mgmt_cp_load_long_term_keys *cp = cp_data;
8974 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
8975 sizeof(struct mgmt_ltk_info));
8976 u16 key_count, expected_len;
8979 bt_dev_dbg(hdev, "sock %p", sk);
8981 if (!lmp_le_capable(hdev))
8982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8983 MGMT_STATUS_NOT_SUPPORTED);
8985 key_count = __le16_to_cpu(cp->key_count);
8986 if (key_count > max_key_count) {
8987 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
8989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8990 MGMT_STATUS_INVALID_PARAMS);
8993 expected_len = struct_size(cp, keys, key_count);
8994 if (expected_len != len) {
8995 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
8997 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
8998 MGMT_STATUS_INVALID_PARAMS);
9001 bt_dev_dbg(hdev, "key_count %u", key_count);
9003 for (i = 0; i < key_count; i++) {
9004 struct mgmt_ltk_info *key = &cp->keys[i];
9006 if (!ltk_is_valid(key))
9007 return mgmt_cmd_status(sk, hdev->id,
9008 MGMT_OP_LOAD_LONG_TERM_KEYS,
9009 MGMT_STATUS_INVALID_PARAMS);
9014 hci_smp_ltks_clear(hdev);
9016 for (i = 0; i < key_count; i++) {
9017 struct mgmt_ltk_info *key = &cp->keys[i];
9018 u8 type, authenticated;
9019 u8 addr_type = le_addr_type(key->addr.type);
9021 if (hci_is_blocked_key(hdev,
9022 HCI_BLOCKED_KEY_TYPE_LTK,
9024 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
9029 switch (key->type) {
9030 case MGMT_LTK_UNAUTHENTICATED:
9031 authenticated = 0x00;
9032 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9034 case MGMT_LTK_AUTHENTICATED:
9035 authenticated = 0x01;
9036 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9038 case MGMT_LTK_P256_UNAUTH:
9039 authenticated = 0x00;
9040 type = SMP_LTK_P256;
9042 case MGMT_LTK_P256_AUTH:
9043 authenticated = 0x01;
9044 type = SMP_LTK_P256;
9046 case MGMT_LTK_P256_DEBUG:
9047 authenticated = 0x00;
9048 type = SMP_LTK_P256_DEBUG;
9054 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
9055 if (key->addr.type == BDADDR_BREDR)
9056 addr_type = BDADDR_BREDR;
9058 hci_add_ltk(hdev, &key->addr.bdaddr,
9059 addr_type, type, authenticated,
9060 key->val, key->enc_size, key->ediv, key->rand);
9063 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
9066 hci_dev_unlock(hdev);
9071 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
9073 struct mgmt_pending_cmd *cmd = data;
9074 struct hci_conn *conn = cmd->user_data;
9075 struct mgmt_cp_get_conn_info *cp = cmd->param;
9076 struct mgmt_rp_get_conn_info rp;
9079 bt_dev_dbg(hdev, "err %d", err);
9081 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
9083 status = mgmt_status(err);
9084 if (status == MGMT_STATUS_SUCCESS) {
9085 rp.rssi = conn->rssi;
9086 rp.tx_power = conn->tx_power;
9087 rp.max_tx_power = conn->max_tx_power;
9089 rp.rssi = HCI_RSSI_INVALID;
9090 rp.tx_power = HCI_TX_POWER_INVALID;
9091 rp.max_tx_power = HCI_TX_POWER_INVALID;
9094 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
9097 mgmt_pending_free(cmd);
9100 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
9102 struct mgmt_pending_cmd *cmd = data;
9103 struct mgmt_cp_get_conn_info *cp = cmd->param;
9104 struct hci_conn *conn;
9108 /* Make sure we are still connected */
9109 if (cp->addr.type == BDADDR_BREDR)
9110 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9113 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9115 if (!conn || conn->state != BT_CONNECTED)
9116 return MGMT_STATUS_NOT_CONNECTED;
9118 cmd->user_data = conn;
9119 handle = cpu_to_le16(conn->handle);
9121 /* Refresh RSSI each time */
9122 err = hci_read_rssi_sync(hdev, handle);
9124 /* For LE links TX power does not change thus we don't need to
9125 * query for it once value is known.
9127 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
9128 conn->tx_power == HCI_TX_POWER_INVALID))
9129 err = hci_read_tx_power_sync(hdev, handle, 0x00);
9131 /* Max TX power needs to be read only once per connection */
9132 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
9133 err = hci_read_tx_power_sync(hdev, handle, 0x01);
9138 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
9141 struct mgmt_cp_get_conn_info *cp = data;
9142 struct mgmt_rp_get_conn_info rp;
9143 struct hci_conn *conn;
9144 unsigned long conn_info_age;
9147 bt_dev_dbg(hdev, "sock %p", sk);
9149 memset(&rp, 0, sizeof(rp));
9150 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9151 rp.addr.type = cp->addr.type;
9153 if (!bdaddr_type_is_valid(cp->addr.type))
9154 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9155 MGMT_STATUS_INVALID_PARAMS,
9160 if (!hdev_is_powered(hdev)) {
9161 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9162 MGMT_STATUS_NOT_POWERED, &rp,
9167 if (cp->addr.type == BDADDR_BREDR)
9168 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9171 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9173 if (!conn || conn->state != BT_CONNECTED) {
9174 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9175 MGMT_STATUS_NOT_CONNECTED, &rp,
9180 /* To avoid client trying to guess when to poll again for information we
9181 * calculate conn info age as random value between min/max set in hdev.
9183 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
9184 hdev->conn_info_max_age - 1);
9186 /* Query controller to refresh cached values if they are too old or were
9189 if (time_after(jiffies, conn->conn_info_timestamp +
9190 msecs_to_jiffies(conn_info_age)) ||
9191 !conn->conn_info_timestamp) {
9192 struct mgmt_pending_cmd *cmd;
9194 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
9199 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
9200 cmd, get_conn_info_complete);
9204 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9205 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9208 mgmt_pending_free(cmd);
9213 conn->conn_info_timestamp = jiffies;
9215 /* Cache is valid, just reply with values cached in hci_conn */
9216 rp.rssi = conn->rssi;
9217 rp.tx_power = conn->tx_power;
9218 rp.max_tx_power = conn->max_tx_power;
9220 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9221 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9225 hci_dev_unlock(hdev);
9229 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
9231 struct mgmt_pending_cmd *cmd = data;
9232 struct mgmt_cp_get_clock_info *cp = cmd->param;
9233 struct mgmt_rp_get_clock_info rp;
9234 struct hci_conn *conn = cmd->user_data;
9235 u8 status = mgmt_status(err);
9237 bt_dev_dbg(hdev, "err %d", err);
9239 memset(&rp, 0, sizeof(rp));
9240 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9241 rp.addr.type = cp->addr.type;
9246 rp.local_clock = cpu_to_le32(hdev->clock);
9249 rp.piconet_clock = cpu_to_le32(conn->clock);
9250 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
9254 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
9257 mgmt_pending_free(cmd);
9260 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
9262 struct mgmt_pending_cmd *cmd = data;
9263 struct mgmt_cp_get_clock_info *cp = cmd->param;
9264 struct hci_cp_read_clock hci_cp;
9265 struct hci_conn *conn;
9267 memset(&hci_cp, 0, sizeof(hci_cp));
9268 hci_read_clock_sync(hdev, &hci_cp);
9270 /* Make sure connection still exists */
9271 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
9272 if (!conn || conn->state != BT_CONNECTED)
9273 return MGMT_STATUS_NOT_CONNECTED;
9275 cmd->user_data = conn;
9276 hci_cp.handle = cpu_to_le16(conn->handle);
9277 hci_cp.which = 0x01; /* Piconet clock */
9279 return hci_read_clock_sync(hdev, &hci_cp);
9282 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
9285 struct mgmt_cp_get_clock_info *cp = data;
9286 struct mgmt_rp_get_clock_info rp;
9287 struct mgmt_pending_cmd *cmd;
9288 struct hci_conn *conn;
9291 bt_dev_dbg(hdev, "sock %p", sk);
9293 memset(&rp, 0, sizeof(rp));
9294 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9295 rp.addr.type = cp->addr.type;
9297 if (cp->addr.type != BDADDR_BREDR)
9298 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9299 MGMT_STATUS_INVALID_PARAMS,
9304 if (!hdev_is_powered(hdev)) {
9305 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9306 MGMT_STATUS_NOT_POWERED, &rp,
9311 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9312 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9314 if (!conn || conn->state != BT_CONNECTED) {
9315 err = mgmt_cmd_complete(sk, hdev->id,
9316 MGMT_OP_GET_CLOCK_INFO,
9317 MGMT_STATUS_NOT_CONNECTED,
9325 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
9329 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
9330 get_clock_info_complete);
9333 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9334 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9337 mgmt_pending_free(cmd);
9342 hci_dev_unlock(hdev);
9346 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
9348 struct hci_conn *conn;
9350 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
9354 if (conn->dst_type != type)
9357 if (conn->state != BT_CONNECTED)
9363 /* This function requires the caller holds hdev->lock */
9364 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
9365 u8 addr_type, u8 auto_connect)
9367 struct hci_conn_params *params;
9369 params = hci_conn_params_add(hdev, addr, addr_type);
9373 if (params->auto_connect == auto_connect)
9376 hci_pend_le_list_del_init(params);
9378 switch (auto_connect) {
9379 case HCI_AUTO_CONN_DISABLED:
9380 case HCI_AUTO_CONN_LINK_LOSS:
9381 /* If auto connect is being disabled when we're trying to
9382 * connect to device, keep connecting.
9384 if (params->explicit_connect)
9385 hci_pend_le_list_add(params, &hdev->pend_le_conns);
9387 case HCI_AUTO_CONN_REPORT:
9388 if (params->explicit_connect)
9389 hci_pend_le_list_add(params, &hdev->pend_le_conns);
9391 hci_pend_le_list_add(params, &hdev->pend_le_reports);
9393 case HCI_AUTO_CONN_DIRECT:
9394 case HCI_AUTO_CONN_ALWAYS:
9395 if (!is_connected(hdev, addr, addr_type))
9396 hci_pend_le_list_add(params, &hdev->pend_le_conns);
9400 params->auto_connect = auto_connect;
9402 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
9403 addr, addr_type, auto_connect);
9408 static void device_added(struct sock *sk, struct hci_dev *hdev,
9409 bdaddr_t *bdaddr, u8 type, u8 action)
9411 struct mgmt_ev_device_added ev;
9413 bacpy(&ev.addr.bdaddr, bdaddr);
9414 ev.addr.type = type;
9417 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
9420 static int add_device_sync(struct hci_dev *hdev, void *data)
9422 return hci_update_passive_scan_sync(hdev);
9425 static int add_device(struct sock *sk, struct hci_dev *hdev,
9426 void *data, u16 len)
9428 struct mgmt_cp_add_device *cp = data;
9429 u8 auto_conn, addr_type;
9430 struct hci_conn_params *params;
9432 u32 current_flags = 0;
9433 u32 supported_flags;
9435 bt_dev_dbg(hdev, "sock %p", sk);
9437 if (!bdaddr_type_is_valid(cp->addr.type) ||
9438 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
9439 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9440 MGMT_STATUS_INVALID_PARAMS,
9441 &cp->addr, sizeof(cp->addr));
9443 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
9444 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9445 MGMT_STATUS_INVALID_PARAMS,
9446 &cp->addr, sizeof(cp->addr));
9450 if (cp->addr.type == BDADDR_BREDR) {
9451 /* Only incoming connections action is supported for now */
9452 if (cp->action != 0x01) {
9453 err = mgmt_cmd_complete(sk, hdev->id,
9455 MGMT_STATUS_INVALID_PARAMS,
9456 &cp->addr, sizeof(cp->addr));
9460 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
9466 hci_update_scan(hdev);
9471 addr_type = le_addr_type(cp->addr.type);
9473 if (cp->action == 0x02)
9474 auto_conn = HCI_AUTO_CONN_ALWAYS;
9475 else if (cp->action == 0x01)
9476 auto_conn = HCI_AUTO_CONN_DIRECT;
9478 auto_conn = HCI_AUTO_CONN_REPORT;
9480 /* Kernel internally uses conn_params with resolvable private
9481 * address, but Add Device allows only identity addresses.
9482 * Make sure it is enforced before calling
9483 * hci_conn_params_lookup.
9485 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9486 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9487 MGMT_STATUS_INVALID_PARAMS,
9488 &cp->addr, sizeof(cp->addr));
9492 /* If the connection parameters don't exist for this device,
9493 * they will be created and configured with defaults.
9495 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
9497 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9498 MGMT_STATUS_FAILED, &cp->addr,
9502 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9505 current_flags = params->flags;
9508 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
9513 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
9514 supported_flags = hdev->conn_flags;
9515 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
9516 supported_flags, current_flags);
9518 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9519 MGMT_STATUS_SUCCESS, &cp->addr,
9523 hci_dev_unlock(hdev);
9527 static void device_removed(struct sock *sk, struct hci_dev *hdev,
9528 bdaddr_t *bdaddr, u8 type)
9530 struct mgmt_ev_device_removed ev;
9532 bacpy(&ev.addr.bdaddr, bdaddr);
9533 ev.addr.type = type;
9535 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
9538 static int remove_device_sync(struct hci_dev *hdev, void *data)
9540 return hci_update_passive_scan_sync(hdev);
9543 static int remove_device(struct sock *sk, struct hci_dev *hdev,
9544 void *data, u16 len)
9546 struct mgmt_cp_remove_device *cp = data;
9549 bt_dev_dbg(hdev, "sock %p", sk);
9553 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9554 struct hci_conn_params *params;
9557 if (!bdaddr_type_is_valid(cp->addr.type)) {
9558 err = mgmt_cmd_complete(sk, hdev->id,
9559 MGMT_OP_REMOVE_DEVICE,
9560 MGMT_STATUS_INVALID_PARAMS,
9561 &cp->addr, sizeof(cp->addr));
9565 if (cp->addr.type == BDADDR_BREDR) {
9566 err = hci_bdaddr_list_del(&hdev->accept_list,
9570 err = mgmt_cmd_complete(sk, hdev->id,
9571 MGMT_OP_REMOVE_DEVICE,
9572 MGMT_STATUS_INVALID_PARAMS,
9578 hci_update_scan(hdev);
9580 device_removed(sk, hdev, &cp->addr.bdaddr,
9585 addr_type = le_addr_type(cp->addr.type);
9587 /* Kernel internally uses conn_params with resolvable private
9588 * address, but Remove Device allows only identity addresses.
9589 * Make sure it is enforced before calling
9590 * hci_conn_params_lookup.
9592 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9593 err = mgmt_cmd_complete(sk, hdev->id,
9594 MGMT_OP_REMOVE_DEVICE,
9595 MGMT_STATUS_INVALID_PARAMS,
9596 &cp->addr, sizeof(cp->addr));
9600 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9603 err = mgmt_cmd_complete(sk, hdev->id,
9604 MGMT_OP_REMOVE_DEVICE,
9605 MGMT_STATUS_INVALID_PARAMS,
9606 &cp->addr, sizeof(cp->addr));
9610 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
9611 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
9612 err = mgmt_cmd_complete(sk, hdev->id,
9613 MGMT_OP_REMOVE_DEVICE,
9614 MGMT_STATUS_INVALID_PARAMS,
9615 &cp->addr, sizeof(cp->addr));
9619 hci_conn_params_free(params);
9621 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
9623 struct hci_conn_params *p, *tmp;
9624 struct bdaddr_list *b, *btmp;
9626 if (cp->addr.type) {
9627 err = mgmt_cmd_complete(sk, hdev->id,
9628 MGMT_OP_REMOVE_DEVICE,
9629 MGMT_STATUS_INVALID_PARAMS,
9630 &cp->addr, sizeof(cp->addr));
9634 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
9635 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
9640 hci_update_scan(hdev);
9642 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
9643 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
9645 device_removed(sk, hdev, &p->addr, p->addr_type);
9646 if (p->explicit_connect) {
9647 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
9650 hci_conn_params_free(p);
9653 bt_dev_dbg(hdev, "All LE connection parameters were removed");
9656 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
9659 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
9660 MGMT_STATUS_SUCCESS, &cp->addr,
9663 hci_dev_unlock(hdev);
9667 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
9670 struct mgmt_cp_load_conn_param *cp = data;
9671 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
9672 sizeof(struct mgmt_conn_param));
9673 u16 param_count, expected_len;
9676 if (!lmp_le_capable(hdev))
9677 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9678 MGMT_STATUS_NOT_SUPPORTED);
9680 param_count = __le16_to_cpu(cp->param_count);
9681 if (param_count > max_param_count) {
9682 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
9684 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9685 MGMT_STATUS_INVALID_PARAMS);
9688 expected_len = struct_size(cp, params, param_count);
9689 if (expected_len != len) {
9690 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
9692 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9693 MGMT_STATUS_INVALID_PARAMS);
9696 bt_dev_dbg(hdev, "param_count %u", param_count);
9700 hci_conn_params_clear_disabled(hdev);
9702 for (i = 0; i < param_count; i++) {
9703 struct mgmt_conn_param *param = &cp->params[i];
9704 struct hci_conn_params *hci_param;
9705 u16 min, max, latency, timeout;
9708 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
9711 if (param->addr.type == BDADDR_LE_PUBLIC) {
9712 addr_type = ADDR_LE_DEV_PUBLIC;
9713 } else if (param->addr.type == BDADDR_LE_RANDOM) {
9714 addr_type = ADDR_LE_DEV_RANDOM;
9716 bt_dev_err(hdev, "ignoring invalid connection parameters");
9720 min = le16_to_cpu(param->min_interval);
9721 max = le16_to_cpu(param->max_interval);
9722 latency = le16_to_cpu(param->latency);
9723 timeout = le16_to_cpu(param->timeout);
9725 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
9726 min, max, latency, timeout);
9728 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
9729 bt_dev_err(hdev, "ignoring invalid connection parameters");
9733 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
9736 bt_dev_err(hdev, "failed to add connection parameters");
9740 hci_param->conn_min_interval = min;
9741 hci_param->conn_max_interval = max;
9742 hci_param->conn_latency = latency;
9743 hci_param->supervision_timeout = timeout;
9746 hci_dev_unlock(hdev);
9748 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
9752 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
9753 void *data, u16 len)
9755 struct mgmt_cp_set_external_config *cp = data;
9759 bt_dev_dbg(hdev, "sock %p", sk);
9761 if (hdev_is_powered(hdev))
9762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9763 MGMT_STATUS_REJECTED);
9765 if (cp->config != 0x00 && cp->config != 0x01)
9766 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9767 MGMT_STATUS_INVALID_PARAMS);
9769 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
9770 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9771 MGMT_STATUS_NOT_SUPPORTED);
9776 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
9778 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
9780 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
9787 err = new_options(hdev, sk);
9789 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
9790 mgmt_index_removed(hdev);
9792 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
9793 hci_dev_set_flag(hdev, HCI_CONFIG);
9794 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9796 queue_work(hdev->req_workqueue, &hdev->power_on);
9798 set_bit(HCI_RAW, &hdev->flags);
9799 mgmt_index_added(hdev);
9804 hci_dev_unlock(hdev);
9808 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
9809 void *data, u16 len)
9811 struct mgmt_cp_set_public_address *cp = data;
9815 bt_dev_dbg(hdev, "sock %p", sk);
9817 if (hdev_is_powered(hdev))
9818 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9819 MGMT_STATUS_REJECTED);
9821 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
9822 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9823 MGMT_STATUS_INVALID_PARAMS);
9825 if (!hdev->set_bdaddr)
9826 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9827 MGMT_STATUS_NOT_SUPPORTED);
9831 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
9832 bacpy(&hdev->public_addr, &cp->bdaddr);
9834 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
9841 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
9842 err = new_options(hdev, sk);
9844 if (is_configured(hdev)) {
9845 mgmt_index_removed(hdev);
9847 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
9849 hci_dev_set_flag(hdev, HCI_CONFIG);
9850 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9852 queue_work(hdev->req_workqueue, &hdev->power_on);
9856 hci_dev_unlock(hdev);
9861 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
9865 struct mgmt_ev_device_name_update *ev = (void *)buf;
9871 bacpy(&ev->addr.bdaddr, bdaddr);
9872 ev->addr.type = BDADDR_BREDR;
9874 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9877 ev->eir_len = cpu_to_le16(eir_len);
9879 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
9880 sizeof(*ev) + eir_len, NULL);
9883 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9884 u8 link_type, u8 addr_type, u8 status)
9886 struct mgmt_ev_conn_update_failed ev;
9888 bacpy(&ev.addr.bdaddr, bdaddr);
9889 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9892 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
9893 &ev, sizeof(ev), NULL);
9896 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
9897 u8 link_type, u8 addr_type, u16 conn_interval,
9898 u16 conn_latency, u16 supervision_timeout)
9900 struct mgmt_ev_conn_updated ev;
9902 bacpy(&ev.addr.bdaddr, bdaddr);
9903 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9904 ev.conn_interval = cpu_to_le16(conn_interval);
9905 ev.conn_latency = cpu_to_le16(conn_latency);
9906 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
9908 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
9909 &ev, sizeof(ev), NULL);
9912 /* le device found event - Pass adv type */
9913 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9914 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
9915 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
9918 struct mgmt_ev_le_device_found *ev = (void *)buf;
9921 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
9924 /* Make sure that the buffer is big enough. The 5 extra bytes
9925 * are for the potential CoD field.
9927 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9930 memset(buf, 0, sizeof(buf));
9932 bacpy(&ev->addr.bdaddr, bdaddr);
9933 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9935 ev->flags = cpu_to_le32(flags);
9936 ev->adv_type = adv_type;
9939 memcpy(ev->eir, eir, eir_len);
9941 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
9942 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9945 if (scan_rsp_len > 0)
9946 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9948 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9949 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9951 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9955 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
9958 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
9959 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
9960 u8 *h192, *r192, *h256, *r256;
9961 struct mgmt_pending_cmd *cmd = data;
9962 struct sk_buff *skb = cmd->skb;
9963 u8 status = mgmt_status(err);
9966 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
9971 status = MGMT_STATUS_FAILED;
9972 else if (IS_ERR(skb))
9973 status = mgmt_status(PTR_ERR(skb));
9975 status = mgmt_status(skb->data[0]);
9978 bt_dev_dbg(hdev, "status %u", status);
9980 mgmt_cp = cmd->param;
9983 status = mgmt_status(status);
9990 } else if (!bredr_sc_enabled(hdev)) {
9991 struct hci_rp_read_local_oob_data *rp;
9993 if (skb->len != sizeof(*rp)) {
9994 status = MGMT_STATUS_FAILED;
9997 status = MGMT_STATUS_SUCCESS;
9998 rp = (void *)skb->data;
10000 eir_len = 5 + 18 + 18;
10007 struct hci_rp_read_local_oob_ext_data *rp;
10009 if (skb->len != sizeof(*rp)) {
10010 status = MGMT_STATUS_FAILED;
10013 status = MGMT_STATUS_SUCCESS;
10014 rp = (void *)skb->data;
10016 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
10017 eir_len = 5 + 18 + 18;
10021 eir_len = 5 + 18 + 18 + 18 + 18;
10022 h192 = rp->hash192;
10023 r192 = rp->rand192;
10026 h256 = rp->hash256;
10027 r256 = rp->rand256;
10031 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
10038 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
10039 hdev->dev_class, 3);
10041 if (h192 && r192) {
10042 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10043 EIR_SSP_HASH_C192, h192, 16);
10044 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10045 EIR_SSP_RAND_R192, r192, 16);
10048 if (h256 && r256) {
10049 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10050 EIR_SSP_HASH_C256, h256, 16);
10051 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10052 EIR_SSP_RAND_R256, r256, 16);
10056 mgmt_rp->type = mgmt_cp->type;
10057 mgmt_rp->eir_len = cpu_to_le16(eir_len);
10059 err = mgmt_cmd_complete(cmd->sk, hdev->id,
10060 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
10061 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
10062 if (err < 0 || status)
10065 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
10067 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10068 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
10069 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
10071 if (skb && !IS_ERR(skb))
10075 mgmt_pending_remove(cmd);
10078 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
10079 struct mgmt_cp_read_local_oob_ext_data *cp)
10081 struct mgmt_pending_cmd *cmd;
10084 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
10089 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
10090 read_local_oob_ext_data_complete);
10093 mgmt_pending_remove(cmd);
10100 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
10101 void *data, u16 data_len)
10103 struct mgmt_cp_read_local_oob_ext_data *cp = data;
10104 struct mgmt_rp_read_local_oob_ext_data *rp;
10107 u8 status, flags, role, addr[7], hash[16], rand[16];
10110 bt_dev_dbg(hdev, "sock %p", sk);
10112 if (hdev_is_powered(hdev)) {
10113 switch (cp->type) {
10114 case BIT(BDADDR_BREDR):
10115 status = mgmt_bredr_support(hdev);
10121 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10122 status = mgmt_le_support(hdev);
10126 eir_len = 9 + 3 + 18 + 18 + 3;
10129 status = MGMT_STATUS_INVALID_PARAMS;
10134 status = MGMT_STATUS_NOT_POWERED;
10138 rp_len = sizeof(*rp) + eir_len;
10139 rp = kmalloc(rp_len, GFP_ATOMIC);
10143 if (!status && !lmp_ssp_capable(hdev)) {
10144 status = MGMT_STATUS_NOT_SUPPORTED;
10151 hci_dev_lock(hdev);
10154 switch (cp->type) {
10155 case BIT(BDADDR_BREDR):
10156 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10157 err = read_local_ssp_oob_req(hdev, sk, cp);
10158 hci_dev_unlock(hdev);
10162 status = MGMT_STATUS_FAILED;
10165 eir_len = eir_append_data(rp->eir, eir_len,
10167 hdev->dev_class, 3);
10170 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10171 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
10172 smp_generate_oob(hdev, hash, rand) < 0) {
10173 hci_dev_unlock(hdev);
10174 status = MGMT_STATUS_FAILED;
10178 /* This should return the active RPA, but since the RPA
10179 * is only programmed on demand, it is really hard to fill
10180 * this in at the moment. For now disallow retrieving
10181 * local out-of-band data when privacy is in use.
10183 * Returning the identity address will not help here since
10184 * pairing happens before the identity resolving key is
10185 * known and thus the connection establishment happens
10186 * based on the RPA and not the identity address.
10188 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
10189 hci_dev_unlock(hdev);
10190 status = MGMT_STATUS_REJECTED;
10194 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
10195 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
10196 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
10197 bacmp(&hdev->static_addr, BDADDR_ANY))) {
10198 memcpy(addr, &hdev->static_addr, 6);
10201 memcpy(addr, &hdev->bdaddr, 6);
10205 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
10206 addr, sizeof(addr));
10208 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
10213 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
10214 &role, sizeof(role));
10216 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
10217 eir_len = eir_append_data(rp->eir, eir_len,
10219 hash, sizeof(hash));
10221 eir_len = eir_append_data(rp->eir, eir_len,
10223 rand, sizeof(rand));
10226 flags = mgmt_get_adv_discov_flags(hdev);
10228 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
10229 flags |= LE_AD_NO_BREDR;
10231 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
10232 &flags, sizeof(flags));
10236 hci_dev_unlock(hdev);
10238 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
10240 status = MGMT_STATUS_SUCCESS;
10243 rp->type = cp->type;
10244 rp->eir_len = cpu_to_le16(eir_len);
10246 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
10247 status, rp, sizeof(*rp) + eir_len);
10248 if (err < 0 || status)
10251 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10252 rp, sizeof(*rp) + eir_len,
10253 HCI_MGMT_OOB_DATA_EVENTS, sk);
10261 static u32 get_supported_adv_flags(struct hci_dev *hdev)
10265 flags |= MGMT_ADV_FLAG_CONNECTABLE;
10266 flags |= MGMT_ADV_FLAG_DISCOV;
10267 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
10268 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
10269 flags |= MGMT_ADV_FLAG_APPEARANCE;
10270 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
10271 flags |= MGMT_ADV_PARAM_DURATION;
10272 flags |= MGMT_ADV_PARAM_TIMEOUT;
10273 flags |= MGMT_ADV_PARAM_INTERVALS;
10274 flags |= MGMT_ADV_PARAM_TX_POWER;
10275 flags |= MGMT_ADV_PARAM_SCAN_RSP;
10277 /* In extended adv TX_POWER returned from Set Adv Param
10278 * will be always valid.
10280 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
10281 flags |= MGMT_ADV_FLAG_TX_POWER;
10283 if (ext_adv_capable(hdev)) {
10284 flags |= MGMT_ADV_FLAG_SEC_1M;
10285 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
10286 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
10288 if (le_2m_capable(hdev))
10289 flags |= MGMT_ADV_FLAG_SEC_2M;
10291 if (le_coded_capable(hdev))
10292 flags |= MGMT_ADV_FLAG_SEC_CODED;
10298 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
10299 void *data, u16 data_len)
10301 struct mgmt_rp_read_adv_features *rp;
10304 struct adv_info *adv_instance;
10305 u32 supported_flags;
10308 bt_dev_dbg(hdev, "sock %p", sk);
10310 if (!lmp_le_capable(hdev))
10311 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10312 MGMT_STATUS_REJECTED);
10314 hci_dev_lock(hdev);
10316 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
10317 rp = kmalloc(rp_len, GFP_ATOMIC);
10319 hci_dev_unlock(hdev);
10323 supported_flags = get_supported_adv_flags(hdev);
10325 rp->supported_flags = cpu_to_le32(supported_flags);
10326 rp->max_adv_data_len = max_adv_len(hdev);
10327 rp->max_scan_rsp_len = max_adv_len(hdev);
10328 rp->max_instances = hdev->le_num_of_adv_sets;
10329 rp->num_instances = hdev->adv_instance_cnt;
10331 instance = rp->instance;
10332 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
10333 /* Only instances 1-le_num_of_adv_sets are externally visible */
10334 if (adv_instance->instance <= hdev->adv_instance_cnt) {
10335 *instance = adv_instance->instance;
10338 rp->num_instances--;
10343 hci_dev_unlock(hdev);
10345 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10346 MGMT_STATUS_SUCCESS, rp, rp_len);
10353 static u8 calculate_name_len(struct hci_dev *hdev)
10355 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
10357 return eir_append_local_name(hdev, buf, 0);
10360 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
10363 u8 max_len = max_adv_len(hdev);
10366 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
10367 MGMT_ADV_FLAG_LIMITED_DISCOV |
10368 MGMT_ADV_FLAG_MANAGED_FLAGS))
10371 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
10374 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
10375 max_len -= calculate_name_len(hdev);
10377 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
10384 static bool flags_managed(u32 adv_flags)
10386 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
10387 MGMT_ADV_FLAG_LIMITED_DISCOV |
10388 MGMT_ADV_FLAG_MANAGED_FLAGS);
10391 static bool tx_power_managed(u32 adv_flags)
10393 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
10396 static bool name_managed(u32 adv_flags)
10398 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
10401 static bool appearance_managed(u32 adv_flags)
10403 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
10406 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
10407 u8 len, bool is_adv_data)
10412 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
10417 /* Make sure that the data is correctly formatted. */
10418 for (i = 0; i < len; i += (cur_len + 1)) {
10424 if (data[i + 1] == EIR_FLAGS &&
10425 (!is_adv_data || flags_managed(adv_flags)))
10428 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
10431 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
10434 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
10437 if (data[i + 1] == EIR_APPEARANCE &&
10438 appearance_managed(adv_flags))
10441 /* If the current field length would exceed the total data
10442 * length, then it's invalid.
10444 if (i + cur_len >= len)
10451 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
10453 u32 supported_flags, phy_flags;
10455 /* The current implementation only supports a subset of the specified
10456 * flags. Also need to check mutual exclusiveness of sec flags.
10458 supported_flags = get_supported_adv_flags(hdev);
10459 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
10460 if (adv_flags & ~supported_flags ||
10461 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
10467 static bool adv_busy(struct hci_dev *hdev)
10469 return pending_find(MGMT_OP_SET_LE, hdev);
10472 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
10475 struct adv_info *adv, *n;
10477 bt_dev_dbg(hdev, "err %d", err);
10479 hci_dev_lock(hdev);
10481 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
10488 adv->pending = false;
10492 instance = adv->instance;
10494 if (hdev->cur_adv_instance == instance)
10495 cancel_adv_timeout(hdev);
10497 hci_remove_adv_instance(hdev, instance);
10498 mgmt_advertising_removed(sk, hdev, instance);
10501 hci_dev_unlock(hdev);
10504 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
10506 struct mgmt_pending_cmd *cmd = data;
10507 struct mgmt_cp_add_advertising *cp = cmd->param;
10508 struct mgmt_rp_add_advertising rp;
10510 memset(&rp, 0, sizeof(rp));
10512 rp.instance = cp->instance;
10515 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10518 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10519 mgmt_status(err), &rp, sizeof(rp));
10521 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10523 mgmt_pending_free(cmd);
10526 static int add_advertising_sync(struct hci_dev *hdev, void *data)
10528 struct mgmt_pending_cmd *cmd = data;
10529 struct mgmt_cp_add_advertising *cp = cmd->param;
10531 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10534 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10535 void *data, u16 data_len)
10537 struct mgmt_cp_add_advertising *cp = data;
10538 struct mgmt_rp_add_advertising rp;
10541 u16 timeout, duration;
10542 unsigned int prev_instance_cnt;
10543 u8 schedule_instance = 0;
10544 struct adv_info *adv, *next_instance;
10546 struct mgmt_pending_cmd *cmd;
10548 bt_dev_dbg(hdev, "sock %p", sk);
10550 status = mgmt_le_support(hdev);
10552 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10555 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10556 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10557 MGMT_STATUS_INVALID_PARAMS);
10559 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10560 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10561 MGMT_STATUS_INVALID_PARAMS);
10563 flags = __le32_to_cpu(cp->flags);
10564 timeout = __le16_to_cpu(cp->timeout);
10565 duration = __le16_to_cpu(cp->duration);
10567 if (!requested_adv_flags_are_valid(hdev, flags))
10568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10569 MGMT_STATUS_INVALID_PARAMS);
10571 hci_dev_lock(hdev);
10573 if (timeout && !hdev_is_powered(hdev)) {
10574 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10575 MGMT_STATUS_REJECTED);
10579 if (adv_busy(hdev)) {
10580 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10585 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
10586 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
10587 cp->scan_rsp_len, false)) {
10588 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10589 MGMT_STATUS_INVALID_PARAMS);
10593 prev_instance_cnt = hdev->adv_instance_cnt;
10595 adv = hci_add_adv_instance(hdev, cp->instance, flags,
10596 cp->adv_data_len, cp->data,
10598 cp->data + cp->adv_data_len,
10600 HCI_ADV_TX_POWER_NO_PREFERENCE,
10601 hdev->le_adv_min_interval,
10602 hdev->le_adv_max_interval, 0);
10604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10605 MGMT_STATUS_FAILED);
10609 /* Only trigger an advertising added event if a new instance was
10612 if (hdev->adv_instance_cnt > prev_instance_cnt)
10613 mgmt_advertising_added(sk, hdev, cp->instance);
10615 if (hdev->cur_adv_instance == cp->instance) {
10616 /* If the currently advertised instance is being changed then
10617 * cancel the current advertising and schedule the next
10618 * instance. If there is only one instance then the overridden
10619 * advertising data will be visible right away.
10621 cancel_adv_timeout(hdev);
10623 next_instance = hci_get_next_instance(hdev, cp->instance);
10625 schedule_instance = next_instance->instance;
10626 } else if (!hdev->adv_instance_timeout) {
10627 /* Immediately advertise the new instance if no other
10628 * instance is currently being advertised.
10630 schedule_instance = cp->instance;
10633 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
10634 * there is no instance to be advertised then we have no HCI
10635 * communication to make. Simply return.
10637 if (!hdev_is_powered(hdev) ||
10638 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10639 !schedule_instance) {
10640 rp.instance = cp->instance;
10641 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10642 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10646 /* We're good to go, update advertising data, parameters, and start
10649 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
10656 cp->instance = schedule_instance;
10658 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
10659 add_advertising_complete);
10661 mgmt_pending_free(cmd);
10664 hci_dev_unlock(hdev);
10669 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
10672 struct mgmt_pending_cmd *cmd = data;
10673 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
10674 struct mgmt_rp_add_ext_adv_params rp;
10675 struct adv_info *adv;
10678 BT_DBG("%s", hdev->name);
10680 hci_dev_lock(hdev);
10682 adv = hci_find_adv_instance(hdev, cp->instance);
10686 rp.instance = cp->instance;
10687 rp.tx_power = adv->tx_power;
10689 /* While we're at it, inform userspace of the available space for this
10690 * advertisement, given the flags that will be used.
10692 flags = __le32_to_cpu(cp->flags);
10693 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10694 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10697 /* If this advertisement was previously advertising and we
10698 * failed to update it, we signal that it has been removed and
10699 * delete its structure
10702 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
10704 hci_remove_adv_instance(hdev, cp->instance);
10706 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10709 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10710 mgmt_status(err), &rp, sizeof(rp));
10715 mgmt_pending_free(cmd);
10717 hci_dev_unlock(hdev);
10720 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
10722 struct mgmt_pending_cmd *cmd = data;
10723 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
10725 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
10728 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
10729 void *data, u16 data_len)
10731 struct mgmt_cp_add_ext_adv_params *cp = data;
10732 struct mgmt_rp_add_ext_adv_params rp;
10733 struct mgmt_pending_cmd *cmd = NULL;
10734 struct adv_info *adv;
10735 u32 flags, min_interval, max_interval;
10736 u16 timeout, duration;
10741 BT_DBG("%s", hdev->name);
10743 status = mgmt_le_support(hdev);
10745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10748 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10750 MGMT_STATUS_INVALID_PARAMS);
10752 /* The purpose of breaking add_advertising into two separate MGMT calls
10753 * for params and data is to allow more parameters to be added to this
10754 * structure in the future. For this reason, we verify that we have the
10755 * bare minimum structure we know of when the interface was defined. Any
10756 * extra parameters we don't know about will be ignored in this request.
10758 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
10759 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10760 MGMT_STATUS_INVALID_PARAMS);
10762 flags = __le32_to_cpu(cp->flags);
10764 if (!requested_adv_flags_are_valid(hdev, flags))
10765 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10766 MGMT_STATUS_INVALID_PARAMS);
10768 hci_dev_lock(hdev);
10770 /* In new interface, we require that we are powered to register */
10771 if (!hdev_is_powered(hdev)) {
10772 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10773 MGMT_STATUS_REJECTED);
10777 if (adv_busy(hdev)) {
10778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10783 /* Parse defined parameters from request, use defaults otherwise */
10784 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
10785 __le16_to_cpu(cp->timeout) : 0;
10787 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
10788 __le16_to_cpu(cp->duration) :
10789 hdev->def_multi_adv_rotation_duration;
10791 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10792 __le32_to_cpu(cp->min_interval) :
10793 hdev->le_adv_min_interval;
10795 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10796 __le32_to_cpu(cp->max_interval) :
10797 hdev->le_adv_max_interval;
10799 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
10801 HCI_ADV_TX_POWER_NO_PREFERENCE;
10803 /* Create advertising instance with no advertising or response data */
10804 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
10805 timeout, duration, tx_power, min_interval,
10809 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10810 MGMT_STATUS_FAILED);
10814 /* Submit request for advertising params if ext adv available */
10815 if (ext_adv_capable(hdev)) {
10816 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
10820 hci_remove_adv_instance(hdev, cp->instance);
10824 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
10825 add_ext_adv_params_complete);
10827 mgmt_pending_free(cmd);
10829 rp.instance = cp->instance;
10830 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
10831 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10832 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10833 err = mgmt_cmd_complete(sk, hdev->id,
10834 MGMT_OP_ADD_EXT_ADV_PARAMS,
10835 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10839 hci_dev_unlock(hdev);
10844 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
10846 struct mgmt_pending_cmd *cmd = data;
10847 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10848 struct mgmt_rp_add_advertising rp;
10850 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10852 memset(&rp, 0, sizeof(rp));
10854 rp.instance = cp->instance;
10857 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10860 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10861 mgmt_status(err), &rp, sizeof(rp));
10863 mgmt_pending_free(cmd);
10866 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
10868 struct mgmt_pending_cmd *cmd = data;
10869 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10872 if (ext_adv_capable(hdev)) {
10873 err = hci_update_adv_data_sync(hdev, cp->instance);
10877 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
10881 return hci_enable_ext_advertising_sync(hdev, cp->instance);
10884 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10887 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
10890 struct mgmt_cp_add_ext_adv_data *cp = data;
10891 struct mgmt_rp_add_ext_adv_data rp;
10892 u8 schedule_instance = 0;
10893 struct adv_info *next_instance;
10894 struct adv_info *adv_instance;
10896 struct mgmt_pending_cmd *cmd;
10898 BT_DBG("%s", hdev->name);
10900 hci_dev_lock(hdev);
10902 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10904 if (!adv_instance) {
10905 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10906 MGMT_STATUS_INVALID_PARAMS);
10910 /* In new interface, we require that we are powered to register */
10911 if (!hdev_is_powered(hdev)) {
10912 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10913 MGMT_STATUS_REJECTED);
10914 goto clear_new_instance;
10917 if (adv_busy(hdev)) {
10918 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10920 goto clear_new_instance;
10923 /* Validate new data */
10924 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
10925 cp->adv_data_len, true) ||
10926 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
10927 cp->adv_data_len, cp->scan_rsp_len, false)) {
10928 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10929 MGMT_STATUS_INVALID_PARAMS);
10930 goto clear_new_instance;
10933 /* Set the data in the advertising instance */
10934 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10935 cp->data, cp->scan_rsp_len,
10936 cp->data + cp->adv_data_len);
10938 /* If using software rotation, determine next instance to use */
10939 if (hdev->cur_adv_instance == cp->instance) {
10940 /* If the currently advertised instance is being changed
10941 * then cancel the current advertising and schedule the
10942 * next instance. If there is only one instance then the
10943 * overridden advertising data will be visible right
10946 cancel_adv_timeout(hdev);
10948 next_instance = hci_get_next_instance(hdev, cp->instance);
10950 schedule_instance = next_instance->instance;
10951 } else if (!hdev->adv_instance_timeout) {
10952 /* Immediately advertise the new instance if no other
10953 * instance is currently being advertised.
10955 schedule_instance = cp->instance;
10958 /* If the HCI_ADVERTISING flag is set or there is no instance to
10959 * be advertised then we have no HCI communication to make.
10962 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
10963 if (adv_instance->pending) {
10964 mgmt_advertising_added(sk, hdev, cp->instance);
10965 adv_instance->pending = false;
10967 rp.instance = cp->instance;
10968 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10969 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10973 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
10977 goto clear_new_instance;
10980 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
10981 add_ext_adv_data_complete);
10983 mgmt_pending_free(cmd);
10984 goto clear_new_instance;
10987 /* We were successful in updating data, so trigger advertising_added
10988 * event if this is an instance that wasn't previously advertising. If
10989 * a failure occurs in the requests we initiated, we will remove the
10990 * instance again in add_advertising_complete
10992 if (adv_instance->pending)
10993 mgmt_advertising_added(sk, hdev, cp->instance);
10997 clear_new_instance:
10998 hci_remove_adv_instance(hdev, cp->instance);
11001 hci_dev_unlock(hdev);
11006 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
11009 struct mgmt_pending_cmd *cmd = data;
11010 struct mgmt_cp_remove_advertising *cp = cmd->param;
11011 struct mgmt_rp_remove_advertising rp;
11013 bt_dev_dbg(hdev, "err %d", err);
11015 memset(&rp, 0, sizeof(rp));
11016 rp.instance = cp->instance;
11019 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11022 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11023 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11025 mgmt_pending_free(cmd);
11028 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
11030 struct mgmt_pending_cmd *cmd = data;
11031 struct mgmt_cp_remove_advertising *cp = cmd->param;
11034 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
11038 if (list_empty(&hdev->adv_instances))
11039 err = hci_disable_advertising_sync(hdev);
11044 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
11045 void *data, u16 data_len)
11047 struct mgmt_cp_remove_advertising *cp = data;
11048 struct mgmt_pending_cmd *cmd;
11051 bt_dev_dbg(hdev, "sock %p", sk);
11053 hci_dev_lock(hdev);
11055 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
11056 err = mgmt_cmd_status(sk, hdev->id,
11057 MGMT_OP_REMOVE_ADVERTISING,
11058 MGMT_STATUS_INVALID_PARAMS);
11062 if (pending_find(MGMT_OP_SET_LE, hdev)) {
11063 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11068 if (list_empty(&hdev->adv_instances)) {
11069 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11070 MGMT_STATUS_INVALID_PARAMS);
11074 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
11081 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
11082 remove_advertising_complete);
11084 mgmt_pending_free(cmd);
11087 hci_dev_unlock(hdev);
11092 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
11093 void *data, u16 data_len)
11095 struct mgmt_cp_get_adv_size_info *cp = data;
11096 struct mgmt_rp_get_adv_size_info rp;
11097 u32 flags, supported_flags;
11099 bt_dev_dbg(hdev, "sock %p", sk);
11101 if (!lmp_le_capable(hdev))
11102 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11103 MGMT_STATUS_REJECTED);
11105 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11106 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11107 MGMT_STATUS_INVALID_PARAMS);
11109 flags = __le32_to_cpu(cp->flags);
11111 /* The current implementation only supports a subset of the specified
11114 supported_flags = get_supported_adv_flags(hdev);
11115 if (flags & ~supported_flags)
11116 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11117 MGMT_STATUS_INVALID_PARAMS);
11119 rp.instance = cp->instance;
11120 rp.flags = cp->flags;
11121 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11122 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11124 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11125 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11128 static const struct hci_mgmt_handler mgmt_handlers[] = {
11129 { NULL }, /* 0x0000 (no command) */
11130 { read_version, MGMT_READ_VERSION_SIZE,
11132 HCI_MGMT_UNTRUSTED },
11133 { read_commands, MGMT_READ_COMMANDS_SIZE,
11135 HCI_MGMT_UNTRUSTED },
11136 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
11138 HCI_MGMT_UNTRUSTED },
11139 { read_controller_info, MGMT_READ_INFO_SIZE,
11140 HCI_MGMT_UNTRUSTED },
11141 { set_powered, MGMT_SETTING_SIZE },
11142 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
11143 { set_connectable, MGMT_SETTING_SIZE },
11144 { set_fast_connectable, MGMT_SETTING_SIZE },
11145 { set_bondable, MGMT_SETTING_SIZE },
11146 { set_link_security, MGMT_SETTING_SIZE },
11147 { set_ssp, MGMT_SETTING_SIZE },
11148 { set_hs, MGMT_SETTING_SIZE },
11149 { set_le, MGMT_SETTING_SIZE },
11150 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
11151 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
11152 { add_uuid, MGMT_ADD_UUID_SIZE },
11153 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
11154 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
11155 HCI_MGMT_VAR_LEN },
11156 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
11157 HCI_MGMT_VAR_LEN },
11158 { disconnect, MGMT_DISCONNECT_SIZE },
11159 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
11160 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
11161 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
11162 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
11163 { pair_device, MGMT_PAIR_DEVICE_SIZE },
11164 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
11165 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
11166 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
11167 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
11168 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
11169 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
11170 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
11171 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
11172 HCI_MGMT_VAR_LEN },
11173 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
11174 { start_discovery, MGMT_START_DISCOVERY_SIZE },
11175 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
11176 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
11177 { block_device, MGMT_BLOCK_DEVICE_SIZE },
11178 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
11179 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
11180 { set_advertising, MGMT_SETTING_SIZE },
11181 { set_bredr, MGMT_SETTING_SIZE },
11182 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
11183 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
11184 { set_secure_conn, MGMT_SETTING_SIZE },
11185 { set_debug_keys, MGMT_SETTING_SIZE },
11186 { set_privacy, MGMT_SET_PRIVACY_SIZE },
11187 { load_irks, MGMT_LOAD_IRKS_SIZE,
11188 HCI_MGMT_VAR_LEN },
11189 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
11190 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
11191 { add_device, MGMT_ADD_DEVICE_SIZE },
11192 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
11193 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
11194 HCI_MGMT_VAR_LEN },
11195 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
11197 HCI_MGMT_UNTRUSTED },
11198 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
11199 HCI_MGMT_UNCONFIGURED |
11200 HCI_MGMT_UNTRUSTED },
11201 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
11202 HCI_MGMT_UNCONFIGURED },
11203 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
11204 HCI_MGMT_UNCONFIGURED },
11205 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
11206 HCI_MGMT_VAR_LEN },
11207 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
11208 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
11210 HCI_MGMT_UNTRUSTED },
11211 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
11212 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
11213 HCI_MGMT_VAR_LEN },
11214 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
11215 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
11216 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
11217 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
11218 HCI_MGMT_UNTRUSTED },
11219 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
11220 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
11221 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
11222 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
11223 HCI_MGMT_VAR_LEN },
11224 { set_wideband_speech, MGMT_SETTING_SIZE },
11225 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
11226 HCI_MGMT_UNTRUSTED },
11227 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
11228 HCI_MGMT_UNTRUSTED |
11229 HCI_MGMT_HDEV_OPTIONAL },
11230 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
11232 HCI_MGMT_HDEV_OPTIONAL },
11233 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
11234 HCI_MGMT_UNTRUSTED },
11235 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
11236 HCI_MGMT_VAR_LEN },
11237 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
11238 HCI_MGMT_UNTRUSTED },
11239 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
11240 HCI_MGMT_VAR_LEN },
11241 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
11242 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
11243 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
11244 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
11245 HCI_MGMT_VAR_LEN },
11246 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
11247 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
11248 HCI_MGMT_VAR_LEN },
11249 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
11250 HCI_MGMT_VAR_LEN },
11251 { add_adv_patterns_monitor_rssi,
11252 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
11253 HCI_MGMT_VAR_LEN },
11254 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
11255 HCI_MGMT_VAR_LEN },
11256 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
11257 { mesh_send, MGMT_MESH_SEND_SIZE,
11258 HCI_MGMT_VAR_LEN },
11259 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
11263 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
11264 { NULL }, /* 0x0000 (no command) */
11265 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
11266 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
11267 HCI_MGMT_VAR_LEN },
11268 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
11269 HCI_MGMT_VAR_LEN },
11270 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
11271 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
11272 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
11273 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
11274 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
11275 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
11276 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
11277 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
11278 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
11279 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
11280 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
11281 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
11282 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
11283 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
11287 void mgmt_index_added(struct hci_dev *hdev)
11289 struct mgmt_ev_ext_index ev;
11291 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11294 switch (hdev->dev_type) {
11296 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11297 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
11298 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11301 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
11302 HCI_MGMT_INDEX_EVENTS);
11313 ev.bus = hdev->bus;
11315 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
11316 HCI_MGMT_EXT_INDEX_EVENTS);
11319 void mgmt_index_removed(struct hci_dev *hdev)
11321 struct mgmt_ev_ext_index ev;
11322 u8 status = MGMT_STATUS_INVALID_INDEX;
11324 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11327 switch (hdev->dev_type) {
11329 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11331 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11332 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
11333 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11336 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
11337 HCI_MGMT_INDEX_EVENTS);
11348 ev.bus = hdev->bus;
11350 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
11351 HCI_MGMT_EXT_INDEX_EVENTS);
11353 /* Cancel any remaining timed work */
11354 if (!hci_dev_test_flag(hdev, HCI_MGMT))
11356 cancel_delayed_work_sync(&hdev->discov_off);
11357 cancel_delayed_work_sync(&hdev->service_cache);
11358 cancel_delayed_work_sync(&hdev->rpa_expired);
11361 void mgmt_power_on(struct hci_dev *hdev, int err)
11363 struct cmd_lookup match = { NULL, hdev };
11365 bt_dev_dbg(hdev, "err %d", err);
11367 hci_dev_lock(hdev);
11370 restart_le_actions(hdev);
11371 hci_update_passive_scan(hdev);
11374 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11376 new_settings(hdev, match.sk);
11379 sock_put(match.sk);
11381 hci_dev_unlock(hdev);
11384 void __mgmt_power_off(struct hci_dev *hdev)
11386 struct cmd_lookup match = { NULL, hdev };
11387 u8 status, zero_cod[] = { 0, 0, 0 };
11389 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11391 /* If the power off is because of hdev unregistration let
11392 * use the appropriate INVALID_INDEX status. Otherwise use
11393 * NOT_POWERED. We cover both scenarios here since later in
11394 * mgmt_index_removed() any hci_conn callbacks will have already
11395 * been triggered, potentially causing misleading DISCONNECTED
11396 * status responses.
11398 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
11399 status = MGMT_STATUS_INVALID_INDEX;
11401 status = MGMT_STATUS_NOT_POWERED;
11403 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11405 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
11406 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
11407 zero_cod, sizeof(zero_cod),
11408 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11409 ext_info_changed(hdev, NULL);
11412 new_settings(hdev, match.sk);
11415 sock_put(match.sk);
11418 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
11420 struct mgmt_pending_cmd *cmd;
11423 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11427 if (err == -ERFKILL)
11428 status = MGMT_STATUS_RFKILLED;
11430 status = MGMT_STATUS_FAILED;
11432 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11434 mgmt_pending_remove(cmd);
11437 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11440 struct mgmt_ev_new_link_key ev;
11442 memset(&ev, 0, sizeof(ev));
11444 ev.store_hint = persistent;
11445 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11446 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
11447 ev.key.type = key->type;
11448 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11449 ev.key.pin_len = key->pin_len;
11451 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
11454 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11456 switch (ltk->type) {
11458 case SMP_LTK_RESPONDER:
11459 if (ltk->authenticated)
11460 return MGMT_LTK_AUTHENTICATED;
11461 return MGMT_LTK_UNAUTHENTICATED;
11463 if (ltk->authenticated)
11464 return MGMT_LTK_P256_AUTH;
11465 return MGMT_LTK_P256_UNAUTH;
11466 case SMP_LTK_P256_DEBUG:
11467 return MGMT_LTK_P256_DEBUG;
11470 return MGMT_LTK_UNAUTHENTICATED;
11473 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11475 struct mgmt_ev_new_long_term_key ev;
11477 memset(&ev, 0, sizeof(ev));
11479 /* Devices using resolvable or non-resolvable random addresses
11480 * without providing an identity resolving key don't require
11481 * to store long term keys. Their addresses will change the
11482 * next time around.
11484 * Only when a remote device provides an identity address
11485 * make sure the long term key is stored. If the remote
11486 * identity is known, the long term keys are internally
11487 * mapped to the identity address. So allow static random
11488 * and public addresses here.
11490 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11491 (key->bdaddr.b[5] & 0xc0) != 0xc0)
11492 ev.store_hint = 0x00;
11494 ev.store_hint = persistent;
11496 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11497 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
11498 ev.key.type = mgmt_ltk_type(key);
11499 ev.key.enc_size = key->enc_size;
11500 ev.key.ediv = key->ediv;
11501 ev.key.rand = key->rand;
11503 if (key->type == SMP_LTK)
11504 ev.key.initiator = 1;
11506 /* Make sure we copy only the significant bytes based on the
11507 * encryption key size, and set the rest of the value to zeroes.
11509 memcpy(ev.key.val, key->val, key->enc_size);
11510 memset(ev.key.val + key->enc_size, 0,
11511 sizeof(ev.key.val) - key->enc_size);
11513 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
11516 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11518 struct mgmt_ev_new_irk ev;
11520 memset(&ev, 0, sizeof(ev));
11522 ev.store_hint = persistent;
11524 bacpy(&ev.rpa, &irk->rpa);
11525 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11526 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
11527 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11529 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
11532 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11535 struct mgmt_ev_new_csrk ev;
11537 memset(&ev, 0, sizeof(ev));
11539 /* Devices using resolvable or non-resolvable random addresses
11540 * without providing an identity resolving key don't require
11541 * to store signature resolving keys. Their addresses will change
11542 * the next time around.
11544 * Only when a remote device provides an identity address
11545 * make sure the signature resolving key is stored. So allow
11546 * static random and public addresses here.
11548 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11549 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11550 ev.store_hint = 0x00;
11552 ev.store_hint = persistent;
11554 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11555 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
11556 ev.key.type = csrk->type;
11557 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11559 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
11562 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11563 u8 bdaddr_type, u8 store_hint, u16 min_interval,
11564 u16 max_interval, u16 latency, u16 timeout)
11566 struct mgmt_ev_new_conn_param ev;
11568 if (!hci_is_identity_address(bdaddr, bdaddr_type))
11571 memset(&ev, 0, sizeof(ev));
11572 bacpy(&ev.addr.bdaddr, bdaddr);
11573 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
11574 ev.store_hint = store_hint;
11575 ev.min_interval = cpu_to_le16(min_interval);
11576 ev.max_interval = cpu_to_le16(max_interval);
11577 ev.latency = cpu_to_le16(latency);
11578 ev.timeout = cpu_to_le16(timeout);
11580 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
11583 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
11584 u8 *name, u8 name_len)
11586 struct sk_buff *skb;
11587 struct mgmt_ev_device_connected *ev;
11591 /* allocate buff for LE or BR/EDR adv */
11592 if (conn->le_adv_data_len > 0)
11593 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11594 sizeof(*ev) + conn->le_adv_data_len);
11596 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11597 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
11598 eir_precalc_len(sizeof(conn->dev_class)));
11600 ev = skb_put(skb, sizeof(*ev));
11601 bacpy(&ev->addr.bdaddr, &conn->dst);
11602 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11605 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
11607 ev->flags = __cpu_to_le32(flags);
11609 /* We must ensure that the EIR Data fields are ordered and
11610 * unique. Keep it simple for now and avoid the problem by not
11611 * adding any BR/EDR data to the LE adv.
11613 if (conn->le_adv_data_len > 0) {
11614 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
11615 eir_len = conn->le_adv_data_len;
11618 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
11620 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
11621 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
11622 conn->dev_class, sizeof(conn->dev_class));
11625 ev->eir_len = cpu_to_le16(eir_len);
11627 mgmt_event_skb(skb, NULL);
11630 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11632 struct sock **sk = data;
11634 cmd->cmd_complete(cmd, 0);
11639 mgmt_pending_remove(cmd);
11642 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11644 struct hci_dev *hdev = data;
11645 struct mgmt_cp_unpair_device *cp = cmd->param;
11647 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
11649 cmd->cmd_complete(cmd, 0);
11650 mgmt_pending_remove(cmd);
11653 bool mgmt_powering_down(struct hci_dev *hdev)
11655 struct mgmt_pending_cmd *cmd;
11656 struct mgmt_mode *cp;
11658 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11669 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
11670 u8 link_type, u8 addr_type, u8 reason,
11671 bool mgmt_connected)
11673 struct mgmt_ev_device_disconnected ev;
11674 struct sock *sk = NULL;
11676 /* The connection is still in hci_conn_hash so test for 1
11677 * instead of 0 to know if this is the last one.
11679 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11680 cancel_delayed_work(&hdev->power_off);
11681 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11684 if (!mgmt_connected)
11687 if (link_type != ACL_LINK && link_type != LE_LINK)
11690 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
11692 bacpy(&ev.addr.bdaddr, bdaddr);
11693 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11694 ev.reason = reason;
11696 /* Report disconnects due to suspend */
11697 if (hdev->suspended)
11698 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
11700 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
11705 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11709 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
11710 u8 link_type, u8 addr_type, u8 status)
11712 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
11713 struct mgmt_cp_disconnect *cp;
11714 struct mgmt_pending_cmd *cmd;
11716 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11719 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
11725 if (bacmp(bdaddr, &cp->addr.bdaddr))
11728 if (cp->addr.type != bdaddr_type)
11731 cmd->cmd_complete(cmd, mgmt_status(status));
11732 mgmt_pending_remove(cmd);
11735 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11736 u8 addr_type, u8 status)
11738 struct mgmt_ev_connect_failed ev;
11740 /* The connection is still in hci_conn_hash so test for 1
11741 * instead of 0 to know if this is the last one.
11743 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11744 cancel_delayed_work(&hdev->power_off);
11745 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11748 bacpy(&ev.addr.bdaddr, bdaddr);
11749 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11750 ev.status = mgmt_status(status);
11752 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
11755 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
11757 struct mgmt_ev_pin_code_request ev;
11759 bacpy(&ev.addr.bdaddr, bdaddr);
11760 ev.addr.type = BDADDR_BREDR;
11761 ev.secure = secure;
11763 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
11766 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11769 struct mgmt_pending_cmd *cmd;
11771 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
11775 cmd->cmd_complete(cmd, mgmt_status(status));
11776 mgmt_pending_remove(cmd);
11779 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11782 struct mgmt_pending_cmd *cmd;
11784 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
11788 cmd->cmd_complete(cmd, mgmt_status(status));
11789 mgmt_pending_remove(cmd);
11792 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11793 u8 link_type, u8 addr_type, u32 value,
11796 struct mgmt_ev_user_confirm_request ev;
11798 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11800 bacpy(&ev.addr.bdaddr, bdaddr);
11801 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11802 ev.confirm_hint = confirm_hint;
11803 ev.value = cpu_to_le32(value);
11805 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
11809 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11810 u8 link_type, u8 addr_type)
11812 struct mgmt_ev_user_passkey_request ev;
11814 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11816 bacpy(&ev.addr.bdaddr, bdaddr);
11817 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11819 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
11823 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11824 u8 link_type, u8 addr_type, u8 status,
11827 struct mgmt_pending_cmd *cmd;
11829 cmd = pending_find(opcode, hdev);
11833 cmd->cmd_complete(cmd, mgmt_status(status));
11834 mgmt_pending_remove(cmd);
11839 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11840 u8 link_type, u8 addr_type, u8 status)
11842 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11843 status, MGMT_OP_USER_CONFIRM_REPLY);
11846 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11847 u8 link_type, u8 addr_type, u8 status)
11849 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11851 MGMT_OP_USER_CONFIRM_NEG_REPLY);
11854 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11855 u8 link_type, u8 addr_type, u8 status)
11857 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11858 status, MGMT_OP_USER_PASSKEY_REPLY);
11861 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11862 u8 link_type, u8 addr_type, u8 status)
11864 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11866 MGMT_OP_USER_PASSKEY_NEG_REPLY);
11869 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11870 u8 link_type, u8 addr_type, u32 passkey,
11873 struct mgmt_ev_passkey_notify ev;
11875 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11877 bacpy(&ev.addr.bdaddr, bdaddr);
11878 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11879 ev.passkey = __cpu_to_le32(passkey);
11880 ev.entered = entered;
11882 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
11885 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11887 struct mgmt_ev_auth_failed ev;
11888 struct mgmt_pending_cmd *cmd;
11889 u8 status = mgmt_status(hci_status);
11891 bacpy(&ev.addr.bdaddr, &conn->dst);
11892 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11893 ev.status = status;
11895 cmd = find_pairing(conn);
11897 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11898 cmd ? cmd->sk : NULL);
11901 cmd->cmd_complete(cmd, status);
11902 mgmt_pending_remove(cmd);
11906 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11908 struct cmd_lookup match = { NULL, hdev };
11912 u8 mgmt_err = mgmt_status(status);
11913 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11914 cmd_status_rsp, &mgmt_err);
11918 if (test_bit(HCI_AUTH, &hdev->flags))
11919 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11921 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11923 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11927 new_settings(hdev, match.sk);
11930 sock_put(match.sk);
11933 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11935 struct cmd_lookup *match = data;
11937 if (match->sk == NULL) {
11938 match->sk = cmd->sk;
11939 sock_hold(match->sk);
11943 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11946 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11948 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11949 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11950 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11953 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11954 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11955 ext_info_changed(hdev, NULL);
11959 sock_put(match.sk);
11962 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
11964 struct mgmt_cp_set_local_name ev;
11965 struct mgmt_pending_cmd *cmd;
11970 memset(&ev, 0, sizeof(ev));
11971 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
11972 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
11974 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
11976 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
11978 /* If this is a HCI command related to powering on the
11979 * HCI dev don't send any mgmt signals.
11981 if (pending_find(MGMT_OP_SET_POWERED, hdev))
11985 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
11986 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
11987 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
11990 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
11994 for (i = 0; i < uuid_count; i++) {
11995 if (!memcmp(uuid, uuids[i], 16))
12002 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
12006 while (parsed < eir_len) {
12007 u8 field_len = eir[0];
12011 if (field_len == 0)
12014 if (eir_len - parsed < field_len + 1)
12018 case EIR_UUID16_ALL:
12019 case EIR_UUID16_SOME:
12020 for (i = 0; i + 3 <= field_len; i += 2) {
12021 memcpy(uuid, bluetooth_base_uuid, 16);
12022 uuid[13] = eir[i + 3];
12023 uuid[12] = eir[i + 2];
12024 if (has_uuid(uuid, uuid_count, uuids))
12028 case EIR_UUID32_ALL:
12029 case EIR_UUID32_SOME:
12030 for (i = 0; i + 5 <= field_len; i += 4) {
12031 memcpy(uuid, bluetooth_base_uuid, 16);
12032 uuid[15] = eir[i + 5];
12033 uuid[14] = eir[i + 4];
12034 uuid[13] = eir[i + 3];
12035 uuid[12] = eir[i + 2];
12036 if (has_uuid(uuid, uuid_count, uuids))
12040 case EIR_UUID128_ALL:
12041 case EIR_UUID128_SOME:
12042 for (i = 0; i + 17 <= field_len; i += 16) {
12043 memcpy(uuid, eir + i + 2, 16);
12044 if (has_uuid(uuid, uuid_count, uuids))
12050 parsed += field_len + 1;
12051 eir += field_len + 1;
12057 static void restart_le_scan(struct hci_dev *hdev)
12059 /* If controller is not scanning we are done. */
12060 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
12063 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
12064 hdev->discovery.scan_start +
12065 hdev->discovery.scan_duration))
12068 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
12069 DISCOV_LE_RESTART_DELAY);
12072 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
12073 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
12075 /* If a RSSI threshold has been specified, and
12076 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
12077 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
12078 * is set, let it through for further processing, as we might need to
12079 * restart the scan.
12081 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
12082 * the results are also dropped.
12084 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12085 (rssi == HCI_RSSI_INVALID ||
12086 (rssi < hdev->discovery.rssi &&
12087 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
12090 if (hdev->discovery.uuid_count != 0) {
12091 /* If a list of UUIDs is provided in filter, results with no
12092 * matching UUID should be dropped.
12094 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
12095 hdev->discovery.uuids) &&
12096 !eir_has_uuids(scan_rsp, scan_rsp_len,
12097 hdev->discovery.uuid_count,
12098 hdev->discovery.uuids))
12102 /* If duplicate filtering does not report RSSI changes, then restart
12103 * scanning to ensure updated result with updated RSSI values.
12105 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
12106 restart_le_scan(hdev);
12108 /* Validate RSSI value against the RSSI threshold once more. */
12109 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12110 rssi < hdev->discovery.rssi)
12117 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
12118 bdaddr_t *bdaddr, u8 addr_type)
12120 struct mgmt_ev_adv_monitor_device_lost ev;
12122 ev.monitor_handle = cpu_to_le16(handle);
12123 bacpy(&ev.addr.bdaddr, bdaddr);
12124 ev.addr.type = addr_type;
12126 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
12130 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
12131 struct sk_buff *skb,
12132 struct sock *skip_sk,
12135 struct sk_buff *advmon_skb;
12136 size_t advmon_skb_len;
12137 __le16 *monitor_handle;
12142 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
12143 sizeof(struct mgmt_ev_device_found)) + skb->len;
12144 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
12149 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
12150 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
12151 * store monitor_handle of the matched monitor.
12153 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
12154 *monitor_handle = cpu_to_le16(handle);
12155 skb_put_data(advmon_skb, skb->data, skb->len);
12157 mgmt_event_skb(advmon_skb, skip_sk);
12160 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
12161 bdaddr_t *bdaddr, bool report_device,
12162 struct sk_buff *skb,
12163 struct sock *skip_sk)
12165 struct monitored_device *dev, *tmp;
12166 bool matched = false;
12167 bool notified = false;
12169 /* We have received the Advertisement Report because:
12170 * 1. the kernel has initiated active discovery
12171 * 2. if not, we have pend_le_reports > 0 in which case we are doing
12173 * 3. if none of the above is true, we have one or more active
12174 * Advertisement Monitor
12176 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
12177 * and report ONLY one advertisement per device for the matched Monitor
12178 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12180 * For case 3, since we are not active scanning and all advertisements
12181 * received are due to a matched Advertisement Monitor, report all
12182 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12184 if (report_device && !hdev->advmon_pend_notify) {
12185 mgmt_event_skb(skb, skip_sk);
12189 hdev->advmon_pend_notify = false;
12191 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
12192 if (!bacmp(&dev->bdaddr, bdaddr)) {
12195 if (!dev->notified) {
12196 mgmt_send_adv_monitor_device_found(hdev, skb,
12200 dev->notified = true;
12204 if (!dev->notified)
12205 hdev->advmon_pend_notify = true;
12208 if (!report_device &&
12209 ((matched && !notified) || !msft_monitor_supported(hdev))) {
12210 /* Handle 0 indicates that we are not active scanning and this
12211 * is a subsequent advertisement report for an already matched
12212 * Advertisement Monitor or the controller offloading support
12213 * is not available.
12215 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
12219 mgmt_event_skb(skb, skip_sk);
12224 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
12225 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
12226 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12229 struct sk_buff *skb;
12230 struct mgmt_ev_mesh_device_found *ev;
12233 if (!hdev->mesh_ad_types[0])
12236 /* Scan for requested AD types */
12238 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
12239 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12240 if (!hdev->mesh_ad_types[j])
12243 if (hdev->mesh_ad_types[j] == eir[i + 1])
12249 if (scan_rsp_len > 0) {
12250 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
12251 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12252 if (!hdev->mesh_ad_types[j])
12255 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
12264 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
12265 sizeof(*ev) + eir_len + scan_rsp_len);
12269 ev = skb_put(skb, sizeof(*ev));
12271 bacpy(&ev->addr.bdaddr, bdaddr);
12272 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
12274 ev->flags = cpu_to_le32(flags);
12275 ev->instant = cpu_to_le64(instant);
12278 /* Copy EIR or advertising data into event */
12279 skb_put_data(skb, eir, eir_len);
12281 if (scan_rsp_len > 0)
12282 /* Append scan response data to event */
12283 skb_put_data(skb, scan_rsp, scan_rsp_len);
12285 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12287 mgmt_event_skb(skb, NULL);
12290 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12291 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
12292 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12295 struct sk_buff *skb;
12296 struct mgmt_ev_device_found *ev;
12297 bool report_device = hci_discovery_active(hdev);
12299 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
12300 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
12301 eir, eir_len, scan_rsp, scan_rsp_len,
12304 /* Don't send events for a non-kernel initiated discovery. With
12305 * LE one exception is if we have pend_le_reports > 0 in which
12306 * case we're doing passive scanning and want these events.
12308 if (!hci_discovery_active(hdev)) {
12309 if (link_type == ACL_LINK)
12311 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
12312 report_device = true;
12313 else if (!hci_is_adv_monitoring(hdev))
12317 if (hdev->discovery.result_filtering) {
12318 /* We are using service discovery */
12319 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
12324 if (hdev->discovery.limited) {
12325 /* Check for limited discoverable bit */
12327 if (!(dev_class[1] & 0x20))
12330 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
12331 if (!flags || !(flags[0] & LE_AD_LIMITED))
12336 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
12337 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12338 sizeof(*ev) + eir_len + scan_rsp_len + 5);
12342 ev = skb_put(skb, sizeof(*ev));
12344 /* In case of device discovery with BR/EDR devices (pre 1.2), the
12345 * RSSI value was reported as 0 when not available. This behavior
12346 * is kept when using device discovery. This is required for full
12347 * backwards compatibility with the API.
12349 * However when using service discovery, the value 127 will be
12350 * returned when the RSSI is not available.
12352 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
12353 link_type == ACL_LINK)
12356 bacpy(&ev->addr.bdaddr, bdaddr);
12357 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12359 ev->flags = cpu_to_le32(flags);
12362 /* Copy EIR or advertising data into event */
12363 skb_put_data(skb, eir, eir_len);
12365 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
12368 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
12370 skb_put_data(skb, eir_cod, sizeof(eir_cod));
12373 if (scan_rsp_len > 0)
12374 /* Append scan response data to event */
12375 skb_put_data(skb, scan_rsp, scan_rsp_len);
12377 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12379 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
12382 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12383 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
12385 struct sk_buff *skb;
12386 struct mgmt_ev_device_found *ev;
12390 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12391 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
12393 ev = skb_put(skb, sizeof(*ev));
12394 bacpy(&ev->addr.bdaddr, bdaddr);
12395 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12399 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12401 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
12403 ev->eir_len = cpu_to_le16(eir_len);
12404 ev->flags = cpu_to_le32(flags);
12406 mgmt_event_skb(skb, NULL);
12409 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
12411 struct mgmt_ev_discovering ev;
12413 bt_dev_dbg(hdev, "discovering %u", discovering);
12415 memset(&ev, 0, sizeof(ev));
12416 ev.type = hdev->discovery.type;
12417 ev.discovering = discovering;
12419 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
12422 void mgmt_suspending(struct hci_dev *hdev, u8 state)
12424 struct mgmt_ev_controller_suspend ev;
12426 ev.suspend_state = state;
12427 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
12430 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
12433 struct mgmt_ev_controller_resume ev;
12435 ev.wake_reason = reason;
12437 bacpy(&ev.addr.bdaddr, bdaddr);
12438 ev.addr.type = addr_type;
12440 memset(&ev.addr, 0, sizeof(ev.addr));
12443 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
12446 static struct hci_mgmt_chan chan = {
12447 .channel = HCI_CHANNEL_CONTROL,
12448 .handler_count = ARRAY_SIZE(mgmt_handlers),
12449 .handlers = mgmt_handlers,
12451 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
12452 .tizen_handlers = tizen_mgmt_handlers,
12454 .hdev_init = mgmt_init_hdev,
12457 int mgmt_init(void)
12459 return hci_mgmt_chan_register(&chan);
12462 void mgmt_exit(void)
12464 hci_mgmt_chan_unregister(&chan);
12467 void mgmt_cleanup(struct sock *sk)
12469 struct mgmt_mesh_tx *mesh_tx;
12470 struct hci_dev *hdev;
12472 read_lock(&hci_dev_list_lock);
12474 list_for_each_entry(hdev, &hci_dev_list, list) {
12476 mesh_tx = mgmt_mesh_next(hdev, sk);
12479 mesh_send_complete(hdev, mesh_tx, true);
12483 read_unlock(&hci_dev_list_lock);