2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
48 #define MGMT_VERSION 1
49 #define MGMT_REVISION 22
51 static const u16 mgmt_commands[] = {
52 MGMT_OP_READ_INDEX_LIST,
55 MGMT_OP_SET_DISCOVERABLE,
56 MGMT_OP_SET_CONNECTABLE,
57 MGMT_OP_SET_FAST_CONNECTABLE,
59 MGMT_OP_SET_LINK_SECURITY,
63 MGMT_OP_SET_DEV_CLASS,
64 MGMT_OP_SET_LOCAL_NAME,
67 MGMT_OP_LOAD_LINK_KEYS,
68 MGMT_OP_LOAD_LONG_TERM_KEYS,
70 MGMT_OP_GET_CONNECTIONS,
71 MGMT_OP_PIN_CODE_REPLY,
72 MGMT_OP_PIN_CODE_NEG_REPLY,
73 MGMT_OP_SET_IO_CAPABILITY,
75 MGMT_OP_CANCEL_PAIR_DEVICE,
76 MGMT_OP_UNPAIR_DEVICE,
77 MGMT_OP_USER_CONFIRM_REPLY,
78 MGMT_OP_USER_CONFIRM_NEG_REPLY,
79 MGMT_OP_USER_PASSKEY_REPLY,
80 MGMT_OP_USER_PASSKEY_NEG_REPLY,
81 MGMT_OP_READ_LOCAL_OOB_DATA,
82 MGMT_OP_ADD_REMOTE_OOB_DATA,
83 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
84 MGMT_OP_START_DISCOVERY,
85 MGMT_OP_STOP_DISCOVERY,
88 MGMT_OP_UNBLOCK_DEVICE,
89 MGMT_OP_SET_DEVICE_ID,
90 MGMT_OP_SET_ADVERTISING,
92 MGMT_OP_SET_STATIC_ADDRESS,
93 MGMT_OP_SET_SCAN_PARAMS,
94 MGMT_OP_SET_SECURE_CONN,
95 MGMT_OP_SET_DEBUG_KEYS,
98 MGMT_OP_GET_CONN_INFO,
99 MGMT_OP_GET_CLOCK_INFO,
101 MGMT_OP_REMOVE_DEVICE,
102 MGMT_OP_LOAD_CONN_PARAM,
103 MGMT_OP_READ_UNCONF_INDEX_LIST,
104 MGMT_OP_READ_CONFIG_INFO,
105 MGMT_OP_SET_EXTERNAL_CONFIG,
106 MGMT_OP_SET_PUBLIC_ADDRESS,
107 MGMT_OP_START_SERVICE_DISCOVERY,
108 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
109 MGMT_OP_READ_EXT_INDEX_LIST,
110 MGMT_OP_READ_ADV_FEATURES,
111 MGMT_OP_ADD_ADVERTISING,
112 MGMT_OP_REMOVE_ADVERTISING,
113 MGMT_OP_GET_ADV_SIZE_INFO,
114 MGMT_OP_START_LIMITED_DISCOVERY,
115 MGMT_OP_READ_EXT_INFO,
116 MGMT_OP_SET_APPEARANCE,
117 MGMT_OP_GET_PHY_CONFIGURATION,
118 MGMT_OP_SET_PHY_CONFIGURATION,
119 MGMT_OP_SET_BLOCKED_KEYS,
120 MGMT_OP_SET_WIDEBAND_SPEECH,
121 MGMT_OP_READ_CONTROLLER_CAP,
122 MGMT_OP_READ_EXP_FEATURES_INFO,
123 MGMT_OP_SET_EXP_FEATURE,
124 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
125 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
126 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
127 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
128 MGMT_OP_GET_DEVICE_FLAGS,
129 MGMT_OP_SET_DEVICE_FLAGS,
130 MGMT_OP_READ_ADV_MONITOR_FEATURES,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
132 MGMT_OP_REMOVE_ADV_MONITOR,
133 MGMT_OP_ADD_EXT_ADV_PARAMS,
134 MGMT_OP_ADD_EXT_ADV_DATA,
135 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 MGMT_OP_SET_MESH_RECEIVER,
137 MGMT_OP_MESH_READ_FEATURES,
139 MGMT_OP_MESH_SEND_CANCEL,
142 static const u16 mgmt_events[] = {
143 MGMT_EV_CONTROLLER_ERROR,
145 MGMT_EV_INDEX_REMOVED,
146 MGMT_EV_NEW_SETTINGS,
147 MGMT_EV_CLASS_OF_DEV_CHANGED,
148 MGMT_EV_LOCAL_NAME_CHANGED,
149 MGMT_EV_NEW_LINK_KEY,
150 MGMT_EV_NEW_LONG_TERM_KEY,
151 MGMT_EV_DEVICE_CONNECTED,
152 MGMT_EV_DEVICE_DISCONNECTED,
153 MGMT_EV_CONNECT_FAILED,
154 MGMT_EV_PIN_CODE_REQUEST,
155 MGMT_EV_USER_CONFIRM_REQUEST,
156 MGMT_EV_USER_PASSKEY_REQUEST,
158 MGMT_EV_DEVICE_FOUND,
160 MGMT_EV_DEVICE_BLOCKED,
161 MGMT_EV_DEVICE_UNBLOCKED,
162 MGMT_EV_DEVICE_UNPAIRED,
163 MGMT_EV_PASSKEY_NOTIFY,
166 MGMT_EV_DEVICE_ADDED,
167 MGMT_EV_DEVICE_REMOVED,
168 MGMT_EV_NEW_CONN_PARAM,
169 MGMT_EV_UNCONF_INDEX_ADDED,
170 MGMT_EV_UNCONF_INDEX_REMOVED,
171 MGMT_EV_NEW_CONFIG_OPTIONS,
172 MGMT_EV_EXT_INDEX_ADDED,
173 MGMT_EV_EXT_INDEX_REMOVED,
174 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
175 MGMT_EV_ADVERTISING_ADDED,
176 MGMT_EV_ADVERTISING_REMOVED,
177 MGMT_EV_EXT_INFO_CHANGED,
178 MGMT_EV_PHY_CONFIGURATION_CHANGED,
179 MGMT_EV_EXP_FEATURE_CHANGED,
180 MGMT_EV_DEVICE_FLAGS_CHANGED,
181 MGMT_EV_ADV_MONITOR_ADDED,
182 MGMT_EV_ADV_MONITOR_REMOVED,
183 MGMT_EV_CONTROLLER_SUSPEND,
184 MGMT_EV_CONTROLLER_RESUME,
185 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
186 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
189 static const u16 mgmt_untrusted_commands[] = {
190 MGMT_OP_READ_INDEX_LIST,
192 MGMT_OP_READ_UNCONF_INDEX_LIST,
193 MGMT_OP_READ_CONFIG_INFO,
194 MGMT_OP_READ_EXT_INDEX_LIST,
195 MGMT_OP_READ_EXT_INFO,
196 MGMT_OP_READ_CONTROLLER_CAP,
197 MGMT_OP_READ_EXP_FEATURES_INFO,
198 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
199 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
202 static const u16 mgmt_untrusted_events[] = {
204 MGMT_EV_INDEX_REMOVED,
205 MGMT_EV_NEW_SETTINGS,
206 MGMT_EV_CLASS_OF_DEV_CHANGED,
207 MGMT_EV_LOCAL_NAME_CHANGED,
208 MGMT_EV_UNCONF_INDEX_ADDED,
209 MGMT_EV_UNCONF_INDEX_REMOVED,
210 MGMT_EV_NEW_CONFIG_OPTIONS,
211 MGMT_EV_EXT_INDEX_ADDED,
212 MGMT_EV_EXT_INDEX_REMOVED,
213 MGMT_EV_EXT_INFO_CHANGED,
214 MGMT_EV_EXP_FEATURE_CHANGED,
217 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
219 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
220 "\x00\x00\x00\x00\x00\x00\x00\x00"
222 /* HCI to MGMT error code conversion table */
223 static const u8 mgmt_status_table[] = {
225 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
226 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
227 MGMT_STATUS_FAILED, /* Hardware Failure */
228 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
229 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
230 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
231 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
232 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
233 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
234 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
235 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
236 MGMT_STATUS_BUSY, /* Command Disallowed */
237 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
238 MGMT_STATUS_REJECTED, /* Rejected Security */
239 MGMT_STATUS_REJECTED, /* Rejected Personal */
240 MGMT_STATUS_TIMEOUT, /* Host Timeout */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
242 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
243 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
244 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
245 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
246 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
247 MGMT_STATUS_BUSY, /* Repeated Attempts */
248 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
249 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
250 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
251 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
252 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
253 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
254 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
255 MGMT_STATUS_FAILED, /* Unspecified Error */
256 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
257 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
258 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
259 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
260 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
261 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
262 MGMT_STATUS_FAILED, /* Unit Link Key Used */
263 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
264 MGMT_STATUS_TIMEOUT, /* Instant Passed */
265 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
266 MGMT_STATUS_FAILED, /* Transaction Collision */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
269 MGMT_STATUS_REJECTED, /* QoS Rejected */
270 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
271 MGMT_STATUS_REJECTED, /* Insufficient Security */
272 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
273 MGMT_STATUS_FAILED, /* Reserved for future use */
274 MGMT_STATUS_BUSY, /* Role Switch Pending */
275 MGMT_STATUS_FAILED, /* Reserved for future use */
276 MGMT_STATUS_FAILED, /* Slot Violation */
277 MGMT_STATUS_FAILED, /* Role Switch Failed */
278 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
279 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
280 MGMT_STATUS_BUSY, /* Host Busy Pairing */
281 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
282 MGMT_STATUS_BUSY, /* Controller Busy */
283 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
284 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
285 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
286 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
287 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
290 static u8 mgmt_errno_status(int err)
294 return MGMT_STATUS_SUCCESS;
296 return MGMT_STATUS_REJECTED;
298 return MGMT_STATUS_INVALID_PARAMS;
300 return MGMT_STATUS_NOT_SUPPORTED;
302 return MGMT_STATUS_BUSY;
304 return MGMT_STATUS_AUTH_FAILED;
306 return MGMT_STATUS_NO_RESOURCES;
308 return MGMT_STATUS_ALREADY_CONNECTED;
310 return MGMT_STATUS_DISCONNECTED;
313 return MGMT_STATUS_FAILED;
316 static u8 mgmt_status(int err)
319 return mgmt_errno_status(err);
321 if (err < ARRAY_SIZE(mgmt_status_table))
322 return mgmt_status_table[err];
324 return MGMT_STATUS_FAILED;
327 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
330 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
335 u16 len, int flag, struct sock *skip_sk)
337 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
342 struct sock *skip_sk)
344 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
345 HCI_SOCK_TRUSTED, skip_sk);
348 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
350 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
354 static u8 le_addr_type(u8 mgmt_addr_type)
356 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
357 return ADDR_LE_DEV_PUBLIC;
359 return ADDR_LE_DEV_RANDOM;
362 void mgmt_fill_version_info(void *ver)
364 struct mgmt_rp_read_version *rp = ver;
366 rp->version = MGMT_VERSION;
367 rp->revision = cpu_to_le16(MGMT_REVISION);
370 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
373 struct mgmt_rp_read_version rp;
375 bt_dev_dbg(hdev, "sock %p", sk);
377 mgmt_fill_version_info(&rp);
379 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
383 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
386 struct mgmt_rp_read_commands *rp;
387 u16 num_commands, num_events;
391 bt_dev_dbg(hdev, "sock %p", sk);
393 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
394 num_commands = ARRAY_SIZE(mgmt_commands);
395 num_events = ARRAY_SIZE(mgmt_events);
397 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
398 num_events = ARRAY_SIZE(mgmt_untrusted_events);
401 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
403 rp = kmalloc(rp_size, GFP_KERNEL);
407 rp->num_commands = cpu_to_le16(num_commands);
408 rp->num_events = cpu_to_le16(num_events);
410 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
411 __le16 *opcode = rp->opcodes;
413 for (i = 0; i < num_commands; i++, opcode++)
414 put_unaligned_le16(mgmt_commands[i], opcode);
416 for (i = 0; i < num_events; i++, opcode++)
417 put_unaligned_le16(mgmt_events[i], opcode);
419 __le16 *opcode = rp->opcodes;
421 for (i = 0; i < num_commands; i++, opcode++)
422 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
424 for (i = 0; i < num_events; i++, opcode++)
425 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
428 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
435 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
438 struct mgmt_rp_read_index_list *rp;
444 bt_dev_dbg(hdev, "sock %p", sk);
446 read_lock(&hci_dev_list_lock);
449 list_for_each_entry(d, &hci_dev_list, list) {
450 if (d->dev_type == HCI_PRIMARY &&
451 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
455 rp_len = sizeof(*rp) + (2 * count);
456 rp = kmalloc(rp_len, GFP_ATOMIC);
458 read_unlock(&hci_dev_list_lock);
463 list_for_each_entry(d, &hci_dev_list, list) {
464 if (hci_dev_test_flag(d, HCI_SETUP) ||
465 hci_dev_test_flag(d, HCI_CONFIG) ||
466 hci_dev_test_flag(d, HCI_USER_CHANNEL))
469 /* Devices marked as raw-only are neither configured
470 * nor unconfigured controllers.
472 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
475 if (d->dev_type == HCI_PRIMARY &&
476 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
477 rp->index[count++] = cpu_to_le16(d->id);
478 bt_dev_dbg(hdev, "Added hci%u", d->id);
482 rp->num_controllers = cpu_to_le16(count);
483 rp_len = sizeof(*rp) + (2 * count);
485 read_unlock(&hci_dev_list_lock);
487 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
495 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
496 void *data, u16 data_len)
498 struct mgmt_rp_read_unconf_index_list *rp;
504 bt_dev_dbg(hdev, "sock %p", sk);
506 read_lock(&hci_dev_list_lock);
509 list_for_each_entry(d, &hci_dev_list, list) {
510 if (d->dev_type == HCI_PRIMARY &&
511 hci_dev_test_flag(d, HCI_UNCONFIGURED))
515 rp_len = sizeof(*rp) + (2 * count);
516 rp = kmalloc(rp_len, GFP_ATOMIC);
518 read_unlock(&hci_dev_list_lock);
523 list_for_each_entry(d, &hci_dev_list, list) {
524 if (hci_dev_test_flag(d, HCI_SETUP) ||
525 hci_dev_test_flag(d, HCI_CONFIG) ||
526 hci_dev_test_flag(d, HCI_USER_CHANNEL))
529 /* Devices marked as raw-only are neither configured
530 * nor unconfigured controllers.
532 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
535 if (d->dev_type == HCI_PRIMARY &&
536 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
537 rp->index[count++] = cpu_to_le16(d->id);
538 bt_dev_dbg(hdev, "Added hci%u", d->id);
542 rp->num_controllers = cpu_to_le16(count);
543 rp_len = sizeof(*rp) + (2 * count);
545 read_unlock(&hci_dev_list_lock);
547 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
548 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
555 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
556 void *data, u16 data_len)
558 struct mgmt_rp_read_ext_index_list *rp;
563 bt_dev_dbg(hdev, "sock %p", sk);
565 read_lock(&hci_dev_list_lock);
568 list_for_each_entry(d, &hci_dev_list, list) {
569 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
573 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
575 read_unlock(&hci_dev_list_lock);
580 list_for_each_entry(d, &hci_dev_list, list) {
581 if (hci_dev_test_flag(d, HCI_SETUP) ||
582 hci_dev_test_flag(d, HCI_CONFIG) ||
583 hci_dev_test_flag(d, HCI_USER_CHANNEL))
586 /* Devices marked as raw-only are neither configured
587 * nor unconfigured controllers.
589 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
592 if (d->dev_type == HCI_PRIMARY) {
593 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
594 rp->entry[count].type = 0x01;
596 rp->entry[count].type = 0x00;
597 } else if (d->dev_type == HCI_AMP) {
598 rp->entry[count].type = 0x02;
603 rp->entry[count].bus = d->bus;
604 rp->entry[count++].index = cpu_to_le16(d->id);
605 bt_dev_dbg(hdev, "Added hci%u", d->id);
608 rp->num_controllers = cpu_to_le16(count);
610 read_unlock(&hci_dev_list_lock);
612 /* If this command is called at least once, then all the
613 * default index and unconfigured index events are disabled
614 * and from now on only extended index events are used.
616 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
617 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
618 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
620 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
621 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
622 struct_size(rp, entry, count));
629 static bool is_configured(struct hci_dev *hdev)
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
643 static __le32 get_missing_options(struct hci_dev *hdev)
647 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
648 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
649 options |= MGMT_OPTION_EXTERNAL_CONFIG;
651 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
652 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
653 !bacmp(&hdev->public_addr, BDADDR_ANY))
654 options |= MGMT_OPTION_PUBLIC_ADDRESS;
656 return cpu_to_le32(options);
659 static int new_options(struct hci_dev *hdev, struct sock *skip)
661 __le32 options = get_missing_options(hdev);
663 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
664 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
667 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
669 __le32 options = get_missing_options(hdev);
671 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
675 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
676 void *data, u16 data_len)
678 struct mgmt_rp_read_config_info rp;
681 bt_dev_dbg(hdev, "sock %p", sk);
685 memset(&rp, 0, sizeof(rp));
686 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
688 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
689 options |= MGMT_OPTION_EXTERNAL_CONFIG;
691 if (hdev->set_bdaddr)
692 options |= MGMT_OPTION_PUBLIC_ADDRESS;
694 rp.supported_options = cpu_to_le32(options);
695 rp.missing_options = get_missing_options(hdev);
697 hci_dev_unlock(hdev);
699 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
703 static u32 get_supported_phys(struct hci_dev *hdev)
705 u32 supported_phys = 0;
707 if (lmp_bredr_capable(hdev)) {
708 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
710 if (hdev->features[0][0] & LMP_3SLOT)
711 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
713 if (hdev->features[0][0] & LMP_5SLOT)
714 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
716 if (lmp_edr_2m_capable(hdev)) {
717 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
719 if (lmp_edr_3slot_capable(hdev))
720 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
722 if (lmp_edr_5slot_capable(hdev))
723 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
725 if (lmp_edr_3m_capable(hdev)) {
726 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev))
729 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
731 if (lmp_edr_5slot_capable(hdev))
732 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
737 if (lmp_le_capable(hdev)) {
738 supported_phys |= MGMT_PHY_LE_1M_TX;
739 supported_phys |= MGMT_PHY_LE_1M_RX;
741 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
742 supported_phys |= MGMT_PHY_LE_2M_TX;
743 supported_phys |= MGMT_PHY_LE_2M_RX;
746 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
747 supported_phys |= MGMT_PHY_LE_CODED_TX;
748 supported_phys |= MGMT_PHY_LE_CODED_RX;
752 return supported_phys;
755 static u32 get_selected_phys(struct hci_dev *hdev)
757 u32 selected_phys = 0;
759 if (lmp_bredr_capable(hdev)) {
760 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
762 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
763 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
765 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
766 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
768 if (lmp_edr_2m_capable(hdev)) {
769 if (!(hdev->pkt_type & HCI_2DH1))
770 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
772 if (lmp_edr_3slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH3))
774 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
776 if (lmp_edr_5slot_capable(hdev) &&
777 !(hdev->pkt_type & HCI_2DH5))
778 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
780 if (lmp_edr_3m_capable(hdev)) {
781 if (!(hdev->pkt_type & HCI_3DH1))
782 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
784 if (lmp_edr_3slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH3))
786 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
788 if (lmp_edr_5slot_capable(hdev) &&
789 !(hdev->pkt_type & HCI_3DH5))
790 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
795 if (lmp_le_capable(hdev)) {
796 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
797 selected_phys |= MGMT_PHY_LE_1M_TX;
799 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
800 selected_phys |= MGMT_PHY_LE_1M_RX;
802 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
803 selected_phys |= MGMT_PHY_LE_2M_TX;
805 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
806 selected_phys |= MGMT_PHY_LE_2M_RX;
808 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
809 selected_phys |= MGMT_PHY_LE_CODED_TX;
811 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
812 selected_phys |= MGMT_PHY_LE_CODED_RX;
815 return selected_phys;
818 static u32 get_configurable_phys(struct hci_dev *hdev)
820 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
821 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
824 static u32 get_supported_settings(struct hci_dev *hdev)
828 settings |= MGMT_SETTING_POWERED;
829 settings |= MGMT_SETTING_BONDABLE;
830 settings |= MGMT_SETTING_DEBUG_KEYS;
831 settings |= MGMT_SETTING_CONNECTABLE;
832 settings |= MGMT_SETTING_DISCOVERABLE;
834 if (lmp_bredr_capable(hdev)) {
835 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
836 settings |= MGMT_SETTING_FAST_CONNECTABLE;
837 settings |= MGMT_SETTING_BREDR;
838 settings |= MGMT_SETTING_LINK_SECURITY;
840 if (lmp_ssp_capable(hdev)) {
841 settings |= MGMT_SETTING_SSP;
842 if (IS_ENABLED(CONFIG_BT_HS))
843 settings |= MGMT_SETTING_HS;
846 if (lmp_sc_capable(hdev))
847 settings |= MGMT_SETTING_SECURE_CONN;
849 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
851 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
854 if (lmp_le_capable(hdev)) {
855 settings |= MGMT_SETTING_LE;
856 settings |= MGMT_SETTING_SECURE_CONN;
857 settings |= MGMT_SETTING_PRIVACY;
858 settings |= MGMT_SETTING_STATIC_ADDRESS;
859 settings |= MGMT_SETTING_ADVERTISING;
862 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
864 settings |= MGMT_SETTING_CONFIGURATION;
866 if (cis_central_capable(hdev))
867 settings |= MGMT_SETTING_CIS_CENTRAL;
869 if (cis_peripheral_capable(hdev))
870 settings |= MGMT_SETTING_CIS_PERIPHERAL;
872 settings |= MGMT_SETTING_PHY_CONFIGURATION;
877 static u32 get_current_settings(struct hci_dev *hdev)
881 if (hdev_is_powered(hdev))
882 settings |= MGMT_SETTING_POWERED;
884 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
885 settings |= MGMT_SETTING_CONNECTABLE;
887 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
888 settings |= MGMT_SETTING_FAST_CONNECTABLE;
890 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
891 settings |= MGMT_SETTING_DISCOVERABLE;
893 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
894 settings |= MGMT_SETTING_BONDABLE;
896 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
897 settings |= MGMT_SETTING_BREDR;
899 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
900 settings |= MGMT_SETTING_LE;
902 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
903 settings |= MGMT_SETTING_LINK_SECURITY;
905 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
906 settings |= MGMT_SETTING_SSP;
908 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
909 settings |= MGMT_SETTING_HS;
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
912 settings |= MGMT_SETTING_ADVERTISING;
914 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
915 settings |= MGMT_SETTING_SECURE_CONN;
917 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
918 settings |= MGMT_SETTING_DEBUG_KEYS;
920 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
921 settings |= MGMT_SETTING_PRIVACY;
923 /* The current setting for static address has two purposes. The
924 * first is to indicate if the static address will be used and
925 * the second is to indicate if it is actually set.
927 * This means if the static address is not configured, this flag
928 * will never be set. If the address is configured, then if the
929 * address is actually used decides if the flag is set or not.
931 * For single mode LE only controllers and dual-mode controllers
932 * with BR/EDR disabled, the existence of the static address will
935 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
936 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
937 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
938 if (bacmp(&hdev->static_addr, BDADDR_ANY))
939 settings |= MGMT_SETTING_STATIC_ADDRESS;
942 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
943 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
945 if (cis_central_capable(hdev))
946 settings |= MGMT_SETTING_CIS_CENTRAL;
948 if (cis_peripheral_capable(hdev))
949 settings |= MGMT_SETTING_CIS_PERIPHERAL;
951 if (bis_capable(hdev))
952 settings |= MGMT_SETTING_ISO_BROADCASTER;
954 if (sync_recv_capable(hdev))
955 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
960 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
962 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
965 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
967 struct mgmt_pending_cmd *cmd;
969 /* If there's a pending mgmt command the flags will not yet have
970 * their final values, so check for this first.
972 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
974 struct mgmt_mode *cp = cmd->param;
976 return LE_AD_GENERAL;
977 else if (cp->val == 0x02)
978 return LE_AD_LIMITED;
980 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
981 return LE_AD_LIMITED;
982 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
983 return LE_AD_GENERAL;
989 bool mgmt_get_connectable(struct hci_dev *hdev)
991 struct mgmt_pending_cmd *cmd;
993 /* If there's a pending mgmt command the flag will not yet have
994 * it's final value, so check for this first.
996 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
998 struct mgmt_mode *cp = cmd->param;
1003 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1006 static int service_cache_sync(struct hci_dev *hdev, void *data)
1008 hci_update_eir_sync(hdev);
1009 hci_update_class_sync(hdev);
1014 static void service_cache_off(struct work_struct *work)
1016 struct hci_dev *hdev = container_of(work, struct hci_dev,
1017 service_cache.work);
1019 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1022 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1025 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1027 /* The generation of a new RPA and programming it into the
1028 * controller happens in the hci_req_enable_advertising()
1031 if (ext_adv_capable(hdev))
1032 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1034 return hci_enable_advertising_sync(hdev);
1037 static void rpa_expired(struct work_struct *work)
1039 struct hci_dev *hdev = container_of(work, struct hci_dev,
1042 bt_dev_dbg(hdev, "");
1044 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1046 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1049 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1052 static void discov_off(struct work_struct *work)
1054 struct hci_dev *hdev = container_of(work, struct hci_dev,
1057 bt_dev_dbg(hdev, "");
1061 /* When discoverable timeout triggers, then just make sure
1062 * the limited discoverable flag is cleared. Even in the case
1063 * of a timeout triggered from general discoverable, it is
1064 * safe to unconditionally clear the flag.
1066 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1067 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1068 hdev->discov_timeout = 0;
1070 hci_update_discoverable(hdev);
1072 mgmt_new_settings(hdev);
1074 hci_dev_unlock(hdev);
1077 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1079 static void mesh_send_complete(struct hci_dev *hdev,
1080 struct mgmt_mesh_tx *mesh_tx, bool silent)
1082 u8 handle = mesh_tx->handle;
1085 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1086 sizeof(handle), NULL);
1088 mgmt_mesh_remove(mesh_tx);
1091 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1093 struct mgmt_mesh_tx *mesh_tx;
1095 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1096 hci_disable_advertising_sync(hdev);
1097 mesh_tx = mgmt_mesh_next(hdev, NULL);
1100 mesh_send_complete(hdev, mesh_tx, false);
1105 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1106 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1107 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1109 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1114 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1115 mesh_send_start_complete);
1118 mesh_send_complete(hdev, mesh_tx, false);
1120 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1123 static void mesh_send_done(struct work_struct *work)
1125 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 mesh_send_done.work);
1128 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1131 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1134 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1136 if (hci_dev_test_flag(hdev, HCI_MGMT))
1139 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1141 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1142 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1143 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1144 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1146 /* Non-mgmt controlled devices get this bit set
1147 * implicitly so that pairing works for them, however
1148 * for mgmt we require user-space to explicitly enable
1151 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1153 hci_dev_set_flag(hdev, HCI_MGMT);
1156 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1157 void *data, u16 data_len)
1159 struct mgmt_rp_read_info rp;
1161 bt_dev_dbg(hdev, "sock %p", sk);
1165 memset(&rp, 0, sizeof(rp));
1167 bacpy(&rp.bdaddr, &hdev->bdaddr);
1169 rp.version = hdev->hci_ver;
1170 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1172 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1173 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1175 memcpy(rp.dev_class, hdev->dev_class, 3);
1177 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1178 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1180 hci_dev_unlock(hdev);
1182 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1186 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1191 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1192 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1193 hdev->dev_class, 3);
1195 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1196 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1199 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1200 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1201 hdev->dev_name, name_len);
1203 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1204 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1205 hdev->short_name, name_len);
1210 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1211 void *data, u16 data_len)
1214 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1217 bt_dev_dbg(hdev, "sock %p", sk);
1219 memset(&buf, 0, sizeof(buf));
1223 bacpy(&rp->bdaddr, &hdev->bdaddr);
1225 rp->version = hdev->hci_ver;
1226 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1228 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1229 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1232 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1233 rp->eir_len = cpu_to_le16(eir_len);
1235 hci_dev_unlock(hdev);
1237 /* If this command is called at least once, then the events
1238 * for class of device and local name changes are disabled
1239 * and only the new extended controller information event
1242 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1243 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1244 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1247 sizeof(*rp) + eir_len);
1250 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1253 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1256 memset(buf, 0, sizeof(buf));
1258 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1259 ev->eir_len = cpu_to_le16(eir_len);
1261 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1262 sizeof(*ev) + eir_len,
1263 HCI_MGMT_EXT_INFO_EVENTS, skip);
1266 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1268 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1270 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1274 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1276 struct mgmt_ev_advertising_added ev;
1278 ev.instance = instance;
1280 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1283 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1286 struct mgmt_ev_advertising_removed ev;
1288 ev.instance = instance;
1290 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1293 static void cancel_adv_timeout(struct hci_dev *hdev)
1295 if (hdev->adv_instance_timeout) {
1296 hdev->adv_instance_timeout = 0;
1297 cancel_delayed_work(&hdev->adv_instance_expire);
1301 /* This function requires the caller holds hdev->lock */
1302 static void restart_le_actions(struct hci_dev *hdev)
1304 struct hci_conn_params *p;
1306 list_for_each_entry(p, &hdev->le_conn_params, list) {
1307 /* Needed for AUTO_OFF case where might not "really"
1308 * have been powered off.
1310 hci_pend_le_list_del_init(p);
1312 switch (p->auto_connect) {
1313 case HCI_AUTO_CONN_DIRECT:
1314 case HCI_AUTO_CONN_ALWAYS:
1315 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1317 case HCI_AUTO_CONN_REPORT:
1318 hci_pend_le_list_add(p, &hdev->pend_le_reports);
1326 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1328 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1330 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1331 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1334 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1336 struct mgmt_pending_cmd *cmd = data;
1337 struct mgmt_mode *cp;
1339 /* Make sure cmd still outstanding. */
1340 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1345 bt_dev_dbg(hdev, "err %d", err);
1350 restart_le_actions(hdev);
1351 hci_update_passive_scan(hdev);
1352 hci_dev_unlock(hdev);
1355 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1357 /* Only call new_setting for power on as power off is deferred
1358 * to hdev->power_off work which does call hci_dev_do_close.
1361 new_settings(hdev, cmd->sk);
1363 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1367 mgmt_pending_remove(cmd);
1370 static int set_powered_sync(struct hci_dev *hdev, void *data)
1372 struct mgmt_pending_cmd *cmd = data;
1373 struct mgmt_mode *cp = cmd->param;
1375 BT_DBG("%s", hdev->name);
1377 return hci_set_powered_sync(hdev, cp->val);
1380 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1383 struct mgmt_mode *cp = data;
1384 struct mgmt_pending_cmd *cmd;
1387 bt_dev_dbg(hdev, "sock %p", sk);
1389 if (cp->val != 0x00 && cp->val != 0x01)
1390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1391 MGMT_STATUS_INVALID_PARAMS);
1395 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1396 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1401 if (!!cp->val == hdev_is_powered(hdev)) {
1402 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1406 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1412 /* Cancel potentially blocking sync operation before power off */
1413 if (cp->val == 0x00) {
1414 __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
1415 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1416 mgmt_set_powered_complete);
1418 /* Use hci_cmd_sync_submit since hdev might not be running */
1419 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1420 mgmt_set_powered_complete);
1424 mgmt_pending_remove(cmd);
1427 hci_dev_unlock(hdev);
1431 int mgmt_new_settings(struct hci_dev *hdev)
1433 return new_settings(hdev, NULL);
1438 struct hci_dev *hdev;
1442 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1444 struct cmd_lookup *match = data;
1446 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1448 list_del(&cmd->list);
1450 if (match->sk == NULL) {
1451 match->sk = cmd->sk;
1452 sock_hold(match->sk);
1455 mgmt_pending_free(cmd);
1458 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1462 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1463 mgmt_pending_remove(cmd);
1466 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1468 if (cmd->cmd_complete) {
1471 cmd->cmd_complete(cmd, *status);
1472 mgmt_pending_remove(cmd);
1477 cmd_status_rsp(cmd, data);
1480 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1482 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1483 cmd->param, cmd->param_len);
1486 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1488 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1489 cmd->param, sizeof(struct mgmt_addr_info));
1492 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1494 if (!lmp_bredr_capable(hdev))
1495 return MGMT_STATUS_NOT_SUPPORTED;
1496 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1497 return MGMT_STATUS_REJECTED;
1499 return MGMT_STATUS_SUCCESS;
1502 static u8 mgmt_le_support(struct hci_dev *hdev)
1504 if (!lmp_le_capable(hdev))
1505 return MGMT_STATUS_NOT_SUPPORTED;
1506 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1507 return MGMT_STATUS_REJECTED;
1509 return MGMT_STATUS_SUCCESS;
1512 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1515 struct mgmt_pending_cmd *cmd = data;
1517 bt_dev_dbg(hdev, "err %d", err);
1519 /* Make sure cmd still outstanding. */
1520 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1526 u8 mgmt_err = mgmt_status(err);
1527 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1528 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1532 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1533 hdev->discov_timeout > 0) {
1534 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1535 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1538 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1539 new_settings(hdev, cmd->sk);
1542 mgmt_pending_remove(cmd);
1543 hci_dev_unlock(hdev);
1546 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1548 BT_DBG("%s", hdev->name);
1550 return hci_update_discoverable_sync(hdev);
1553 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1556 struct mgmt_cp_set_discoverable *cp = data;
1557 struct mgmt_pending_cmd *cmd;
1561 bt_dev_dbg(hdev, "sock %p", sk);
1563 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1564 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_REJECTED);
1568 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1569 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1570 MGMT_STATUS_INVALID_PARAMS);
1572 timeout = __le16_to_cpu(cp->timeout);
1574 /* Disabling discoverable requires that no timeout is set,
1575 * and enabling limited discoverable requires a timeout.
1577 if ((cp->val == 0x00 && timeout > 0) ||
1578 (cp->val == 0x02 && timeout == 0))
1579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580 MGMT_STATUS_INVALID_PARAMS);
1584 if (!hdev_is_powered(hdev) && timeout > 0) {
1585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1586 MGMT_STATUS_NOT_POWERED);
1590 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1597 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_REJECTED);
1603 if (hdev->advertising_paused) {
1604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1609 if (!hdev_is_powered(hdev)) {
1610 bool changed = false;
1612 /* Setting limited discoverable when powered off is
1613 * not a valid operation since it requires a timeout
1614 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1616 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1617 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1621 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1626 err = new_settings(hdev, sk);
1631 /* If the current mode is the same, then just update the timeout
1632 * value with the new value. And if only the timeout gets updated,
1633 * then no need for any HCI transactions.
1635 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1636 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1637 HCI_LIMITED_DISCOVERABLE)) {
1638 cancel_delayed_work(&hdev->discov_off);
1639 hdev->discov_timeout = timeout;
1641 if (cp->val && hdev->discov_timeout > 0) {
1642 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1643 queue_delayed_work(hdev->req_workqueue,
1644 &hdev->discov_off, to);
1647 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1651 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1657 /* Cancel any potential discoverable timeout that might be
1658 * still active and store new timeout value. The arming of
1659 * the timeout happens in the complete handler.
1661 cancel_delayed_work(&hdev->discov_off);
1662 hdev->discov_timeout = timeout;
1665 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1667 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1669 /* Limited discoverable mode */
1670 if (cp->val == 0x02)
1671 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1673 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1675 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1676 mgmt_set_discoverable_complete);
1679 mgmt_pending_remove(cmd);
1682 hci_dev_unlock(hdev);
1686 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1689 struct mgmt_pending_cmd *cmd = data;
1691 bt_dev_dbg(hdev, "err %d", err);
1693 /* Make sure cmd still outstanding. */
1694 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1700 u8 mgmt_err = mgmt_status(err);
1701 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1705 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1706 new_settings(hdev, cmd->sk);
1710 mgmt_pending_remove(cmd);
1712 hci_dev_unlock(hdev);
1715 static int set_connectable_update_settings(struct hci_dev *hdev,
1716 struct sock *sk, u8 val)
1718 bool changed = false;
1721 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1725 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1727 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1728 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1731 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1736 hci_update_scan(hdev);
1737 hci_update_passive_scan(hdev);
1738 return new_settings(hdev, sk);
1744 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1746 BT_DBG("%s", hdev->name);
1748 return hci_update_connectable_sync(hdev);
1751 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1754 struct mgmt_mode *cp = data;
1755 struct mgmt_pending_cmd *cmd;
1758 bt_dev_dbg(hdev, "sock %p", sk);
1760 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1761 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1763 MGMT_STATUS_REJECTED);
1765 if (cp->val != 0x00 && cp->val != 0x01)
1766 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1767 MGMT_STATUS_INVALID_PARAMS);
1771 if (!hdev_is_powered(hdev)) {
1772 err = set_connectable_update_settings(hdev, sk, cp->val);
1776 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1777 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1783 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1790 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1792 if (hdev->discov_timeout > 0)
1793 cancel_delayed_work(&hdev->discov_off);
1795 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1796 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1797 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1800 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1801 mgmt_set_connectable_complete);
1804 mgmt_pending_remove(cmd);
1807 hci_dev_unlock(hdev);
1811 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1814 struct mgmt_mode *cp = data;
1818 bt_dev_dbg(hdev, "sock %p", sk);
1820 if (cp->val != 0x00 && cp->val != 0x01)
1821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1822 MGMT_STATUS_INVALID_PARAMS);
1827 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1829 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1831 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1836 /* In limited privacy mode the change of bondable mode
1837 * may affect the local advertising address.
1839 hci_update_discoverable(hdev);
1841 err = new_settings(hdev, sk);
1845 hci_dev_unlock(hdev);
1849 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1852 struct mgmt_mode *cp = data;
1853 struct mgmt_pending_cmd *cmd;
1857 bt_dev_dbg(hdev, "sock %p", sk);
1859 status = mgmt_bredr_support(hdev);
1861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1864 if (cp->val != 0x00 && cp->val != 0x01)
1865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1866 MGMT_STATUS_INVALID_PARAMS);
1870 if (!hdev_is_powered(hdev)) {
1871 bool changed = false;
1873 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1874 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1878 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1883 err = new_settings(hdev, sk);
1888 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1889 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1896 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1897 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1901 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1907 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1909 mgmt_pending_remove(cmd);
1914 hci_dev_unlock(hdev);
1918 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1920 struct cmd_lookup match = { NULL, hdev };
1921 struct mgmt_pending_cmd *cmd = data;
1922 struct mgmt_mode *cp = cmd->param;
1923 u8 enable = cp->val;
1926 /* Make sure cmd still outstanding. */
1927 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1931 u8 mgmt_err = mgmt_status(err);
1933 if (enable && hci_dev_test_and_clear_flag(hdev,
1935 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1936 new_settings(hdev, NULL);
1939 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1945 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1947 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1950 changed = hci_dev_test_and_clear_flag(hdev,
1953 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1956 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1959 new_settings(hdev, match.sk);
1964 hci_update_eir_sync(hdev);
1967 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1969 struct mgmt_pending_cmd *cmd = data;
1970 struct mgmt_mode *cp = cmd->param;
1971 bool changed = false;
1975 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1977 err = hci_write_ssp_mode_sync(hdev, cp->val);
1979 if (!err && changed)
1980 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1985 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1987 struct mgmt_mode *cp = data;
1988 struct mgmt_pending_cmd *cmd;
1992 bt_dev_dbg(hdev, "sock %p", sk);
1994 status = mgmt_bredr_support(hdev);
1996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1998 if (!lmp_ssp_capable(hdev))
1999 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2000 MGMT_STATUS_NOT_SUPPORTED);
2002 if (cp->val != 0x00 && cp->val != 0x01)
2003 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2004 MGMT_STATUS_INVALID_PARAMS);
2008 if (!hdev_is_powered(hdev)) {
2012 changed = !hci_dev_test_and_set_flag(hdev,
2015 changed = hci_dev_test_and_clear_flag(hdev,
2018 changed = hci_dev_test_and_clear_flag(hdev,
2021 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2024 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2029 err = new_settings(hdev, sk);
2034 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2035 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2040 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2041 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2045 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2049 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2053 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2054 MGMT_STATUS_FAILED);
2057 mgmt_pending_remove(cmd);
2061 hci_dev_unlock(hdev);
2065 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2067 struct mgmt_mode *cp = data;
2072 bt_dev_dbg(hdev, "sock %p", sk);
2074 if (!IS_ENABLED(CONFIG_BT_HS))
2075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2076 MGMT_STATUS_NOT_SUPPORTED);
2078 status = mgmt_bredr_support(hdev);
2080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2082 if (!lmp_ssp_capable(hdev))
2083 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2084 MGMT_STATUS_NOT_SUPPORTED);
2086 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2087 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2088 MGMT_STATUS_REJECTED);
2090 if (cp->val != 0x00 && cp->val != 0x01)
2091 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2092 MGMT_STATUS_INVALID_PARAMS);
2096 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2097 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2103 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2105 if (hdev_is_powered(hdev)) {
2106 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2107 MGMT_STATUS_REJECTED);
2111 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2114 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2119 err = new_settings(hdev, sk);
2122 hci_dev_unlock(hdev);
2126 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2128 struct cmd_lookup match = { NULL, hdev };
2129 u8 status = mgmt_status(err);
2131 bt_dev_dbg(hdev, "err %d", err);
2134 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2139 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2141 new_settings(hdev, match.sk);
2147 static int set_le_sync(struct hci_dev *hdev, void *data)
2149 struct mgmt_pending_cmd *cmd = data;
2150 struct mgmt_mode *cp = cmd->param;
2155 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2157 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2158 hci_disable_advertising_sync(hdev);
2160 if (ext_adv_capable(hdev))
2161 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2163 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2166 err = hci_write_le_host_supported_sync(hdev, val, 0);
2168 /* Make sure the controller has a good default for
2169 * advertising data. Restrict the update to when LE
2170 * has actually been enabled. During power on, the
2171 * update in powered_update_hci will take care of it.
2173 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2174 if (ext_adv_capable(hdev)) {
2177 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2179 hci_update_scan_rsp_data_sync(hdev, 0x00);
2181 hci_update_adv_data_sync(hdev, 0x00);
2182 hci_update_scan_rsp_data_sync(hdev, 0x00);
2185 hci_update_passive_scan(hdev);
2191 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2193 struct mgmt_pending_cmd *cmd = data;
2194 u8 status = mgmt_status(err);
2195 struct sock *sk = cmd->sk;
2198 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2199 cmd_status_rsp, &status);
2203 mgmt_pending_remove(cmd);
2204 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2207 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2209 struct mgmt_pending_cmd *cmd = data;
2210 struct mgmt_cp_set_mesh *cp = cmd->param;
2211 size_t len = cmd->param_len;
2213 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2216 hci_dev_set_flag(hdev, HCI_MESH);
2218 hci_dev_clear_flag(hdev, HCI_MESH);
2222 /* If filters don't fit, forward all adv pkts */
2223 if (len <= sizeof(hdev->mesh_ad_types))
2224 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2226 hci_update_passive_scan_sync(hdev);
2230 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2232 struct mgmt_cp_set_mesh *cp = data;
2233 struct mgmt_pending_cmd *cmd;
2236 bt_dev_dbg(hdev, "sock %p", sk);
2238 if (!lmp_le_capable(hdev) ||
2239 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2240 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2241 MGMT_STATUS_NOT_SUPPORTED);
2243 if (cp->enable != 0x00 && cp->enable != 0x01)
2244 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2245 MGMT_STATUS_INVALID_PARAMS);
2249 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2253 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2257 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2258 MGMT_STATUS_FAILED);
2261 mgmt_pending_remove(cmd);
2264 hci_dev_unlock(hdev);
2268 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2270 struct mgmt_mesh_tx *mesh_tx = data;
2271 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2272 unsigned long mesh_send_interval;
2273 u8 mgmt_err = mgmt_status(err);
2275 /* Report any errors here, but don't report completion */
2278 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2279 /* Send Complete Error Code for handle */
2280 mesh_send_complete(hdev, mesh_tx, false);
2284 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2285 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2286 mesh_send_interval);
2289 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2291 struct mgmt_mesh_tx *mesh_tx = data;
2292 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2293 struct adv_info *adv, *next_instance;
2294 u8 instance = hdev->le_num_of_adv_sets + 1;
2295 u16 timeout, duration;
2298 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2299 return MGMT_STATUS_BUSY;
2302 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2303 adv = hci_add_adv_instance(hdev, instance, 0,
2304 send->adv_data_len, send->adv_data,
2307 HCI_ADV_TX_POWER_NO_PREFERENCE,
2308 hdev->le_adv_min_interval,
2309 hdev->le_adv_max_interval,
2313 mesh_tx->instance = instance;
2317 if (hdev->cur_adv_instance == instance) {
2318 /* If the currently advertised instance is being changed then
2319 * cancel the current advertising and schedule the next
2320 * instance. If there is only one instance then the overridden
2321 * advertising data will be visible right away.
2323 cancel_adv_timeout(hdev);
2325 next_instance = hci_get_next_instance(hdev, instance);
2327 instance = next_instance->instance;
2330 } else if (hdev->adv_instance_timeout) {
2331 /* Immediately advertise the new instance if no other, or
2332 * let it go naturally from queue if ADV is already happening
2338 return hci_schedule_adv_instance_sync(hdev, instance, true);
2343 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2345 struct mgmt_rp_mesh_read_features *rp = data;
2347 if (rp->used_handles >= rp->max_handles)
2350 rp->handles[rp->used_handles++] = mesh_tx->handle;
2353 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2354 void *data, u16 len)
2356 struct mgmt_rp_mesh_read_features rp;
2358 if (!lmp_le_capable(hdev) ||
2359 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2360 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2361 MGMT_STATUS_NOT_SUPPORTED);
2363 memset(&rp, 0, sizeof(rp));
2364 rp.index = cpu_to_le16(hdev->id);
2365 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2366 rp.max_handles = MESH_HANDLES_MAX;
2371 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2373 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2374 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2376 hci_dev_unlock(hdev);
2380 static int send_cancel(struct hci_dev *hdev, void *data)
2382 struct mgmt_pending_cmd *cmd = data;
2383 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2384 struct mgmt_mesh_tx *mesh_tx;
2386 if (!cancel->handle) {
2388 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2391 mesh_send_complete(hdev, mesh_tx, false);
2394 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2396 if (mesh_tx && mesh_tx->sk == cmd->sk)
2397 mesh_send_complete(hdev, mesh_tx, false);
2400 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2402 mgmt_pending_free(cmd);
2407 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2408 void *data, u16 len)
2410 struct mgmt_pending_cmd *cmd;
2413 if (!lmp_le_capable(hdev) ||
2414 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2416 MGMT_STATUS_NOT_SUPPORTED);
2418 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2419 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2420 MGMT_STATUS_REJECTED);
2423 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2427 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2430 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2431 MGMT_STATUS_FAILED);
2434 mgmt_pending_free(cmd);
2437 hci_dev_unlock(hdev);
2441 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2443 struct mgmt_mesh_tx *mesh_tx;
2444 struct mgmt_cp_mesh_send *send = data;
2445 struct mgmt_rp_mesh_read_features rp;
2449 if (!lmp_le_capable(hdev) ||
2450 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2452 MGMT_STATUS_NOT_SUPPORTED);
2453 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2454 len <= MGMT_MESH_SEND_SIZE ||
2455 len > (MGMT_MESH_SEND_SIZE + 31))
2456 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2457 MGMT_STATUS_REJECTED);
2461 memset(&rp, 0, sizeof(rp));
2462 rp.max_handles = MESH_HANDLES_MAX;
2464 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2466 if (rp.max_handles <= rp.used_handles) {
2467 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2472 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2473 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2478 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2479 mesh_send_start_complete);
2482 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2483 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2484 MGMT_STATUS_FAILED);
2488 mgmt_mesh_remove(mesh_tx);
2491 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2493 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2494 &mesh_tx->handle, 1);
2498 hci_dev_unlock(hdev);
2502 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2504 struct mgmt_mode *cp = data;
2505 struct mgmt_pending_cmd *cmd;
2509 bt_dev_dbg(hdev, "sock %p", sk);
2511 if (!lmp_le_capable(hdev))
2512 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2513 MGMT_STATUS_NOT_SUPPORTED);
2515 if (cp->val != 0x00 && cp->val != 0x01)
2516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2517 MGMT_STATUS_INVALID_PARAMS);
2519 /* Bluetooth single mode LE only controllers or dual-mode
2520 * controllers configured as LE only devices, do not allow
2521 * switching LE off. These have either LE enabled explicitly
2522 * or BR/EDR has been previously switched off.
2524 * When trying to enable an already enabled LE, then gracefully
2525 * send a positive response. Trying to disable it however will
2526 * result into rejection.
2528 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2529 if (cp->val == 0x01)
2530 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2532 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2533 MGMT_STATUS_REJECTED);
2539 enabled = lmp_host_le_capable(hdev);
2541 if (!hdev_is_powered(hdev) || val == enabled) {
2542 bool changed = false;
2544 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2545 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2549 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2550 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2554 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2559 err = new_settings(hdev, sk);
2564 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2565 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2566 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2571 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2575 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2580 MGMT_STATUS_FAILED);
2583 mgmt_pending_remove(cmd);
2587 hci_dev_unlock(hdev);
2591 /* This is a helper function to test for pending mgmt commands that can
2592 * cause CoD or EIR HCI commands. We can only allow one such pending
2593 * mgmt command at a time since otherwise we cannot easily track what
2594 * the current values are, will be, and based on that calculate if a new
2595 * HCI command needs to be sent and if yes with what value.
2597 static bool pending_eir_or_class(struct hci_dev *hdev)
2599 struct mgmt_pending_cmd *cmd;
2601 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2602 switch (cmd->opcode) {
2603 case MGMT_OP_ADD_UUID:
2604 case MGMT_OP_REMOVE_UUID:
2605 case MGMT_OP_SET_DEV_CLASS:
2606 case MGMT_OP_SET_POWERED:
2614 static const u8 bluetooth_base_uuid[] = {
2615 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2616 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2619 static u8 get_uuid_size(const u8 *uuid)
2623 if (memcmp(uuid, bluetooth_base_uuid, 12))
2626 val = get_unaligned_le32(&uuid[12]);
2633 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2635 struct mgmt_pending_cmd *cmd = data;
2637 bt_dev_dbg(hdev, "err %d", err);
2639 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2640 mgmt_status(err), hdev->dev_class, 3);
2642 mgmt_pending_free(cmd);
2645 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2649 err = hci_update_class_sync(hdev);
2653 return hci_update_eir_sync(hdev);
2656 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2658 struct mgmt_cp_add_uuid *cp = data;
2659 struct mgmt_pending_cmd *cmd;
2660 struct bt_uuid *uuid;
2663 bt_dev_dbg(hdev, "sock %p", sk);
2667 if (pending_eir_or_class(hdev)) {
2668 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2673 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2679 memcpy(uuid->uuid, cp->uuid, 16);
2680 uuid->svc_hint = cp->svc_hint;
2681 uuid->size = get_uuid_size(cp->uuid);
2683 list_add_tail(&uuid->list, &hdev->uuids);
2685 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2691 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2693 mgmt_pending_free(cmd);
2698 hci_dev_unlock(hdev);
2702 static bool enable_service_cache(struct hci_dev *hdev)
2704 if (!hdev_is_powered(hdev))
2707 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2708 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2716 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2720 err = hci_update_class_sync(hdev);
2724 return hci_update_eir_sync(hdev);
2727 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2730 struct mgmt_cp_remove_uuid *cp = data;
2731 struct mgmt_pending_cmd *cmd;
2732 struct bt_uuid *match, *tmp;
2733 static const u8 bt_uuid_any[] = {
2734 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2738 bt_dev_dbg(hdev, "sock %p", sk);
2742 if (pending_eir_or_class(hdev)) {
2743 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2748 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2749 hci_uuids_clear(hdev);
2751 if (enable_service_cache(hdev)) {
2752 err = mgmt_cmd_complete(sk, hdev->id,
2753 MGMT_OP_REMOVE_UUID,
2754 0, hdev->dev_class, 3);
2763 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2764 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2767 list_del(&match->list);
2773 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2774 MGMT_STATUS_INVALID_PARAMS);
2779 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2785 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2786 mgmt_class_complete);
2788 mgmt_pending_free(cmd);
2791 hci_dev_unlock(hdev);
2795 static int set_class_sync(struct hci_dev *hdev, void *data)
2799 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2800 cancel_delayed_work_sync(&hdev->service_cache);
2801 err = hci_update_eir_sync(hdev);
2807 return hci_update_class_sync(hdev);
2810 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2813 struct mgmt_cp_set_dev_class *cp = data;
2814 struct mgmt_pending_cmd *cmd;
2817 bt_dev_dbg(hdev, "sock %p", sk);
2819 if (!lmp_bredr_capable(hdev))
2820 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2821 MGMT_STATUS_NOT_SUPPORTED);
2825 if (pending_eir_or_class(hdev)) {
2826 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2831 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2832 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2833 MGMT_STATUS_INVALID_PARAMS);
2837 hdev->major_class = cp->major;
2838 hdev->minor_class = cp->minor;
2840 if (!hdev_is_powered(hdev)) {
2841 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2842 hdev->dev_class, 3);
2846 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2852 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2853 mgmt_class_complete);
2855 mgmt_pending_free(cmd);
2858 hci_dev_unlock(hdev);
2862 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2865 struct mgmt_cp_load_link_keys *cp = data;
2866 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2867 sizeof(struct mgmt_link_key_info));
2868 u16 key_count, expected_len;
2872 bt_dev_dbg(hdev, "sock %p", sk);
2874 if (!lmp_bredr_capable(hdev))
2875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2876 MGMT_STATUS_NOT_SUPPORTED);
2878 key_count = __le16_to_cpu(cp->key_count);
2879 if (key_count > max_key_count) {
2880 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2882 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2883 MGMT_STATUS_INVALID_PARAMS);
2886 expected_len = struct_size(cp, keys, key_count);
2887 if (expected_len != len) {
2888 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2890 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2891 MGMT_STATUS_INVALID_PARAMS);
2894 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2896 MGMT_STATUS_INVALID_PARAMS);
2898 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2901 for (i = 0; i < key_count; i++) {
2902 struct mgmt_link_key_info *key = &cp->keys[i];
2904 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2905 if (key->type > 0x08)
2906 return mgmt_cmd_status(sk, hdev->id,
2907 MGMT_OP_LOAD_LINK_KEYS,
2908 MGMT_STATUS_INVALID_PARAMS);
2913 hci_link_keys_clear(hdev);
2916 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2918 changed = hci_dev_test_and_clear_flag(hdev,
2919 HCI_KEEP_DEBUG_KEYS);
2922 new_settings(hdev, NULL);
2924 for (i = 0; i < key_count; i++) {
2925 struct mgmt_link_key_info *key = &cp->keys[i];
2927 if (hci_is_blocked_key(hdev,
2928 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2930 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2935 /* Always ignore debug keys and require a new pairing if
2936 * the user wants to use them.
2938 if (key->type == HCI_LK_DEBUG_COMBINATION)
2941 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2942 key->type, key->pin_len, NULL);
2945 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2947 hci_dev_unlock(hdev);
2952 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2953 u8 addr_type, struct sock *skip_sk)
2955 struct mgmt_ev_device_unpaired ev;
2957 bacpy(&ev.addr.bdaddr, bdaddr);
2958 ev.addr.type = addr_type;
2960 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2964 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2966 struct mgmt_pending_cmd *cmd = data;
2967 struct mgmt_cp_unpair_device *cp = cmd->param;
2970 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2972 cmd->cmd_complete(cmd, err);
2973 mgmt_pending_free(cmd);
2976 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2978 struct mgmt_pending_cmd *cmd = data;
2979 struct mgmt_cp_unpair_device *cp = cmd->param;
2980 struct hci_conn *conn;
2982 if (cp->addr.type == BDADDR_BREDR)
2983 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2986 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2987 le_addr_type(cp->addr.type));
2992 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2995 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2998 struct mgmt_cp_unpair_device *cp = data;
2999 struct mgmt_rp_unpair_device rp;
3000 struct hci_conn_params *params;
3001 struct mgmt_pending_cmd *cmd;
3002 struct hci_conn *conn;
3006 memset(&rp, 0, sizeof(rp));
3007 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3008 rp.addr.type = cp->addr.type;
3010 if (!bdaddr_type_is_valid(cp->addr.type))
3011 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3012 MGMT_STATUS_INVALID_PARAMS,
3015 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3016 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3017 MGMT_STATUS_INVALID_PARAMS,
3022 if (!hdev_is_powered(hdev)) {
3023 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3024 MGMT_STATUS_NOT_POWERED, &rp,
3029 if (cp->addr.type == BDADDR_BREDR) {
3030 /* If disconnection is requested, then look up the
3031 * connection. If the remote device is connected, it
3032 * will be later used to terminate the link.
3034 * Setting it to NULL explicitly will cause no
3035 * termination of the link.
3038 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3043 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3045 err = mgmt_cmd_complete(sk, hdev->id,
3046 MGMT_OP_UNPAIR_DEVICE,
3047 MGMT_STATUS_NOT_PAIRED, &rp,
3055 /* LE address type */
3056 addr_type = le_addr_type(cp->addr.type);
3058 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3059 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3061 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3062 MGMT_STATUS_NOT_PAIRED, &rp,
3067 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3069 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3074 /* Defer clearing up the connection parameters until closing to
3075 * give a chance of keeping them if a repairing happens.
3077 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3079 /* Disable auto-connection parameters if present */
3080 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3082 if (params->explicit_connect)
3083 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3085 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3088 /* If disconnection is not requested, then clear the connection
3089 * variable so that the link is not terminated.
3091 if (!cp->disconnect)
3095 /* If the connection variable is set, then termination of the
3096 * link is requested.
3099 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3101 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3105 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3112 cmd->cmd_complete = addr_cmd_complete;
3114 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3115 unpair_device_complete);
3117 mgmt_pending_free(cmd);
3120 hci_dev_unlock(hdev);
3124 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3127 struct mgmt_cp_disconnect *cp = data;
3128 struct mgmt_rp_disconnect rp;
3129 struct mgmt_pending_cmd *cmd;
3130 struct hci_conn *conn;
3133 bt_dev_dbg(hdev, "sock %p", sk);
3135 memset(&rp, 0, sizeof(rp));
3136 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3137 rp.addr.type = cp->addr.type;
3139 if (!bdaddr_type_is_valid(cp->addr.type))
3140 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3141 MGMT_STATUS_INVALID_PARAMS,
3146 if (!test_bit(HCI_UP, &hdev->flags)) {
3147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3148 MGMT_STATUS_NOT_POWERED, &rp,
3153 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3154 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3155 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3159 if (cp->addr.type == BDADDR_BREDR)
3160 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3163 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3164 le_addr_type(cp->addr.type));
3166 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3167 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3168 MGMT_STATUS_NOT_CONNECTED, &rp,
3173 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3179 cmd->cmd_complete = generic_cmd_complete;
3181 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3183 mgmt_pending_remove(cmd);
3186 hci_dev_unlock(hdev);
3190 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3192 switch (link_type) {
3194 switch (addr_type) {
3195 case ADDR_LE_DEV_PUBLIC:
3196 return BDADDR_LE_PUBLIC;
3199 /* Fallback to LE Random address type */
3200 return BDADDR_LE_RANDOM;
3204 /* Fallback to BR/EDR type */
3205 return BDADDR_BREDR;
3209 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3212 struct mgmt_rp_get_connections *rp;
3217 bt_dev_dbg(hdev, "sock %p", sk);
3221 if (!hdev_is_powered(hdev)) {
3222 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3223 MGMT_STATUS_NOT_POWERED);
3228 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3229 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3233 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3240 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3241 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3243 bacpy(&rp->addr[i].bdaddr, &c->dst);
3244 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3245 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3250 rp->conn_count = cpu_to_le16(i);
3252 /* Recalculate length in case of filtered SCO connections, etc */
3253 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3254 struct_size(rp, addr, i));
3259 hci_dev_unlock(hdev);
3263 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3264 struct mgmt_cp_pin_code_neg_reply *cp)
3266 struct mgmt_pending_cmd *cmd;
3269 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3274 cmd->cmd_complete = addr_cmd_complete;
3276 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3277 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3279 mgmt_pending_remove(cmd);
3284 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3287 struct hci_conn *conn;
3288 struct mgmt_cp_pin_code_reply *cp = data;
3289 struct hci_cp_pin_code_reply reply;
3290 struct mgmt_pending_cmd *cmd;
3293 bt_dev_dbg(hdev, "sock %p", sk);
3297 if (!hdev_is_powered(hdev)) {
3298 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3299 MGMT_STATUS_NOT_POWERED);
3303 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3305 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3306 MGMT_STATUS_NOT_CONNECTED);
3310 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3311 struct mgmt_cp_pin_code_neg_reply ncp;
3313 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3315 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3317 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3319 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3320 MGMT_STATUS_INVALID_PARAMS);
3325 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3331 cmd->cmd_complete = addr_cmd_complete;
3333 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3334 reply.pin_len = cp->pin_len;
3335 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3337 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3339 mgmt_pending_remove(cmd);
3342 hci_dev_unlock(hdev);
3346 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3349 struct mgmt_cp_set_io_capability *cp = data;
3351 bt_dev_dbg(hdev, "sock %p", sk);
3353 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3355 MGMT_STATUS_INVALID_PARAMS);
3359 hdev->io_capability = cp->io_capability;
3361 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3363 hci_dev_unlock(hdev);
3365 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3369 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3371 struct hci_dev *hdev = conn->hdev;
3372 struct mgmt_pending_cmd *cmd;
3374 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3375 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3378 if (cmd->user_data != conn)
3387 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3389 struct mgmt_rp_pair_device rp;
3390 struct hci_conn *conn = cmd->user_data;
3393 bacpy(&rp.addr.bdaddr, &conn->dst);
3394 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3396 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3397 status, &rp, sizeof(rp));
3399 /* So we don't get further callbacks for this connection */
3400 conn->connect_cfm_cb = NULL;
3401 conn->security_cfm_cb = NULL;
3402 conn->disconn_cfm_cb = NULL;
3404 hci_conn_drop(conn);
3406 /* The device is paired so there is no need to remove
3407 * its connection parameters anymore.
3409 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3416 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3418 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3419 struct mgmt_pending_cmd *cmd;
3421 cmd = find_pairing(conn);
3423 cmd->cmd_complete(cmd, status);
3424 mgmt_pending_remove(cmd);
3428 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3430 struct mgmt_pending_cmd *cmd;
3432 BT_DBG("status %u", status);
3434 cmd = find_pairing(conn);
3436 BT_DBG("Unable to find a pending command");
3440 cmd->cmd_complete(cmd, mgmt_status(status));
3441 mgmt_pending_remove(cmd);
3444 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3446 struct mgmt_pending_cmd *cmd;
3448 BT_DBG("status %u", status);
3453 cmd = find_pairing(conn);
3455 BT_DBG("Unable to find a pending command");
3459 cmd->cmd_complete(cmd, mgmt_status(status));
3460 mgmt_pending_remove(cmd);
3463 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3466 struct mgmt_cp_pair_device *cp = data;
3467 struct mgmt_rp_pair_device rp;
3468 struct mgmt_pending_cmd *cmd;
3469 u8 sec_level, auth_type;
3470 struct hci_conn *conn;
3473 bt_dev_dbg(hdev, "sock %p", sk);
3475 memset(&rp, 0, sizeof(rp));
3476 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3477 rp.addr.type = cp->addr.type;
3479 if (!bdaddr_type_is_valid(cp->addr.type))
3480 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3481 MGMT_STATUS_INVALID_PARAMS,
3484 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3485 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3486 MGMT_STATUS_INVALID_PARAMS,
3491 if (!hdev_is_powered(hdev)) {
3492 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3493 MGMT_STATUS_NOT_POWERED, &rp,
3498 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3499 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3500 MGMT_STATUS_ALREADY_PAIRED, &rp,
3505 sec_level = BT_SECURITY_MEDIUM;
3506 auth_type = HCI_AT_DEDICATED_BONDING;
3508 if (cp->addr.type == BDADDR_BREDR) {
3509 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3510 auth_type, CONN_REASON_PAIR_DEVICE);
3512 u8 addr_type = le_addr_type(cp->addr.type);
3513 struct hci_conn_params *p;
3515 /* When pairing a new device, it is expected to remember
3516 * this device for future connections. Adding the connection
3517 * parameter information ahead of time allows tracking
3518 * of the peripheral preferred values and will speed up any
3519 * further connection establishment.
3521 * If connection parameters already exist, then they
3522 * will be kept and this function does nothing.
3524 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3526 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3527 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3529 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3530 sec_level, HCI_LE_CONN_TIMEOUT,
3531 CONN_REASON_PAIR_DEVICE);
3537 if (PTR_ERR(conn) == -EBUSY)
3538 status = MGMT_STATUS_BUSY;
3539 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3540 status = MGMT_STATUS_NOT_SUPPORTED;
3541 else if (PTR_ERR(conn) == -ECONNREFUSED)
3542 status = MGMT_STATUS_REJECTED;
3544 status = MGMT_STATUS_CONNECT_FAILED;
3546 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3547 status, &rp, sizeof(rp));
3551 if (conn->connect_cfm_cb) {
3552 hci_conn_drop(conn);
3553 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3554 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3558 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3561 hci_conn_drop(conn);
3565 cmd->cmd_complete = pairing_complete;
3567 /* For LE, just connecting isn't a proof that the pairing finished */
3568 if (cp->addr.type == BDADDR_BREDR) {
3569 conn->connect_cfm_cb = pairing_complete_cb;
3570 conn->security_cfm_cb = pairing_complete_cb;
3571 conn->disconn_cfm_cb = pairing_complete_cb;
3573 conn->connect_cfm_cb = le_pairing_complete_cb;
3574 conn->security_cfm_cb = le_pairing_complete_cb;
3575 conn->disconn_cfm_cb = le_pairing_complete_cb;
3578 conn->io_capability = cp->io_cap;
3579 cmd->user_data = hci_conn_get(conn);
3581 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3582 hci_conn_security(conn, sec_level, auth_type, true)) {
3583 cmd->cmd_complete(cmd, 0);
3584 mgmt_pending_remove(cmd);
3590 hci_dev_unlock(hdev);
3594 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3597 struct mgmt_addr_info *addr = data;
3598 struct mgmt_pending_cmd *cmd;
3599 struct hci_conn *conn;
3602 bt_dev_dbg(hdev, "sock %p", sk);
3606 if (!hdev_is_powered(hdev)) {
3607 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3608 MGMT_STATUS_NOT_POWERED);
3612 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3614 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3615 MGMT_STATUS_INVALID_PARAMS);
3619 conn = cmd->user_data;
3621 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3622 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3623 MGMT_STATUS_INVALID_PARAMS);
3627 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3628 mgmt_pending_remove(cmd);
3630 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3631 addr, sizeof(*addr));
3633 /* Since user doesn't want to proceed with the connection, abort any
3634 * ongoing pairing and then terminate the link if it was created
3635 * because of the pair device action.
3637 if (addr->type == BDADDR_BREDR)
3638 hci_remove_link_key(hdev, &addr->bdaddr);
3640 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3641 le_addr_type(addr->type));
3643 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3644 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3647 hci_dev_unlock(hdev);
3651 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3652 struct mgmt_addr_info *addr, u16 mgmt_op,
3653 u16 hci_op, __le32 passkey)
3655 struct mgmt_pending_cmd *cmd;
3656 struct hci_conn *conn;
3661 if (!hdev_is_powered(hdev)) {
3662 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3663 MGMT_STATUS_NOT_POWERED, addr,
3668 if (addr->type == BDADDR_BREDR)
3669 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3671 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3672 le_addr_type(addr->type));
3675 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3676 MGMT_STATUS_NOT_CONNECTED, addr,
3681 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3682 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3684 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3685 MGMT_STATUS_SUCCESS, addr,
3688 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3689 MGMT_STATUS_FAILED, addr,
3695 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3701 cmd->cmd_complete = addr_cmd_complete;
3703 /* Continue with pairing via HCI */
3704 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3705 struct hci_cp_user_passkey_reply cp;
3707 bacpy(&cp.bdaddr, &addr->bdaddr);
3708 cp.passkey = passkey;
3709 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3711 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3715 mgmt_pending_remove(cmd);
3718 hci_dev_unlock(hdev);
3722 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3723 void *data, u16 len)
3725 struct mgmt_cp_pin_code_neg_reply *cp = data;
3727 bt_dev_dbg(hdev, "sock %p", sk);
3729 return user_pairing_resp(sk, hdev, &cp->addr,
3730 MGMT_OP_PIN_CODE_NEG_REPLY,
3731 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3734 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3737 struct mgmt_cp_user_confirm_reply *cp = data;
3739 bt_dev_dbg(hdev, "sock %p", sk);
3741 if (len != sizeof(*cp))
3742 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3743 MGMT_STATUS_INVALID_PARAMS);
3745 return user_pairing_resp(sk, hdev, &cp->addr,
3746 MGMT_OP_USER_CONFIRM_REPLY,
3747 HCI_OP_USER_CONFIRM_REPLY, 0);
3750 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3751 void *data, u16 len)
3753 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3755 bt_dev_dbg(hdev, "sock %p", sk);
3757 return user_pairing_resp(sk, hdev, &cp->addr,
3758 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3759 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3762 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3765 struct mgmt_cp_user_passkey_reply *cp = data;
3767 bt_dev_dbg(hdev, "sock %p", sk);
3769 return user_pairing_resp(sk, hdev, &cp->addr,
3770 MGMT_OP_USER_PASSKEY_REPLY,
3771 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3774 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3775 void *data, u16 len)
3777 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3779 bt_dev_dbg(hdev, "sock %p", sk);
3781 return user_pairing_resp(sk, hdev, &cp->addr,
3782 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3783 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3786 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3788 struct adv_info *adv_instance;
3790 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3794 /* stop if current instance doesn't need to be changed */
3795 if (!(adv_instance->flags & flags))
3798 cancel_adv_timeout(hdev);
3800 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3804 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3809 static int name_changed_sync(struct hci_dev *hdev, void *data)
3811 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3814 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3816 struct mgmt_pending_cmd *cmd = data;
3817 struct mgmt_cp_set_local_name *cp = cmd->param;
3818 u8 status = mgmt_status(err);
3820 bt_dev_dbg(hdev, "err %d", err);
3822 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3826 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3829 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3832 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3833 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3836 mgmt_pending_remove(cmd);
3839 static int set_name_sync(struct hci_dev *hdev, void *data)
3841 if (lmp_bredr_capable(hdev)) {
3842 hci_update_name_sync(hdev);
3843 hci_update_eir_sync(hdev);
3846 /* The name is stored in the scan response data and so
3847 * no need to update the advertising data here.
3849 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3850 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3855 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3858 struct mgmt_cp_set_local_name *cp = data;
3859 struct mgmt_pending_cmd *cmd;
3862 bt_dev_dbg(hdev, "sock %p", sk);
3866 /* If the old values are the same as the new ones just return a
3867 * direct command complete event.
3869 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3870 !memcmp(hdev->short_name, cp->short_name,
3871 sizeof(hdev->short_name))) {
3872 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3877 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3879 if (!hdev_is_powered(hdev)) {
3880 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3882 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3887 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3888 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3889 ext_info_changed(hdev, sk);
3894 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3898 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3902 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3903 MGMT_STATUS_FAILED);
3906 mgmt_pending_remove(cmd);
3911 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3914 hci_dev_unlock(hdev);
3918 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3920 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3923 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3926 struct mgmt_cp_set_appearance *cp = data;
3930 bt_dev_dbg(hdev, "sock %p", sk);
3932 if (!lmp_le_capable(hdev))
3933 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3934 MGMT_STATUS_NOT_SUPPORTED);
3936 appearance = le16_to_cpu(cp->appearance);
3940 if (hdev->appearance != appearance) {
3941 hdev->appearance = appearance;
3943 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3944 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3947 ext_info_changed(hdev, sk);
3950 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3953 hci_dev_unlock(hdev);
3958 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3959 void *data, u16 len)
3961 struct mgmt_rp_get_phy_configuration rp;
3963 bt_dev_dbg(hdev, "sock %p", sk);
3967 memset(&rp, 0, sizeof(rp));
3969 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3970 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3971 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3973 hci_dev_unlock(hdev);
3975 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3979 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3981 struct mgmt_ev_phy_configuration_changed ev;
3983 memset(&ev, 0, sizeof(ev));
3985 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3987 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3991 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3993 struct mgmt_pending_cmd *cmd = data;
3994 struct sk_buff *skb = cmd->skb;
3995 u8 status = mgmt_status(err);
3997 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4002 status = MGMT_STATUS_FAILED;
4003 else if (IS_ERR(skb))
4004 status = mgmt_status(PTR_ERR(skb));
4006 status = mgmt_status(skb->data[0]);
4009 bt_dev_dbg(hdev, "status %d", status);
4012 mgmt_cmd_status(cmd->sk, hdev->id,
4013 MGMT_OP_SET_PHY_CONFIGURATION, status);
4015 mgmt_cmd_complete(cmd->sk, hdev->id,
4016 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4019 mgmt_phy_configuration_changed(hdev, cmd->sk);
4022 if (skb && !IS_ERR(skb))
4025 mgmt_pending_remove(cmd);
4028 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4030 struct mgmt_pending_cmd *cmd = data;
4031 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4032 struct hci_cp_le_set_default_phy cp_phy;
4033 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4035 memset(&cp_phy, 0, sizeof(cp_phy));
4037 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4038 cp_phy.all_phys |= 0x01;
4040 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4041 cp_phy.all_phys |= 0x02;
4043 if (selected_phys & MGMT_PHY_LE_1M_TX)
4044 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4046 if (selected_phys & MGMT_PHY_LE_2M_TX)
4047 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4049 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4050 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4052 if (selected_phys & MGMT_PHY_LE_1M_RX)
4053 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4055 if (selected_phys & MGMT_PHY_LE_2M_RX)
4056 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4058 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4059 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4061 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4062 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4067 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4068 void *data, u16 len)
4070 struct mgmt_cp_set_phy_configuration *cp = data;
4071 struct mgmt_pending_cmd *cmd;
4072 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4073 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4074 bool changed = false;
4077 bt_dev_dbg(hdev, "sock %p", sk);
4079 configurable_phys = get_configurable_phys(hdev);
4080 supported_phys = get_supported_phys(hdev);
4081 selected_phys = __le32_to_cpu(cp->selected_phys);
4083 if (selected_phys & ~supported_phys)
4084 return mgmt_cmd_status(sk, hdev->id,
4085 MGMT_OP_SET_PHY_CONFIGURATION,
4086 MGMT_STATUS_INVALID_PARAMS);
4088 unconfigure_phys = supported_phys & ~configurable_phys;
4090 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4091 return mgmt_cmd_status(sk, hdev->id,
4092 MGMT_OP_SET_PHY_CONFIGURATION,
4093 MGMT_STATUS_INVALID_PARAMS);
4095 if (selected_phys == get_selected_phys(hdev))
4096 return mgmt_cmd_complete(sk, hdev->id,
4097 MGMT_OP_SET_PHY_CONFIGURATION,
4102 if (!hdev_is_powered(hdev)) {
4103 err = mgmt_cmd_status(sk, hdev->id,
4104 MGMT_OP_SET_PHY_CONFIGURATION,
4105 MGMT_STATUS_REJECTED);
4109 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4110 err = mgmt_cmd_status(sk, hdev->id,
4111 MGMT_OP_SET_PHY_CONFIGURATION,
4116 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4117 pkt_type |= (HCI_DH3 | HCI_DM3);
4119 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4121 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4122 pkt_type |= (HCI_DH5 | HCI_DM5);
4124 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4126 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4127 pkt_type &= ~HCI_2DH1;
4129 pkt_type |= HCI_2DH1;
4131 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4132 pkt_type &= ~HCI_2DH3;
4134 pkt_type |= HCI_2DH3;
4136 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4137 pkt_type &= ~HCI_2DH5;
4139 pkt_type |= HCI_2DH5;
4141 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4142 pkt_type &= ~HCI_3DH1;
4144 pkt_type |= HCI_3DH1;
4146 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4147 pkt_type &= ~HCI_3DH3;
4149 pkt_type |= HCI_3DH3;
4151 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4152 pkt_type &= ~HCI_3DH5;
4154 pkt_type |= HCI_3DH5;
4156 if (pkt_type != hdev->pkt_type) {
4157 hdev->pkt_type = pkt_type;
4161 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4162 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4164 mgmt_phy_configuration_changed(hdev, sk);
4166 err = mgmt_cmd_complete(sk, hdev->id,
4167 MGMT_OP_SET_PHY_CONFIGURATION,
4173 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4178 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4179 set_default_phy_complete);
4182 err = mgmt_cmd_status(sk, hdev->id,
4183 MGMT_OP_SET_PHY_CONFIGURATION,
4184 MGMT_STATUS_FAILED);
4187 mgmt_pending_remove(cmd);
4191 hci_dev_unlock(hdev);
4196 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4199 int err = MGMT_STATUS_SUCCESS;
4200 struct mgmt_cp_set_blocked_keys *keys = data;
4201 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4202 sizeof(struct mgmt_blocked_key_info));
4203 u16 key_count, expected_len;
4206 bt_dev_dbg(hdev, "sock %p", sk);
4208 key_count = __le16_to_cpu(keys->key_count);
4209 if (key_count > max_key_count) {
4210 bt_dev_err(hdev, "too big key_count value %u", key_count);
4211 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4212 MGMT_STATUS_INVALID_PARAMS);
4215 expected_len = struct_size(keys, keys, key_count);
4216 if (expected_len != len) {
4217 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4219 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4220 MGMT_STATUS_INVALID_PARAMS);
4225 hci_blocked_keys_clear(hdev);
4227 for (i = 0; i < key_count; ++i) {
4228 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4231 err = MGMT_STATUS_NO_RESOURCES;
4235 b->type = keys->keys[i].type;
4236 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4237 list_add_rcu(&b->list, &hdev->blocked_keys);
4239 hci_dev_unlock(hdev);
4241 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4245 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4246 void *data, u16 len)
4248 struct mgmt_mode *cp = data;
4250 bool changed = false;
4252 bt_dev_dbg(hdev, "sock %p", sk);
4254 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4255 return mgmt_cmd_status(sk, hdev->id,
4256 MGMT_OP_SET_WIDEBAND_SPEECH,
4257 MGMT_STATUS_NOT_SUPPORTED);
4259 if (cp->val != 0x00 && cp->val != 0x01)
4260 return mgmt_cmd_status(sk, hdev->id,
4261 MGMT_OP_SET_WIDEBAND_SPEECH,
4262 MGMT_STATUS_INVALID_PARAMS);
4266 if (hdev_is_powered(hdev) &&
4267 !!cp->val != hci_dev_test_flag(hdev,
4268 HCI_WIDEBAND_SPEECH_ENABLED)) {
4269 err = mgmt_cmd_status(sk, hdev->id,
4270 MGMT_OP_SET_WIDEBAND_SPEECH,
4271 MGMT_STATUS_REJECTED);
4276 changed = !hci_dev_test_and_set_flag(hdev,
4277 HCI_WIDEBAND_SPEECH_ENABLED);
4279 changed = hci_dev_test_and_clear_flag(hdev,
4280 HCI_WIDEBAND_SPEECH_ENABLED);
4282 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4287 err = new_settings(hdev, sk);
4290 hci_dev_unlock(hdev);
4294 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4295 void *data, u16 data_len)
4298 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4301 u8 tx_power_range[2];
4303 bt_dev_dbg(hdev, "sock %p", sk);
4305 memset(&buf, 0, sizeof(buf));
4309 /* When the Read Simple Pairing Options command is supported, then
4310 * the remote public key validation is supported.
4312 * Alternatively, when Microsoft extensions are available, they can
4313 * indicate support for public key validation as well.
4315 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4316 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4318 flags |= 0x02; /* Remote public key validation (LE) */
4320 /* When the Read Encryption Key Size command is supported, then the
4321 * encryption key size is enforced.
4323 if (hdev->commands[20] & 0x10)
4324 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4326 flags |= 0x08; /* Encryption key size enforcement (LE) */
4328 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4331 /* When the Read Simple Pairing Options command is supported, then
4332 * also max encryption key size information is provided.
4334 if (hdev->commands[41] & 0x08)
4335 cap_len = eir_append_le16(rp->cap, cap_len,
4336 MGMT_CAP_MAX_ENC_KEY_SIZE,
4337 hdev->max_enc_key_size);
4339 cap_len = eir_append_le16(rp->cap, cap_len,
4340 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4341 SMP_MAX_ENC_KEY_SIZE);
4343 /* Append the min/max LE tx power parameters if we were able to fetch
4344 * it from the controller
4346 if (hdev->commands[38] & 0x80) {
4347 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4348 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4349 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4353 rp->cap_len = cpu_to_le16(cap_len);
4355 hci_dev_unlock(hdev);
4357 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4358 rp, sizeof(*rp) + cap_len);
4361 #ifdef CONFIG_BT_FEATURE_DEBUG
4362 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4363 static const u8 debug_uuid[16] = {
4364 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4365 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4369 /* 330859bc-7506-492d-9370-9a6f0614037f */
4370 static const u8 quality_report_uuid[16] = {
4371 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4372 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4375 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4376 static const u8 offload_codecs_uuid[16] = {
4377 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4378 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4381 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4382 static const u8 le_simultaneous_roles_uuid[16] = {
4383 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4384 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4387 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4388 static const u8 rpa_resolution_uuid[16] = {
4389 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4390 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4393 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4394 static const u8 iso_socket_uuid[16] = {
4395 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4396 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4399 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4400 static const u8 mgmt_mesh_uuid[16] = {
4401 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4402 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4405 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4406 void *data, u16 data_len)
4408 struct mgmt_rp_read_exp_features_info *rp;
4414 bt_dev_dbg(hdev, "sock %p", sk);
4416 /* Enough space for 7 features */
4417 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4418 rp = kzalloc(len, GFP_KERNEL);
4422 #ifdef CONFIG_BT_FEATURE_DEBUG
4424 flags = bt_dbg_get() ? BIT(0) : 0;
4426 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4427 rp->features[idx].flags = cpu_to_le32(flags);
4432 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4433 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4438 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4439 rp->features[idx].flags = cpu_to_le32(flags);
4443 if (hdev && ll_privacy_capable(hdev)) {
4444 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4445 flags = BIT(0) | BIT(1);
4449 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4450 rp->features[idx].flags = cpu_to_le32(flags);
4454 if (hdev && (aosp_has_quality_report(hdev) ||
4455 hdev->set_quality_report)) {
4456 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4461 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4462 rp->features[idx].flags = cpu_to_le32(flags);
4466 if (hdev && hdev->get_data_path_id) {
4467 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4472 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4473 rp->features[idx].flags = cpu_to_le32(flags);
4477 if (IS_ENABLED(CONFIG_BT_LE)) {
4478 flags = iso_enabled() ? BIT(0) : 0;
4479 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4480 rp->features[idx].flags = cpu_to_le32(flags);
4484 if (hdev && lmp_le_capable(hdev)) {
4485 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4490 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4491 rp->features[idx].flags = cpu_to_le32(flags);
4495 rp->feature_count = cpu_to_le16(idx);
4497 /* After reading the experimental features information, enable
4498 * the events to update client on any future change.
4500 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4502 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4503 MGMT_OP_READ_EXP_FEATURES_INFO,
4504 0, rp, sizeof(*rp) + (20 * idx));
4510 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4513 struct mgmt_ev_exp_feature_changed ev;
4515 memset(&ev, 0, sizeof(ev));
4516 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4517 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4519 // Do we need to be atomic with the conn_flags?
4520 if (enabled && privacy_mode_capable(hdev))
4521 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4523 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4525 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4527 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4531 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4532 bool enabled, struct sock *skip)
4534 struct mgmt_ev_exp_feature_changed ev;
4536 memset(&ev, 0, sizeof(ev));
4537 memcpy(ev.uuid, uuid, 16);
4538 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4540 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4542 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4545 #define EXP_FEAT(_uuid, _set_func) \
4548 .set_func = _set_func, \
4551 /* The zero key uuid is special. Multiple exp features are set through it. */
4552 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4553 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4555 struct mgmt_rp_set_exp_feature rp;
4557 memset(rp.uuid, 0, 16);
4558 rp.flags = cpu_to_le32(0);
4560 #ifdef CONFIG_BT_FEATURE_DEBUG
4562 bool changed = bt_dbg_get();
4567 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4571 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4574 changed = hci_dev_test_and_clear_flag(hdev,
4575 HCI_ENABLE_LL_PRIVACY);
4577 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4581 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4583 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4584 MGMT_OP_SET_EXP_FEATURE, 0,
4588 #ifdef CONFIG_BT_FEATURE_DEBUG
4589 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4590 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4592 struct mgmt_rp_set_exp_feature rp;
4597 /* Command requires to use the non-controller index */
4599 return mgmt_cmd_status(sk, hdev->id,
4600 MGMT_OP_SET_EXP_FEATURE,
4601 MGMT_STATUS_INVALID_INDEX);
4603 /* Parameters are limited to a single octet */
4604 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4605 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4606 MGMT_OP_SET_EXP_FEATURE,
4607 MGMT_STATUS_INVALID_PARAMS);
4609 /* Only boolean on/off is supported */
4610 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4611 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4612 MGMT_OP_SET_EXP_FEATURE,
4613 MGMT_STATUS_INVALID_PARAMS);
4615 val = !!cp->param[0];
4616 changed = val ? !bt_dbg_get() : bt_dbg_get();
4619 memcpy(rp.uuid, debug_uuid, 16);
4620 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4622 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4624 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4625 MGMT_OP_SET_EXP_FEATURE, 0,
4629 exp_feature_changed(hdev, debug_uuid, val, sk);
4635 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4636 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4638 struct mgmt_rp_set_exp_feature rp;
4642 /* Command requires to use the controller index */
4644 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4645 MGMT_OP_SET_EXP_FEATURE,
4646 MGMT_STATUS_INVALID_INDEX);
4648 /* Parameters are limited to a single octet */
4649 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4650 return mgmt_cmd_status(sk, hdev->id,
4651 MGMT_OP_SET_EXP_FEATURE,
4652 MGMT_STATUS_INVALID_PARAMS);
4654 /* Only boolean on/off is supported */
4655 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4656 return mgmt_cmd_status(sk, hdev->id,
4657 MGMT_OP_SET_EXP_FEATURE,
4658 MGMT_STATUS_INVALID_PARAMS);
4660 val = !!cp->param[0];
4663 changed = !hci_dev_test_and_set_flag(hdev,
4664 HCI_MESH_EXPERIMENTAL);
4666 hci_dev_clear_flag(hdev, HCI_MESH);
4667 changed = hci_dev_test_and_clear_flag(hdev,
4668 HCI_MESH_EXPERIMENTAL);
4671 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4672 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4674 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4676 err = mgmt_cmd_complete(sk, hdev->id,
4677 MGMT_OP_SET_EXP_FEATURE, 0,
4681 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4686 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4687 struct mgmt_cp_set_exp_feature *cp,
4690 struct mgmt_rp_set_exp_feature rp;
4695 /* Command requires to use the controller index */
4697 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4698 MGMT_OP_SET_EXP_FEATURE,
4699 MGMT_STATUS_INVALID_INDEX);
4701 /* Changes can only be made when controller is powered down */
4702 if (hdev_is_powered(hdev))
4703 return mgmt_cmd_status(sk, hdev->id,
4704 MGMT_OP_SET_EXP_FEATURE,
4705 MGMT_STATUS_REJECTED);
4707 /* Parameters are limited to a single octet */
4708 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4709 return mgmt_cmd_status(sk, hdev->id,
4710 MGMT_OP_SET_EXP_FEATURE,
4711 MGMT_STATUS_INVALID_PARAMS);
4713 /* Only boolean on/off is supported */
4714 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4715 return mgmt_cmd_status(sk, hdev->id,
4716 MGMT_OP_SET_EXP_FEATURE,
4717 MGMT_STATUS_INVALID_PARAMS);
4719 val = !!cp->param[0];
4722 changed = !hci_dev_test_and_set_flag(hdev,
4723 HCI_ENABLE_LL_PRIVACY);
4724 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4726 /* Enable LL privacy + supported settings changed */
4727 flags = BIT(0) | BIT(1);
4729 changed = hci_dev_test_and_clear_flag(hdev,
4730 HCI_ENABLE_LL_PRIVACY);
4732 /* Disable LL privacy + supported settings changed */
4736 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4737 rp.flags = cpu_to_le32(flags);
4739 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4741 err = mgmt_cmd_complete(sk, hdev->id,
4742 MGMT_OP_SET_EXP_FEATURE, 0,
4746 exp_ll_privacy_feature_changed(val, hdev, sk);
4751 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4752 struct mgmt_cp_set_exp_feature *cp,
4755 struct mgmt_rp_set_exp_feature rp;
4759 /* Command requires to use a valid controller index */
4761 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4762 MGMT_OP_SET_EXP_FEATURE,
4763 MGMT_STATUS_INVALID_INDEX);
4765 /* Parameters are limited to a single octet */
4766 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4767 return mgmt_cmd_status(sk, hdev->id,
4768 MGMT_OP_SET_EXP_FEATURE,
4769 MGMT_STATUS_INVALID_PARAMS);
4771 /* Only boolean on/off is supported */
4772 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4773 return mgmt_cmd_status(sk, hdev->id,
4774 MGMT_OP_SET_EXP_FEATURE,
4775 MGMT_STATUS_INVALID_PARAMS);
4777 hci_req_sync_lock(hdev);
4779 val = !!cp->param[0];
4780 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4782 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4783 err = mgmt_cmd_status(sk, hdev->id,
4784 MGMT_OP_SET_EXP_FEATURE,
4785 MGMT_STATUS_NOT_SUPPORTED);
4786 goto unlock_quality_report;
4790 if (hdev->set_quality_report)
4791 err = hdev->set_quality_report(hdev, val);
4793 err = aosp_set_quality_report(hdev, val);
4796 err = mgmt_cmd_status(sk, hdev->id,
4797 MGMT_OP_SET_EXP_FEATURE,
4798 MGMT_STATUS_FAILED);
4799 goto unlock_quality_report;
4803 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4805 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4808 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4810 memcpy(rp.uuid, quality_report_uuid, 16);
4811 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4812 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4814 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4818 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4820 unlock_quality_report:
4821 hci_req_sync_unlock(hdev);
4825 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4826 struct mgmt_cp_set_exp_feature *cp,
4831 struct mgmt_rp_set_exp_feature rp;
4833 /* Command requires to use a valid controller index */
4835 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4836 MGMT_OP_SET_EXP_FEATURE,
4837 MGMT_STATUS_INVALID_INDEX);
4839 /* Parameters are limited to a single octet */
4840 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4841 return mgmt_cmd_status(sk, hdev->id,
4842 MGMT_OP_SET_EXP_FEATURE,
4843 MGMT_STATUS_INVALID_PARAMS);
4845 /* Only boolean on/off is supported */
4846 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4847 return mgmt_cmd_status(sk, hdev->id,
4848 MGMT_OP_SET_EXP_FEATURE,
4849 MGMT_STATUS_INVALID_PARAMS);
4851 val = !!cp->param[0];
4852 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4854 if (!hdev->get_data_path_id) {
4855 return mgmt_cmd_status(sk, hdev->id,
4856 MGMT_OP_SET_EXP_FEATURE,
4857 MGMT_STATUS_NOT_SUPPORTED);
4862 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4864 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4867 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4870 memcpy(rp.uuid, offload_codecs_uuid, 16);
4871 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4872 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4873 err = mgmt_cmd_complete(sk, hdev->id,
4874 MGMT_OP_SET_EXP_FEATURE, 0,
4878 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4883 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4884 struct mgmt_cp_set_exp_feature *cp,
4889 struct mgmt_rp_set_exp_feature rp;
4891 /* Command requires to use a valid controller index */
4893 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4894 MGMT_OP_SET_EXP_FEATURE,
4895 MGMT_STATUS_INVALID_INDEX);
4897 /* Parameters are limited to a single octet */
4898 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4899 return mgmt_cmd_status(sk, hdev->id,
4900 MGMT_OP_SET_EXP_FEATURE,
4901 MGMT_STATUS_INVALID_PARAMS);
4903 /* Only boolean on/off is supported */
4904 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4905 return mgmt_cmd_status(sk, hdev->id,
4906 MGMT_OP_SET_EXP_FEATURE,
4907 MGMT_STATUS_INVALID_PARAMS);
4909 val = !!cp->param[0];
4910 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4912 if (!hci_dev_le_state_simultaneous(hdev)) {
4913 return mgmt_cmd_status(sk, hdev->id,
4914 MGMT_OP_SET_EXP_FEATURE,
4915 MGMT_STATUS_NOT_SUPPORTED);
4920 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4922 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4925 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4928 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4929 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4930 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4931 err = mgmt_cmd_complete(sk, hdev->id,
4932 MGMT_OP_SET_EXP_FEATURE, 0,
4936 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4942 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4943 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4945 struct mgmt_rp_set_exp_feature rp;
4946 bool val, changed = false;
4949 /* Command requires to use the non-controller index */
4951 return mgmt_cmd_status(sk, hdev->id,
4952 MGMT_OP_SET_EXP_FEATURE,
4953 MGMT_STATUS_INVALID_INDEX);
4955 /* Parameters are limited to a single octet */
4956 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4957 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4958 MGMT_OP_SET_EXP_FEATURE,
4959 MGMT_STATUS_INVALID_PARAMS);
4961 /* Only boolean on/off is supported */
4962 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4963 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4964 MGMT_OP_SET_EXP_FEATURE,
4965 MGMT_STATUS_INVALID_PARAMS);
4967 val = cp->param[0] ? true : false;
4976 memcpy(rp.uuid, iso_socket_uuid, 16);
4977 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4979 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4981 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4982 MGMT_OP_SET_EXP_FEATURE, 0,
4986 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4992 static const struct mgmt_exp_feature {
4994 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4995 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4996 } exp_features[] = {
4997 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4998 #ifdef CONFIG_BT_FEATURE_DEBUG
4999 EXP_FEAT(debug_uuid, set_debug_func),
5001 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5002 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5003 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5004 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5005 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5007 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5010 /* end with a null feature */
5011 EXP_FEAT(NULL, NULL)
5014 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5015 void *data, u16 data_len)
5017 struct mgmt_cp_set_exp_feature *cp = data;
5020 bt_dev_dbg(hdev, "sock %p", sk);
5022 for (i = 0; exp_features[i].uuid; i++) {
5023 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5024 return exp_features[i].set_func(sk, hdev, cp, data_len);
5027 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5028 MGMT_OP_SET_EXP_FEATURE,
5029 MGMT_STATUS_NOT_SUPPORTED);
5032 static u32 get_params_flags(struct hci_dev *hdev,
5033 struct hci_conn_params *params)
5035 u32 flags = hdev->conn_flags;
5037 /* Devices using RPAs can only be programmed in the acceptlist if
5038 * LL Privacy has been enable otherwise they cannot mark
5039 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5041 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5042 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type))
5043 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5048 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5051 struct mgmt_cp_get_device_flags *cp = data;
5052 struct mgmt_rp_get_device_flags rp;
5053 struct bdaddr_list_with_flags *br_params;
5054 struct hci_conn_params *params;
5055 u32 supported_flags;
5056 u32 current_flags = 0;
5057 u8 status = MGMT_STATUS_INVALID_PARAMS;
5059 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5060 &cp->addr.bdaddr, cp->addr.type);
5064 supported_flags = hdev->conn_flags;
5066 memset(&rp, 0, sizeof(rp));
5068 if (cp->addr.type == BDADDR_BREDR) {
5069 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5075 current_flags = br_params->flags;
5077 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5078 le_addr_type(cp->addr.type));
5082 supported_flags = get_params_flags(hdev, params);
5083 current_flags = params->flags;
5086 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5087 rp.addr.type = cp->addr.type;
5088 rp.supported_flags = cpu_to_le32(supported_flags);
5089 rp.current_flags = cpu_to_le32(current_flags);
5091 status = MGMT_STATUS_SUCCESS;
5094 hci_dev_unlock(hdev);
5096 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5100 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5101 bdaddr_t *bdaddr, u8 bdaddr_type,
5102 u32 supported_flags, u32 current_flags)
5104 struct mgmt_ev_device_flags_changed ev;
5106 bacpy(&ev.addr.bdaddr, bdaddr);
5107 ev.addr.type = bdaddr_type;
5108 ev.supported_flags = cpu_to_le32(supported_flags);
5109 ev.current_flags = cpu_to_le32(current_flags);
5111 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5114 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5117 struct mgmt_cp_set_device_flags *cp = data;
5118 struct bdaddr_list_with_flags *br_params;
5119 struct hci_conn_params *params;
5120 u8 status = MGMT_STATUS_INVALID_PARAMS;
5121 u32 supported_flags;
5122 u32 current_flags = __le32_to_cpu(cp->current_flags);
5124 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5125 &cp->addr.bdaddr, cp->addr.type, current_flags);
5127 // We should take hci_dev_lock() early, I think.. conn_flags can change
5128 supported_flags = hdev->conn_flags;
5130 if ((supported_flags | current_flags) != supported_flags) {
5131 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5132 current_flags, supported_flags);
5138 if (cp->addr.type == BDADDR_BREDR) {
5139 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5144 br_params->flags = current_flags;
5145 status = MGMT_STATUS_SUCCESS;
5147 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5148 &cp->addr.bdaddr, cp->addr.type);
5154 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5155 le_addr_type(cp->addr.type));
5157 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5158 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5162 supported_flags = get_params_flags(hdev, params);
5164 if ((supported_flags | current_flags) != supported_flags) {
5165 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5166 current_flags, supported_flags);
5170 WRITE_ONCE(params->flags, current_flags);
5171 status = MGMT_STATUS_SUCCESS;
5173 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5176 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5177 hci_update_passive_scan(hdev);
5180 hci_dev_unlock(hdev);
5183 if (status == MGMT_STATUS_SUCCESS)
5184 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5185 supported_flags, current_flags);
5187 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5188 &cp->addr, sizeof(cp->addr));
5191 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5194 struct mgmt_ev_adv_monitor_added ev;
5196 ev.monitor_handle = cpu_to_le16(handle);
5198 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5201 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5203 struct mgmt_ev_adv_monitor_removed ev;
5204 struct mgmt_pending_cmd *cmd;
5205 struct sock *sk_skip = NULL;
5206 struct mgmt_cp_remove_adv_monitor *cp;
5208 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5212 if (cp->monitor_handle)
5216 ev.monitor_handle = cpu_to_le16(handle);
5218 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5221 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5222 void *data, u16 len)
5224 struct adv_monitor *monitor = NULL;
5225 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5228 __u32 supported = 0;
5230 __u16 num_handles = 0;
5231 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5233 BT_DBG("request for %s", hdev->name);
5237 if (msft_monitor_supported(hdev))
5238 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5240 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5241 handles[num_handles++] = monitor->handle;
5243 hci_dev_unlock(hdev);
5245 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5246 rp = kmalloc(rp_size, GFP_KERNEL);
5250 /* All supported features are currently enabled */
5251 enabled = supported;
5253 rp->supported_features = cpu_to_le32(supported);
5254 rp->enabled_features = cpu_to_le32(enabled);
5255 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5256 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5257 rp->num_handles = cpu_to_le16(num_handles);
5259 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5261 err = mgmt_cmd_complete(sk, hdev->id,
5262 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5263 MGMT_STATUS_SUCCESS, rp, rp_size);
5270 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5271 void *data, int status)
5273 struct mgmt_rp_add_adv_patterns_monitor rp;
5274 struct mgmt_pending_cmd *cmd = data;
5275 struct adv_monitor *monitor = cmd->user_data;
5279 rp.monitor_handle = cpu_to_le16(monitor->handle);
5282 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5283 hdev->adv_monitors_cnt++;
5284 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5285 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5286 hci_update_passive_scan(hdev);
5289 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5290 mgmt_status(status), &rp, sizeof(rp));
5291 mgmt_pending_remove(cmd);
5293 hci_dev_unlock(hdev);
5294 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5295 rp.monitor_handle, status);
5298 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5300 struct mgmt_pending_cmd *cmd = data;
5301 struct adv_monitor *monitor = cmd->user_data;
5303 return hci_add_adv_monitor(hdev, monitor);
5306 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5307 struct adv_monitor *m, u8 status,
5308 void *data, u16 len, u16 op)
5310 struct mgmt_pending_cmd *cmd;
5318 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5319 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5320 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5321 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5322 status = MGMT_STATUS_BUSY;
5326 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5328 status = MGMT_STATUS_NO_RESOURCES;
5333 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5334 mgmt_add_adv_patterns_monitor_complete);
5337 status = MGMT_STATUS_NO_RESOURCES;
5339 status = MGMT_STATUS_FAILED;
5344 hci_dev_unlock(hdev);
5349 hci_free_adv_monitor(hdev, m);
5350 hci_dev_unlock(hdev);
5351 return mgmt_cmd_status(sk, hdev->id, op, status);
5354 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5355 struct mgmt_adv_rssi_thresholds *rssi)
5358 m->rssi.low_threshold = rssi->low_threshold;
5359 m->rssi.low_threshold_timeout =
5360 __le16_to_cpu(rssi->low_threshold_timeout);
5361 m->rssi.high_threshold = rssi->high_threshold;
5362 m->rssi.high_threshold_timeout =
5363 __le16_to_cpu(rssi->high_threshold_timeout);
5364 m->rssi.sampling_period = rssi->sampling_period;
5366 /* Default values. These numbers are the least constricting
5367 * parameters for MSFT API to work, so it behaves as if there
5368 * are no rssi parameter to consider. May need to be changed
5369 * if other API are to be supported.
5371 m->rssi.low_threshold = -127;
5372 m->rssi.low_threshold_timeout = 60;
5373 m->rssi.high_threshold = -127;
5374 m->rssi.high_threshold_timeout = 0;
5375 m->rssi.sampling_period = 0;
5379 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5380 struct mgmt_adv_pattern *patterns)
5382 u8 offset = 0, length = 0;
5383 struct adv_pattern *p = NULL;
5386 for (i = 0; i < pattern_count; i++) {
5387 offset = patterns[i].offset;
5388 length = patterns[i].length;
5389 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5390 length > HCI_MAX_EXT_AD_LENGTH ||
5391 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5392 return MGMT_STATUS_INVALID_PARAMS;
5394 p = kmalloc(sizeof(*p), GFP_KERNEL);
5396 return MGMT_STATUS_NO_RESOURCES;
5398 p->ad_type = patterns[i].ad_type;
5399 p->offset = patterns[i].offset;
5400 p->length = patterns[i].length;
5401 memcpy(p->value, patterns[i].value, p->length);
5403 INIT_LIST_HEAD(&p->list);
5404 list_add(&p->list, &m->patterns);
5407 return MGMT_STATUS_SUCCESS;
5410 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5411 void *data, u16 len)
5413 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5414 struct adv_monitor *m = NULL;
5415 u8 status = MGMT_STATUS_SUCCESS;
5416 size_t expected_size = sizeof(*cp);
5418 BT_DBG("request for %s", hdev->name);
5420 if (len <= sizeof(*cp)) {
5421 status = MGMT_STATUS_INVALID_PARAMS;
5425 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5426 if (len != expected_size) {
5427 status = MGMT_STATUS_INVALID_PARAMS;
5431 m = kzalloc(sizeof(*m), GFP_KERNEL);
5433 status = MGMT_STATUS_NO_RESOURCES;
5437 INIT_LIST_HEAD(&m->patterns);
5439 parse_adv_monitor_rssi(m, NULL);
5440 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5443 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5444 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5447 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5448 void *data, u16 len)
5450 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5451 struct adv_monitor *m = NULL;
5452 u8 status = MGMT_STATUS_SUCCESS;
5453 size_t expected_size = sizeof(*cp);
5455 BT_DBG("request for %s", hdev->name);
5457 if (len <= sizeof(*cp)) {
5458 status = MGMT_STATUS_INVALID_PARAMS;
5462 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5463 if (len != expected_size) {
5464 status = MGMT_STATUS_INVALID_PARAMS;
5468 m = kzalloc(sizeof(*m), GFP_KERNEL);
5470 status = MGMT_STATUS_NO_RESOURCES;
5474 INIT_LIST_HEAD(&m->patterns);
5476 parse_adv_monitor_rssi(m, &cp->rssi);
5477 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5480 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5481 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5484 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5485 void *data, int status)
5487 struct mgmt_rp_remove_adv_monitor rp;
5488 struct mgmt_pending_cmd *cmd = data;
5489 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5493 rp.monitor_handle = cp->monitor_handle;
5496 hci_update_passive_scan(hdev);
5498 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5499 mgmt_status(status), &rp, sizeof(rp));
5500 mgmt_pending_remove(cmd);
5502 hci_dev_unlock(hdev);
5503 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5504 rp.monitor_handle, status);
5507 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5509 struct mgmt_pending_cmd *cmd = data;
5510 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5511 u16 handle = __le16_to_cpu(cp->monitor_handle);
5514 return hci_remove_all_adv_monitor(hdev);
5516 return hci_remove_single_adv_monitor(hdev, handle);
5519 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5520 void *data, u16 len)
5522 struct mgmt_pending_cmd *cmd;
5527 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5528 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5529 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5530 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5531 status = MGMT_STATUS_BUSY;
5535 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5537 status = MGMT_STATUS_NO_RESOURCES;
5541 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5542 mgmt_remove_adv_monitor_complete);
5545 mgmt_pending_remove(cmd);
5548 status = MGMT_STATUS_NO_RESOURCES;
5550 status = MGMT_STATUS_FAILED;
5555 hci_dev_unlock(hdev);
5560 hci_dev_unlock(hdev);
5561 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5565 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5567 struct mgmt_rp_read_local_oob_data mgmt_rp;
5568 size_t rp_size = sizeof(mgmt_rp);
5569 struct mgmt_pending_cmd *cmd = data;
5570 struct sk_buff *skb = cmd->skb;
5571 u8 status = mgmt_status(err);
5575 status = MGMT_STATUS_FAILED;
5576 else if (IS_ERR(skb))
5577 status = mgmt_status(PTR_ERR(skb));
5579 status = mgmt_status(skb->data[0]);
5582 bt_dev_dbg(hdev, "status %d", status);
5585 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5589 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5591 if (!bredr_sc_enabled(hdev)) {
5592 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5594 if (skb->len < sizeof(*rp)) {
5595 mgmt_cmd_status(cmd->sk, hdev->id,
5596 MGMT_OP_READ_LOCAL_OOB_DATA,
5597 MGMT_STATUS_FAILED);
5601 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5602 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5604 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5606 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5608 if (skb->len < sizeof(*rp)) {
5609 mgmt_cmd_status(cmd->sk, hdev->id,
5610 MGMT_OP_READ_LOCAL_OOB_DATA,
5611 MGMT_STATUS_FAILED);
5615 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5616 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5618 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5619 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5622 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5623 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5626 if (skb && !IS_ERR(skb))
5629 mgmt_pending_free(cmd);
5632 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5634 struct mgmt_pending_cmd *cmd = data;
5636 if (bredr_sc_enabled(hdev))
5637 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5639 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5641 if (IS_ERR(cmd->skb))
5642 return PTR_ERR(cmd->skb);
5647 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5648 void *data, u16 data_len)
5650 struct mgmt_pending_cmd *cmd;
5653 bt_dev_dbg(hdev, "sock %p", sk);
5657 if (!hdev_is_powered(hdev)) {
5658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5659 MGMT_STATUS_NOT_POWERED);
5663 if (!lmp_ssp_capable(hdev)) {
5664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5665 MGMT_STATUS_NOT_SUPPORTED);
5669 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5673 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5674 read_local_oob_data_complete);
5677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5678 MGMT_STATUS_FAILED);
5681 mgmt_pending_free(cmd);
5685 hci_dev_unlock(hdev);
5689 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5690 void *data, u16 len)
5692 struct mgmt_addr_info *addr = data;
5695 bt_dev_dbg(hdev, "sock %p", sk);
5697 if (!bdaddr_type_is_valid(addr->type))
5698 return mgmt_cmd_complete(sk, hdev->id,
5699 MGMT_OP_ADD_REMOTE_OOB_DATA,
5700 MGMT_STATUS_INVALID_PARAMS,
5701 addr, sizeof(*addr));
5705 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5706 struct mgmt_cp_add_remote_oob_data *cp = data;
5709 if (cp->addr.type != BDADDR_BREDR) {
5710 err = mgmt_cmd_complete(sk, hdev->id,
5711 MGMT_OP_ADD_REMOTE_OOB_DATA,
5712 MGMT_STATUS_INVALID_PARAMS,
5713 &cp->addr, sizeof(cp->addr));
5717 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5718 cp->addr.type, cp->hash,
5719 cp->rand, NULL, NULL);
5721 status = MGMT_STATUS_FAILED;
5723 status = MGMT_STATUS_SUCCESS;
5725 err = mgmt_cmd_complete(sk, hdev->id,
5726 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5727 &cp->addr, sizeof(cp->addr));
5728 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5729 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5730 u8 *rand192, *hash192, *rand256, *hash256;
5733 if (bdaddr_type_is_le(cp->addr.type)) {
5734 /* Enforce zero-valued 192-bit parameters as
5735 * long as legacy SMP OOB isn't implemented.
5737 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5738 memcmp(cp->hash192, ZERO_KEY, 16)) {
5739 err = mgmt_cmd_complete(sk, hdev->id,
5740 MGMT_OP_ADD_REMOTE_OOB_DATA,
5741 MGMT_STATUS_INVALID_PARAMS,
5742 addr, sizeof(*addr));
5749 /* In case one of the P-192 values is set to zero,
5750 * then just disable OOB data for P-192.
5752 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5753 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5757 rand192 = cp->rand192;
5758 hash192 = cp->hash192;
5762 /* In case one of the P-256 values is set to zero, then just
5763 * disable OOB data for P-256.
5765 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5766 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5770 rand256 = cp->rand256;
5771 hash256 = cp->hash256;
5774 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5775 cp->addr.type, hash192, rand192,
5778 status = MGMT_STATUS_FAILED;
5780 status = MGMT_STATUS_SUCCESS;
5782 err = mgmt_cmd_complete(sk, hdev->id,
5783 MGMT_OP_ADD_REMOTE_OOB_DATA,
5784 status, &cp->addr, sizeof(cp->addr));
5786 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5788 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5789 MGMT_STATUS_INVALID_PARAMS);
5793 hci_dev_unlock(hdev);
5797 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5798 void *data, u16 len)
5800 struct mgmt_cp_remove_remote_oob_data *cp = data;
5804 bt_dev_dbg(hdev, "sock %p", sk);
5806 if (cp->addr.type != BDADDR_BREDR)
5807 return mgmt_cmd_complete(sk, hdev->id,
5808 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5809 MGMT_STATUS_INVALID_PARAMS,
5810 &cp->addr, sizeof(cp->addr));
5814 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5815 hci_remote_oob_data_clear(hdev);
5816 status = MGMT_STATUS_SUCCESS;
5820 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5822 status = MGMT_STATUS_INVALID_PARAMS;
5824 status = MGMT_STATUS_SUCCESS;
5827 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5828 status, &cp->addr, sizeof(cp->addr));
5830 hci_dev_unlock(hdev);
5834 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5836 struct mgmt_pending_cmd *cmd;
5838 bt_dev_dbg(hdev, "status %u", status);
5842 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5844 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5847 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5850 cmd->cmd_complete(cmd, mgmt_status(status));
5851 mgmt_pending_remove(cmd);
5854 hci_dev_unlock(hdev);
5857 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5858 uint8_t *mgmt_status)
5861 case DISCOV_TYPE_LE:
5862 *mgmt_status = mgmt_le_support(hdev);
5866 case DISCOV_TYPE_INTERLEAVED:
5867 *mgmt_status = mgmt_le_support(hdev);
5871 case DISCOV_TYPE_BREDR:
5872 *mgmt_status = mgmt_bredr_support(hdev);
5877 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5884 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5886 struct mgmt_pending_cmd *cmd = data;
5888 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5889 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5890 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5893 bt_dev_dbg(hdev, "err %d", err);
5895 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5897 mgmt_pending_remove(cmd);
5899 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5903 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5905 return hci_start_discovery_sync(hdev);
5908 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5909 u16 op, void *data, u16 len)
5911 struct mgmt_cp_start_discovery *cp = data;
5912 struct mgmt_pending_cmd *cmd;
5916 bt_dev_dbg(hdev, "sock %p", sk);
5920 if (!hdev_is_powered(hdev)) {
5921 err = mgmt_cmd_complete(sk, hdev->id, op,
5922 MGMT_STATUS_NOT_POWERED,
5923 &cp->type, sizeof(cp->type));
5927 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5928 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5929 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5930 &cp->type, sizeof(cp->type));
5934 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5935 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5936 &cp->type, sizeof(cp->type));
5940 /* Can't start discovery when it is paused */
5941 if (hdev->discovery_paused) {
5942 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5943 &cp->type, sizeof(cp->type));
5947 /* Clear the discovery filter first to free any previously
5948 * allocated memory for the UUID list.
5950 hci_discovery_filter_clear(hdev);
5952 hdev->discovery.type = cp->type;
5953 hdev->discovery.report_invalid_rssi = false;
5954 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5955 hdev->discovery.limited = true;
5957 hdev->discovery.limited = false;
5959 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5965 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5966 start_discovery_complete);
5968 mgmt_pending_remove(cmd);
5972 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5975 hci_dev_unlock(hdev);
5979 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5980 void *data, u16 len)
5982 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5986 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5987 void *data, u16 len)
5989 return start_discovery_internal(sk, hdev,
5990 MGMT_OP_START_LIMITED_DISCOVERY,
5994 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5995 void *data, u16 len)
5997 struct mgmt_cp_start_service_discovery *cp = data;
5998 struct mgmt_pending_cmd *cmd;
5999 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6000 u16 uuid_count, expected_len;
6004 bt_dev_dbg(hdev, "sock %p", sk);
6008 if (!hdev_is_powered(hdev)) {
6009 err = mgmt_cmd_complete(sk, hdev->id,
6010 MGMT_OP_START_SERVICE_DISCOVERY,
6011 MGMT_STATUS_NOT_POWERED,
6012 &cp->type, sizeof(cp->type));
6016 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6017 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6018 err = mgmt_cmd_complete(sk, hdev->id,
6019 MGMT_OP_START_SERVICE_DISCOVERY,
6020 MGMT_STATUS_BUSY, &cp->type,
6025 if (hdev->discovery_paused) {
6026 err = mgmt_cmd_complete(sk, hdev->id,
6027 MGMT_OP_START_SERVICE_DISCOVERY,
6028 MGMT_STATUS_BUSY, &cp->type,
6033 uuid_count = __le16_to_cpu(cp->uuid_count);
6034 if (uuid_count > max_uuid_count) {
6035 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6037 err = mgmt_cmd_complete(sk, hdev->id,
6038 MGMT_OP_START_SERVICE_DISCOVERY,
6039 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6044 expected_len = sizeof(*cp) + uuid_count * 16;
6045 if (expected_len != len) {
6046 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6048 err = mgmt_cmd_complete(sk, hdev->id,
6049 MGMT_OP_START_SERVICE_DISCOVERY,
6050 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6055 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6056 err = mgmt_cmd_complete(sk, hdev->id,
6057 MGMT_OP_START_SERVICE_DISCOVERY,
6058 status, &cp->type, sizeof(cp->type));
6062 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6069 /* Clear the discovery filter first to free any previously
6070 * allocated memory for the UUID list.
6072 hci_discovery_filter_clear(hdev);
6074 hdev->discovery.result_filtering = true;
6075 hdev->discovery.type = cp->type;
6076 hdev->discovery.rssi = cp->rssi;
6077 hdev->discovery.uuid_count = uuid_count;
6079 if (uuid_count > 0) {
6080 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6082 if (!hdev->discovery.uuids) {
6083 err = mgmt_cmd_complete(sk, hdev->id,
6084 MGMT_OP_START_SERVICE_DISCOVERY,
6086 &cp->type, sizeof(cp->type));
6087 mgmt_pending_remove(cmd);
6092 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6093 start_discovery_complete);
6095 mgmt_pending_remove(cmd);
6099 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6102 hci_dev_unlock(hdev);
6106 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6108 struct mgmt_pending_cmd *cmd;
6110 bt_dev_dbg(hdev, "status %u", status);
6114 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6116 cmd->cmd_complete(cmd, mgmt_status(status));
6117 mgmt_pending_remove(cmd);
6120 hci_dev_unlock(hdev);
6123 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6125 struct mgmt_pending_cmd *cmd = data;
6127 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6130 bt_dev_dbg(hdev, "err %d", err);
6132 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6134 mgmt_pending_remove(cmd);
6137 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6140 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6142 return hci_stop_discovery_sync(hdev);
6145 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6148 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6149 struct mgmt_pending_cmd *cmd;
6152 bt_dev_dbg(hdev, "sock %p", sk);
6156 if (!hci_discovery_active(hdev)) {
6157 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6158 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6159 sizeof(mgmt_cp->type));
6163 if (hdev->discovery.type != mgmt_cp->type) {
6164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6165 MGMT_STATUS_INVALID_PARAMS,
6166 &mgmt_cp->type, sizeof(mgmt_cp->type));
6170 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6176 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6177 stop_discovery_complete);
6179 mgmt_pending_remove(cmd);
6183 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6186 hci_dev_unlock(hdev);
6190 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6193 struct mgmt_cp_confirm_name *cp = data;
6194 struct inquiry_entry *e;
6197 bt_dev_dbg(hdev, "sock %p", sk);
6201 if (!hci_discovery_active(hdev)) {
6202 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6203 MGMT_STATUS_FAILED, &cp->addr,
6208 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6210 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6211 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6216 if (cp->name_known) {
6217 e->name_state = NAME_KNOWN;
6220 e->name_state = NAME_NEEDED;
6221 hci_inquiry_cache_update_resolve(hdev, e);
6224 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6225 &cp->addr, sizeof(cp->addr));
6228 hci_dev_unlock(hdev);
6232 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6235 struct mgmt_cp_block_device *cp = data;
6239 bt_dev_dbg(hdev, "sock %p", sk);
6241 if (!bdaddr_type_is_valid(cp->addr.type))
6242 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6243 MGMT_STATUS_INVALID_PARAMS,
6244 &cp->addr, sizeof(cp->addr));
6248 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6251 status = MGMT_STATUS_FAILED;
6255 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6257 status = MGMT_STATUS_SUCCESS;
6260 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6261 &cp->addr, sizeof(cp->addr));
6263 hci_dev_unlock(hdev);
6268 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6271 struct mgmt_cp_unblock_device *cp = data;
6275 bt_dev_dbg(hdev, "sock %p", sk);
6277 if (!bdaddr_type_is_valid(cp->addr.type))
6278 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6279 MGMT_STATUS_INVALID_PARAMS,
6280 &cp->addr, sizeof(cp->addr));
6284 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6287 status = MGMT_STATUS_INVALID_PARAMS;
6291 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6293 status = MGMT_STATUS_SUCCESS;
6296 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6297 &cp->addr, sizeof(cp->addr));
6299 hci_dev_unlock(hdev);
6304 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6306 return hci_update_eir_sync(hdev);
6309 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6312 struct mgmt_cp_set_device_id *cp = data;
6316 bt_dev_dbg(hdev, "sock %p", sk);
6318 source = __le16_to_cpu(cp->source);
6320 if (source > 0x0002)
6321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6322 MGMT_STATUS_INVALID_PARAMS);
6326 hdev->devid_source = source;
6327 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6328 hdev->devid_product = __le16_to_cpu(cp->product);
6329 hdev->devid_version = __le16_to_cpu(cp->version);
6331 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6334 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6336 hci_dev_unlock(hdev);
6341 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6344 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6346 bt_dev_dbg(hdev, "status %d", err);
6349 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6351 struct cmd_lookup match = { NULL, hdev };
6353 struct adv_info *adv_instance;
6354 u8 status = mgmt_status(err);
6357 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6358 cmd_status_rsp, &status);
6362 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6363 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6365 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6367 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6370 new_settings(hdev, match.sk);
6375 /* If "Set Advertising" was just disabled and instance advertising was
6376 * set up earlier, then re-enable multi-instance advertising.
6378 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6379 list_empty(&hdev->adv_instances))
6382 instance = hdev->cur_adv_instance;
6384 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6385 struct adv_info, list);
6389 instance = adv_instance->instance;
6392 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6394 enable_advertising_instance(hdev, err);
6397 static int set_adv_sync(struct hci_dev *hdev, void *data)
6399 struct mgmt_pending_cmd *cmd = data;
6400 struct mgmt_mode *cp = cmd->param;
6403 if (cp->val == 0x02)
6404 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6406 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6408 cancel_adv_timeout(hdev);
6411 /* Switch to instance "0" for the Set Advertising setting.
6412 * We cannot use update_[adv|scan_rsp]_data() here as the
6413 * HCI_ADVERTISING flag is not yet set.
6415 hdev->cur_adv_instance = 0x00;
6417 if (ext_adv_capable(hdev)) {
6418 hci_start_ext_adv_sync(hdev, 0x00);
6420 hci_update_adv_data_sync(hdev, 0x00);
6421 hci_update_scan_rsp_data_sync(hdev, 0x00);
6422 hci_enable_advertising_sync(hdev);
6425 hci_disable_advertising_sync(hdev);
6431 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6434 struct mgmt_mode *cp = data;
6435 struct mgmt_pending_cmd *cmd;
6439 bt_dev_dbg(hdev, "sock %p", sk);
6441 status = mgmt_le_support(hdev);
6443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6448 MGMT_STATUS_INVALID_PARAMS);
6450 if (hdev->advertising_paused)
6451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6458 /* The following conditions are ones which mean that we should
6459 * not do any HCI communication but directly send a mgmt
6460 * response to user space (after toggling the flag if
6463 if (!hdev_is_powered(hdev) ||
6464 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6465 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6466 hci_dev_test_flag(hdev, HCI_MESH) ||
6467 hci_conn_num(hdev, LE_LINK) > 0 ||
6468 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6469 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6473 hdev->cur_adv_instance = 0x00;
6474 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6475 if (cp->val == 0x02)
6476 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6478 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6480 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6481 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6484 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6489 err = new_settings(hdev, sk);
6494 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6495 pending_find(MGMT_OP_SET_LE, hdev)) {
6496 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6501 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6505 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6506 set_advertising_complete);
6509 mgmt_pending_remove(cmd);
6512 hci_dev_unlock(hdev);
6516 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6517 void *data, u16 len)
6519 struct mgmt_cp_set_static_address *cp = data;
6522 bt_dev_dbg(hdev, "sock %p", sk);
6524 if (!lmp_le_capable(hdev))
6525 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6526 MGMT_STATUS_NOT_SUPPORTED);
6528 if (hdev_is_powered(hdev))
6529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6530 MGMT_STATUS_REJECTED);
6532 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6533 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6534 return mgmt_cmd_status(sk, hdev->id,
6535 MGMT_OP_SET_STATIC_ADDRESS,
6536 MGMT_STATUS_INVALID_PARAMS);
6538 /* Two most significant bits shall be set */
6539 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6540 return mgmt_cmd_status(sk, hdev->id,
6541 MGMT_OP_SET_STATIC_ADDRESS,
6542 MGMT_STATUS_INVALID_PARAMS);
6547 bacpy(&hdev->static_addr, &cp->bdaddr);
6549 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6553 err = new_settings(hdev, sk);
6556 hci_dev_unlock(hdev);
6560 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6561 void *data, u16 len)
6563 struct mgmt_cp_set_scan_params *cp = data;
6564 __u16 interval, window;
6567 bt_dev_dbg(hdev, "sock %p", sk);
6569 if (!lmp_le_capable(hdev))
6570 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6571 MGMT_STATUS_NOT_SUPPORTED);
6573 interval = __le16_to_cpu(cp->interval);
6575 if (interval < 0x0004 || interval > 0x4000)
6576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6577 MGMT_STATUS_INVALID_PARAMS);
6579 window = __le16_to_cpu(cp->window);
6581 if (window < 0x0004 || window > 0x4000)
6582 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6583 MGMT_STATUS_INVALID_PARAMS);
6585 if (window > interval)
6586 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6587 MGMT_STATUS_INVALID_PARAMS);
6591 hdev->le_scan_interval = interval;
6592 hdev->le_scan_window = window;
6594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6597 /* If background scan is running, restart it so new parameters are
6600 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6601 hdev->discovery.state == DISCOVERY_STOPPED)
6602 hci_update_passive_scan(hdev);
6604 hci_dev_unlock(hdev);
6609 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6611 struct mgmt_pending_cmd *cmd = data;
6613 bt_dev_dbg(hdev, "err %d", err);
6616 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6619 struct mgmt_mode *cp = cmd->param;
6622 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6624 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6626 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6627 new_settings(hdev, cmd->sk);
6630 mgmt_pending_free(cmd);
6633 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6635 struct mgmt_pending_cmd *cmd = data;
6636 struct mgmt_mode *cp = cmd->param;
6638 return hci_write_fast_connectable_sync(hdev, cp->val);
6641 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6642 void *data, u16 len)
6644 struct mgmt_mode *cp = data;
6645 struct mgmt_pending_cmd *cmd;
6648 bt_dev_dbg(hdev, "sock %p", sk);
6650 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6651 hdev->hci_ver < BLUETOOTH_VER_1_2)
6652 return mgmt_cmd_status(sk, hdev->id,
6653 MGMT_OP_SET_FAST_CONNECTABLE,
6654 MGMT_STATUS_NOT_SUPPORTED);
6656 if (cp->val != 0x00 && cp->val != 0x01)
6657 return mgmt_cmd_status(sk, hdev->id,
6658 MGMT_OP_SET_FAST_CONNECTABLE,
6659 MGMT_STATUS_INVALID_PARAMS);
6663 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6664 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6668 if (!hdev_is_powered(hdev)) {
6669 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6670 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6671 new_settings(hdev, sk);
6675 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6680 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6681 fast_connectable_complete);
6684 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6685 MGMT_STATUS_FAILED);
6688 mgmt_pending_free(cmd);
6692 hci_dev_unlock(hdev);
6697 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6699 struct mgmt_pending_cmd *cmd = data;
6701 bt_dev_dbg(hdev, "err %d", err);
6704 u8 mgmt_err = mgmt_status(err);
6706 /* We need to restore the flag if related HCI commands
6709 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6711 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6713 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6714 new_settings(hdev, cmd->sk);
6717 mgmt_pending_free(cmd);
6720 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6724 status = hci_write_fast_connectable_sync(hdev, false);
6727 status = hci_update_scan_sync(hdev);
6729 /* Since only the advertising data flags will change, there
6730 * is no need to update the scan response data.
6733 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6738 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6740 struct mgmt_mode *cp = data;
6741 struct mgmt_pending_cmd *cmd;
6744 bt_dev_dbg(hdev, "sock %p", sk);
6746 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6748 MGMT_STATUS_NOT_SUPPORTED);
6750 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6752 MGMT_STATUS_REJECTED);
6754 if (cp->val != 0x00 && cp->val != 0x01)
6755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6756 MGMT_STATUS_INVALID_PARAMS);
6760 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6761 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6765 if (!hdev_is_powered(hdev)) {
6767 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6768 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6769 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6770 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6771 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6774 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6776 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6780 err = new_settings(hdev, sk);
6784 /* Reject disabling when powered on */
6786 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6787 MGMT_STATUS_REJECTED);
6790 /* When configuring a dual-mode controller to operate
6791 * with LE only and using a static address, then switching
6792 * BR/EDR back on is not allowed.
6794 * Dual-mode controllers shall operate with the public
6795 * address as its identity address for BR/EDR and LE. So
6796 * reject the attempt to create an invalid configuration.
6798 * The same restrictions applies when secure connections
6799 * has been enabled. For BR/EDR this is a controller feature
6800 * while for LE it is a host stack feature. This means that
6801 * switching BR/EDR back on when secure connections has been
6802 * enabled is not a supported transaction.
6804 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6805 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6806 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6807 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6808 MGMT_STATUS_REJECTED);
6813 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6817 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6818 set_bredr_complete);
6821 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6822 MGMT_STATUS_FAILED);
6824 mgmt_pending_free(cmd);
6829 /* We need to flip the bit already here so that
6830 * hci_req_update_adv_data generates the correct flags.
6832 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6835 hci_dev_unlock(hdev);
6839 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6841 struct mgmt_pending_cmd *cmd = data;
6842 struct mgmt_mode *cp;
6844 bt_dev_dbg(hdev, "err %d", err);
6847 u8 mgmt_err = mgmt_status(err);
6849 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6857 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6858 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6861 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6862 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6865 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6866 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6870 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6871 new_settings(hdev, cmd->sk);
6874 mgmt_pending_free(cmd);
6877 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6879 struct mgmt_pending_cmd *cmd = data;
6880 struct mgmt_mode *cp = cmd->param;
6883 /* Force write of val */
6884 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6886 return hci_write_sc_support_sync(hdev, val);
6889 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6890 void *data, u16 len)
6892 struct mgmt_mode *cp = data;
6893 struct mgmt_pending_cmd *cmd;
6897 bt_dev_dbg(hdev, "sock %p", sk);
6899 if (!lmp_sc_capable(hdev) &&
6900 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6901 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6902 MGMT_STATUS_NOT_SUPPORTED);
6904 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6905 lmp_sc_capable(hdev) &&
6906 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6908 MGMT_STATUS_REJECTED);
6910 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6911 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6912 MGMT_STATUS_INVALID_PARAMS);
6916 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6917 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6921 changed = !hci_dev_test_and_set_flag(hdev,
6923 if (cp->val == 0x02)
6924 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6926 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6928 changed = hci_dev_test_and_clear_flag(hdev,
6930 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6933 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6938 err = new_settings(hdev, sk);
6945 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6946 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6947 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6951 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6955 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6956 set_secure_conn_complete);
6959 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6960 MGMT_STATUS_FAILED);
6962 mgmt_pending_free(cmd);
6966 hci_dev_unlock(hdev);
6970 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6971 void *data, u16 len)
6973 struct mgmt_mode *cp = data;
6974 bool changed, use_changed;
6977 bt_dev_dbg(hdev, "sock %p", sk);
6979 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6980 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6981 MGMT_STATUS_INVALID_PARAMS);
6986 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6988 changed = hci_dev_test_and_clear_flag(hdev,
6989 HCI_KEEP_DEBUG_KEYS);
6991 if (cp->val == 0x02)
6992 use_changed = !hci_dev_test_and_set_flag(hdev,
6993 HCI_USE_DEBUG_KEYS);
6995 use_changed = hci_dev_test_and_clear_flag(hdev,
6996 HCI_USE_DEBUG_KEYS);
6998 if (hdev_is_powered(hdev) && use_changed &&
6999 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7000 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7001 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7002 sizeof(mode), &mode);
7005 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7010 err = new_settings(hdev, sk);
7013 hci_dev_unlock(hdev);
7017 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7020 struct mgmt_cp_set_privacy *cp = cp_data;
7024 bt_dev_dbg(hdev, "sock %p", sk);
7026 if (!lmp_le_capable(hdev))
7027 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7028 MGMT_STATUS_NOT_SUPPORTED);
7030 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7031 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7032 MGMT_STATUS_INVALID_PARAMS);
7035 /* commenting out since set privacy command is always rejected
7036 * if this condition is enabled.
7038 if (hdev_is_powered(hdev))
7039 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7040 MGMT_STATUS_REJECTED);
7045 /* If user space supports this command it is also expected to
7046 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7048 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7051 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7052 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7053 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7054 hci_adv_instances_set_rpa_expired(hdev, true);
7055 if (cp->privacy == 0x02)
7056 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7058 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7060 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7061 memset(hdev->irk, 0, sizeof(hdev->irk));
7062 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7063 hci_adv_instances_set_rpa_expired(hdev, false);
7064 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7067 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7072 err = new_settings(hdev, sk);
7075 hci_dev_unlock(hdev);
7079 static bool irk_is_valid(struct mgmt_irk_info *irk)
7081 switch (irk->addr.type) {
7082 case BDADDR_LE_PUBLIC:
7085 case BDADDR_LE_RANDOM:
7086 /* Two most significant bits shall be set */
7087 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7095 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7098 struct mgmt_cp_load_irks *cp = cp_data;
7099 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7100 sizeof(struct mgmt_irk_info));
7101 u16 irk_count, expected_len;
7104 bt_dev_dbg(hdev, "sock %p", sk);
7106 if (!lmp_le_capable(hdev))
7107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7108 MGMT_STATUS_NOT_SUPPORTED);
7110 irk_count = __le16_to_cpu(cp->irk_count);
7111 if (irk_count > max_irk_count) {
7112 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7115 MGMT_STATUS_INVALID_PARAMS);
7118 expected_len = struct_size(cp, irks, irk_count);
7119 if (expected_len != len) {
7120 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7122 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7123 MGMT_STATUS_INVALID_PARAMS);
7126 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7128 for (i = 0; i < irk_count; i++) {
7129 struct mgmt_irk_info *key = &cp->irks[i];
7131 if (!irk_is_valid(key))
7132 return mgmt_cmd_status(sk, hdev->id,
7134 MGMT_STATUS_INVALID_PARAMS);
7139 hci_smp_irks_clear(hdev);
7141 for (i = 0; i < irk_count; i++) {
7142 struct mgmt_irk_info *irk = &cp->irks[i];
7143 u8 addr_type = le_addr_type(irk->addr.type);
7145 if (hci_is_blocked_key(hdev,
7146 HCI_BLOCKED_KEY_TYPE_IRK,
7148 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7153 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7154 if (irk->addr.type == BDADDR_BREDR)
7155 addr_type = BDADDR_BREDR;
7157 hci_add_irk(hdev, &irk->addr.bdaddr,
7158 addr_type, irk->val,
7162 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7166 hci_dev_unlock(hdev);
7172 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7173 void *data, u16 len)
7175 struct mgmt_cp_set_advertising_params *cp = data;
7180 BT_DBG("%s", hdev->name);
7182 if (!lmp_le_capable(hdev))
7183 return mgmt_cmd_status(sk, hdev->id,
7184 MGMT_OP_SET_ADVERTISING_PARAMS,
7185 MGMT_STATUS_NOT_SUPPORTED);
7187 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7188 return mgmt_cmd_status(sk, hdev->id,
7189 MGMT_OP_SET_ADVERTISING_PARAMS,
7192 min_interval = __le16_to_cpu(cp->interval_min);
7193 max_interval = __le16_to_cpu(cp->interval_max);
7195 if (min_interval > max_interval ||
7196 min_interval < 0x0020 || max_interval > 0x4000)
7197 return mgmt_cmd_status(sk, hdev->id,
7198 MGMT_OP_SET_ADVERTISING_PARAMS,
7199 MGMT_STATUS_INVALID_PARAMS);
7203 hdev->le_adv_min_interval = min_interval;
7204 hdev->le_adv_max_interval = max_interval;
7205 hdev->adv_filter_policy = cp->filter_policy;
7206 hdev->adv_type = cp->type;
7208 err = mgmt_cmd_complete(sk, hdev->id,
7209 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7211 hci_dev_unlock(hdev);
7216 static void set_advertising_data_complete(struct hci_dev *hdev,
7217 u8 status, u16 opcode)
7219 struct mgmt_cp_set_advertising_data *cp;
7220 struct mgmt_pending_cmd *cmd;
7222 BT_DBG("status 0x%02x", status);
7226 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7233 mgmt_cmd_status(cmd->sk, hdev->id,
7234 MGMT_OP_SET_ADVERTISING_DATA,
7235 mgmt_status(status));
7237 mgmt_cmd_complete(cmd->sk, hdev->id,
7238 MGMT_OP_SET_ADVERTISING_DATA, 0,
7241 mgmt_pending_remove(cmd);
7244 hci_dev_unlock(hdev);
7247 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7248 void *data, u16 len)
7250 struct mgmt_pending_cmd *cmd;
7251 struct hci_request req;
7252 struct mgmt_cp_set_advertising_data *cp = data;
7253 struct hci_cp_le_set_adv_data adv;
7256 BT_DBG("%s", hdev->name);
7258 if (!lmp_le_capable(hdev)) {
7259 return mgmt_cmd_status(sk, hdev->id,
7260 MGMT_OP_SET_ADVERTISING_DATA,
7261 MGMT_STATUS_NOT_SUPPORTED);
7266 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7267 err = mgmt_cmd_status(sk, hdev->id,
7268 MGMT_OP_SET_ADVERTISING_DATA,
7273 if (len > HCI_MAX_AD_LENGTH) {
7274 err = mgmt_cmd_status(sk, hdev->id,
7275 MGMT_OP_SET_ADVERTISING_DATA,
7276 MGMT_STATUS_INVALID_PARAMS);
7280 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7287 hci_req_init(&req, hdev);
7289 memset(&adv, 0, sizeof(adv));
7290 memcpy(adv.data, cp->data, len);
7293 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7295 err = hci_req_run(&req, set_advertising_data_complete);
7297 mgmt_pending_remove(cmd);
7300 hci_dev_unlock(hdev);
7305 /* Adv White List feature */
7306 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7308 struct mgmt_cp_add_dev_white_list *cp;
7309 struct mgmt_pending_cmd *cmd;
7311 BT_DBG("status 0x%02x", status);
7315 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7322 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7323 mgmt_status(status));
7325 mgmt_cmd_complete(cmd->sk, hdev->id,
7326 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7328 mgmt_pending_remove(cmd);
7331 hci_dev_unlock(hdev);
7334 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7335 void *data, u16 len)
7337 struct mgmt_pending_cmd *cmd;
7338 struct mgmt_cp_add_dev_white_list *cp = data;
7339 struct hci_request req;
7342 BT_DBG("%s", hdev->name);
7344 if (!lmp_le_capable(hdev))
7345 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7346 MGMT_STATUS_NOT_SUPPORTED);
7348 if (!hdev_is_powered(hdev))
7349 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7350 MGMT_STATUS_REJECTED);
7354 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7355 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7360 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7366 hci_req_init(&req, hdev);
7368 hci_req_add(&req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(*cp), cp);
7370 err = hci_req_run(&req, add_white_list_complete);
7372 mgmt_pending_remove(cmd);
7377 hci_dev_unlock(hdev);
7382 static void remove_from_white_list_complete(struct hci_dev *hdev,
7383 u8 status, u16 opcode)
7385 struct mgmt_cp_remove_dev_from_white_list *cp;
7386 struct mgmt_pending_cmd *cmd;
7388 BT_DBG("status 0x%02x", status);
7392 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7399 mgmt_cmd_status(cmd->sk, hdev->id,
7400 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7401 mgmt_status(status));
7403 mgmt_cmd_complete(cmd->sk, hdev->id,
7404 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7407 mgmt_pending_remove(cmd);
7410 hci_dev_unlock(hdev);
7413 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7414 void *data, u16 len)
7416 struct mgmt_pending_cmd *cmd;
7417 struct mgmt_cp_remove_dev_from_white_list *cp = data;
7418 struct hci_request req;
7421 BT_DBG("%s", hdev->name);
7423 if (!lmp_le_capable(hdev))
7424 return mgmt_cmd_status(sk, hdev->id,
7425 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7426 MGMT_STATUS_NOT_SUPPORTED);
7428 if (!hdev_is_powered(hdev))
7429 return mgmt_cmd_status(sk, hdev->id,
7430 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7431 MGMT_STATUS_REJECTED);
7435 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7436 err = mgmt_cmd_status(sk, hdev->id,
7437 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7442 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7449 hci_req_init(&req, hdev);
7451 hci_req_add(&req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(*cp), cp);
7453 err = hci_req_run(&req, remove_from_white_list_complete);
7455 mgmt_pending_remove(cmd);
7460 hci_dev_unlock(hdev);
7465 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7468 struct mgmt_pending_cmd *cmd;
7470 BT_DBG("status 0x%02x", status);
7474 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7479 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7480 mgmt_status(status));
7482 mgmt_cmd_complete(cmd->sk, hdev->id,
7483 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7486 mgmt_pending_remove(cmd);
7489 hci_dev_unlock(hdev);
7492 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7493 void *data, u16 len)
7495 struct mgmt_pending_cmd *cmd;
7496 struct hci_request req;
7499 BT_DBG("%s", hdev->name);
7501 if (!lmp_le_capable(hdev))
7502 return mgmt_cmd_status(sk, hdev->id,
7503 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7504 MGMT_STATUS_NOT_SUPPORTED);
7506 if (!hdev_is_powered(hdev))
7507 return mgmt_cmd_status(sk, hdev->id,
7508 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7509 MGMT_STATUS_REJECTED);
7513 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7514 err = mgmt_cmd_status(sk, hdev->id,
7515 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7520 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7527 hci_req_init(&req, hdev);
7529 hci_req_add(&req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
7531 err = hci_req_run(&req, clear_white_list_complete);
7533 mgmt_pending_remove(cmd);
7538 hci_dev_unlock(hdev);
7543 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7546 struct mgmt_cp_set_scan_rsp_data *cp;
7547 struct mgmt_pending_cmd *cmd;
7549 BT_DBG("status 0x%02x", status);
7553 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7560 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7561 mgmt_status(status));
7563 mgmt_cmd_complete(cmd->sk, hdev->id,
7564 MGMT_OP_SET_SCAN_RSP_DATA, 0,
7567 mgmt_pending_remove(cmd);
7570 hci_dev_unlock(hdev);
7573 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7576 struct mgmt_pending_cmd *cmd;
7577 struct hci_request req;
7578 struct mgmt_cp_set_scan_rsp_data *cp = data;
7579 struct hci_cp_le_set_scan_rsp_data rsp;
7582 BT_DBG("%s", hdev->name);
7584 if (!lmp_le_capable(hdev))
7585 return mgmt_cmd_status(sk, hdev->id,
7586 MGMT_OP_SET_SCAN_RSP_DATA,
7587 MGMT_STATUS_NOT_SUPPORTED);
7591 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7597 if (len > HCI_MAX_AD_LENGTH) {
7598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7599 MGMT_STATUS_INVALID_PARAMS);
7603 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7609 hci_req_init(&req, hdev);
7611 memset(&rsp, 0, sizeof(rsp));
7612 memcpy(rsp.data, cp->data, len);
7615 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7617 err = hci_req_run(&req, set_scan_rsp_data_complete);
7619 mgmt_pending_remove(cmd);
7622 hci_dev_unlock(hdev);
7627 static void set_rssi_threshold_complete(struct hci_dev *hdev,
7628 u8 status, u16 opcode)
7630 struct mgmt_pending_cmd *cmd;
7632 BT_DBG("status 0x%02x", status);
7636 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7641 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7642 mgmt_status(status));
7644 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7647 mgmt_pending_remove(cmd);
7650 hci_dev_unlock(hdev);
7653 static void set_rssi_disable_complete(struct hci_dev *hdev,
7654 u8 status, u16 opcode)
7656 struct mgmt_pending_cmd *cmd;
7658 BT_DBG("status 0x%02x", status);
7662 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7667 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7668 mgmt_status(status));
7670 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7673 mgmt_pending_remove(cmd);
7676 hci_dev_unlock(hdev);
7679 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
7680 void *data, u16 len)
7683 struct hci_cp_set_rssi_threshold th = { 0, };
7684 struct mgmt_cp_set_enable_rssi *cp = data;
7685 struct hci_conn *conn;
7686 struct mgmt_pending_cmd *cmd;
7687 struct hci_request req;
7692 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7694 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7695 MGMT_STATUS_FAILED);
7699 if (!lmp_le_capable(hdev)) {
7700 mgmt_pending_remove(cmd);
7701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7702 MGMT_STATUS_NOT_SUPPORTED);
7706 if (!hdev_is_powered(hdev)) {
7707 BT_DBG("%s", hdev->name);
7708 mgmt_pending_remove(cmd);
7709 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7710 MGMT_STATUS_NOT_POWERED);
7714 if (cp->link_type == 0x01)
7715 dest_type = LE_LINK;
7717 dest_type = ACL_LINK;
7719 /* Get LE/ACL link handle info */
7720 conn = hci_conn_hash_lookup_ba(hdev,
7721 dest_type, &cp->bdaddr);
7724 err = mgmt_cmd_complete(sk, hdev->id,
7725 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
7726 mgmt_pending_remove(cmd);
7730 hci_req_init(&req, hdev);
7732 th.hci_le_ext_opcode = 0x0B;
7734 th.conn_handle = conn->handle;
7735 th.alert_mask = 0x07;
7736 th.low_th = cp->low_th;
7737 th.in_range_th = cp->in_range_th;
7738 th.high_th = cp->high_th;
7740 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7741 err = hci_req_run(&req, set_rssi_threshold_complete);
7744 mgmt_pending_remove(cmd);
7745 BT_ERR("Error in requesting hci_req_run");
7750 hci_dev_unlock(hdev);
7754 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
7755 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7757 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
7758 struct mgmt_cp_set_enable_rssi *cp = data;
7759 struct mgmt_pending_cmd *cmd;
7764 mgmt_rp.status = rp->status;
7765 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7766 mgmt_rp.bt_address = cp->bdaddr;
7767 mgmt_rp.link_type = cp->link_type;
7769 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7770 MGMT_STATUS_SUCCESS, &mgmt_rp,
7771 sizeof(struct mgmt_cc_rsp_enable_rssi));
7773 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
7774 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7776 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
7777 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7778 &mgmt_rp.bt_address, true);
7782 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7784 mgmt_pending_remove(cmd);
7786 hci_dev_unlock(hdev);
7789 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
7790 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7792 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
7793 struct mgmt_cp_disable_rssi *cp = data;
7794 struct mgmt_pending_cmd *cmd;
7799 mgmt_rp.status = rp->status;
7800 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7801 mgmt_rp.bt_address = cp->bdaddr;
7802 mgmt_rp.link_type = cp->link_type;
7804 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7805 MGMT_STATUS_SUCCESS, &mgmt_rp,
7806 sizeof(struct mgmt_cc_rsp_enable_rssi));
7808 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
7809 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7811 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7812 &mgmt_rp.bt_address, false);
7816 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7818 mgmt_pending_remove(cmd);
7820 hci_dev_unlock(hdev);
7823 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
7824 void *data, u16 len)
7826 struct mgmt_pending_cmd *cmd;
7827 struct hci_request req;
7828 struct hci_cp_set_enable_rssi cp_en = { 0, };
7831 BT_DBG("Set Disable RSSI.");
7833 cp_en.hci_le_ext_opcode = 0x01;
7834 cp_en.le_enable_cs_Features = 0x00;
7835 cp_en.data[0] = 0x00;
7836 cp_en.data[1] = 0x00;
7837 cp_en.data[2] = 0x00;
7841 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7844 MGMT_STATUS_FAILED);
7848 if (!lmp_le_capable(hdev)) {
7849 mgmt_pending_remove(cmd);
7850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7851 MGMT_STATUS_NOT_SUPPORTED);
7855 if (!hdev_is_powered(hdev)) {
7856 BT_DBG("%s", hdev->name);
7857 mgmt_pending_remove(cmd);
7858 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7859 MGMT_STATUS_NOT_POWERED);
7863 hci_req_init(&req, hdev);
7865 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7866 sizeof(struct hci_cp_set_enable_rssi),
7867 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7868 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7870 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7871 err = hci_req_run(&req, set_rssi_disable_complete);
7874 mgmt_pending_remove(cmd);
7875 BT_ERR("Error in requesting hci_req_run");
7880 hci_dev_unlock(hdev);
7884 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
7886 struct hci_cc_rsp_enable_rssi *rp = response;
7887 struct mgmt_pending_cmd *cmd_enable = NULL;
7888 struct mgmt_pending_cmd *cmd_disable = NULL;
7889 struct mgmt_cp_set_enable_rssi *cp_en;
7890 struct mgmt_cp_disable_rssi *cp_dis;
7893 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7894 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7895 hci_dev_unlock(hdev);
7898 BT_DBG("Enable Request");
7901 BT_DBG("Disable Request");
7904 cp_en = cmd_enable->param;
7909 switch (rp->le_ext_opcode) {
7911 BT_DBG("RSSI enabled.. Setting Threshold...");
7912 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
7913 cp_en, sizeof(*cp_en));
7917 BT_DBG("Sending RSSI enable success");
7918 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
7919 cp_en, rp, rp->status);
7923 } else if (cmd_disable) {
7924 cp_dis = cmd_disable->param;
7929 switch (rp->le_ext_opcode) {
7931 BT_DBG("Sending RSSI disable success");
7932 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7933 cp_dis, rp, rp->status);
7938 * Only unset RSSI Threshold values for the Link if
7939 * RSSI is monitored for other BREDR or LE Links
7941 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
7942 BT_DBG("Unset Threshold. Other links being monitored");
7943 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7944 cp_dis, rp, rp->status);
7946 BT_DBG("Unset Threshold. Disabling...");
7947 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
7948 cp_dis, sizeof(*cp_dis));
7955 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7958 struct mgmt_pending_cmd *cmd;
7960 BT_DBG("status 0x%02x", status);
7964 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7969 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7970 mgmt_status(status));
7972 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7975 mgmt_pending_remove(cmd);
7978 hci_dev_unlock(hdev);
7981 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7982 void *data, u16 len)
7984 struct mgmt_pending_cmd *cmd;
7985 struct hci_request req;
7986 struct mgmt_cp_set_enable_rssi *cp = data;
7987 struct hci_cp_set_enable_rssi cp_en = { 0, };
7990 BT_DBG("Set Enable RSSI.");
7992 cp_en.hci_le_ext_opcode = 0x01;
7993 cp_en.le_enable_cs_Features = 0x04;
7994 cp_en.data[0] = 0x00;
7995 cp_en.data[1] = 0x00;
7996 cp_en.data[2] = 0x00;
8000 if (!lmp_le_capable(hdev)) {
8001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8002 MGMT_STATUS_NOT_SUPPORTED);
8006 if (!hdev_is_powered(hdev)) {
8007 BT_DBG("%s", hdev->name);
8008 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8009 MGMT_STATUS_NOT_POWERED);
8013 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
8014 BT_DBG("%s", hdev->name);
8015 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8020 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
8023 BT_DBG("%s", hdev->name);
8028 /* If RSSI is already enabled directly set Threshold values */
8029 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
8030 hci_dev_unlock(hdev);
8031 BT_DBG("RSSI Enabled. Directly set Threshold");
8032 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
8036 hci_req_init(&req, hdev);
8038 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
8039 sizeof(struct hci_cp_set_enable_rssi),
8040 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
8041 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
8043 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
8044 err = hci_req_run(&req, set_rssi_enable_complete);
8047 mgmt_pending_remove(cmd);
8048 BT_ERR("Error in requesting hci_req_run");
8053 hci_dev_unlock(hdev);
8058 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8060 struct mgmt_pending_cmd *cmd;
8062 BT_DBG("status 0x%02x", status);
8066 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
8070 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8071 MGMT_STATUS_SUCCESS, &status, 1);
8073 mgmt_pending_remove(cmd);
8076 hci_dev_unlock(hdev);
8079 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
8082 struct mgmt_pending_cmd *cmd;
8083 struct hci_request req;
8084 struct mgmt_cp_get_raw_rssi *cp = data;
8085 struct hci_cp_get_raw_rssi hci_cp;
8087 struct hci_conn *conn;
8091 BT_DBG("Get Raw RSSI.");
8095 if (!lmp_le_capable(hdev)) {
8096 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8097 MGMT_STATUS_NOT_SUPPORTED);
8101 if (cp->link_type == 0x01)
8102 dest_type = LE_LINK;
8104 dest_type = ACL_LINK;
8106 /* Get LE/BREDR link handle info */
8107 conn = hci_conn_hash_lookup_ba(hdev,
8108 dest_type, &cp->bt_address);
8110 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8111 MGMT_STATUS_NOT_CONNECTED);
8114 hci_cp.conn_handle = conn->handle;
8116 if (!hdev_is_powered(hdev)) {
8117 BT_DBG("%s", hdev->name);
8118 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8119 MGMT_STATUS_NOT_POWERED);
8123 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
8124 BT_DBG("%s", hdev->name);
8125 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8130 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
8132 BT_DBG("%s", hdev->name);
8137 hci_req_init(&req, hdev);
8139 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
8140 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
8141 err = hci_req_run(&req, get_raw_rssi_complete);
8144 mgmt_pending_remove(cmd);
8145 BT_ERR("Error in requesting hci_req_run");
8149 hci_dev_unlock(hdev);
8154 void mgmt_raw_rssi_response(struct hci_dev *hdev,
8155 struct hci_cc_rp_get_raw_rssi *rp, int success)
8157 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
8158 struct hci_conn *conn;
8160 mgmt_rp.status = rp->status;
8161 mgmt_rp.rssi_dbm = rp->rssi_dbm;
8163 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
8167 bacpy(&mgmt_rp.bt_address, &conn->dst);
8168 if (conn->type == LE_LINK)
8169 mgmt_rp.link_type = 0x01;
8171 mgmt_rp.link_type = 0x00;
8173 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
8174 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
8177 static void set_disable_threshold_complete(struct hci_dev *hdev,
8178 u8 status, u16 opcode)
8180 struct mgmt_pending_cmd *cmd;
8182 BT_DBG("status 0x%02x", status);
8186 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
8190 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8191 MGMT_STATUS_SUCCESS, &status, 1);
8193 mgmt_pending_remove(cmd);
8196 hci_dev_unlock(hdev);
8199 /** Removes monitoring for a link*/
8200 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
8201 void *data, u16 len)
8204 struct hci_cp_set_rssi_threshold th = { 0, };
8205 struct mgmt_cp_disable_rssi *cp = data;
8206 struct hci_conn *conn;
8207 struct mgmt_pending_cmd *cmd;
8208 struct hci_request req;
8211 BT_DBG("Set Disable RSSI.");
8215 if (!lmp_le_capable(hdev)) {
8216 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8217 MGMT_STATUS_NOT_SUPPORTED);
8221 /* Get LE/ACL link handle info*/
8222 if (cp->link_type == 0x01)
8223 dest_type = LE_LINK;
8225 dest_type = ACL_LINK;
8227 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
8229 err = mgmt_cmd_complete(sk, hdev->id,
8230 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
8234 th.hci_le_ext_opcode = 0x0B;
8236 th.conn_handle = conn->handle;
8237 th.alert_mask = 0x00;
8239 th.in_range_th = 0x00;
8242 if (!hdev_is_powered(hdev)) {
8243 BT_DBG("%s", hdev->name);
8244 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8249 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
8250 BT_DBG("%s", hdev->name);
8251 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8256 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
8259 BT_DBG("%s", hdev->name);
8264 hci_req_init(&req, hdev);
8266 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
8267 err = hci_req_run(&req, set_disable_threshold_complete);
8269 mgmt_pending_remove(cmd);
8270 BT_ERR("Error in requesting hci_req_run");
8275 hci_dev_unlock(hdev);
8280 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
8281 s8 alert_type, s8 rssi_dbm)
8283 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
8284 struct hci_conn *conn;
8286 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
8287 conn_handle, alert_type, rssi_dbm);
8289 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
8292 BT_ERR("RSSI alert Error: Device not found for handle");
8295 bacpy(&mgmt_ev.bdaddr, &conn->dst);
8297 if (conn->type == LE_LINK)
8298 mgmt_ev.link_type = 0x01;
8300 mgmt_ev.link_type = 0x00;
8302 mgmt_ev.alert_type = alert_type;
8303 mgmt_ev.rssi_dbm = rssi_dbm;
8305 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
8306 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
8310 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
8312 struct mgmt_pending_cmd *cmd;
8316 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8318 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8322 type = hdev->le_discovery.type;
8324 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8325 mgmt_status(status), &type, sizeof(type));
8326 mgmt_pending_remove(cmd);
8331 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
8334 unsigned long timeout = 0;
8336 BT_DBG("status %d", status);
8340 mgmt_start_le_discovery_failed(hdev, status);
8341 hci_dev_unlock(hdev);
8346 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
8347 hci_dev_unlock(hdev);
8349 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
8350 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
8355 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
8358 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
8359 void *data, u16 len)
8361 struct mgmt_cp_start_le_discovery *cp = data;
8362 struct mgmt_pending_cmd *cmd;
8363 struct hci_cp_le_set_scan_param param_cp;
8364 struct hci_cp_le_set_scan_enable enable_cp;
8365 struct hci_request req;
8366 u8 status, own_addr_type;
8369 BT_DBG("%s", hdev->name);
8371 if (!hdev_is_powered(hdev)) {
8372 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8373 MGMT_STATUS_NOT_POWERED);
8377 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
8378 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8383 if (cp->type != DISCOV_TYPE_LE) {
8384 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8385 MGMT_STATUS_INVALID_PARAMS);
8389 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
8395 hdev->le_discovery.type = cp->type;
8397 hci_req_init(&req, hdev);
8399 status = mgmt_le_support(hdev);
8401 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8403 mgmt_pending_remove(cmd);
8407 /* If controller is scanning, it means the background scanning
8408 * is running. Thus, we should temporarily stop it in order to
8409 * set the discovery scanning parameters.
8411 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
8412 hci_req_add_le_scan_disable(&req, false);
8414 memset(¶m_cp, 0, sizeof(param_cp));
8416 /* All active scans will be done with either a resolvable
8417 * private address (when privacy feature has been enabled)
8418 * or unresolvable private address.
8420 err = hci_update_random_address_sync(hdev, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
8422 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8423 MGMT_STATUS_FAILED);
8424 mgmt_pending_remove(cmd);
8428 param_cp.type = hdev->le_scan_type;
8429 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
8430 param_cp.window = cpu_to_le16(hdev->le_scan_window);
8431 param_cp.own_address_type = own_addr_type;
8432 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
8435 memset(&enable_cp, 0, sizeof(enable_cp));
8436 enable_cp.enable = LE_SCAN_ENABLE;
8437 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
8439 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
8442 err = hci_req_run(&req, start_le_discovery_complete);
8444 mgmt_pending_remove(cmd);
8446 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
8452 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
8454 struct mgmt_pending_cmd *cmd;
8457 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8461 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8462 mgmt_status(status), &hdev->le_discovery.type,
8463 sizeof(hdev->le_discovery.type));
8464 mgmt_pending_remove(cmd);
8469 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
8472 BT_DBG("status %d", status);
8477 mgmt_stop_le_discovery_failed(hdev, status);
8481 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8484 hci_dev_unlock(hdev);
8487 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
8488 void *data, u16 len)
8490 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
8491 struct mgmt_pending_cmd *cmd;
8492 struct hci_request req;
8495 BT_DBG("%s", hdev->name);
8499 if (!hci_le_discovery_active(hdev)) {
8500 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8501 MGMT_STATUS_REJECTED, &mgmt_cp->type,
8502 sizeof(mgmt_cp->type));
8506 if (hdev->le_discovery.type != mgmt_cp->type) {
8507 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8508 MGMT_STATUS_INVALID_PARAMS,
8509 &mgmt_cp->type, sizeof(mgmt_cp->type));
8513 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
8519 hci_req_init(&req, hdev);
8521 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
8522 BT_DBG("unknown le discovery state %u",
8523 hdev->le_discovery.state);
8525 mgmt_pending_remove(cmd);
8526 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8527 MGMT_STATUS_FAILED, &mgmt_cp->type,
8528 sizeof(mgmt_cp->type));
8532 cancel_delayed_work(&hdev->le_scan_disable);
8533 hci_req_add_le_scan_disable(&req, false);
8535 err = hci_req_run(&req, stop_le_discovery_complete);
8537 mgmt_pending_remove(cmd);
8539 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
8542 hci_dev_unlock(hdev);
8546 /* Separate LE discovery */
8547 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
8549 struct mgmt_ev_discovering ev;
8550 struct mgmt_pending_cmd *cmd;
8552 BT_DBG("%s le discovering %u", hdev->name, discovering);
8555 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8557 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8560 u8 type = hdev->le_discovery.type;
8562 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
8564 mgmt_pending_remove(cmd);
8567 memset(&ev, 0, sizeof(ev));
8568 ev.type = hdev->le_discovery.type;
8569 ev.discovering = discovering;
8571 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8574 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
8575 void *data, u16 len)
8579 BT_DBG("%s", hdev->name);
8583 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
8585 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
8587 hci_dev_unlock(hdev);
8592 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
8597 if (min > max || min < 6 || max > 3200)
8600 if (to_multiplier < 10 || to_multiplier > 3200)
8603 if (max >= to_multiplier * 8)
8606 max_latency = (to_multiplier * 8 / max) - 1;
8608 if (latency > 499 || latency > max_latency)
8614 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
8617 struct mgmt_cp_le_conn_update *cp = data;
8619 struct hci_conn *conn;
8620 u16 min, max, latency, supervision_timeout;
8623 if (!hdev_is_powered(hdev))
8624 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8625 MGMT_STATUS_NOT_POWERED);
8627 min = __le16_to_cpu(cp->conn_interval_min);
8628 max = __le16_to_cpu(cp->conn_interval_max);
8629 latency = __le16_to_cpu(cp->conn_latency);
8630 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
8632 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
8633 min, max, latency, supervision_timeout);
8635 err = check_le_conn_update_param(min, max, latency,
8636 supervision_timeout);
8639 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8640 MGMT_STATUS_INVALID_PARAMS);
8644 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
8646 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8647 MGMT_STATUS_NOT_CONNECTED);
8648 hci_dev_unlock(hdev);
8652 hci_dev_unlock(hdev);
8654 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
8656 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
8660 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
8663 struct mgmt_cp_set_manufacturer_data *cp;
8664 struct mgmt_pending_cmd *cmd;
8666 BT_DBG("status 0x%02x", status);
8670 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
8677 mgmt_cmd_status(cmd->sk, hdev->id,
8678 MGMT_OP_SET_MANUFACTURER_DATA,
8679 mgmt_status(status));
8681 mgmt_cmd_complete(cmd->sk, hdev->id,
8682 MGMT_OP_SET_MANUFACTURER_DATA, 0,
8685 mgmt_pending_remove(cmd);
8688 hci_dev_unlock(hdev);
8691 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
8692 void *data, u16 len)
8694 struct mgmt_pending_cmd *cmd;
8695 struct hci_request req;
8696 struct mgmt_cp_set_manufacturer_data *cp = data;
8697 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
8701 BT_DBG("%s", hdev->name);
8703 if (!lmp_bredr_capable(hdev))
8704 return mgmt_cmd_status(sk, hdev->id,
8705 MGMT_OP_SET_MANUFACTURER_DATA,
8706 MGMT_STATUS_NOT_SUPPORTED);
8708 if (cp->data[0] == 0 ||
8709 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
8710 return mgmt_cmd_status(sk, hdev->id,
8711 MGMT_OP_SET_MANUFACTURER_DATA,
8712 MGMT_STATUS_INVALID_PARAMS);
8714 if (cp->data[1] != 0xFF)
8715 return mgmt_cmd_status(sk, hdev->id,
8716 MGMT_OP_SET_MANUFACTURER_DATA,
8717 MGMT_STATUS_NOT_SUPPORTED);
8721 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
8722 err = mgmt_cmd_status(sk, hdev->id,
8723 MGMT_OP_SET_MANUFACTURER_DATA,
8728 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
8735 hci_req_init(&req, hdev);
8737 /* if new data is same as previous data then return command
8740 if (hdev->manufacturer_len == cp->data[0] - 1 &&
8741 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
8742 mgmt_pending_remove(cmd);
8743 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
8744 0, cp, sizeof(*cp));
8749 old_len = hdev->manufacturer_len;
8751 memcpy(old_data, hdev->manufacturer_data, old_len);
8753 hdev->manufacturer_len = cp->data[0] - 1;
8754 if (hdev->manufacturer_len > 0)
8755 memcpy(hdev->manufacturer_data, cp->data + 2,
8756 hdev->manufacturer_len);
8758 hci_update_eir_sync(hdev);
8760 err = hci_req_run(&req, set_manufacturer_data_complete);
8762 mgmt_pending_remove(cmd);
8767 hci_dev_unlock(hdev);
8772 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
8773 hdev->manufacturer_len = old_len;
8774 if (hdev->manufacturer_len > 0)
8775 memcpy(hdev->manufacturer_data, old_data,
8776 hdev->manufacturer_len);
8777 hci_dev_unlock(hdev);
8781 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
8782 void *data, u16 len)
8784 struct mgmt_cp_le_set_scan_params *cp = data;
8785 __u16 interval, window;
8788 BT_DBG("%s", hdev->name);
8790 if (!lmp_le_capable(hdev))
8791 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8792 MGMT_STATUS_NOT_SUPPORTED);
8794 interval = __le16_to_cpu(cp->interval);
8796 if (interval < 0x0004 || interval > 0x4000)
8797 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8798 MGMT_STATUS_INVALID_PARAMS);
8800 window = __le16_to_cpu(cp->window);
8802 if (window < 0x0004 || window > 0x4000)
8803 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8804 MGMT_STATUS_INVALID_PARAMS);
8806 if (window > interval)
8807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8808 MGMT_STATUS_INVALID_PARAMS);
8812 hdev->le_scan_type = cp->type;
8813 hdev->le_scan_interval = interval;
8814 hdev->le_scan_window = window;
8816 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
8819 /* If background scan is running, restart it so new parameters are
8822 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
8823 hdev->discovery.state == DISCOVERY_STOPPED) {
8824 struct hci_request req;
8826 hci_req_init(&req, hdev);
8828 hci_req_add_le_scan_disable(&req, false);
8829 hci_req_add_le_passive_scan(&req);
8831 hci_req_run(&req, NULL);
8834 hci_dev_unlock(hdev);
8839 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
8840 void *data, u16 len)
8842 struct mgmt_cp_set_voice_setting *cp = data;
8843 struct hci_conn *conn;
8844 struct hci_conn *sco_conn;
8848 BT_DBG("%s", hdev->name);
8850 if (!lmp_bredr_capable(hdev)) {
8851 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
8852 MGMT_STATUS_NOT_SUPPORTED);
8857 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
8859 err = mgmt_cmd_complete(sk, hdev->id,
8860 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
8864 conn->voice_setting = cp->voice_setting;
8865 conn->sco_role = cp->sco_role;
8867 sco_conn = hci_conn_hash_lookup_sco(hdev);
8868 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
8869 BT_ERR("There is other SCO connection.");
8873 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
8874 if (conn->voice_setting == 0x0063)
8875 sco_connect_set_wbc(hdev);
8877 sco_connect_set_nbc(hdev);
8879 if (conn->voice_setting == 0x0063)
8880 sco_connect_set_gw_wbc(hdev);
8882 sco_connect_set_gw_nbc(hdev);
8886 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
8890 hci_dev_unlock(hdev);
8894 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
8895 void *data, u16 len)
8897 struct mgmt_rp_get_adv_tx_power *rp;
8901 BT_DBG("%s", hdev->name);
8905 rp_len = sizeof(*rp);
8906 rp = kmalloc(rp_len, GFP_KERNEL);
8912 rp->adv_tx_power = hdev->adv_tx_power;
8914 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
8920 hci_dev_unlock(hdev);
8925 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
8927 struct mgmt_ev_hardware_error ev;
8929 ev.error_code = err_code;
8930 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
8933 void mgmt_tx_timeout_error(struct hci_dev *hdev)
8935 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
8938 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
8939 u8 state_change_reason, u16 connection_handle)
8941 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
8943 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
8944 adv_instance, state_change_reason, connection_handle);
8946 mgmt_ev.adv_instance = adv_instance;
8947 mgmt_ev.state_change_reason = state_change_reason;
8948 mgmt_ev.connection_handle = connection_handle;
8950 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
8951 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
8955 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8956 void *data, u16 len)
8959 struct mgmt_cp_enable_6lowpan *cp = data;
8961 BT_DBG("%s", hdev->name);
8965 if (!hdev_is_powered(hdev)) {
8966 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8967 MGMT_STATUS_NOT_POWERED);
8971 if (!lmp_le_capable(hdev)) {
8972 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8973 MGMT_STATUS_NOT_SUPPORTED);
8977 if (cp->enable_6lowpan)
8978 bt_6lowpan_enable();
8980 bt_6lowpan_disable();
8982 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8983 MGMT_STATUS_SUCCESS, NULL, 0);
8985 hci_dev_unlock(hdev);
8988 #endif /* TIZEN_BT */
8990 static bool ltk_is_valid(struct mgmt_ltk_info *key)
8992 if (key->initiator != 0x00 && key->initiator != 0x01)
8995 switch (key->addr.type) {
8996 case BDADDR_LE_PUBLIC:
8999 case BDADDR_LE_RANDOM:
9000 /* Two most significant bits shall be set */
9001 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
9009 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
9010 void *cp_data, u16 len)
9012 struct mgmt_cp_load_long_term_keys *cp = cp_data;
9013 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
9014 sizeof(struct mgmt_ltk_info));
9015 u16 key_count, expected_len;
9018 bt_dev_dbg(hdev, "sock %p", sk);
9020 if (!lmp_le_capable(hdev))
9021 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9022 MGMT_STATUS_NOT_SUPPORTED);
9024 key_count = __le16_to_cpu(cp->key_count);
9025 if (key_count > max_key_count) {
9026 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
9028 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9029 MGMT_STATUS_INVALID_PARAMS);
9032 expected_len = struct_size(cp, keys, key_count);
9033 if (expected_len != len) {
9034 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
9036 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9037 MGMT_STATUS_INVALID_PARAMS);
9040 bt_dev_dbg(hdev, "key_count %u", key_count);
9042 for (i = 0; i < key_count; i++) {
9043 struct mgmt_ltk_info *key = &cp->keys[i];
9045 if (!ltk_is_valid(key))
9046 return mgmt_cmd_status(sk, hdev->id,
9047 MGMT_OP_LOAD_LONG_TERM_KEYS,
9048 MGMT_STATUS_INVALID_PARAMS);
9053 hci_smp_ltks_clear(hdev);
9055 for (i = 0; i < key_count; i++) {
9056 struct mgmt_ltk_info *key = &cp->keys[i];
9057 u8 type, authenticated;
9058 u8 addr_type = le_addr_type(key->addr.type);
9060 if (hci_is_blocked_key(hdev,
9061 HCI_BLOCKED_KEY_TYPE_LTK,
9063 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
9068 switch (key->type) {
9069 case MGMT_LTK_UNAUTHENTICATED:
9070 authenticated = 0x00;
9071 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9073 case MGMT_LTK_AUTHENTICATED:
9074 authenticated = 0x01;
9075 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9077 case MGMT_LTK_P256_UNAUTH:
9078 authenticated = 0x00;
9079 type = SMP_LTK_P256;
9081 case MGMT_LTK_P256_AUTH:
9082 authenticated = 0x01;
9083 type = SMP_LTK_P256;
9085 case MGMT_LTK_P256_DEBUG:
9086 authenticated = 0x00;
9087 type = SMP_LTK_P256_DEBUG;
9093 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
9094 if (key->addr.type == BDADDR_BREDR)
9095 addr_type = BDADDR_BREDR;
9097 hci_add_ltk(hdev, &key->addr.bdaddr,
9098 addr_type, type, authenticated,
9099 key->val, key->enc_size, key->ediv, key->rand);
9102 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
9105 hci_dev_unlock(hdev);
9110 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
9112 struct mgmt_pending_cmd *cmd = data;
9113 struct hci_conn *conn = cmd->user_data;
9114 struct mgmt_cp_get_conn_info *cp = cmd->param;
9115 struct mgmt_rp_get_conn_info rp;
9118 bt_dev_dbg(hdev, "err %d", err);
9120 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
9122 status = mgmt_status(err);
9123 if (status == MGMT_STATUS_SUCCESS) {
9124 rp.rssi = conn->rssi;
9125 rp.tx_power = conn->tx_power;
9126 rp.max_tx_power = conn->max_tx_power;
9128 rp.rssi = HCI_RSSI_INVALID;
9129 rp.tx_power = HCI_TX_POWER_INVALID;
9130 rp.max_tx_power = HCI_TX_POWER_INVALID;
9133 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
9136 mgmt_pending_free(cmd);
9139 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
9141 struct mgmt_pending_cmd *cmd = data;
9142 struct mgmt_cp_get_conn_info *cp = cmd->param;
9143 struct hci_conn *conn;
9147 /* Make sure we are still connected */
9148 if (cp->addr.type == BDADDR_BREDR)
9149 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9152 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9154 if (!conn || conn->state != BT_CONNECTED)
9155 return MGMT_STATUS_NOT_CONNECTED;
9157 cmd->user_data = conn;
9158 handle = cpu_to_le16(conn->handle);
9160 /* Refresh RSSI each time */
9161 err = hci_read_rssi_sync(hdev, handle);
9163 /* For LE links TX power does not change thus we don't need to
9164 * query for it once value is known.
9166 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
9167 conn->tx_power == HCI_TX_POWER_INVALID))
9168 err = hci_read_tx_power_sync(hdev, handle, 0x00);
9170 /* Max TX power needs to be read only once per connection */
9171 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
9172 err = hci_read_tx_power_sync(hdev, handle, 0x01);
9177 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
9180 struct mgmt_cp_get_conn_info *cp = data;
9181 struct mgmt_rp_get_conn_info rp;
9182 struct hci_conn *conn;
9183 unsigned long conn_info_age;
9186 bt_dev_dbg(hdev, "sock %p", sk);
9188 memset(&rp, 0, sizeof(rp));
9189 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9190 rp.addr.type = cp->addr.type;
9192 if (!bdaddr_type_is_valid(cp->addr.type))
9193 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9194 MGMT_STATUS_INVALID_PARAMS,
9199 if (!hdev_is_powered(hdev)) {
9200 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9201 MGMT_STATUS_NOT_POWERED, &rp,
9206 if (cp->addr.type == BDADDR_BREDR)
9207 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9210 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9212 if (!conn || conn->state != BT_CONNECTED) {
9213 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9214 MGMT_STATUS_NOT_CONNECTED, &rp,
9219 /* To avoid client trying to guess when to poll again for information we
9220 * calculate conn info age as random value between min/max set in hdev.
9222 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
9223 hdev->conn_info_max_age - 1);
9225 /* Query controller to refresh cached values if they are too old or were
9228 if (time_after(jiffies, conn->conn_info_timestamp +
9229 msecs_to_jiffies(conn_info_age)) ||
9230 !conn->conn_info_timestamp) {
9231 struct mgmt_pending_cmd *cmd;
9233 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
9238 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
9239 cmd, get_conn_info_complete);
9243 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9244 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9247 mgmt_pending_free(cmd);
9252 conn->conn_info_timestamp = jiffies;
9254 /* Cache is valid, just reply with values cached in hci_conn */
9255 rp.rssi = conn->rssi;
9256 rp.tx_power = conn->tx_power;
9257 rp.max_tx_power = conn->max_tx_power;
9259 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9260 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9264 hci_dev_unlock(hdev);
9268 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
9270 struct mgmt_pending_cmd *cmd = data;
9271 struct mgmt_cp_get_clock_info *cp = cmd->param;
9272 struct mgmt_rp_get_clock_info rp;
9273 struct hci_conn *conn = cmd->user_data;
9274 u8 status = mgmt_status(err);
9276 bt_dev_dbg(hdev, "err %d", err);
9278 memset(&rp, 0, sizeof(rp));
9279 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9280 rp.addr.type = cp->addr.type;
9285 rp.local_clock = cpu_to_le32(hdev->clock);
9288 rp.piconet_clock = cpu_to_le32(conn->clock);
9289 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
9293 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
9296 mgmt_pending_free(cmd);
9299 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
9301 struct mgmt_pending_cmd *cmd = data;
9302 struct mgmt_cp_get_clock_info *cp = cmd->param;
9303 struct hci_cp_read_clock hci_cp;
9304 struct hci_conn *conn;
9306 memset(&hci_cp, 0, sizeof(hci_cp));
9307 hci_read_clock_sync(hdev, &hci_cp);
9309 /* Make sure connection still exists */
9310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
9311 if (!conn || conn->state != BT_CONNECTED)
9312 return MGMT_STATUS_NOT_CONNECTED;
9314 cmd->user_data = conn;
9315 hci_cp.handle = cpu_to_le16(conn->handle);
9316 hci_cp.which = 0x01; /* Piconet clock */
9318 return hci_read_clock_sync(hdev, &hci_cp);
9321 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
9324 struct mgmt_cp_get_clock_info *cp = data;
9325 struct mgmt_rp_get_clock_info rp;
9326 struct mgmt_pending_cmd *cmd;
9327 struct hci_conn *conn;
9330 bt_dev_dbg(hdev, "sock %p", sk);
9332 memset(&rp, 0, sizeof(rp));
9333 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9334 rp.addr.type = cp->addr.type;
9336 if (cp->addr.type != BDADDR_BREDR)
9337 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9338 MGMT_STATUS_INVALID_PARAMS,
9343 if (!hdev_is_powered(hdev)) {
9344 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9345 MGMT_STATUS_NOT_POWERED, &rp,
9350 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9351 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9353 if (!conn || conn->state != BT_CONNECTED) {
9354 err = mgmt_cmd_complete(sk, hdev->id,
9355 MGMT_OP_GET_CLOCK_INFO,
9356 MGMT_STATUS_NOT_CONNECTED,
9364 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
9368 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
9369 get_clock_info_complete);
9372 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9373 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9376 mgmt_pending_free(cmd);
9381 hci_dev_unlock(hdev);
9385 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
9387 struct hci_conn *conn;
9389 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
9393 if (conn->dst_type != type)
9396 if (conn->state != BT_CONNECTED)
9402 /* This function requires the caller holds hdev->lock */
9403 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
9404 u8 addr_type, u8 auto_connect)
9406 struct hci_conn_params *params;
9408 params = hci_conn_params_add(hdev, addr, addr_type);
9412 if (params->auto_connect == auto_connect)
9415 hci_pend_le_list_del_init(params);
9417 switch (auto_connect) {
9418 case HCI_AUTO_CONN_DISABLED:
9419 case HCI_AUTO_CONN_LINK_LOSS:
9420 /* If auto connect is being disabled when we're trying to
9421 * connect to device, keep connecting.
9423 if (params->explicit_connect)
9424 hci_pend_le_list_add(params, &hdev->pend_le_conns);
9426 case HCI_AUTO_CONN_REPORT:
9427 if (params->explicit_connect)
9428 hci_pend_le_list_add(params, &hdev->pend_le_conns);
9430 hci_pend_le_list_add(params, &hdev->pend_le_reports);
9432 case HCI_AUTO_CONN_DIRECT:
9433 case HCI_AUTO_CONN_ALWAYS:
9434 if (!is_connected(hdev, addr, addr_type))
9435 hci_pend_le_list_add(params, &hdev->pend_le_conns);
9439 params->auto_connect = auto_connect;
9441 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
9442 addr, addr_type, auto_connect);
9447 static void device_added(struct sock *sk, struct hci_dev *hdev,
9448 bdaddr_t *bdaddr, u8 type, u8 action)
9450 struct mgmt_ev_device_added ev;
9452 bacpy(&ev.addr.bdaddr, bdaddr);
9453 ev.addr.type = type;
9456 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
9459 static int add_device_sync(struct hci_dev *hdev, void *data)
9461 return hci_update_passive_scan_sync(hdev);
9464 static int add_device(struct sock *sk, struct hci_dev *hdev,
9465 void *data, u16 len)
9467 struct mgmt_cp_add_device *cp = data;
9468 u8 auto_conn, addr_type;
9469 struct hci_conn_params *params;
9471 u32 current_flags = 0;
9472 u32 supported_flags;
9474 bt_dev_dbg(hdev, "sock %p", sk);
9476 if (!bdaddr_type_is_valid(cp->addr.type) ||
9477 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
9478 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9479 MGMT_STATUS_INVALID_PARAMS,
9480 &cp->addr, sizeof(cp->addr));
9482 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
9483 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9484 MGMT_STATUS_INVALID_PARAMS,
9485 &cp->addr, sizeof(cp->addr));
9489 if (cp->addr.type == BDADDR_BREDR) {
9490 /* Only incoming connections action is supported for now */
9491 if (cp->action != 0x01) {
9492 err = mgmt_cmd_complete(sk, hdev->id,
9494 MGMT_STATUS_INVALID_PARAMS,
9495 &cp->addr, sizeof(cp->addr));
9499 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
9505 hci_update_scan(hdev);
9510 addr_type = le_addr_type(cp->addr.type);
9512 if (cp->action == 0x02)
9513 auto_conn = HCI_AUTO_CONN_ALWAYS;
9514 else if (cp->action == 0x01)
9515 auto_conn = HCI_AUTO_CONN_DIRECT;
9517 auto_conn = HCI_AUTO_CONN_REPORT;
9519 /* Kernel internally uses conn_params with resolvable private
9520 * address, but Add Device allows only identity addresses.
9521 * Make sure it is enforced before calling
9522 * hci_conn_params_lookup.
9524 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9525 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9526 MGMT_STATUS_INVALID_PARAMS,
9527 &cp->addr, sizeof(cp->addr));
9531 /* If the connection parameters don't exist for this device,
9532 * they will be created and configured with defaults.
9534 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
9536 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9537 MGMT_STATUS_FAILED, &cp->addr,
9541 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9544 current_flags = params->flags;
9547 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
9552 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
9553 supported_flags = hdev->conn_flags;
9554 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
9555 supported_flags, current_flags);
9557 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9558 MGMT_STATUS_SUCCESS, &cp->addr,
9562 hci_dev_unlock(hdev);
9566 static void device_removed(struct sock *sk, struct hci_dev *hdev,
9567 bdaddr_t *bdaddr, u8 type)
9569 struct mgmt_ev_device_removed ev;
9571 bacpy(&ev.addr.bdaddr, bdaddr);
9572 ev.addr.type = type;
9574 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
9577 static int remove_device_sync(struct hci_dev *hdev, void *data)
9579 return hci_update_passive_scan_sync(hdev);
9582 static int remove_device(struct sock *sk, struct hci_dev *hdev,
9583 void *data, u16 len)
9585 struct mgmt_cp_remove_device *cp = data;
9588 bt_dev_dbg(hdev, "sock %p", sk);
9592 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9593 struct hci_conn_params *params;
9596 if (!bdaddr_type_is_valid(cp->addr.type)) {
9597 err = mgmt_cmd_complete(sk, hdev->id,
9598 MGMT_OP_REMOVE_DEVICE,
9599 MGMT_STATUS_INVALID_PARAMS,
9600 &cp->addr, sizeof(cp->addr));
9604 if (cp->addr.type == BDADDR_BREDR) {
9605 err = hci_bdaddr_list_del(&hdev->accept_list,
9609 err = mgmt_cmd_complete(sk, hdev->id,
9610 MGMT_OP_REMOVE_DEVICE,
9611 MGMT_STATUS_INVALID_PARAMS,
9617 hci_update_scan(hdev);
9619 device_removed(sk, hdev, &cp->addr.bdaddr,
9624 addr_type = le_addr_type(cp->addr.type);
9626 /* Kernel internally uses conn_params with resolvable private
9627 * address, but Remove Device allows only identity addresses.
9628 * Make sure it is enforced before calling
9629 * hci_conn_params_lookup.
9631 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9632 err = mgmt_cmd_complete(sk, hdev->id,
9633 MGMT_OP_REMOVE_DEVICE,
9634 MGMT_STATUS_INVALID_PARAMS,
9635 &cp->addr, sizeof(cp->addr));
9639 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9642 err = mgmt_cmd_complete(sk, hdev->id,
9643 MGMT_OP_REMOVE_DEVICE,
9644 MGMT_STATUS_INVALID_PARAMS,
9645 &cp->addr, sizeof(cp->addr));
9649 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
9650 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
9651 err = mgmt_cmd_complete(sk, hdev->id,
9652 MGMT_OP_REMOVE_DEVICE,
9653 MGMT_STATUS_INVALID_PARAMS,
9654 &cp->addr, sizeof(cp->addr));
9658 hci_conn_params_free(params);
9660 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
9662 struct hci_conn_params *p, *tmp;
9663 struct bdaddr_list *b, *btmp;
9665 if (cp->addr.type) {
9666 err = mgmt_cmd_complete(sk, hdev->id,
9667 MGMT_OP_REMOVE_DEVICE,
9668 MGMT_STATUS_INVALID_PARAMS,
9669 &cp->addr, sizeof(cp->addr));
9673 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
9674 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
9679 hci_update_scan(hdev);
9681 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
9682 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
9684 device_removed(sk, hdev, &p->addr, p->addr_type);
9685 if (p->explicit_connect) {
9686 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
9689 hci_conn_params_free(p);
9692 bt_dev_dbg(hdev, "All LE connection parameters were removed");
9695 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
9698 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
9699 MGMT_STATUS_SUCCESS, &cp->addr,
9702 hci_dev_unlock(hdev);
9706 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
9709 struct mgmt_cp_load_conn_param *cp = data;
9710 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
9711 sizeof(struct mgmt_conn_param));
9712 u16 param_count, expected_len;
9715 if (!lmp_le_capable(hdev))
9716 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9717 MGMT_STATUS_NOT_SUPPORTED);
9719 param_count = __le16_to_cpu(cp->param_count);
9720 if (param_count > max_param_count) {
9721 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
9723 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9724 MGMT_STATUS_INVALID_PARAMS);
9727 expected_len = struct_size(cp, params, param_count);
9728 if (expected_len != len) {
9729 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
9731 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
9732 MGMT_STATUS_INVALID_PARAMS);
9735 bt_dev_dbg(hdev, "param_count %u", param_count);
9739 hci_conn_params_clear_disabled(hdev);
9741 for (i = 0; i < param_count; i++) {
9742 struct mgmt_conn_param *param = &cp->params[i];
9743 struct hci_conn_params *hci_param;
9744 u16 min, max, latency, timeout;
9747 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
9750 if (param->addr.type == BDADDR_LE_PUBLIC) {
9751 addr_type = ADDR_LE_DEV_PUBLIC;
9752 } else if (param->addr.type == BDADDR_LE_RANDOM) {
9753 addr_type = ADDR_LE_DEV_RANDOM;
9755 bt_dev_err(hdev, "ignoring invalid connection parameters");
9759 min = le16_to_cpu(param->min_interval);
9760 max = le16_to_cpu(param->max_interval);
9761 latency = le16_to_cpu(param->latency);
9762 timeout = le16_to_cpu(param->timeout);
9764 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
9765 min, max, latency, timeout);
9767 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
9768 bt_dev_err(hdev, "ignoring invalid connection parameters");
9772 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
9775 bt_dev_err(hdev, "failed to add connection parameters");
9779 hci_param->conn_min_interval = min;
9780 hci_param->conn_max_interval = max;
9781 hci_param->conn_latency = latency;
9782 hci_param->supervision_timeout = timeout;
9785 hci_dev_unlock(hdev);
9787 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
9791 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
9792 void *data, u16 len)
9794 struct mgmt_cp_set_external_config *cp = data;
9798 bt_dev_dbg(hdev, "sock %p", sk);
9800 if (hdev_is_powered(hdev))
9801 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9802 MGMT_STATUS_REJECTED);
9804 if (cp->config != 0x00 && cp->config != 0x01)
9805 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9806 MGMT_STATUS_INVALID_PARAMS);
9808 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
9809 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
9810 MGMT_STATUS_NOT_SUPPORTED);
9815 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
9817 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
9819 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
9826 err = new_options(hdev, sk);
9828 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
9829 mgmt_index_removed(hdev);
9831 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
9832 hci_dev_set_flag(hdev, HCI_CONFIG);
9833 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9835 queue_work(hdev->req_workqueue, &hdev->power_on);
9837 set_bit(HCI_RAW, &hdev->flags);
9838 mgmt_index_added(hdev);
9843 hci_dev_unlock(hdev);
9847 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
9848 void *data, u16 len)
9850 struct mgmt_cp_set_public_address *cp = data;
9854 bt_dev_dbg(hdev, "sock %p", sk);
9856 if (hdev_is_powered(hdev))
9857 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9858 MGMT_STATUS_REJECTED);
9860 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
9861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9862 MGMT_STATUS_INVALID_PARAMS);
9864 if (!hdev->set_bdaddr)
9865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
9866 MGMT_STATUS_NOT_SUPPORTED);
9870 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
9871 bacpy(&hdev->public_addr, &cp->bdaddr);
9873 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
9880 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
9881 err = new_options(hdev, sk);
9883 if (is_configured(hdev)) {
9884 mgmt_index_removed(hdev);
9886 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
9888 hci_dev_set_flag(hdev, HCI_CONFIG);
9889 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
9891 queue_work(hdev->req_workqueue, &hdev->power_on);
9895 hci_dev_unlock(hdev);
9900 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
9904 struct mgmt_ev_device_name_update *ev = (void *)buf;
9910 bacpy(&ev->addr.bdaddr, bdaddr);
9911 ev->addr.type = BDADDR_BREDR;
9913 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
9916 ev->eir_len = cpu_to_le16(eir_len);
9918 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
9919 sizeof(*ev) + eir_len, NULL);
9922 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9923 u8 link_type, u8 addr_type, u8 status)
9925 struct mgmt_ev_conn_update_failed ev;
9927 bacpy(&ev.addr.bdaddr, bdaddr);
9928 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9931 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
9932 &ev, sizeof(ev), NULL);
9935 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
9936 u8 link_type, u8 addr_type, u16 conn_interval,
9937 u16 conn_latency, u16 supervision_timeout)
9939 struct mgmt_ev_conn_updated ev;
9941 bacpy(&ev.addr.bdaddr, bdaddr);
9942 ev.addr.type = link_to_bdaddr(link_type, addr_type);
9943 ev.conn_interval = cpu_to_le16(conn_interval);
9944 ev.conn_latency = cpu_to_le16(conn_latency);
9945 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
9947 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
9948 &ev, sizeof(ev), NULL);
9951 /* le device found event - Pass adv type */
9952 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9953 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
9954 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
9957 struct mgmt_ev_le_device_found *ev = (void *)buf;
9960 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
9963 /* Make sure that the buffer is big enough. The 5 extra bytes
9964 * are for the potential CoD field.
9966 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
9969 memset(buf, 0, sizeof(buf));
9971 bacpy(&ev->addr.bdaddr, bdaddr);
9972 ev->addr.type = link_to_bdaddr(link_type, addr_type);
9974 ev->flags = cpu_to_le32(flags);
9975 ev->adv_type = adv_type;
9978 memcpy(ev->eir, eir, eir_len);
9980 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
9981 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
9984 if (scan_rsp_len > 0)
9985 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
9987 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
9988 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
9990 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
9994 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
9997 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
9998 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
9999 u8 *h192, *r192, *h256, *r256;
10000 struct mgmt_pending_cmd *cmd = data;
10001 struct sk_buff *skb = cmd->skb;
10002 u8 status = mgmt_status(err);
10005 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
10010 status = MGMT_STATUS_FAILED;
10011 else if (IS_ERR(skb))
10012 status = mgmt_status(PTR_ERR(skb));
10014 status = mgmt_status(skb->data[0]);
10017 bt_dev_dbg(hdev, "status %u", status);
10019 mgmt_cp = cmd->param;
10022 status = mgmt_status(status);
10029 } else if (!bredr_sc_enabled(hdev)) {
10030 struct hci_rp_read_local_oob_data *rp;
10032 if (skb->len != sizeof(*rp)) {
10033 status = MGMT_STATUS_FAILED;
10036 status = MGMT_STATUS_SUCCESS;
10037 rp = (void *)skb->data;
10039 eir_len = 5 + 18 + 18;
10046 struct hci_rp_read_local_oob_ext_data *rp;
10048 if (skb->len != sizeof(*rp)) {
10049 status = MGMT_STATUS_FAILED;
10052 status = MGMT_STATUS_SUCCESS;
10053 rp = (void *)skb->data;
10055 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
10056 eir_len = 5 + 18 + 18;
10060 eir_len = 5 + 18 + 18 + 18 + 18;
10061 h192 = rp->hash192;
10062 r192 = rp->rand192;
10065 h256 = rp->hash256;
10066 r256 = rp->rand256;
10070 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
10077 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
10078 hdev->dev_class, 3);
10080 if (h192 && r192) {
10081 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10082 EIR_SSP_HASH_C192, h192, 16);
10083 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10084 EIR_SSP_RAND_R192, r192, 16);
10087 if (h256 && r256) {
10088 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10089 EIR_SSP_HASH_C256, h256, 16);
10090 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10091 EIR_SSP_RAND_R256, r256, 16);
10095 mgmt_rp->type = mgmt_cp->type;
10096 mgmt_rp->eir_len = cpu_to_le16(eir_len);
10098 err = mgmt_cmd_complete(cmd->sk, hdev->id,
10099 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
10100 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
10101 if (err < 0 || status)
10104 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
10106 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10107 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
10108 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
10110 if (skb && !IS_ERR(skb))
10114 mgmt_pending_remove(cmd);
10117 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
10118 struct mgmt_cp_read_local_oob_ext_data *cp)
10120 struct mgmt_pending_cmd *cmd;
10123 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
10128 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
10129 read_local_oob_ext_data_complete);
10132 mgmt_pending_remove(cmd);
10139 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
10140 void *data, u16 data_len)
10142 struct mgmt_cp_read_local_oob_ext_data *cp = data;
10143 struct mgmt_rp_read_local_oob_ext_data *rp;
10146 u8 status, flags, role, addr[7], hash[16], rand[16];
10149 bt_dev_dbg(hdev, "sock %p", sk);
10151 if (hdev_is_powered(hdev)) {
10152 switch (cp->type) {
10153 case BIT(BDADDR_BREDR):
10154 status = mgmt_bredr_support(hdev);
10160 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10161 status = mgmt_le_support(hdev);
10165 eir_len = 9 + 3 + 18 + 18 + 3;
10168 status = MGMT_STATUS_INVALID_PARAMS;
10173 status = MGMT_STATUS_NOT_POWERED;
10177 rp_len = sizeof(*rp) + eir_len;
10178 rp = kmalloc(rp_len, GFP_ATOMIC);
10182 if (!status && !lmp_ssp_capable(hdev)) {
10183 status = MGMT_STATUS_NOT_SUPPORTED;
10190 hci_dev_lock(hdev);
10193 switch (cp->type) {
10194 case BIT(BDADDR_BREDR):
10195 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10196 err = read_local_ssp_oob_req(hdev, sk, cp);
10197 hci_dev_unlock(hdev);
10201 status = MGMT_STATUS_FAILED;
10204 eir_len = eir_append_data(rp->eir, eir_len,
10206 hdev->dev_class, 3);
10209 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10210 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
10211 smp_generate_oob(hdev, hash, rand) < 0) {
10212 hci_dev_unlock(hdev);
10213 status = MGMT_STATUS_FAILED;
10217 /* This should return the active RPA, but since the RPA
10218 * is only programmed on demand, it is really hard to fill
10219 * this in at the moment. For now disallow retrieving
10220 * local out-of-band data when privacy is in use.
10222 * Returning the identity address will not help here since
10223 * pairing happens before the identity resolving key is
10224 * known and thus the connection establishment happens
10225 * based on the RPA and not the identity address.
10227 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
10228 hci_dev_unlock(hdev);
10229 status = MGMT_STATUS_REJECTED;
10233 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
10234 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
10235 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
10236 bacmp(&hdev->static_addr, BDADDR_ANY))) {
10237 memcpy(addr, &hdev->static_addr, 6);
10240 memcpy(addr, &hdev->bdaddr, 6);
10244 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
10245 addr, sizeof(addr));
10247 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
10252 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
10253 &role, sizeof(role));
10255 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
10256 eir_len = eir_append_data(rp->eir, eir_len,
10258 hash, sizeof(hash));
10260 eir_len = eir_append_data(rp->eir, eir_len,
10262 rand, sizeof(rand));
10265 flags = mgmt_get_adv_discov_flags(hdev);
10267 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
10268 flags |= LE_AD_NO_BREDR;
10270 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
10271 &flags, sizeof(flags));
10275 hci_dev_unlock(hdev);
10277 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
10279 status = MGMT_STATUS_SUCCESS;
10282 rp->type = cp->type;
10283 rp->eir_len = cpu_to_le16(eir_len);
10285 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
10286 status, rp, sizeof(*rp) + eir_len);
10287 if (err < 0 || status)
10290 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10291 rp, sizeof(*rp) + eir_len,
10292 HCI_MGMT_OOB_DATA_EVENTS, sk);
10300 static u32 get_supported_adv_flags(struct hci_dev *hdev)
10304 flags |= MGMT_ADV_FLAG_CONNECTABLE;
10305 flags |= MGMT_ADV_FLAG_DISCOV;
10306 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
10307 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
10308 flags |= MGMT_ADV_FLAG_APPEARANCE;
10309 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
10310 flags |= MGMT_ADV_PARAM_DURATION;
10311 flags |= MGMT_ADV_PARAM_TIMEOUT;
10312 flags |= MGMT_ADV_PARAM_INTERVALS;
10313 flags |= MGMT_ADV_PARAM_TX_POWER;
10314 flags |= MGMT_ADV_PARAM_SCAN_RSP;
10316 /* In extended adv TX_POWER returned from Set Adv Param
10317 * will be always valid.
10319 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
10320 flags |= MGMT_ADV_FLAG_TX_POWER;
10322 if (ext_adv_capable(hdev)) {
10323 flags |= MGMT_ADV_FLAG_SEC_1M;
10324 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
10325 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
10327 if (le_2m_capable(hdev))
10328 flags |= MGMT_ADV_FLAG_SEC_2M;
10330 if (le_coded_capable(hdev))
10331 flags |= MGMT_ADV_FLAG_SEC_CODED;
10337 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
10338 void *data, u16 data_len)
10340 struct mgmt_rp_read_adv_features *rp;
10343 struct adv_info *adv_instance;
10344 u32 supported_flags;
10347 bt_dev_dbg(hdev, "sock %p", sk);
10349 if (!lmp_le_capable(hdev))
10350 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10351 MGMT_STATUS_REJECTED);
10353 hci_dev_lock(hdev);
10355 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
10356 rp = kmalloc(rp_len, GFP_ATOMIC);
10358 hci_dev_unlock(hdev);
10362 supported_flags = get_supported_adv_flags(hdev);
10364 rp->supported_flags = cpu_to_le32(supported_flags);
10365 rp->max_adv_data_len = max_adv_len(hdev);
10366 rp->max_scan_rsp_len = max_adv_len(hdev);
10367 rp->max_instances = hdev->le_num_of_adv_sets;
10368 rp->num_instances = hdev->adv_instance_cnt;
10370 instance = rp->instance;
10371 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
10372 /* Only instances 1-le_num_of_adv_sets are externally visible */
10373 if (adv_instance->instance <= hdev->adv_instance_cnt) {
10374 *instance = adv_instance->instance;
10377 rp->num_instances--;
10382 hci_dev_unlock(hdev);
10384 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10385 MGMT_STATUS_SUCCESS, rp, rp_len);
10392 static u8 calculate_name_len(struct hci_dev *hdev)
10394 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
10396 return eir_append_local_name(hdev, buf, 0);
10399 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
10402 u8 max_len = max_adv_len(hdev);
10405 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
10406 MGMT_ADV_FLAG_LIMITED_DISCOV |
10407 MGMT_ADV_FLAG_MANAGED_FLAGS))
10410 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
10413 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
10414 max_len -= calculate_name_len(hdev);
10416 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
10423 static bool flags_managed(u32 adv_flags)
10425 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
10426 MGMT_ADV_FLAG_LIMITED_DISCOV |
10427 MGMT_ADV_FLAG_MANAGED_FLAGS);
10430 static bool tx_power_managed(u32 adv_flags)
10432 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
10435 static bool name_managed(u32 adv_flags)
10437 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
10440 static bool appearance_managed(u32 adv_flags)
10442 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
10445 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
10446 u8 len, bool is_adv_data)
10451 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
10456 /* Make sure that the data is correctly formatted. */
10457 for (i = 0; i < len; i += (cur_len + 1)) {
10463 if (data[i + 1] == EIR_FLAGS &&
10464 (!is_adv_data || flags_managed(adv_flags)))
10467 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
10470 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
10473 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
10476 if (data[i + 1] == EIR_APPEARANCE &&
10477 appearance_managed(adv_flags))
10480 /* If the current field length would exceed the total data
10481 * length, then it's invalid.
10483 if (i + cur_len >= len)
10490 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
10492 u32 supported_flags, phy_flags;
10494 /* The current implementation only supports a subset of the specified
10495 * flags. Also need to check mutual exclusiveness of sec flags.
10497 supported_flags = get_supported_adv_flags(hdev);
10498 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
10499 if (adv_flags & ~supported_flags ||
10500 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
10506 static bool adv_busy(struct hci_dev *hdev)
10508 return pending_find(MGMT_OP_SET_LE, hdev);
10511 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
10514 struct adv_info *adv, *n;
10516 bt_dev_dbg(hdev, "err %d", err);
10518 hci_dev_lock(hdev);
10520 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
10527 adv->pending = false;
10531 instance = adv->instance;
10533 if (hdev->cur_adv_instance == instance)
10534 cancel_adv_timeout(hdev);
10536 hci_remove_adv_instance(hdev, instance);
10537 mgmt_advertising_removed(sk, hdev, instance);
10540 hci_dev_unlock(hdev);
10543 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
10545 struct mgmt_pending_cmd *cmd = data;
10546 struct mgmt_cp_add_advertising *cp = cmd->param;
10547 struct mgmt_rp_add_advertising rp;
10549 memset(&rp, 0, sizeof(rp));
10551 rp.instance = cp->instance;
10554 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10557 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10558 mgmt_status(err), &rp, sizeof(rp));
10560 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10562 mgmt_pending_free(cmd);
10565 static int add_advertising_sync(struct hci_dev *hdev, void *data)
10567 struct mgmt_pending_cmd *cmd = data;
10568 struct mgmt_cp_add_advertising *cp = cmd->param;
10570 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10573 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10574 void *data, u16 data_len)
10576 struct mgmt_cp_add_advertising *cp = data;
10577 struct mgmt_rp_add_advertising rp;
10580 u16 timeout, duration;
10581 unsigned int prev_instance_cnt;
10582 u8 schedule_instance = 0;
10583 struct adv_info *adv, *next_instance;
10585 struct mgmt_pending_cmd *cmd;
10587 bt_dev_dbg(hdev, "sock %p", sk);
10589 status = mgmt_le_support(hdev);
10591 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10594 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10595 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10596 MGMT_STATUS_INVALID_PARAMS);
10598 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10599 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10600 MGMT_STATUS_INVALID_PARAMS);
10602 flags = __le32_to_cpu(cp->flags);
10603 timeout = __le16_to_cpu(cp->timeout);
10604 duration = __le16_to_cpu(cp->duration);
10606 if (!requested_adv_flags_are_valid(hdev, flags))
10607 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10608 MGMT_STATUS_INVALID_PARAMS);
10610 hci_dev_lock(hdev);
10612 if (timeout && !hdev_is_powered(hdev)) {
10613 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10614 MGMT_STATUS_REJECTED);
10618 if (adv_busy(hdev)) {
10619 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10624 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
10625 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
10626 cp->scan_rsp_len, false)) {
10627 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10628 MGMT_STATUS_INVALID_PARAMS);
10632 prev_instance_cnt = hdev->adv_instance_cnt;
10634 adv = hci_add_adv_instance(hdev, cp->instance, flags,
10635 cp->adv_data_len, cp->data,
10637 cp->data + cp->adv_data_len,
10639 HCI_ADV_TX_POWER_NO_PREFERENCE,
10640 hdev->le_adv_min_interval,
10641 hdev->le_adv_max_interval, 0);
10643 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10644 MGMT_STATUS_FAILED);
10648 /* Only trigger an advertising added event if a new instance was
10651 if (hdev->adv_instance_cnt > prev_instance_cnt)
10652 mgmt_advertising_added(sk, hdev, cp->instance);
10654 if (hdev->cur_adv_instance == cp->instance) {
10655 /* If the currently advertised instance is being changed then
10656 * cancel the current advertising and schedule the next
10657 * instance. If there is only one instance then the overridden
10658 * advertising data will be visible right away.
10660 cancel_adv_timeout(hdev);
10662 next_instance = hci_get_next_instance(hdev, cp->instance);
10664 schedule_instance = next_instance->instance;
10665 } else if (!hdev->adv_instance_timeout) {
10666 /* Immediately advertise the new instance if no other
10667 * instance is currently being advertised.
10669 schedule_instance = cp->instance;
10672 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
10673 * there is no instance to be advertised then we have no HCI
10674 * communication to make. Simply return.
10676 if (!hdev_is_powered(hdev) ||
10677 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10678 !schedule_instance) {
10679 rp.instance = cp->instance;
10680 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10681 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10685 /* We're good to go, update advertising data, parameters, and start
10688 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
10695 cp->instance = schedule_instance;
10697 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
10698 add_advertising_complete);
10700 mgmt_pending_free(cmd);
10703 hci_dev_unlock(hdev);
10708 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
10711 struct mgmt_pending_cmd *cmd = data;
10712 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
10713 struct mgmt_rp_add_ext_adv_params rp;
10714 struct adv_info *adv;
10717 BT_DBG("%s", hdev->name);
10719 hci_dev_lock(hdev);
10721 adv = hci_find_adv_instance(hdev, cp->instance);
10725 rp.instance = cp->instance;
10726 rp.tx_power = adv->tx_power;
10728 /* While we're at it, inform userspace of the available space for this
10729 * advertisement, given the flags that will be used.
10731 flags = __le32_to_cpu(cp->flags);
10732 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10733 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10736 /* If this advertisement was previously advertising and we
10737 * failed to update it, we signal that it has been removed and
10738 * delete its structure
10741 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
10743 hci_remove_adv_instance(hdev, cp->instance);
10745 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10748 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10749 mgmt_status(err), &rp, sizeof(rp));
10754 mgmt_pending_free(cmd);
10756 hci_dev_unlock(hdev);
10759 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
10761 struct mgmt_pending_cmd *cmd = data;
10762 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
10764 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
10767 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
10768 void *data, u16 data_len)
10770 struct mgmt_cp_add_ext_adv_params *cp = data;
10771 struct mgmt_rp_add_ext_adv_params rp;
10772 struct mgmt_pending_cmd *cmd = NULL;
10773 struct adv_info *adv;
10774 u32 flags, min_interval, max_interval;
10775 u16 timeout, duration;
10780 BT_DBG("%s", hdev->name);
10782 status = mgmt_le_support(hdev);
10784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10787 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10789 MGMT_STATUS_INVALID_PARAMS);
10791 /* The purpose of breaking add_advertising into two separate MGMT calls
10792 * for params and data is to allow more parameters to be added to this
10793 * structure in the future. For this reason, we verify that we have the
10794 * bare minimum structure we know of when the interface was defined. Any
10795 * extra parameters we don't know about will be ignored in this request.
10797 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
10798 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10799 MGMT_STATUS_INVALID_PARAMS);
10801 flags = __le32_to_cpu(cp->flags);
10803 if (!requested_adv_flags_are_valid(hdev, flags))
10804 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10805 MGMT_STATUS_INVALID_PARAMS);
10807 hci_dev_lock(hdev);
10809 /* In new interface, we require that we are powered to register */
10810 if (!hdev_is_powered(hdev)) {
10811 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10812 MGMT_STATUS_REJECTED);
10816 if (adv_busy(hdev)) {
10817 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10822 /* Parse defined parameters from request, use defaults otherwise */
10823 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
10824 __le16_to_cpu(cp->timeout) : 0;
10826 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
10827 __le16_to_cpu(cp->duration) :
10828 hdev->def_multi_adv_rotation_duration;
10830 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10831 __le32_to_cpu(cp->min_interval) :
10832 hdev->le_adv_min_interval;
10834 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
10835 __le32_to_cpu(cp->max_interval) :
10836 hdev->le_adv_max_interval;
10838 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
10840 HCI_ADV_TX_POWER_NO_PREFERENCE;
10842 /* Create advertising instance with no advertising or response data */
10843 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
10844 timeout, duration, tx_power, min_interval,
10848 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
10849 MGMT_STATUS_FAILED);
10853 /* Submit request for advertising params if ext adv available */
10854 if (ext_adv_capable(hdev)) {
10855 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
10859 hci_remove_adv_instance(hdev, cp->instance);
10863 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
10864 add_ext_adv_params_complete);
10866 mgmt_pending_free(cmd);
10868 rp.instance = cp->instance;
10869 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
10870 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
10871 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
10872 err = mgmt_cmd_complete(sk, hdev->id,
10873 MGMT_OP_ADD_EXT_ADV_PARAMS,
10874 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10878 hci_dev_unlock(hdev);
10883 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
10885 struct mgmt_pending_cmd *cmd = data;
10886 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10887 struct mgmt_rp_add_advertising rp;
10889 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10891 memset(&rp, 0, sizeof(rp));
10893 rp.instance = cp->instance;
10896 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10899 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10900 mgmt_status(err), &rp, sizeof(rp));
10902 mgmt_pending_free(cmd);
10905 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
10907 struct mgmt_pending_cmd *cmd = data;
10908 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
10911 if (ext_adv_capable(hdev)) {
10912 err = hci_update_adv_data_sync(hdev, cp->instance);
10916 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
10920 return hci_enable_ext_advertising_sync(hdev, cp->instance);
10923 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10926 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
10929 struct mgmt_cp_add_ext_adv_data *cp = data;
10930 struct mgmt_rp_add_ext_adv_data rp;
10931 u8 schedule_instance = 0;
10932 struct adv_info *next_instance;
10933 struct adv_info *adv_instance;
10935 struct mgmt_pending_cmd *cmd;
10937 BT_DBG("%s", hdev->name);
10939 hci_dev_lock(hdev);
10941 adv_instance = hci_find_adv_instance(hdev, cp->instance);
10943 if (!adv_instance) {
10944 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10945 MGMT_STATUS_INVALID_PARAMS);
10949 /* In new interface, we require that we are powered to register */
10950 if (!hdev_is_powered(hdev)) {
10951 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10952 MGMT_STATUS_REJECTED);
10953 goto clear_new_instance;
10956 if (adv_busy(hdev)) {
10957 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10959 goto clear_new_instance;
10962 /* Validate new data */
10963 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
10964 cp->adv_data_len, true) ||
10965 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
10966 cp->adv_data_len, cp->scan_rsp_len, false)) {
10967 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
10968 MGMT_STATUS_INVALID_PARAMS);
10969 goto clear_new_instance;
10972 /* Set the data in the advertising instance */
10973 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
10974 cp->data, cp->scan_rsp_len,
10975 cp->data + cp->adv_data_len);
10977 /* If using software rotation, determine next instance to use */
10978 if (hdev->cur_adv_instance == cp->instance) {
10979 /* If the currently advertised instance is being changed
10980 * then cancel the current advertising and schedule the
10981 * next instance. If there is only one instance then the
10982 * overridden advertising data will be visible right
10985 cancel_adv_timeout(hdev);
10987 next_instance = hci_get_next_instance(hdev, cp->instance);
10989 schedule_instance = next_instance->instance;
10990 } else if (!hdev->adv_instance_timeout) {
10991 /* Immediately advertise the new instance if no other
10992 * instance is currently being advertised.
10994 schedule_instance = cp->instance;
10997 /* If the HCI_ADVERTISING flag is set or there is no instance to
10998 * be advertised then we have no HCI communication to make.
11001 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
11002 if (adv_instance->pending) {
11003 mgmt_advertising_added(sk, hdev, cp->instance);
11004 adv_instance->pending = false;
11006 rp.instance = cp->instance;
11007 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11008 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11012 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
11016 goto clear_new_instance;
11019 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
11020 add_ext_adv_data_complete);
11022 mgmt_pending_free(cmd);
11023 goto clear_new_instance;
11026 /* We were successful in updating data, so trigger advertising_added
11027 * event if this is an instance that wasn't previously advertising. If
11028 * a failure occurs in the requests we initiated, we will remove the
11029 * instance again in add_advertising_complete
11031 if (adv_instance->pending)
11032 mgmt_advertising_added(sk, hdev, cp->instance);
11036 clear_new_instance:
11037 hci_remove_adv_instance(hdev, cp->instance);
11040 hci_dev_unlock(hdev);
11045 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
11048 struct mgmt_pending_cmd *cmd = data;
11049 struct mgmt_cp_remove_advertising *cp = cmd->param;
11050 struct mgmt_rp_remove_advertising rp;
11052 bt_dev_dbg(hdev, "err %d", err);
11054 memset(&rp, 0, sizeof(rp));
11055 rp.instance = cp->instance;
11058 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11061 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11062 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11064 mgmt_pending_free(cmd);
11067 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
11069 struct mgmt_pending_cmd *cmd = data;
11070 struct mgmt_cp_remove_advertising *cp = cmd->param;
11073 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
11077 if (list_empty(&hdev->adv_instances))
11078 err = hci_disable_advertising_sync(hdev);
11083 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
11084 void *data, u16 data_len)
11086 struct mgmt_cp_remove_advertising *cp = data;
11087 struct mgmt_pending_cmd *cmd;
11090 bt_dev_dbg(hdev, "sock %p", sk);
11092 hci_dev_lock(hdev);
11094 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
11095 err = mgmt_cmd_status(sk, hdev->id,
11096 MGMT_OP_REMOVE_ADVERTISING,
11097 MGMT_STATUS_INVALID_PARAMS);
11101 if (pending_find(MGMT_OP_SET_LE, hdev)) {
11102 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11107 if (list_empty(&hdev->adv_instances)) {
11108 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11109 MGMT_STATUS_INVALID_PARAMS);
11113 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
11120 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
11121 remove_advertising_complete);
11123 mgmt_pending_free(cmd);
11126 hci_dev_unlock(hdev);
11131 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
11132 void *data, u16 data_len)
11134 struct mgmt_cp_get_adv_size_info *cp = data;
11135 struct mgmt_rp_get_adv_size_info rp;
11136 u32 flags, supported_flags;
11138 bt_dev_dbg(hdev, "sock %p", sk);
11140 if (!lmp_le_capable(hdev))
11141 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11142 MGMT_STATUS_REJECTED);
11144 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11145 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11146 MGMT_STATUS_INVALID_PARAMS);
11148 flags = __le32_to_cpu(cp->flags);
11150 /* The current implementation only supports a subset of the specified
11153 supported_flags = get_supported_adv_flags(hdev);
11154 if (flags & ~supported_flags)
11155 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11156 MGMT_STATUS_INVALID_PARAMS);
11158 rp.instance = cp->instance;
11159 rp.flags = cp->flags;
11160 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11161 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11163 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11164 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11167 static const struct hci_mgmt_handler mgmt_handlers[] = {
11168 { NULL }, /* 0x0000 (no command) */
11169 { read_version, MGMT_READ_VERSION_SIZE,
11171 HCI_MGMT_UNTRUSTED },
11172 { read_commands, MGMT_READ_COMMANDS_SIZE,
11174 HCI_MGMT_UNTRUSTED },
11175 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
11177 HCI_MGMT_UNTRUSTED },
11178 { read_controller_info, MGMT_READ_INFO_SIZE,
11179 HCI_MGMT_UNTRUSTED },
11180 { set_powered, MGMT_SETTING_SIZE },
11181 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
11182 { set_connectable, MGMT_SETTING_SIZE },
11183 { set_fast_connectable, MGMT_SETTING_SIZE },
11184 { set_bondable, MGMT_SETTING_SIZE },
11185 { set_link_security, MGMT_SETTING_SIZE },
11186 { set_ssp, MGMT_SETTING_SIZE },
11187 { set_hs, MGMT_SETTING_SIZE },
11188 { set_le, MGMT_SETTING_SIZE },
11189 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
11190 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
11191 { add_uuid, MGMT_ADD_UUID_SIZE },
11192 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
11193 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
11194 HCI_MGMT_VAR_LEN },
11195 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
11196 HCI_MGMT_VAR_LEN },
11197 { disconnect, MGMT_DISCONNECT_SIZE },
11198 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
11199 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
11200 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
11201 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
11202 { pair_device, MGMT_PAIR_DEVICE_SIZE },
11203 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
11204 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
11205 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
11206 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
11207 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
11208 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
11209 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
11210 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
11211 HCI_MGMT_VAR_LEN },
11212 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
11213 { start_discovery, MGMT_START_DISCOVERY_SIZE },
11214 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
11215 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
11216 { block_device, MGMT_BLOCK_DEVICE_SIZE },
11217 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
11218 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
11219 { set_advertising, MGMT_SETTING_SIZE },
11220 { set_bredr, MGMT_SETTING_SIZE },
11221 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
11222 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
11223 { set_secure_conn, MGMT_SETTING_SIZE },
11224 { set_debug_keys, MGMT_SETTING_SIZE },
11225 { set_privacy, MGMT_SET_PRIVACY_SIZE },
11226 { load_irks, MGMT_LOAD_IRKS_SIZE,
11227 HCI_MGMT_VAR_LEN },
11228 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
11229 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
11230 { add_device, MGMT_ADD_DEVICE_SIZE },
11231 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
11232 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
11233 HCI_MGMT_VAR_LEN },
11234 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
11236 HCI_MGMT_UNTRUSTED },
11237 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
11238 HCI_MGMT_UNCONFIGURED |
11239 HCI_MGMT_UNTRUSTED },
11240 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
11241 HCI_MGMT_UNCONFIGURED },
11242 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
11243 HCI_MGMT_UNCONFIGURED },
11244 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
11245 HCI_MGMT_VAR_LEN },
11246 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
11247 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
11249 HCI_MGMT_UNTRUSTED },
11250 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
11251 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
11252 HCI_MGMT_VAR_LEN },
11253 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
11254 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
11255 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
11256 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
11257 HCI_MGMT_UNTRUSTED },
11258 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
11259 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
11260 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
11261 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
11262 HCI_MGMT_VAR_LEN },
11263 { set_wideband_speech, MGMT_SETTING_SIZE },
11264 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
11265 HCI_MGMT_UNTRUSTED },
11266 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
11267 HCI_MGMT_UNTRUSTED |
11268 HCI_MGMT_HDEV_OPTIONAL },
11269 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
11271 HCI_MGMT_HDEV_OPTIONAL },
11272 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
11273 HCI_MGMT_UNTRUSTED },
11274 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
11275 HCI_MGMT_VAR_LEN },
11276 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
11277 HCI_MGMT_UNTRUSTED },
11278 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
11279 HCI_MGMT_VAR_LEN },
11280 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
11281 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
11282 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
11283 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
11284 HCI_MGMT_VAR_LEN },
11285 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
11286 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
11287 HCI_MGMT_VAR_LEN },
11288 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
11289 HCI_MGMT_VAR_LEN },
11290 { add_adv_patterns_monitor_rssi,
11291 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
11292 HCI_MGMT_VAR_LEN },
11293 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
11294 HCI_MGMT_VAR_LEN },
11295 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
11296 { mesh_send, MGMT_MESH_SEND_SIZE,
11297 HCI_MGMT_VAR_LEN },
11298 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
11302 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
11303 { NULL }, /* 0x0000 (no command) */
11304 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
11305 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
11306 HCI_MGMT_VAR_LEN },
11307 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
11308 HCI_MGMT_VAR_LEN },
11309 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
11310 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
11311 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
11312 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
11313 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
11314 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
11315 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
11316 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
11317 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
11318 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
11319 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
11320 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
11321 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
11322 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
11323 { enable_bt_6lowpan, MGMT_ENABLE_BT_6LOWPAN_SIZE },
11327 void mgmt_index_added(struct hci_dev *hdev)
11329 struct mgmt_ev_ext_index ev;
11331 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11334 switch (hdev->dev_type) {
11336 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11337 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
11338 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11341 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
11342 HCI_MGMT_INDEX_EVENTS);
11353 ev.bus = hdev->bus;
11355 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
11356 HCI_MGMT_EXT_INDEX_EVENTS);
11359 void mgmt_index_removed(struct hci_dev *hdev)
11361 struct mgmt_ev_ext_index ev;
11362 u8 status = MGMT_STATUS_INVALID_INDEX;
11364 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11367 switch (hdev->dev_type) {
11369 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11371 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11372 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
11373 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11376 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
11377 HCI_MGMT_INDEX_EVENTS);
11388 ev.bus = hdev->bus;
11390 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
11391 HCI_MGMT_EXT_INDEX_EVENTS);
11393 /* Cancel any remaining timed work */
11394 if (!hci_dev_test_flag(hdev, HCI_MGMT))
11396 cancel_delayed_work_sync(&hdev->discov_off);
11397 cancel_delayed_work_sync(&hdev->service_cache);
11398 cancel_delayed_work_sync(&hdev->rpa_expired);
11401 void mgmt_power_on(struct hci_dev *hdev, int err)
11403 struct cmd_lookup match = { NULL, hdev };
11405 bt_dev_dbg(hdev, "err %d", err);
11407 hci_dev_lock(hdev);
11410 restart_le_actions(hdev);
11411 hci_update_passive_scan(hdev);
11414 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11416 new_settings(hdev, match.sk);
11419 sock_put(match.sk);
11421 hci_dev_unlock(hdev);
11424 void __mgmt_power_off(struct hci_dev *hdev)
11426 struct cmd_lookup match = { NULL, hdev };
11427 u8 status, zero_cod[] = { 0, 0, 0 };
11429 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11431 /* If the power off is because of hdev unregistration let
11432 * use the appropriate INVALID_INDEX status. Otherwise use
11433 * NOT_POWERED. We cover both scenarios here since later in
11434 * mgmt_index_removed() any hci_conn callbacks will have already
11435 * been triggered, potentially causing misleading DISCONNECTED
11436 * status responses.
11438 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
11439 status = MGMT_STATUS_INVALID_INDEX;
11441 status = MGMT_STATUS_NOT_POWERED;
11443 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11445 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
11446 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
11447 zero_cod, sizeof(zero_cod),
11448 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11449 ext_info_changed(hdev, NULL);
11452 new_settings(hdev, match.sk);
11455 sock_put(match.sk);
11458 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
11460 struct mgmt_pending_cmd *cmd;
11463 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11467 if (err == -ERFKILL)
11468 status = MGMT_STATUS_RFKILLED;
11470 status = MGMT_STATUS_FAILED;
11472 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11474 mgmt_pending_remove(cmd);
11477 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11480 struct mgmt_ev_new_link_key ev;
11482 memset(&ev, 0, sizeof(ev));
11484 ev.store_hint = persistent;
11485 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11486 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
11487 ev.key.type = key->type;
11488 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11489 ev.key.pin_len = key->pin_len;
11491 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
11494 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11496 switch (ltk->type) {
11498 case SMP_LTK_RESPONDER:
11499 if (ltk->authenticated)
11500 return MGMT_LTK_AUTHENTICATED;
11501 return MGMT_LTK_UNAUTHENTICATED;
11503 if (ltk->authenticated)
11504 return MGMT_LTK_P256_AUTH;
11505 return MGMT_LTK_P256_UNAUTH;
11506 case SMP_LTK_P256_DEBUG:
11507 return MGMT_LTK_P256_DEBUG;
11510 return MGMT_LTK_UNAUTHENTICATED;
11513 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11515 struct mgmt_ev_new_long_term_key ev;
11517 memset(&ev, 0, sizeof(ev));
11519 /* Devices using resolvable or non-resolvable random addresses
11520 * without providing an identity resolving key don't require
11521 * to store long term keys. Their addresses will change the
11522 * next time around.
11524 * Only when a remote device provides an identity address
11525 * make sure the long term key is stored. If the remote
11526 * identity is known, the long term keys are internally
11527 * mapped to the identity address. So allow static random
11528 * and public addresses here.
11530 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11531 (key->bdaddr.b[5] & 0xc0) != 0xc0)
11532 ev.store_hint = 0x00;
11534 ev.store_hint = persistent;
11536 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11537 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
11538 ev.key.type = mgmt_ltk_type(key);
11539 ev.key.enc_size = key->enc_size;
11540 ev.key.ediv = key->ediv;
11541 ev.key.rand = key->rand;
11543 if (key->type == SMP_LTK)
11544 ev.key.initiator = 1;
11546 /* Make sure we copy only the significant bytes based on the
11547 * encryption key size, and set the rest of the value to zeroes.
11549 memcpy(ev.key.val, key->val, key->enc_size);
11550 memset(ev.key.val + key->enc_size, 0,
11551 sizeof(ev.key.val) - key->enc_size);
11553 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
11556 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11558 struct mgmt_ev_new_irk ev;
11560 memset(&ev, 0, sizeof(ev));
11562 ev.store_hint = persistent;
11564 bacpy(&ev.rpa, &irk->rpa);
11565 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11566 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
11567 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11569 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
11572 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11575 struct mgmt_ev_new_csrk ev;
11577 memset(&ev, 0, sizeof(ev));
11579 /* Devices using resolvable or non-resolvable random addresses
11580 * without providing an identity resolving key don't require
11581 * to store signature resolving keys. Their addresses will change
11582 * the next time around.
11584 * Only when a remote device provides an identity address
11585 * make sure the signature resolving key is stored. So allow
11586 * static random and public addresses here.
11588 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11589 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11590 ev.store_hint = 0x00;
11592 ev.store_hint = persistent;
11594 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11595 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
11596 ev.key.type = csrk->type;
11597 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11599 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
11602 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11603 u8 bdaddr_type, u8 store_hint, u16 min_interval,
11604 u16 max_interval, u16 latency, u16 timeout)
11606 struct mgmt_ev_new_conn_param ev;
11608 if (!hci_is_identity_address(bdaddr, bdaddr_type))
11611 memset(&ev, 0, sizeof(ev));
11612 bacpy(&ev.addr.bdaddr, bdaddr);
11613 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
11614 ev.store_hint = store_hint;
11615 ev.min_interval = cpu_to_le16(min_interval);
11616 ev.max_interval = cpu_to_le16(max_interval);
11617 ev.latency = cpu_to_le16(latency);
11618 ev.timeout = cpu_to_le16(timeout);
11620 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
11623 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
11624 u8 *name, u8 name_len)
11626 struct sk_buff *skb;
11627 struct mgmt_ev_device_connected *ev;
11631 /* allocate buff for LE or BR/EDR adv */
11632 if (conn->le_adv_data_len > 0)
11633 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11634 sizeof(*ev) + conn->le_adv_data_len);
11636 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11637 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
11638 eir_precalc_len(sizeof(conn->dev_class)));
11640 ev = skb_put(skb, sizeof(*ev));
11641 bacpy(&ev->addr.bdaddr, &conn->dst);
11642 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11645 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
11647 ev->flags = __cpu_to_le32(flags);
11649 /* We must ensure that the EIR Data fields are ordered and
11650 * unique. Keep it simple for now and avoid the problem by not
11651 * adding any BR/EDR data to the LE adv.
11653 if (conn->le_adv_data_len > 0) {
11654 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
11655 eir_len = conn->le_adv_data_len;
11658 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
11660 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
11661 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
11662 conn->dev_class, sizeof(conn->dev_class));
11665 ev->eir_len = cpu_to_le16(eir_len);
11667 mgmt_event_skb(skb, NULL);
11670 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11672 struct sock **sk = data;
11674 cmd->cmd_complete(cmd, 0);
11679 mgmt_pending_remove(cmd);
11682 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11684 struct hci_dev *hdev = data;
11685 struct mgmt_cp_unpair_device *cp = cmd->param;
11687 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
11689 cmd->cmd_complete(cmd, 0);
11690 mgmt_pending_remove(cmd);
11693 bool mgmt_powering_down(struct hci_dev *hdev)
11695 struct mgmt_pending_cmd *cmd;
11696 struct mgmt_mode *cp;
11698 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11709 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
11710 u8 link_type, u8 addr_type, u8 reason,
11711 bool mgmt_connected)
11713 struct mgmt_ev_device_disconnected ev;
11714 struct sock *sk = NULL;
11716 /* The connection is still in hci_conn_hash so test for 1
11717 * instead of 0 to know if this is the last one.
11719 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11720 cancel_delayed_work(&hdev->power_off);
11721 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11724 if (!mgmt_connected)
11727 if (link_type != ACL_LINK && link_type != LE_LINK)
11730 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
11732 bacpy(&ev.addr.bdaddr, bdaddr);
11733 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11734 ev.reason = reason;
11736 /* Report disconnects due to suspend */
11737 if (hdev->suspended)
11738 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
11740 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
11745 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11749 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
11750 u8 link_type, u8 addr_type, u8 status)
11752 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
11753 struct mgmt_cp_disconnect *cp;
11754 struct mgmt_pending_cmd *cmd;
11756 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
11759 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
11765 if (bacmp(bdaddr, &cp->addr.bdaddr))
11768 if (cp->addr.type != bdaddr_type)
11771 cmd->cmd_complete(cmd, mgmt_status(status));
11772 mgmt_pending_remove(cmd);
11775 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
11776 u8 addr_type, u8 status)
11778 struct mgmt_ev_connect_failed ev;
11780 /* The connection is still in hci_conn_hash so test for 1
11781 * instead of 0 to know if this is the last one.
11783 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
11784 cancel_delayed_work(&hdev->power_off);
11785 queue_work(hdev->req_workqueue, &hdev->power_off.work);
11788 bacpy(&ev.addr.bdaddr, bdaddr);
11789 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11790 ev.status = mgmt_status(status);
11792 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
11795 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
11797 struct mgmt_ev_pin_code_request ev;
11799 bacpy(&ev.addr.bdaddr, bdaddr);
11800 ev.addr.type = BDADDR_BREDR;
11801 ev.secure = secure;
11803 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
11806 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11809 struct mgmt_pending_cmd *cmd;
11811 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
11815 cmd->cmd_complete(cmd, mgmt_status(status));
11816 mgmt_pending_remove(cmd);
11819 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11822 struct mgmt_pending_cmd *cmd;
11824 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
11828 cmd->cmd_complete(cmd, mgmt_status(status));
11829 mgmt_pending_remove(cmd);
11832 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11833 u8 link_type, u8 addr_type, u32 value,
11836 struct mgmt_ev_user_confirm_request ev;
11838 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11840 bacpy(&ev.addr.bdaddr, bdaddr);
11841 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11842 ev.confirm_hint = confirm_hint;
11843 ev.value = cpu_to_le32(value);
11845 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
11849 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
11850 u8 link_type, u8 addr_type)
11852 struct mgmt_ev_user_passkey_request ev;
11854 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11856 bacpy(&ev.addr.bdaddr, bdaddr);
11857 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11859 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
11863 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11864 u8 link_type, u8 addr_type, u8 status,
11867 struct mgmt_pending_cmd *cmd;
11869 cmd = pending_find(opcode, hdev);
11873 cmd->cmd_complete(cmd, mgmt_status(status));
11874 mgmt_pending_remove(cmd);
11879 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11880 u8 link_type, u8 addr_type, u8 status)
11882 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11883 status, MGMT_OP_USER_CONFIRM_REPLY);
11886 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11887 u8 link_type, u8 addr_type, u8 status)
11889 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11891 MGMT_OP_USER_CONFIRM_NEG_REPLY);
11894 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11895 u8 link_type, u8 addr_type, u8 status)
11897 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11898 status, MGMT_OP_USER_PASSKEY_REPLY);
11901 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
11902 u8 link_type, u8 addr_type, u8 status)
11904 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
11906 MGMT_OP_USER_PASSKEY_NEG_REPLY);
11909 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
11910 u8 link_type, u8 addr_type, u32 passkey,
11913 struct mgmt_ev_passkey_notify ev;
11915 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
11917 bacpy(&ev.addr.bdaddr, bdaddr);
11918 ev.addr.type = link_to_bdaddr(link_type, addr_type);
11919 ev.passkey = __cpu_to_le32(passkey);
11920 ev.entered = entered;
11922 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
11925 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
11927 struct mgmt_ev_auth_failed ev;
11928 struct mgmt_pending_cmd *cmd;
11929 u8 status = mgmt_status(hci_status);
11931 bacpy(&ev.addr.bdaddr, &conn->dst);
11932 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11933 ev.status = status;
11935 cmd = find_pairing(conn);
11937 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
11938 cmd ? cmd->sk : NULL);
11941 cmd->cmd_complete(cmd, status);
11942 mgmt_pending_remove(cmd);
11946 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
11948 struct cmd_lookup match = { NULL, hdev };
11952 u8 mgmt_err = mgmt_status(status);
11953 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
11954 cmd_status_rsp, &mgmt_err);
11958 if (test_bit(HCI_AUTH, &hdev->flags))
11959 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
11961 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
11963 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
11967 new_settings(hdev, match.sk);
11970 sock_put(match.sk);
11973 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
11975 struct cmd_lookup *match = data;
11977 if (match->sk == NULL) {
11978 match->sk = cmd->sk;
11979 sock_hold(match->sk);
11983 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
11986 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
11988 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
11989 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
11990 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
11993 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
11994 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11995 ext_info_changed(hdev, NULL);
11999 sock_put(match.sk);
12002 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
12004 struct mgmt_cp_set_local_name ev;
12005 struct mgmt_pending_cmd *cmd;
12010 memset(&ev, 0, sizeof(ev));
12011 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
12012 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
12014 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
12016 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
12018 /* If this is a HCI command related to powering on the
12019 * HCI dev don't send any mgmt signals.
12021 if (pending_find(MGMT_OP_SET_POWERED, hdev))
12025 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
12026 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
12027 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
12030 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
12034 for (i = 0; i < uuid_count; i++) {
12035 if (!memcmp(uuid, uuids[i], 16))
12042 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
12046 while (parsed < eir_len) {
12047 u8 field_len = eir[0];
12051 if (field_len == 0)
12054 if (eir_len - parsed < field_len + 1)
12058 case EIR_UUID16_ALL:
12059 case EIR_UUID16_SOME:
12060 for (i = 0; i + 3 <= field_len; i += 2) {
12061 memcpy(uuid, bluetooth_base_uuid, 16);
12062 uuid[13] = eir[i + 3];
12063 uuid[12] = eir[i + 2];
12064 if (has_uuid(uuid, uuid_count, uuids))
12068 case EIR_UUID32_ALL:
12069 case EIR_UUID32_SOME:
12070 for (i = 0; i + 5 <= field_len; i += 4) {
12071 memcpy(uuid, bluetooth_base_uuid, 16);
12072 uuid[15] = eir[i + 5];
12073 uuid[14] = eir[i + 4];
12074 uuid[13] = eir[i + 3];
12075 uuid[12] = eir[i + 2];
12076 if (has_uuid(uuid, uuid_count, uuids))
12080 case EIR_UUID128_ALL:
12081 case EIR_UUID128_SOME:
12082 for (i = 0; i + 17 <= field_len; i += 16) {
12083 memcpy(uuid, eir + i + 2, 16);
12084 if (has_uuid(uuid, uuid_count, uuids))
12090 parsed += field_len + 1;
12091 eir += field_len + 1;
12097 static void restart_le_scan(struct hci_dev *hdev)
12099 /* If controller is not scanning we are done. */
12100 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
12103 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
12104 hdev->discovery.scan_start +
12105 hdev->discovery.scan_duration))
12108 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
12109 DISCOV_LE_RESTART_DELAY);
12112 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
12113 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
12115 /* If a RSSI threshold has been specified, and
12116 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
12117 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
12118 * is set, let it through for further processing, as we might need to
12119 * restart the scan.
12121 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
12122 * the results are also dropped.
12124 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12125 (rssi == HCI_RSSI_INVALID ||
12126 (rssi < hdev->discovery.rssi &&
12127 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
12130 if (hdev->discovery.uuid_count != 0) {
12131 /* If a list of UUIDs is provided in filter, results with no
12132 * matching UUID should be dropped.
12134 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
12135 hdev->discovery.uuids) &&
12136 !eir_has_uuids(scan_rsp, scan_rsp_len,
12137 hdev->discovery.uuid_count,
12138 hdev->discovery.uuids))
12142 /* If duplicate filtering does not report RSSI changes, then restart
12143 * scanning to ensure updated result with updated RSSI values.
12145 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
12146 restart_le_scan(hdev);
12148 /* Validate RSSI value against the RSSI threshold once more. */
12149 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12150 rssi < hdev->discovery.rssi)
12157 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
12158 bdaddr_t *bdaddr, u8 addr_type)
12160 struct mgmt_ev_adv_monitor_device_lost ev;
12162 ev.monitor_handle = cpu_to_le16(handle);
12163 bacpy(&ev.addr.bdaddr, bdaddr);
12164 ev.addr.type = addr_type;
12166 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
12170 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
12171 struct sk_buff *skb,
12172 struct sock *skip_sk,
12175 struct sk_buff *advmon_skb;
12176 size_t advmon_skb_len;
12177 __le16 *monitor_handle;
12182 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
12183 sizeof(struct mgmt_ev_device_found)) + skb->len;
12184 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
12189 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
12190 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
12191 * store monitor_handle of the matched monitor.
12193 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
12194 *monitor_handle = cpu_to_le16(handle);
12195 skb_put_data(advmon_skb, skb->data, skb->len);
12197 mgmt_event_skb(advmon_skb, skip_sk);
12200 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
12201 bdaddr_t *bdaddr, bool report_device,
12202 struct sk_buff *skb,
12203 struct sock *skip_sk)
12205 struct monitored_device *dev, *tmp;
12206 bool matched = false;
12207 bool notified = false;
12209 /* We have received the Advertisement Report because:
12210 * 1. the kernel has initiated active discovery
12211 * 2. if not, we have pend_le_reports > 0 in which case we are doing
12213 * 3. if none of the above is true, we have one or more active
12214 * Advertisement Monitor
12216 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
12217 * and report ONLY one advertisement per device for the matched Monitor
12218 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12220 * For case 3, since we are not active scanning and all advertisements
12221 * received are due to a matched Advertisement Monitor, report all
12222 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12224 if (report_device && !hdev->advmon_pend_notify) {
12225 mgmt_event_skb(skb, skip_sk);
12229 hdev->advmon_pend_notify = false;
12231 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
12232 if (!bacmp(&dev->bdaddr, bdaddr)) {
12235 if (!dev->notified) {
12236 mgmt_send_adv_monitor_device_found(hdev, skb,
12240 dev->notified = true;
12244 if (!dev->notified)
12245 hdev->advmon_pend_notify = true;
12248 if (!report_device &&
12249 ((matched && !notified) || !msft_monitor_supported(hdev))) {
12250 /* Handle 0 indicates that we are not active scanning and this
12251 * is a subsequent advertisement report for an already matched
12252 * Advertisement Monitor or the controller offloading support
12253 * is not available.
12255 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
12259 mgmt_event_skb(skb, skip_sk);
12264 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
12265 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
12266 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12269 struct sk_buff *skb;
12270 struct mgmt_ev_mesh_device_found *ev;
12273 if (!hdev->mesh_ad_types[0])
12276 /* Scan for requested AD types */
12278 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
12279 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12280 if (!hdev->mesh_ad_types[j])
12283 if (hdev->mesh_ad_types[j] == eir[i + 1])
12289 if (scan_rsp_len > 0) {
12290 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
12291 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12292 if (!hdev->mesh_ad_types[j])
12295 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
12304 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
12305 sizeof(*ev) + eir_len + scan_rsp_len);
12309 ev = skb_put(skb, sizeof(*ev));
12311 bacpy(&ev->addr.bdaddr, bdaddr);
12312 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
12314 ev->flags = cpu_to_le32(flags);
12315 ev->instant = cpu_to_le64(instant);
12318 /* Copy EIR or advertising data into event */
12319 skb_put_data(skb, eir, eir_len);
12321 if (scan_rsp_len > 0)
12322 /* Append scan response data to event */
12323 skb_put_data(skb, scan_rsp, scan_rsp_len);
12325 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12327 mgmt_event_skb(skb, NULL);
12330 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12331 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
12332 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12335 struct sk_buff *skb;
12336 struct mgmt_ev_device_found *ev;
12337 bool report_device = hci_discovery_active(hdev);
12339 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
12340 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
12341 eir, eir_len, scan_rsp, scan_rsp_len,
12344 /* Don't send events for a non-kernel initiated discovery. With
12345 * LE one exception is if we have pend_le_reports > 0 in which
12346 * case we're doing passive scanning and want these events.
12348 if (!hci_discovery_active(hdev)) {
12349 if (link_type == ACL_LINK)
12351 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
12352 report_device = true;
12353 else if (!hci_is_adv_monitoring(hdev))
12357 if (hdev->discovery.result_filtering) {
12358 /* We are using service discovery */
12359 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
12364 if (hdev->discovery.limited) {
12365 /* Check for limited discoverable bit */
12367 if (!(dev_class[1] & 0x20))
12370 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
12371 if (!flags || !(flags[0] & LE_AD_LIMITED))
12376 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
12377 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12378 sizeof(*ev) + eir_len + scan_rsp_len + 5);
12382 ev = skb_put(skb, sizeof(*ev));
12384 /* In case of device discovery with BR/EDR devices (pre 1.2), the
12385 * RSSI value was reported as 0 when not available. This behavior
12386 * is kept when using device discovery. This is required for full
12387 * backwards compatibility with the API.
12389 * However when using service discovery, the value 127 will be
12390 * returned when the RSSI is not available.
12392 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
12393 link_type == ACL_LINK)
12396 bacpy(&ev->addr.bdaddr, bdaddr);
12397 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12399 ev->flags = cpu_to_le32(flags);
12402 /* Copy EIR or advertising data into event */
12403 skb_put_data(skb, eir, eir_len);
12405 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
12408 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
12410 skb_put_data(skb, eir_cod, sizeof(eir_cod));
12413 if (scan_rsp_len > 0)
12414 /* Append scan response data to event */
12415 skb_put_data(skb, scan_rsp, scan_rsp_len);
12417 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12419 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
12422 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12423 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
12425 struct sk_buff *skb;
12426 struct mgmt_ev_device_found *ev;
12430 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12431 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
12433 ev = skb_put(skb, sizeof(*ev));
12434 bacpy(&ev->addr.bdaddr, bdaddr);
12435 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12439 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12441 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
12443 ev->eir_len = cpu_to_le16(eir_len);
12444 ev->flags = cpu_to_le32(flags);
12446 mgmt_event_skb(skb, NULL);
12449 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
12451 struct mgmt_ev_discovering ev;
12453 bt_dev_dbg(hdev, "discovering %u", discovering);
12455 memset(&ev, 0, sizeof(ev));
12456 ev.type = hdev->discovery.type;
12457 ev.discovering = discovering;
12459 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
12462 void mgmt_suspending(struct hci_dev *hdev, u8 state)
12464 struct mgmt_ev_controller_suspend ev;
12466 ev.suspend_state = state;
12467 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
12470 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
12473 struct mgmt_ev_controller_resume ev;
12475 ev.wake_reason = reason;
12477 bacpy(&ev.addr.bdaddr, bdaddr);
12478 ev.addr.type = addr_type;
12480 memset(&ev.addr, 0, sizeof(ev.addr));
12483 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
12486 static struct hci_mgmt_chan chan = {
12487 .channel = HCI_CHANNEL_CONTROL,
12488 .handler_count = ARRAY_SIZE(mgmt_handlers),
12489 .handlers = mgmt_handlers,
12491 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
12492 .tizen_handlers = tizen_mgmt_handlers,
12494 .hdev_init = mgmt_init_hdev,
12497 int mgmt_init(void)
12499 return hci_mgmt_chan_register(&chan);
12502 void mgmt_exit(void)
12504 hci_mgmt_chan_unregister(&chan);
12507 void mgmt_cleanup(struct sock *sk)
12509 struct mgmt_mesh_tx *mesh_tx;
12510 struct hci_dev *hdev;
12512 read_lock(&hci_dev_list_lock);
12514 list_for_each_entry(hdev, &hci_dev_list, list) {
12516 mesh_tx = mgmt_mesh_next(hdev, sk);
12519 mesh_send_complete(hdev, mesh_tx, true);
12523 read_unlock(&hci_dev_list_lock);