2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
48 #define MGMT_VERSION 1
49 #define MGMT_REVISION 22
/* Opcodes that a trusted (privileged) management socket may invoke.
 * Reported back to userspace via MGMT_OP_READ_COMMANDS.
 * NOTE(review): several entries and the closing "};" appear to be
 * missing from this view of the file — confirm against the original.
 */
51 static const u16 mgmt_commands[] = {
52 MGMT_OP_READ_INDEX_LIST,
55 MGMT_OP_SET_DISCOVERABLE,
56 MGMT_OP_SET_CONNECTABLE,
57 MGMT_OP_SET_FAST_CONNECTABLE,
59 MGMT_OP_SET_LINK_SECURITY,
63 MGMT_OP_SET_DEV_CLASS,
64 MGMT_OP_SET_LOCAL_NAME,
67 MGMT_OP_LOAD_LINK_KEYS,
68 MGMT_OP_LOAD_LONG_TERM_KEYS,
70 MGMT_OP_GET_CONNECTIONS,
71 MGMT_OP_PIN_CODE_REPLY,
72 MGMT_OP_PIN_CODE_NEG_REPLY,
73 MGMT_OP_SET_IO_CAPABILITY,
75 MGMT_OP_CANCEL_PAIR_DEVICE,
76 MGMT_OP_UNPAIR_DEVICE,
77 MGMT_OP_USER_CONFIRM_REPLY,
78 MGMT_OP_USER_CONFIRM_NEG_REPLY,
79 MGMT_OP_USER_PASSKEY_REPLY,
80 MGMT_OP_USER_PASSKEY_NEG_REPLY,
81 MGMT_OP_READ_LOCAL_OOB_DATA,
82 MGMT_OP_ADD_REMOTE_OOB_DATA,
83 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
84 MGMT_OP_START_DISCOVERY,
85 MGMT_OP_STOP_DISCOVERY,
88 MGMT_OP_UNBLOCK_DEVICE,
89 MGMT_OP_SET_DEVICE_ID,
90 MGMT_OP_SET_ADVERTISING,
92 MGMT_OP_SET_STATIC_ADDRESS,
93 MGMT_OP_SET_SCAN_PARAMS,
94 MGMT_OP_SET_SECURE_CONN,
95 MGMT_OP_SET_DEBUG_KEYS,
98 MGMT_OP_GET_CONN_INFO,
99 MGMT_OP_GET_CLOCK_INFO,
101 MGMT_OP_REMOVE_DEVICE,
102 MGMT_OP_LOAD_CONN_PARAM,
103 MGMT_OP_READ_UNCONF_INDEX_LIST,
104 MGMT_OP_READ_CONFIG_INFO,
105 MGMT_OP_SET_EXTERNAL_CONFIG,
106 MGMT_OP_SET_PUBLIC_ADDRESS,
107 MGMT_OP_START_SERVICE_DISCOVERY,
108 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
109 MGMT_OP_READ_EXT_INDEX_LIST,
110 MGMT_OP_READ_ADV_FEATURES,
111 MGMT_OP_ADD_ADVERTISING,
112 MGMT_OP_REMOVE_ADVERTISING,
113 MGMT_OP_GET_ADV_SIZE_INFO,
114 MGMT_OP_START_LIMITED_DISCOVERY,
115 MGMT_OP_READ_EXT_INFO,
116 MGMT_OP_SET_APPEARANCE,
117 MGMT_OP_GET_PHY_CONFIGURATION,
118 MGMT_OP_SET_PHY_CONFIGURATION,
119 MGMT_OP_SET_BLOCKED_KEYS,
120 MGMT_OP_SET_WIDEBAND_SPEECH,
121 MGMT_OP_READ_CONTROLLER_CAP,
122 MGMT_OP_READ_EXP_FEATURES_INFO,
123 MGMT_OP_SET_EXP_FEATURE,
124 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
125 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
126 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
127 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
128 MGMT_OP_GET_DEVICE_FLAGS,
129 MGMT_OP_SET_DEVICE_FLAGS,
130 MGMT_OP_READ_ADV_MONITOR_FEATURES,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
132 MGMT_OP_REMOVE_ADV_MONITOR,
133 MGMT_OP_ADD_EXT_ADV_PARAMS,
134 MGMT_OP_ADD_EXT_ADV_DATA,
135 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 MGMT_OP_SET_MESH_RECEIVER,
137 MGMT_OP_MESH_READ_FEATURES,
139 MGMT_OP_MESH_SEND_CANCEL,
/* Events that may be delivered to a trusted management socket;
 * reported via MGMT_OP_READ_COMMANDS alongside mgmt_commands[].
 * NOTE(review): some entries and the closing "};" appear to be missing
 * from this view of the file.
 */
142 static const u16 mgmt_events[] = {
143 MGMT_EV_CONTROLLER_ERROR,
145 MGMT_EV_INDEX_REMOVED,
146 MGMT_EV_NEW_SETTINGS,
147 MGMT_EV_CLASS_OF_DEV_CHANGED,
148 MGMT_EV_LOCAL_NAME_CHANGED,
149 MGMT_EV_NEW_LINK_KEY,
150 MGMT_EV_NEW_LONG_TERM_KEY,
151 MGMT_EV_DEVICE_CONNECTED,
152 MGMT_EV_DEVICE_DISCONNECTED,
153 MGMT_EV_CONNECT_FAILED,
154 MGMT_EV_PIN_CODE_REQUEST,
155 MGMT_EV_USER_CONFIRM_REQUEST,
156 MGMT_EV_USER_PASSKEY_REQUEST,
158 MGMT_EV_DEVICE_FOUND,
160 MGMT_EV_DEVICE_BLOCKED,
161 MGMT_EV_DEVICE_UNBLOCKED,
162 MGMT_EV_DEVICE_UNPAIRED,
163 MGMT_EV_PASSKEY_NOTIFY,
166 MGMT_EV_DEVICE_ADDED,
167 MGMT_EV_DEVICE_REMOVED,
168 MGMT_EV_NEW_CONN_PARAM,
169 MGMT_EV_UNCONF_INDEX_ADDED,
170 MGMT_EV_UNCONF_INDEX_REMOVED,
171 MGMT_EV_NEW_CONFIG_OPTIONS,
172 MGMT_EV_EXT_INDEX_ADDED,
173 MGMT_EV_EXT_INDEX_REMOVED,
174 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
175 MGMT_EV_ADVERTISING_ADDED,
176 MGMT_EV_ADVERTISING_REMOVED,
177 MGMT_EV_EXT_INFO_CHANGED,
178 MGMT_EV_PHY_CONFIGURATION_CHANGED,
179 MGMT_EV_EXP_FEATURE_CHANGED,
180 MGMT_EV_DEVICE_FLAGS_CHANGED,
181 MGMT_EV_ADV_MONITOR_ADDED,
182 MGMT_EV_ADV_MONITOR_REMOVED,
183 MGMT_EV_CONTROLLER_SUSPEND,
184 MGMT_EV_CONTROLLER_RESUME,
185 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
186 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
/* Subset of opcodes permitted for untrusted (non-privileged) sockets —
 * read-only informational commands.
 * NOTE(review): the closing "};" appears to be missing from this view.
 */
189 static const u16 mgmt_untrusted_commands[] = {
190 MGMT_OP_READ_INDEX_LIST,
192 MGMT_OP_READ_UNCONF_INDEX_LIST,
193 MGMT_OP_READ_CONFIG_INFO,
194 MGMT_OP_READ_EXT_INDEX_LIST,
195 MGMT_OP_READ_EXT_INFO,
196 MGMT_OP_READ_CONTROLLER_CAP,
197 MGMT_OP_READ_EXP_FEATURES_INFO,
198 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
199 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
/* Subset of events delivered to untrusted sockets.
 * NOTE(review): the closing "};" appears to be missing from this view.
 */
202 static const u16 mgmt_untrusted_events[] = {
204 MGMT_EV_INDEX_REMOVED,
205 MGMT_EV_NEW_SETTINGS,
206 MGMT_EV_CLASS_OF_DEV_CHANGED,
207 MGMT_EV_LOCAL_NAME_CHANGED,
208 MGMT_EV_UNCONF_INDEX_ADDED,
209 MGMT_EV_UNCONF_INDEX_REMOVED,
210 MGMT_EV_NEW_CONFIG_OPTIONS,
211 MGMT_EV_EXT_INDEX_ADDED,
212 MGMT_EV_EXT_INDEX_REMOVED,
213 MGMT_EV_EXT_INFO_CHANGED,
214 MGMT_EV_EXP_FEATURE_CHANGED,
217 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
219 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
220 "\x00\x00\x00\x00\x00\x00\x00\x00"
222 /* HCI to MGMT error code conversion table */
/* Indexed by the raw HCI status byte; each entry is the corresponding
 * MGMT_STATUS_* value returned to userspace (see mgmt_status() below).
 * NOTE(review): some entries and the closing "};" appear to be missing
 * from this view of the file — keep order in sync with the HCI spec.
 */
223 static const u8 mgmt_status_table[] = {
225 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
226 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
227 MGMT_STATUS_FAILED, /* Hardware Failure */
228 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
229 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
230 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
231 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
232 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
233 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
234 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
235 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
236 MGMT_STATUS_BUSY, /* Command Disallowed */
237 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
238 MGMT_STATUS_REJECTED, /* Rejected Security */
239 MGMT_STATUS_REJECTED, /* Rejected Personal */
240 MGMT_STATUS_TIMEOUT, /* Host Timeout */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
242 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
243 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
244 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
245 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
246 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
247 MGMT_STATUS_BUSY, /* Repeated Attempts */
248 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
249 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
250 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
251 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
252 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
253 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
254 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
255 MGMT_STATUS_FAILED, /* Unspecified Error */
256 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
257 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
258 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
259 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
260 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
261 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
262 MGMT_STATUS_FAILED, /* Unit Link Key Used */
263 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
264 MGMT_STATUS_TIMEOUT, /* Instant Passed */
265 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
266 MGMT_STATUS_FAILED, /* Transaction Collision */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
269 MGMT_STATUS_REJECTED, /* QoS Rejected */
270 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
271 MGMT_STATUS_REJECTED, /* Insufficient Security */
272 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
273 MGMT_STATUS_FAILED, /* Reserved for future use */
274 MGMT_STATUS_BUSY, /* Role Switch Pending */
275 MGMT_STATUS_FAILED, /* Reserved for future use */
276 MGMT_STATUS_FAILED, /* Slot Violation */
277 MGMT_STATUS_FAILED, /* Role Switch Failed */
278 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
279 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
280 MGMT_STATUS_BUSY, /* Host Busy Pairing */
281 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
282 MGMT_STATUS_BUSY, /* Controller Busy */
283 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
284 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
285 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
286 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
287 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* Map a negative errno value to the closest MGMT_STATUS_* code.
 * NOTE(review): the switch/case skeleton (case labels and braces)
 * appears to be missing from this view — only the return statements
 * survived; confirm the errno -> status pairing against the original.
 */
290 static u8 mgmt_errno_status(int err)
294 return MGMT_STATUS_SUCCESS;
296 return MGMT_STATUS_REJECTED;
298 return MGMT_STATUS_INVALID_PARAMS;
300 return MGMT_STATUS_NOT_SUPPORTED;
302 return MGMT_STATUS_BUSY;
304 return MGMT_STATUS_AUTH_FAILED;
306 return MGMT_STATUS_NO_RESOURCES;
308 return MGMT_STATUS_ALREADY_CONNECTED;
310 return MGMT_STATUS_DISCONNECTED;
/* Fallback for errno values with no specific mapping. */
313 return MGMT_STATUS_FAILED;
/* Convert either a negative errno or a raw HCI status byte into a
 * MGMT_STATUS_* code: negatives go through mgmt_errno_status(), in-range
 * positives index mgmt_status_table[], anything else is a failure.
 * NOTE(review): the "err < 0" guard line appears to be missing from
 * this view of the file.
 */
316 static u8 mgmt_status(int err)
319 return mgmt_errno_status(err);
321 if (err < ARRAY_SIZE(mgmt_status_table))
322 return mgmt_status_table[err];
324 return MGMT_STATUS_FAILED;
/* Broadcast an index-related event on the control channel.
 * NOTE(review): the trailing argument line (socket flag / skip_sk)
 * appears to be missing from this view.
 */
327 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
330 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Broadcast an event only to control-channel sockets carrying the given
 * flag, optionally skipping skip_sk.  NOTE(review): trailing argument
 * line of the mgmt_send_event() call is missing from this view.
 */
334 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
335 u16 len, int flag, struct sock *skip_sk)
337 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
/* Broadcast an event to all trusted control-channel sockets,
 * optionally skipping skip_sk (typically the command originator).
 */
341 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
342 struct sock *skip_sk)
344 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
345 HCI_SOCK_TRUSTED, skip_sk);
/* Broadcast a pre-built skb to trusted control-channel sockets.
 * NOTE(review): the trailing skip_sk argument line of the call appears
 * to be missing from this view.
 */
348 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
350 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
/* Translate a mgmt-layer LE address type into the HCI-core
 * ADDR_LE_DEV_* constant; anything not public is treated as random.
 */
354 static u8 le_addr_type(u8 mgmt_addr_type)
356 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
357 return ADDR_LE_DEV_PUBLIC;
359 return ADDR_LE_DEV_RANDOM;
/* Fill a mgmt_rp_read_version reply with the compiled-in interface
 * version/revision.  Takes void* so callers can pass any buffer laid
 * out as struct mgmt_rp_read_version.
 */
362 void mgmt_fill_version_info(void *ver)
364 struct mgmt_rp_read_version *rp = ver;
366 rp->version = MGMT_VERSION;
367 rp->revision = cpu_to_le16(MGMT_REVISION);
/* MGMT_OP_READ_VERSION handler: reply with the management interface
 * version and revision.  hdev is unused (index-less command).
 */
370 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
373 struct mgmt_rp_read_version rp;
375 bt_dev_dbg(hdev, "sock %p", sk);
377 mgmt_fill_version_info(&rp);
379 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
/* MGMT_OP_READ_COMMANDS handler: reply with the list of supported
 * opcodes and events.  Trusted sockets get the full tables, untrusted
 * sockets the restricted ones.  Reply buffer is heap-allocated and
 * (presumably) freed after mgmt_cmd_complete() — the kfree line is not
 * visible in this view.
 */
383 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
386 struct mgmt_rp_read_commands *rp;
387 u16 num_commands, num_events;
391 bt_dev_dbg(hdev, "sock %p", sk);
393 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
394 num_commands = ARRAY_SIZE(mgmt_commands);
395 num_events = ARRAY_SIZE(mgmt_events);
397 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
398 num_events = ARRAY_SIZE(mgmt_untrusted_events);
/* Reply: fixed header plus one __le16 per opcode and per event. */
401 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
403 rp = kmalloc(rp_size, GFP_KERNEL);
407 rp->num_commands = cpu_to_le16(num_commands);
408 rp->num_events = cpu_to_le16(num_events);
410 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
411 __le16 *opcode = rp->opcodes;
/* Commands first, then events, packed little-endian and unaligned. */
413 for (i = 0; i < num_commands; i++, opcode++)
414 put_unaligned_le16(mgmt_commands[i], opcode);
416 for (i = 0; i < num_events; i++, opcode++)
417 put_unaligned_le16(mgmt_events[i], opcode);
419 __le16 *opcode = rp->opcodes;
421 for (i = 0; i < num_commands; i++, opcode++)
422 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
424 for (i = 0; i < num_events; i++, opcode++)
425 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
428 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
/* MGMT_OP_READ_INDEX_LIST handler: reply with the ids of all configured
 * primary controllers.  Walks hci_dev_list twice under the read lock:
 * once to size the reply, once to fill it, skipping devices still in
 * setup/config, in user-channel mode, or raw-only.
 */
435 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
438 struct mgmt_rp_read_index_list *rp;
444 bt_dev_dbg(hdev, "sock %p", sk);
446 read_lock(&hci_dev_list_lock);
/* First pass: count matching controllers to size the reply. */
449 list_for_each_entry(d, &hci_dev_list, list) {
450 if (d->dev_type == HCI_PRIMARY &&
451 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
455 rp_len = sizeof(*rp) + (2 * count);
/* GFP_ATOMIC: allocation happens while holding the read lock. */
456 rp = kmalloc(rp_len, GFP_ATOMIC);
458 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in indexes, re-checking eligibility. */
463 list_for_each_entry(d, &hci_dev_list, list) {
464 if (hci_dev_test_flag(d, HCI_SETUP) ||
465 hci_dev_test_flag(d, HCI_CONFIG) ||
466 hci_dev_test_flag(d, HCI_USER_CHANNEL))
469 /* Devices marked as raw-only are neither configured
470 * nor unconfigured controllers.
472 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
475 if (d->dev_type == HCI_PRIMARY &&
476 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
477 rp->index[count++] = cpu_to_le16(d->id);
478 bt_dev_dbg(hdev, "Added hci%u", d->id);
/* count may have shrunk since the first pass; recompute length. */
482 rp->num_controllers = cpu_to_le16(count);
483 rp_len = sizeof(*rp) + (2 * count);
485 read_unlock(&hci_dev_list_lock);
487 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: same two-pass walk as
 * read_index_list(), but collecting primary controllers that ARE
 * flagged HCI_UNCONFIGURED instead.
 */
495 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
496 void *data, u16 data_len)
498 struct mgmt_rp_read_unconf_index_list *rp;
504 bt_dev_dbg(hdev, "sock %p", sk);
506 read_lock(&hci_dev_list_lock);
/* First pass: count unconfigured primary controllers. */
509 list_for_each_entry(d, &hci_dev_list, list) {
510 if (d->dev_type == HCI_PRIMARY &&
511 hci_dev_test_flag(d, HCI_UNCONFIGURED))
515 rp_len = sizeof(*rp) + (2 * count);
516 rp = kmalloc(rp_len, GFP_ATOMIC);
518 read_unlock(&hci_dev_list_lock);
/* Second pass: fill in eligible indexes. */
523 list_for_each_entry(d, &hci_dev_list, list) {
524 if (hci_dev_test_flag(d, HCI_SETUP) ||
525 hci_dev_test_flag(d, HCI_CONFIG) ||
526 hci_dev_test_flag(d, HCI_USER_CHANNEL))
529 /* Devices marked as raw-only are neither configured
530 * nor unconfigured controllers.
532 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
535 if (d->dev_type == HCI_PRIMARY &&
536 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
537 rp->index[count++] = cpu_to_le16(d->id);
538 bt_dev_dbg(hdev, "Added hci%u", d->id);
542 rp->num_controllers = cpu_to_le16(count);
543 rp_len = sizeof(*rp) + (2 * count);
545 read_unlock(&hci_dev_list_lock);
547 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
548 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
/* MGMT_OP_READ_EXT_INDEX_LIST handler: like read_index_list() but the
 * reply entries carry a type (0x00 configured primary, 0x01
 * unconfigured primary, 0x02 AMP) and the bus.  Calling it once also
 * switches this socket over to extended index events.
 */
555 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
556 void *data, u16 data_len)
558 struct mgmt_rp_read_ext_index_list *rp;
563 bt_dev_dbg(hdev, "sock %p", sk);
565 read_lock(&hci_dev_list_lock);
/* First pass: count primary and AMP controllers. */
568 list_for_each_entry(d, &hci_dev_list, list) {
569 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
/* struct_size() guards the flexible-array allocation against overflow. */
573 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
575 read_unlock(&hci_dev_list_lock);
/* Second pass: fill entries, skipping setup/config/user-channel/raw. */
580 list_for_each_entry(d, &hci_dev_list, list) {
581 if (hci_dev_test_flag(d, HCI_SETUP) ||
582 hci_dev_test_flag(d, HCI_CONFIG) ||
583 hci_dev_test_flag(d, HCI_USER_CHANNEL))
586 /* Devices marked as raw-only are neither configured
587 * nor unconfigured controllers.
589 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
592 if (d->dev_type == HCI_PRIMARY) {
593 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
594 rp->entry[count].type = 0x01;
596 rp->entry[count].type = 0x00;
597 } else if (d->dev_type == HCI_AMP) {
598 rp->entry[count].type = 0x02;
603 rp->entry[count].bus = d->bus;
604 rp->entry[count++].index = cpu_to_le16(d->id);
605 bt_dev_dbg(hdev, "Added hci%u", d->id);
608 rp->num_controllers = cpu_to_le16(count);
610 read_unlock(&hci_dev_list_lock);
612 /* If this command is called at least once, then all the
613 * default index and unconfigured index events are disabled
614 * and from now on only extended index events are used.
616 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
617 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
618 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
620 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
621 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
622 struct_size(rp, entry, count));
/* Return false while any required configuration step is still pending:
 * external config not yet done, or a public address still required but
 * not set.  NOTE(review): the "return false"/"return true" lines are
 * not visible in this view.
 */
629 static bool is_configured(struct hci_dev *hdev)
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
/* Build the little-endian bitmask of configuration options that are
 * still outstanding for this controller (mirrors is_configured()).
 */
643 static __le32 get_missing_options(struct hci_dev *hdev)
647 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
648 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
649 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Public address is missing when a quirk demands one and it's unset. */
651 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
652 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
653 !bacmp(&hdev->public_addr, BDADDR_ANY))
654 options |= MGMT_OPTION_PUBLIC_ADDRESS;
656 return cpu_to_le32(options);
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the current missing-options
 * mask to sockets that subscribed to option events, skipping 'skip'.
 */
659 static int new_options(struct hci_dev *hdev, struct sock *skip)
661 __le32 options = get_missing_options(hdev);
663 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
664 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
/* Complete a config-related command by replying with the current
 * missing-options mask.
 */
667 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
669 __le32 options = get_missing_options(hdev);
671 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
/* MGMT_OP_READ_CONFIG_INFO handler: reply with manufacturer id plus the
 * supported and currently-missing configuration option masks.
 * NOTE(review): the matching hci_dev_lock() call is not visible in this
 * view, but the hci_dev_unlock() below implies it.
 */
675 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
676 void *data, u16 data_len)
678 struct mgmt_rp_read_config_info rp;
681 bt_dev_dbg(hdev, "sock %p", sk);
685 memset(&rp, 0, sizeof(rp));
686 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
688 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
689 options |= MGMT_OPTION_EXTERNAL_CONFIG;
/* Public address is only configurable if the driver provides a hook. */
691 if (hdev->set_bdaddr)
692 options |= MGMT_OPTION_PUBLIC_ADDRESS;
694 rp.supported_options = cpu_to_le32(options);
695 rp.missing_options = get_missing_options(hdev);
697 hci_dev_unlock(hdev);
699 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
/* Build the MGMT_PHY_* bitmask of PHYs this controller can support,
 * derived from its BR/EDR LMP feature bits and LE feature bits.
 */
703 static u32 get_supported_phys(struct hci_dev *hdev)
705 u32 supported_phys = 0;
707 if (lmp_bredr_capable(hdev)) {
/* Basic-rate 1-slot is mandatory once BR/EDR is present. */
708 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
710 if (hdev->features[0][0] & LMP_3SLOT)
711 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
713 if (hdev->features[0][0] & LMP_5SLOT)
714 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
716 if (lmp_edr_2m_capable(hdev)) {
717 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
719 if (lmp_edr_3slot_capable(hdev))
720 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
722 if (lmp_edr_5slot_capable(hdev))
723 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
725 if (lmp_edr_3m_capable(hdev)) {
726 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev))
729 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
731 if (lmp_edr_5slot_capable(hdev))
732 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
737 if (lmp_le_capable(hdev)) {
/* LE 1M TX/RX is mandatory for any LE controller. */
738 supported_phys |= MGMT_PHY_LE_1M_TX;
739 supported_phys |= MGMT_PHY_LE_1M_RX;
741 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
742 supported_phys |= MGMT_PHY_LE_2M_TX;
743 supported_phys |= MGMT_PHY_LE_2M_RX;
746 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
747 supported_phys |= MGMT_PHY_LE_CODED_TX;
748 supported_phys |= MGMT_PHY_LE_CODED_RX;
752 return supported_phys;
/* Build the MGMT_PHY_* bitmask of PHYs currently in use.  For BR/EDR,
 * EDR packet types are selected when their HCI_xDHx bit is CLEAR in
 * hdev->pkt_type (those bits mean "packet type NOT allowed"); for LE
 * the default TX/RX PHY preference masks are consulted directly.
 */
755 static u32 get_selected_phys(struct hci_dev *hdev)
757 u32 selected_phys = 0;
759 if (lmp_bredr_capable(hdev)) {
760 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
762 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
763 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
765 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
766 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
768 if (lmp_edr_2m_capable(hdev)) {
769 if (!(hdev->pkt_type & HCI_2DH1))
770 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
772 if (lmp_edr_3slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH3))
774 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
776 if (lmp_edr_5slot_capable(hdev) &&
777 !(hdev->pkt_type & HCI_2DH5))
778 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
780 if (lmp_edr_3m_capable(hdev)) {
781 if (!(hdev->pkt_type & HCI_3DH1))
782 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
784 if (lmp_edr_3slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH3))
786 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
788 if (lmp_edr_5slot_capable(hdev) &&
789 !(hdev->pkt_type & HCI_3DH5))
790 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
795 if (lmp_le_capable(hdev)) {
796 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
797 selected_phys |= MGMT_PHY_LE_1M_TX;
799 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
800 selected_phys |= MGMT_PHY_LE_1M_RX;
802 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
803 selected_phys |= MGMT_PHY_LE_2M_TX;
805 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
806 selected_phys |= MGMT_PHY_LE_2M_RX;
808 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
809 selected_phys |= MGMT_PHY_LE_CODED_TX;
811 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
812 selected_phys |= MGMT_PHY_LE_CODED_RX;
815 return selected_phys;
/* PHYs userspace may toggle: everything supported except the mandatory
 * BR 1M 1-slot and LE 1M TX/RX PHYs.
 */
818 static u32 get_configurable_phys(struct hci_dev *hdev)
820 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
821 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
/* Build the MGMT_SETTING_* bitmask of settings this controller could
 * support, based on its capabilities (not its current state — see
 * get_current_settings() for that).
 */
824 static u32 get_supported_settings(struct hci_dev *hdev)
/* Settings every controller supports unconditionally. */
828 settings |= MGMT_SETTING_POWERED;
829 settings |= MGMT_SETTING_BONDABLE;
830 settings |= MGMT_SETTING_DEBUG_KEYS;
831 settings |= MGMT_SETTING_CONNECTABLE;
832 settings |= MGMT_SETTING_DISCOVERABLE;
834 if (lmp_bredr_capable(hdev)) {
835 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
836 settings |= MGMT_SETTING_FAST_CONNECTABLE;
837 settings |= MGMT_SETTING_BREDR;
838 settings |= MGMT_SETTING_LINK_SECURITY;
840 if (lmp_ssp_capable(hdev)) {
841 settings |= MGMT_SETTING_SSP;
842 if (IS_ENABLED(CONFIG_BT_HS))
843 settings |= MGMT_SETTING_HS;
846 if (lmp_sc_capable(hdev))
847 settings |= MGMT_SETTING_SECURE_CONN;
849 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
851 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
854 if (lmp_le_capable(hdev)) {
855 settings |= MGMT_SETTING_LE;
856 settings |= MGMT_SETTING_SECURE_CONN;
857 settings |= MGMT_SETTING_PRIVACY;
858 settings |= MGMT_SETTING_STATIC_ADDRESS;
859 settings |= MGMT_SETTING_ADVERTISING;
/* Configuration is possible with external config or a set_bdaddr hook.
 * NOTE(review): the second condition line is not visible in this view.
 */
862 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
864 settings |= MGMT_SETTING_CONFIGURATION;
866 if (cis_central_capable(hdev))
867 settings |= MGMT_SETTING_CIS_CENTRAL;
869 if (cis_peripheral_capable(hdev))
870 settings |= MGMT_SETTING_CIS_PERIPHERAL;
872 settings |= MGMT_SETTING_PHY_CONFIGURATION;
/* Build the MGMT_SETTING_* bitmask reflecting the controller's CURRENT
 * state, mostly by sampling hdev flags one-for-one.
 */
877 static u32 get_current_settings(struct hci_dev *hdev)
881 if (hdev_is_powered(hdev))
882 settings |= MGMT_SETTING_POWERED;
884 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
885 settings |= MGMT_SETTING_CONNECTABLE;
887 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
888 settings |= MGMT_SETTING_FAST_CONNECTABLE;
890 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
891 settings |= MGMT_SETTING_DISCOVERABLE;
893 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
894 settings |= MGMT_SETTING_BONDABLE;
896 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
897 settings |= MGMT_SETTING_BREDR;
899 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
900 settings |= MGMT_SETTING_LE;
902 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
903 settings |= MGMT_SETTING_LINK_SECURITY;
905 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
906 settings |= MGMT_SETTING_SSP;
908 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
909 settings |= MGMT_SETTING_HS;
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
912 settings |= MGMT_SETTING_ADVERTISING;
914 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
915 settings |= MGMT_SETTING_SECURE_CONN;
917 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
918 settings |= MGMT_SETTING_DEBUG_KEYS;
920 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
921 settings |= MGMT_SETTING_PRIVACY;
923 /* The current setting for static address has two purposes. The
924 * first is to indicate if the static address will be used and
925 * the second is to indicate if it is actually set.
927 * This means if the static address is not configured, this flag
928 * will never be set. If the address is configured, then if the
929 * address is actually used decides if the flag is set or not.
931 * For single mode LE only controllers and dual-mode controllers
932 * with BR/EDR disabled, the existence of the static address will
935 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
936 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
937 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
938 if (bacmp(&hdev->static_addr, BDADDR_ANY))
939 settings |= MGMT_SETTING_STATIC_ADDRESS;
942 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
943 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
945 if (cis_central_capable(hdev))
946 settings |= MGMT_SETTING_CIS_CENTRAL;
948 if (cis_peripheral_capable(hdev))
949 settings |= MGMT_SETTING_CIS_PERIPHERAL;
951 if (bis_capable(hdev))
952 settings |= MGMT_SETTING_ISO_BROADCASTER;
954 if (sync_recv_capable(hdev))
955 settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
/* Convenience wrapper: look up a pending mgmt command for this device
 * on the control channel.
 */
960 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
962 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
/* Return the LE advertising-data discoverability flags (LE_AD_GENERAL /
 * LE_AD_LIMITED) for this device.  A pending SET_DISCOVERABLE command
 * takes precedence over the flags, since those haven't settled yet.
 * NOTE(review): the "if (cmd)" and "cp->val == 0x01" condition lines
 * are not visible in this view.
 */
965 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
967 struct mgmt_pending_cmd *cmd;
969 /* If there's a pending mgmt command the flags will not yet have
970 * their final values, so check for this first.
972 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
974 struct mgmt_mode *cp = cmd->param;
976 return LE_AD_GENERAL;
977 else if (cp->val == 0x02)
978 return LE_AD_LIMITED;
980 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
981 return LE_AD_LIMITED;
982 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
983 return LE_AD_GENERAL;
/* Return whether the device is (about to be) connectable.  A pending
 * SET_CONNECTABLE command overrides the current flag.
 * NOTE(review): the "if (cmd)" and return-from-cp->val lines are not
 * visible in this view.
 */
989 bool mgmt_get_connectable(struct hci_dev *hdev)
991 struct mgmt_pending_cmd *cmd;
993 /* If there's a pending mgmt command the flag will not yet have
994 * it's final value, so check for this first.
996 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
998 struct mgmt_mode *cp = cmd->param;
1003 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* hci_cmd_sync work: refresh EIR data and class of device after the
 * service cache expires.
 */
1006 static int service_cache_sync(struct hci_dev *hdev, void *data)
1008 hci_update_eir_sync(hdev);
1009 hci_update_class_sync(hdev);
/* Delayed-work handler for hdev->service_cache: when the cache flag was
 * set, clear it and queue the synchronous EIR/class refresh.
 */
1014 static void service_cache_off(struct work_struct *work)
1016 struct hci_dev *hdev = container_of(work, struct hci_dev,
1017 service_cache.work);
/* test-and-clear: only refresh once per caching period. */
1019 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1022 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
/* hci_cmd_sync work: restart advertising so a fresh RPA gets generated
 * and programmed (done inside the enable path, per the comment below).
 */
1025 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1027 /* The generation of a new RPA and programming it into the
1028 * controller happens in the hci_req_enable_advertising()
1031 if (ext_adv_capable(hdev))
1032 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1034 return hci_enable_advertising_sync(hdev);
/* Delayed-work handler for hdev->rpa_expired: mark the RPA stale and,
 * if advertising is active, queue the restart that rotates it.
 */
1037 static void rpa_expired(struct work_struct *work)
1039 struct hci_dev *hdev = container_of(work, struct hci_dev,
1042 bt_dev_dbg(hdev, "");
1044 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1046 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1049 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
/* Delayed-work handler for hdev->discov_off: discoverable timeout
 * fired, so clear both discoverable flags, push the change to the
 * controller, and notify mgmt listeners of the new settings.
 * NOTE(review): the matching hci_dev_lock() call is not visible in
 * this view, but the hci_dev_unlock() below implies it.
 */
1052 static void discov_off(struct work_struct *work)
1054 struct hci_dev *hdev = container_of(work, struct hci_dev,
1057 bt_dev_dbg(hdev, "");
1061 /* When discoverable timeout triggers, then just make sure
1062 * the limited discoverable flag is cleared. Even in the case
1063 * of a timeout triggered from general discoverable, it is
1064 * safe to unconditionally clear the flag.
1066 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1067 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1068 hdev->discov_timeout = 0;
1070 hci_update_discoverable(hdev);
1072 mgmt_new_settings(hdev);
1074 hci_dev_unlock(hdev);
1077 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
/* Finish a mesh transmit: unless silent, emit MGMT_EV_MESH_PACKET_CMPLT
 * with the tx handle, then release the tracking entry.
 * NOTE(review): the "if (!silent)" guard line is not visible in this
 * view but is implied by the 'silent' parameter.
 */
1079 static void mesh_send_complete(struct hci_dev *hdev,
1080 struct mgmt_mesh_tx *mesh_tx, bool silent)
1082 u8 handle = mesh_tx->handle;
1085 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1086 sizeof(handle), NULL);
1088 mgmt_mesh_remove(mesh_tx);
/* hci_cmd_sync work: stop mesh sending (clear flag, disable
 * advertising) and complete the next queued mesh tx entry.
 * NOTE(review): loop/branch structure around mesh_send_complete() is
 * not fully visible in this view.
 */
1091 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1093 struct mgmt_mesh_tx *mesh_tx;
1095 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1096 hci_disable_advertising_sync(hdev);
1097 mesh_tx = mgmt_mesh_next(hdev, NULL);
1100 mesh_send_complete(hdev, mesh_tx, false);
1105 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1106 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
/* Completion callback chained after mesh_send_done_sync(): pick the
 * next queued mesh tx and queue its send; on queueing failure complete
 * it immediately, otherwise mark sending in progress.
 * NOTE(review): the early-return and "if (err)" branch lines are not
 * visible in this view.
 */
1107 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1109 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1114 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1115 mesh_send_start_complete);
1118 mesh_send_complete(hdev, mesh_tx, false);
1120 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
/* Delayed-work handler for hdev->mesh_send_done: if a mesh send is in
 * flight, queue the synchronous teardown with mesh_next() chained as
 * its completion.
 */
1123 static void mesh_send_done(struct work_struct *work)
1125 struct hci_dev *hdev = container_of(work, struct hci_dev,
1126 mesh_send_done.work);
1128 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1131 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
/* One-time per-device mgmt initialisation, performed the first time a
 * mgmt socket touches the controller: set up the delayed work items and
 * flip the device into mgmt-controlled mode.  Idempotent via HCI_MGMT.
 */
1134 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1136 if (hci_dev_test_flag(hdev, HCI_MGMT))
1139 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1141 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1142 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1143 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1144 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1146 /* Non-mgmt controlled devices get this bit set
1147 * implicitly so that pairing works for them, however
1148 * for mgmt we require user-space to explicitly enable
1151 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1153 hci_dev_set_flag(hdev, HCI_MGMT);
/* MGMT_OP_READ_INFO handler: reply with address, version, manufacturer,
 * supported/current settings, class of device and names.
 * NOTE(review): the matching hci_dev_lock() call is not visible in
 * this view, but the hci_dev_unlock() below implies it.
 */
1156 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1157 void *data, u16 data_len)
1159 struct mgmt_rp_read_info rp;
1161 bt_dev_dbg(hdev, "sock %p", sk);
1165 memset(&rp, 0, sizeof(rp));
1167 bacpy(&rp.bdaddr, &hdev->bdaddr);
1169 rp.version = hdev->hci_ver;
1170 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1172 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1173 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1175 memcpy(rp.dev_class, hdev->dev_class, 3);
1177 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1178 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1180 hci_dev_unlock(hdev);
1182 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Append class-of-device (BR/EDR), appearance (LE), complete name and
 * short name EIR fields to 'eir'; returns the number of bytes written.
 * Caller must provide a buffer large enough for all fields.
 */
1186 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1191 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1192 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1193 hdev->dev_class, 3);
1195 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1196 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1199 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1200 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1201 hdev->dev_name, name_len);
1203 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1204 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1205 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info() but the
 * reply carries an EIR-encoded blob (class, appearance, names) instead
 * of fixed fields.  Calling it once also switches this socket from the
 * legacy class/name events to MGMT_EV_EXT_INFO_CHANGED.
 * NOTE(review): the declaration of 'buf' and the hci_dev_lock() call
 * are not visible in this view.
 */
1210 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1211 void *data, u16 data_len)
1214 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1217 bt_dev_dbg(hdev, "sock %p", sk);
1219 memset(&buf, 0, sizeof(buf));
1223 bacpy(&rp->bdaddr, &hdev->bdaddr);
1225 rp->version = hdev->hci_ver;
1226 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1228 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1229 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1232 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1233 rp->eir_len = cpu_to_le16(eir_len);
1235 hci_dev_unlock(hdev);
1237 /* If this command is called at least once, then the events
1238 * for class of device and local name changes are disabled
1239 * and only the new extended controller information event
1242 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1243 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1244 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1246 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1247 sizeof(*rp) + eir_len);
/* Emit MGMT_EV_EXT_INFO_CHANGED (EIR-encoded controller info) to all
 * sockets that opted into extended-info events, skipping @skip.
 * NOTE(review): the `buf` declaration is missing from this listing.
 */
1250 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1253 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1256 memset(buf, 0, sizeof(buf));
1258 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1259 ev->eir_len = cpu_to_le16(eir_len);
1261 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1262 sizeof(*ev) + eir_len,
1263 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Complete @opcode for @sk with the current settings bitmask as the
 * reply payload (common success path for all set_* mode commands). */
1266 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1268 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1270 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Notify mgmt listeners that advertising instance @instance was added;
 * @sk (the originator) is skipped by mgmt_event(). */
1274 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1276 struct mgmt_ev_advertising_added ev;
1278 ev.instance = instance;
1280 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Notify mgmt listeners that advertising instance @instance was removed;
 * @sk (the originator) is skipped by mgmt_event(). */
1283 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1286 struct mgmt_ev_advertising_removed ev;
1288 ev.instance = instance;
1290 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Cancel a pending advertising-instance expiry, if one is armed.
 * Clearing adv_instance_timeout first marks the timeout as inactive. */
1293 static void cancel_adv_timeout(struct hci_dev *hdev)
1295 if (hdev->adv_instance_timeout) {
1296 hdev->adv_instance_timeout = 0;
1297 cancel_delayed_work(&hdev->adv_instance_expire);
1301 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry onto the right pending
 * list based on its auto_connect policy (DIRECT/ALWAYS -> pend_le_conns,
 * REPORT -> pend_le_reports), typically after a power cycle.
 * NOTE(review): `break` statements between cases are missing from this
 * listing — the upstream switch does not fall through.
 */
1302 static void restart_le_actions(struct hci_dev *hdev)
1304 struct hci_conn_params *p;
1306 list_for_each_entry(p, &hdev->le_conn_params, list) {
1307 /* Needed for AUTO_OFF case where might not "really"
1308 * have been powered off.
1310 hci_pend_le_list_del_init(p);
1312 switch (p->auto_connect) {
1313 case HCI_AUTO_CONN_DIRECT:
1314 case HCI_AUTO_CONN_ALWAYS:
1315 hci_pend_le_list_add(p, &hdev->pend_le_conns);
1317 case HCI_AUTO_CONN_REPORT:
1318 hci_pend_le_list_add(p, &hdev->pend_le_reports);
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets registered for setting events, excluding @skip. */
1326 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1328 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1330 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1331 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Completion callback for MGMT_OP_SET_POWERED. On success: restart LE
 * auto-connect actions, refresh passive scanning, reply with current
 * settings and (for power-on) emit NEW_SETTINGS. On error: report a
 * command status. Bails out if the pending command was already consumed.
 * NOTE(review): error/success branching lines are elided in this listing.
 */
1334 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1336 struct mgmt_pending_cmd *cmd = data;
1337 struct mgmt_mode *cp;
1339 /* Make sure cmd still outstanding. */
1340 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1345 bt_dev_dbg(hdev, "err %d", err);
1350 restart_le_actions(hdev);
1351 hci_update_passive_scan(hdev);
1352 hci_dev_unlock(hdev);
1355 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1357 /* Only call new_setting for power on as power off is deferred
1358 * to hdev->power_off work which does call hci_dev_do_close.
1361 new_settings(hdev, cmd->sk);
1363 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1367 mgmt_pending_remove(cmd);
/* hci_sync work callback: apply the requested powered state (cp->val)
 * synchronously on the controller. */
1370 static int set_powered_sync(struct hci_dev *hdev, void *data)
1372 struct mgmt_pending_cmd *cmd = data;
1373 struct mgmt_mode *cp = cmd->param;
1375 BT_DBG("%s", hdev->name);
1377 return hci_set_powered_sync(hdev, cp->val);
/* MGMT_OP_SET_POWERED handler: validate val (0/1), reject if a power
 * command is already pending, short-circuit if the state already
 * matches, otherwise queue set_powered_sync(). Power-off cancels any
 * blocking sync operation first; power-on uses hci_cmd_sync_submit()
 * because the cmd_sync machinery may not be running yet.
 * NOTE(review): lock/goto/cleanup lines are elided in this listing.
 */
1380 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1383 struct mgmt_mode *cp = data;
1384 struct mgmt_pending_cmd *cmd;
1387 bt_dev_dbg(hdev, "sock %p", sk);
1389 if (cp->val != 0x00 && cp->val != 0x01)
1390 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1391 MGMT_STATUS_INVALID_PARAMS);
1395 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1396 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1401 if (!!cp->val == hdev_is_powered(hdev)) {
1402 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1406 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1412 /* Cancel potentially blocking sync operation before power off */
1413 if (cp->val == 0x00) {
1414 __hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
1415 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1416 mgmt_set_powered_complete);
1418 /* Use hci_cmd_sync_submit since hdev might not be running */
1419 err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd,
1420 mgmt_set_powered_complete);
1424 mgmt_pending_remove(cmd);
1427 hci_dev_unlock(hdev);
/* Public wrapper: broadcast NEW_SETTINGS to all listeners (no skip). */
1431 int mgmt_new_settings(struct hci_dev *hdev)
1433 return new_settings(hdev, NULL);
/* NOTE(review): fragment of `struct cmd_lookup` — its opening line and
 * the `struct sock *sk` / `u8 mgmt_status` members are missing from
 * this listing; verify against upstream. */
1438 struct hci_dev *hdev;
/* mgmt_pending_foreach() callback: answer each pending command with the
 * current settings, unlink it, keep a reference to the first responding
 * socket in the cmd_lookup match (so the caller can skip it when
 * broadcasting NEW_SETTINGS), and free the pending command. */
1442 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1444 struct cmd_lookup *match = data;
1446 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1448 list_del(&cmd->list);
1450 if (match->sk == NULL) {
1451 match->sk = cmd->sk;
1452 sock_hold(match->sk);
1455 mgmt_pending_free(cmd);
/* mgmt_pending_foreach() callback: fail each pending command with the
 * status pointed to by @data, then remove it. */
1458 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1462 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1463 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: use the command's own cmd_complete
 * handler when set, otherwise fall back to a plain status response. */
1466 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1468 if (cmd->cmd_complete) {
1471 cmd->cmd_complete(cmd, *status);
1472 mgmt_pending_remove(cmd);
1477 cmd_status_rsp(cmd, data);
/* Generic cmd_complete: echo the command's own parameters back as the
 * reply payload. */
1480 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1482 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1483 cmd->param, cmd->param_len);
/* cmd_complete for address-based commands: reply with only the leading
 * mgmt_addr_info portion of the original parameters. */
1486 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1488 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1489 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to a mgmt status: NOT_SUPPORTED if the
 * controller lacks BR/EDR, REJECTED if BR/EDR is disabled, else OK. */
1492 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1494 if (!lmp_bredr_capable(hdev))
1495 return MGMT_STATUS_NOT_SUPPORTED;
1496 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1497 return MGMT_STATUS_REJECTED;
1499 return MGMT_STATUS_SUCCESS;
/* Map LE availability to a mgmt status: NOT_SUPPORTED if the controller
 * lacks LE, REJECTED if LE is disabled, else OK. */
1502 static u8 mgmt_le_support(struct hci_dev *hdev)
1504 if (!lmp_le_capable(hdev))
1505 return MGMT_STATUS_NOT_SUPPORTED;
1506 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1507 return MGMT_STATUS_REJECTED;
1509 return MGMT_STATUS_SUCCESS;
/* Completion callback for MGMT_OP_SET_DISCOVERABLE. On error: report
 * status and drop HCI_LIMITED_DISCOVERABLE. On success: (re)arm the
 * discoverable timeout if one is configured, then reply with current
 * settings and broadcast NEW_SETTINGS.
 * NOTE(review): branch/goto structure is elided in this listing.
 */
1512 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1515 struct mgmt_pending_cmd *cmd = data;
1517 bt_dev_dbg(hdev, "err %d", err);
1519 /* Make sure cmd still outstanding. */
1520 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1526 u8 mgmt_err = mgmt_status(err);
1527 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1528 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1532 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1533 hdev->discov_timeout > 0) {
1534 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1535 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1538 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1539 new_settings(hdev, cmd->sk);
1542 mgmt_pending_remove(cmd);
1543 hci_dev_unlock(hdev);
/* hci_sync work callback: push the current discoverable state to the
 * controller. */
1546 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1548 BT_DBG("%s", hdev->name);
1550 return hci_update_discoverable_sync(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler.
 *
 * Validation: requires BR/EDR or LE enabled; val must be 0 (off),
 * 1 (general) or 2 (limited); off forbids a timeout, limited requires
 * one. Rejects when not powered with a timeout set, when a
 * discoverable/connectable command is already pending, when not
 * connectable, or while advertising is paused.
 *
 * When powered off, only the HCI_DISCOVERABLE flag is toggled and
 * NEW_SETTINGS emitted on change. When only the timeout differs from
 * the current mode, the timer is re-armed without any HCI traffic.
 * Otherwise the flags are updated and set_discoverable_sync() is
 * queued; the timeout itself is armed in the completion handler.
 * NOTE(review): lock/goto/else lines are elided in this listing.
 */
1553 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1556 struct mgmt_cp_set_discoverable *cp = data;
1557 struct mgmt_pending_cmd *cmd;
1561 bt_dev_dbg(hdev, "sock %p", sk);
1563 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1564 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_REJECTED);
1568 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1569 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1570 MGMT_STATUS_INVALID_PARAMS);
1572 timeout = __le16_to_cpu(cp->timeout);
1574 /* Disabling discoverable requires that no timeout is set,
1575 * and enabling limited discoverable requires a timeout.
1577 if ((cp->val == 0x00 && timeout > 0) ||
1578 (cp->val == 0x02 && timeout == 0))
1579 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1580 MGMT_STATUS_INVALID_PARAMS);
1584 if (!hdev_is_powered(hdev) && timeout > 0) {
1585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1586 MGMT_STATUS_NOT_POWERED);
1590 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1591 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1597 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1599 MGMT_STATUS_REJECTED);
1603 if (hdev->advertising_paused) {
1604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1609 if (!hdev_is_powered(hdev)) {
1610 bool changed = false;
1612 /* Setting limited discoverable when powered off is
1613 * not a valid operation since it requires a timeout
1614 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1616 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1617 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1621 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1626 err = new_settings(hdev, sk);
1631 /* If the current mode is the same, then just update the timeout
1632 * value with the new value. And if only the timeout gets updated,
1633 * then no need for any HCI transactions.
1635 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1636 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1637 HCI_LIMITED_DISCOVERABLE)) {
1638 cancel_delayed_work(&hdev->discov_off);
1639 hdev->discov_timeout = timeout;
1641 if (cp->val && hdev->discov_timeout > 0) {
1642 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1643 queue_delayed_work(hdev->req_workqueue,
1644 &hdev->discov_off, to);
1647 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1651 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1657 /* Cancel any potential discoverable timeout that might be
1658 * still active and store new timeout value. The arming of
1659 * the timeout happens in the complete handler.
1661 cancel_delayed_work(&hdev->discov_off);
1662 hdev->discov_timeout = timeout;
1665 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1667 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1669 /* Limited discoverable mode */
1670 if (cp->val == 0x02)
1671 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1673 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1675 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1676 mgmt_set_discoverable_complete);
1679 mgmt_pending_remove(cmd);
1682 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_SET_CONNECTABLE: on error report a
 * status, on success reply with current settings and broadcast
 * NEW_SETTINGS, then drop the pending command. */
1686 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1689 struct mgmt_pending_cmd *cmd = data;
1691 bt_dev_dbg(hdev, "err %d", err);
1693 /* Make sure cmd still outstanding. */
1694 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1700 u8 mgmt_err = mgmt_status(err);
1701 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1705 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1706 new_settings(hdev, cmd->sk);
1710 mgmt_pending_remove(cmd);
1712 hci_dev_unlock(hdev);
/* Powered-off path for SET_CONNECTABLE: toggle HCI_CONNECTABLE (also
 * clearing HCI_DISCOVERABLE when turning connectable off), reply with
 * current settings, and on change refresh scanning state and broadcast
 * NEW_SETTINGS. */
1715 static int set_connectable_update_settings(struct hci_dev *hdev,
1716 struct sock *sk, u8 val)
1718 bool changed = false;
1721 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1725 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1727 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1728 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1731 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1736 hci_update_scan(hdev);
1737 hci_update_passive_scan(hdev);
1738 return new_settings(hdev, sk);
/* hci_sync work callback: push the current connectable state to the
 * controller. */
1744 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1746 BT_DBG("%s", hdev->name);
1748 return hci_update_connectable_sync(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: validate (BR/EDR or LE enabled,
 * val 0/1), take the flag-only path when powered off, reject when a
 * discoverable/connectable command is pending, otherwise update the
 * connectable/discoverable flags and queue set_connectable_sync().
 * NOTE(review): lock/goto/else lines are elided in this listing.
 */
1751 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1754 struct mgmt_mode *cp = data;
1755 struct mgmt_pending_cmd *cmd;
1758 bt_dev_dbg(hdev, "sock %p", sk);
1760 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1761 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1762 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1763 MGMT_STATUS_REJECTED);
1765 if (cp->val != 0x00 && cp->val != 0x01)
1766 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1767 MGMT_STATUS_INVALID_PARAMS);
1771 if (!hdev_is_powered(hdev)) {
1772 err = set_connectable_update_settings(hdev, sk, cp->val);
1776 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1777 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1778 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1783 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1790 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1792 if (hdev->discov_timeout > 0)
1793 cancel_delayed_work(&hdev->discov_off);
1795 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1796 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1797 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1800 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1801 mgmt_set_connectable_complete);
1804 mgmt_pending_remove(cmd);
1807 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: validate val (0/1), toggle the
 * HCI_BONDABLE flag, reply with current settings, and on change refresh
 * discoverable state (bondable affects the advertising address in
 * limited privacy mode) and broadcast NEW_SETTINGS. */
1811 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1814 struct mgmt_mode *cp = data;
1818 bt_dev_dbg(hdev, "sock %p", sk);
1820 if (cp->val != 0x00 && cp->val != 0x01)
1821 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1822 MGMT_STATUS_INVALID_PARAMS);
1827 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1829 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1831 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1836 /* In limited privacy mode the change of bondable mode
1837 * may affect the local advertising address.
1839 hci_update_discoverable(hdev);
1841 err = new_settings(hdev, sk);
1845 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR support and
 * val 0/1. Powered off: toggle HCI_LINK_SECURITY only. Powered on:
 * reject duplicates, short-circuit when HCI_AUTH already matches, else
 * send HCI_OP_WRITE_AUTH_ENABLE (legacy, non-hci_sync path).
 * NOTE(review): lock/goto lines are elided in this listing.
 */
1849 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1852 struct mgmt_mode *cp = data;
1853 struct mgmt_pending_cmd *cmd;
1857 bt_dev_dbg(hdev, "sock %p", sk);
1859 status = mgmt_bredr_support(hdev);
1861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1864 if (cp->val != 0x00 && cp->val != 0x01)
1865 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1866 MGMT_STATUS_INVALID_PARAMS);
1870 if (!hdev_is_powered(hdev)) {
1871 bool changed = false;
1873 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1874 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1878 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1883 err = new_settings(hdev, sk);
1888 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1889 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1896 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1897 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1901 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1907 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1909 mgmt_pending_remove(cmd);
1914 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_SET_SSP. On error: roll back the
 * SSP flag (and dependent HS flag) and fail all pending SET_SSP
 * commands. On success: update HCI_SSP_ENABLED to match the request,
 * answer pending commands, broadcast NEW_SETTINGS on change, and
 * refresh the EIR data.
 * NOTE(review): branch structure is elided in this listing.
 */
1918 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1920 struct cmd_lookup match = { NULL, hdev };
1921 struct mgmt_pending_cmd *cmd = data;
1922 struct mgmt_mode *cp = cmd->param;
1923 u8 enable = cp->val;
1926 /* Make sure cmd still outstanding. */
1927 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1931 u8 mgmt_err = mgmt_status(err);
1933 if (enable && hci_dev_test_and_clear_flag(hdev,
1935 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1936 new_settings(hdev, NULL);
1939 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1945 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1947 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1950 changed = hci_dev_test_and_clear_flag(hdev,
1953 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1956 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1959 new_settings(hdev, match.sk);
1964 hci_update_eir_sync(hdev);
/* hci_sync work callback for SET_SSP: optimistically set
 * HCI_SSP_ENABLED when enabling, write the SSP mode to the controller,
 * and undo the flag if the write failed. */
1967 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1969 struct mgmt_pending_cmd *cmd = data;
1970 struct mgmt_mode *cp = cmd->param;
1971 bool changed = false;
1975 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1977 err = hci_write_ssp_mode_sync(hdev, cp->val);
1979 if (!err && changed)
1980 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
/* MGMT_OP_SET_SSP handler: requires BR/EDR and SSP-capable hardware,
 * val 0/1. Powered off: flag-only toggle (disabling SSP also clears
 * HS). Powered on: reject duplicates, short-circuit when unchanged,
 * else queue set_ssp_sync().
 * NOTE(review): lock/goto lines are elided in this listing.
 */
1985 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1987 struct mgmt_mode *cp = data;
1988 struct mgmt_pending_cmd *cmd;
1992 bt_dev_dbg(hdev, "sock %p", sk);
1994 status = mgmt_bredr_support(hdev);
1996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1998 if (!lmp_ssp_capable(hdev))
1999 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2000 MGMT_STATUS_NOT_SUPPORTED);
2002 if (cp->val != 0x00 && cp->val != 0x01)
2003 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2004 MGMT_STATUS_INVALID_PARAMS);
2008 if (!hdev_is_powered(hdev)) {
2012 changed = !hci_dev_test_and_set_flag(hdev,
2015 changed = hci_dev_test_and_clear_flag(hdev,
2018 changed = hci_dev_test_and_clear_flag(hdev,
2021 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2024 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2029 err = new_settings(hdev, sk);
2034 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2035 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2040 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2041 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2045 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2049 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2053 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2054 MGMT_STATUS_FAILED);
2057 mgmt_pending_remove(cmd);
2061 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed / AMP) handler: gated on CONFIG_BT_HS,
 * BR/EDR support, SSP capability and SSP being enabled; val 0/1.
 * Rejects while SET_SSP is pending. Enabling is flag-only; disabling
 * is rejected while powered. Replies with settings and broadcasts
 * NEW_SETTINGS on change.
 * NOTE(review): lock/goto lines are elided in this listing.
 */
2065 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2067 struct mgmt_mode *cp = data;
2072 bt_dev_dbg(hdev, "sock %p", sk);
2074 if (!IS_ENABLED(CONFIG_BT_HS))
2075 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2076 MGMT_STATUS_NOT_SUPPORTED);
2078 status = mgmt_bredr_support(hdev);
2080 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2082 if (!lmp_ssp_capable(hdev))
2083 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2084 MGMT_STATUS_NOT_SUPPORTED);
2086 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2087 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2088 MGMT_STATUS_REJECTED);
2090 if (cp->val != 0x00 && cp->val != 0x01)
2091 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2092 MGMT_STATUS_INVALID_PARAMS);
2096 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2097 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2103 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2105 if (hdev_is_powered(hdev)) {
2106 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2107 MGMT_STATUS_REJECTED);
2111 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2114 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2119 err = new_settings(hdev, sk);
2122 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_SET_LE: on error fail all pending
 * SET_LE commands with the mapped status, otherwise answer them with
 * current settings and broadcast NEW_SETTINGS. */
2126 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2128 struct cmd_lookup match = { NULL, hdev };
2129 u8 status = mgmt_status(err);
2131 bt_dev_dbg(hdev, "err %d", err);
2134 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2139 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2141 new_settings(hdev, match.sk);
/* hci_sync work callback for SET_LE. Disabling: clear advertising
 * instances and stop advertising. Enabling: set HCI_LE_ENABLED. Then
 * write LE host support, refresh default advertising/scan-response
 * data (extended or legacy path) when LE ended up enabled, and update
 * passive scanning.
 * NOTE(review): the val extraction and disable-branch lines are elided
 * in this listing.
 */
2147 static int set_le_sync(struct hci_dev *hdev, void *data)
2149 struct mgmt_pending_cmd *cmd = data;
2150 struct mgmt_mode *cp = cmd->param;
2155 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2157 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2158 hci_disable_advertising_sync(hdev);
2160 if (ext_adv_capable(hdev))
2161 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2163 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2166 err = hci_write_le_host_supported_sync(hdev, val, 0);
2168 /* Make sure the controller has a good default for
2169 * advertising data. Restrict the update to when LE
2170 * has actually been enabled. During power on, the
2171 * update in powered_update_hci will take care of it.
2173 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2174 if (ext_adv_capable(hdev)) {
2177 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2179 hci_update_scan_rsp_data_sync(hdev, 0x00);
2181 hci_update_adv_data_sync(hdev, 0x00);
2182 hci_update_scan_rsp_data_sync(hdev, 0x00);
2185 hci_update_passive_scan(hdev);
/* Completion callback for MGMT_OP_SET_MESH_RECEIVER: on failure fail
 * all pending commands of this opcode, otherwise remove this pending
 * command and complete it successfully with an empty payload. */
2191 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2193 struct mgmt_pending_cmd *cmd = data;
2194 u8 status = mgmt_status(err);
2195 struct sock *sk = cmd->sk;
2198 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2199 cmd_status_rsp, &status);
2203 mgmt_pending_remove(cmd);
2204 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
/* hci_sync work callback for SET_MESH_RECEIVER: toggle HCI_MESH per
 * cp->enable, store the requested AD-type filter list (or none if it
 * does not fit, which means all adv packets are forwarded), and refresh
 * passive scanning. */
2207 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2209 struct mgmt_pending_cmd *cmd = data;
2210 struct mgmt_cp_set_mesh *cp = cmd->param;
2211 size_t len = cmd->param_len;
2213 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2216 hci_dev_set_flag(hdev, HCI_MESH);
2218 hci_dev_clear_flag(hdev, HCI_MESH);
2222 /* If filters don't fit, forward all adv pkts */
2223 if (len <= sizeof(hdev->mesh_ad_types))
2224 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2226 hci_update_passive_scan_sync(hdev);
/* MGMT_OP_SET_MESH_RECEIVER handler: requires LE capability and the
 * mesh-experimental flag; enable must be 0/1. Queues set_mesh_sync()
 * with set_mesh_complete() as the completion handler.
 * NOTE(review): lock/error-check lines are elided in this listing.
 */
2230 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2232 struct mgmt_cp_set_mesh *cp = data;
2233 struct mgmt_pending_cmd *cmd;
2236 bt_dev_dbg(hdev, "sock %p", sk);
2238 if (!lmp_le_capable(hdev) ||
2239 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2240 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2241 MGMT_STATUS_NOT_SUPPORTED);
2243 if (cp->enable != 0x00 && cp->enable != 0x01)
2244 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2245 MGMT_STATUS_INVALID_PARAMS);
2249 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2253 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2257 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2258 MGMT_STATUS_FAILED);
2261 mgmt_pending_remove(cmd);
2264 hci_dev_unlock(hdev);
/* Completion callback for a queued mesh transmit: on error clear the
 * sending flag and report completion failure for the handle; on success
 * schedule mesh_send_done after cnt * 25 ms (one slot per packet). */
2268 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2270 struct mgmt_mesh_tx *mesh_tx = data;
2271 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2272 unsigned long mesh_send_interval;
2273 u8 mgmt_err = mgmt_status(err);
2275 /* Report any errors here, but don't report completion */
2278 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2279 /* Send Complete Error Code for handle */
2280 mesh_send_complete(hdev, mesh_tx, false);
2284 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2285 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2286 mesh_send_interval);
/* hci_sync work callback for MESH_SEND: create a one-past-the-limit
 * advertising instance carrying the mesh payload (duration derived from
 * packet count and max adv interval) and schedule it, preempting the
 * currently-advertised instance if it is the one being replaced.
 * NOTE(review): several argument lines of hci_add_adv_instance() and
 * intermediate branch lines are elided in this listing.
 */
2289 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2291 struct mgmt_mesh_tx *mesh_tx = data;
2292 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2293 struct adv_info *adv, *next_instance;
2294 u8 instance = hdev->le_num_of_adv_sets + 1;
2295 u16 timeout, duration;
/* All hardware adv sets in use -> cannot allocate the mesh instance. */
2298 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2299 return MGMT_STATUS_BUSY;
2302 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2303 adv = hci_add_adv_instance(hdev, instance, 0,
2304 send->adv_data_len, send->adv_data,
2307 HCI_ADV_TX_POWER_NO_PREFERENCE,
2308 hdev->le_adv_min_interval,
2309 hdev->le_adv_max_interval,
2313 mesh_tx->instance = instance;
2317 if (hdev->cur_adv_instance == instance) {
2318 /* If the currently advertised instance is being changed then
2319 * cancel the current advertising and schedule the next
2320 * instance. If there is only one instance then the overridden
2321 * advertising data will be visible right away.
2323 cancel_adv_timeout(hdev);
2325 next_instance = hci_get_next_instance(hdev, instance);
2327 instance = next_instance->instance;
2330 } else if (hdev->adv_instance_timeout) {
2331 /* Immediately advertise the new instance if no other, or
2332 * let it go naturally from queue if ADV is already happening
2338 return hci_schedule_adv_instance_sync(hdev, instance, true);
/* mgmt_mesh_foreach() callback: collect each outstanding mesh-tx handle
 * into the read-features reply, bounded by max_handles. */
2343 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2345 struct mgmt_rp_mesh_read_features *rp = data;
2347 if (rp->used_handles >= rp->max_handles)
2350 rp->handles[rp->used_handles++] = mesh_tx->handle;
/* MGMT_OP_MESH_READ_FEATURES handler: requires LE + mesh-experimental.
 * Reports max handle capacity (only when LE is enabled) and the list of
 * this socket's outstanding mesh-tx handles; the reply is truncated to
 * the handles actually used. */
2353 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2354 void *data, u16 len)
2356 struct mgmt_rp_mesh_read_features rp;
2358 if (!lmp_le_capable(hdev) ||
2359 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2360 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2361 MGMT_STATUS_NOT_SUPPORTED);
2363 memset(&rp, 0, sizeof(rp));
2364 rp.index = cpu_to_le16(hdev->id);
2365 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2366 rp.max_handles = MESH_HANDLES_MAX;
2371 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2373 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2374 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2376 hci_dev_unlock(hdev);
/* hci_sync work callback for MESH_SEND_CANCEL: handle 0 cancels every
 * outstanding transmit belonging to this socket; a non-zero handle
 * cancels only that transmit, and only if owned by the caller. Always
 * completes the command and frees it. */
2380 static int send_cancel(struct hci_dev *hdev, void *data)
2382 struct mgmt_pending_cmd *cmd = data;
2383 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2384 struct mgmt_mesh_tx *mesh_tx;
2386 if (!cancel->handle) {
2388 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2391 mesh_send_complete(hdev, mesh_tx, false);
2394 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2396 if (mesh_tx && mesh_tx->sk == cmd->sk)
2397 mesh_send_complete(hdev, mesh_tx, false);
2400 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2402 mgmt_pending_free(cmd);
/* MGMT_OP_MESH_SEND_CANCEL handler: requires LE + mesh-experimental and
 * LE enabled. Queues send_cancel() (no completion callback; the sync
 * callback completes and frees the command itself, hence
 * mgmt_pending_new/_free rather than _add/_remove). */
2407 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2408 void *data, u16 len)
2410 struct mgmt_pending_cmd *cmd;
2413 if (!lmp_le_capable(hdev) ||
2414 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2415 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2416 MGMT_STATUS_NOT_SUPPORTED);
2418 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2419 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2420 MGMT_STATUS_REJECTED);
2423 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2427 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2430 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2431 MGMT_STATUS_FAILED);
2434 mgmt_pending_free(cmd);
2437 hci_dev_unlock(hdev);
/* MGMT_OP_MESH_SEND handler: requires LE + mesh-experimental, LE
 * enabled, and a payload between MGMT_MESH_SEND_SIZE and +31 bytes
 * (max legacy adv data). Rejects when all MESH_HANDLES_MAX handles are
 * in use. Allocates a mesh-tx entry, queues mesh_send_sync() (unless a
 * send is already in flight), sets HCI_MESH_SENDING, and replies with
 * the 1-byte transmit handle.
 * NOTE(review): lock/goto/cleanup lines are elided in this listing.
 */
2441 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2443 struct mgmt_mesh_tx *mesh_tx;
2444 struct mgmt_cp_mesh_send *send = data;
2445 struct mgmt_rp_mesh_read_features rp;
2449 if (!lmp_le_capable(hdev) ||
2450 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2452 MGMT_STATUS_NOT_SUPPORTED);
2453 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2454 len <= MGMT_MESH_SEND_SIZE ||
2455 len > (MGMT_MESH_SEND_SIZE + 31))
2456 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2457 MGMT_STATUS_REJECTED);
2461 memset(&rp, 0, sizeof(rp));
2462 rp.max_handles = MESH_HANDLES_MAX;
2464 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2466 if (rp.max_handles <= rp.used_handles) {
2467 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2472 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2473 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2478 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2479 mesh_send_start_complete);
2482 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2483 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2484 MGMT_STATUS_FAILED);
2488 mgmt_mesh_remove(mesh_tx);
2491 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2493 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2494 &mesh_tx->handle, 1);
2498 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: requires LE capability, val 0/1. LE-only
 * controllers cannot switch LE off (enable is answered gracefully,
 * disable rejected). Powered off or no-op: flag-only toggle, also
 * clearing HCI_ADVERTISING when disabling. Otherwise: reject while a
 * SET_LE or SET_ADVERTISING is pending, else queue set_le_sync().
 * NOTE(review): lock/goto/val-derivation lines are elided in this
 * listing.
 */
2502 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2504 struct mgmt_mode *cp = data;
2505 struct mgmt_pending_cmd *cmd;
2509 bt_dev_dbg(hdev, "sock %p", sk);
2511 if (!lmp_le_capable(hdev))
2512 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2513 MGMT_STATUS_NOT_SUPPORTED);
2515 if (cp->val != 0x00 && cp->val != 0x01)
2516 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2517 MGMT_STATUS_INVALID_PARAMS);
2519 /* Bluetooth single mode LE only controllers or dual-mode
2520 * controllers configured as LE only devices, do not allow
2521 * switching LE off. These have either LE enabled explicitly
2522 * or BR/EDR has been previously switched off.
2524 * When trying to enable an already enabled LE, then gracefully
2525 * send a positive response. Trying to disable it however will
2526 * result into rejection.
2528 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2529 if (cp->val == 0x01)
2530 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2532 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2533 MGMT_STATUS_REJECTED);
2539 enabled = lmp_host_le_capable(hdev);
2541 if (!hdev_is_powered(hdev) || val == enabled) {
2542 bool changed = false;
2544 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2545 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2549 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2550 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2554 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2559 err = new_settings(hdev, sk);
2564 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2565 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2566 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2571 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2575 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2579 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2580 MGMT_STATUS_FAILED);
2583 mgmt_pending_remove(cmd);
2587 hci_dev_unlock(hdev);
2591 /* This is a helper function to test for pending mgmt commands that can
2592 * cause CoD or EIR HCI commands. We can only allow one such pending
2593 * mgmt command at a time since otherwise we cannot easily track what
2594 * the current values are, will be, and based on that calculate if a new
2595 * HCI command needs to be sent and if yes with what value.
/* NOTE(review): the `return true` / `return false` lines of the switch
 * are elided in this listing.
 */
2597 static bool pending_eir_or_class(struct hci_dev *hdev)
2599 struct mgmt_pending_cmd *cmd;
2601 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2602 switch (cmd->opcode) {
2603 case MGMT_OP_ADD_UUID:
2604 case MGMT_OP_REMOVE_UUID:
2605 case MGMT_OP_SET_DEV_CLASS:
2606 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16/32-bit UUIDs share the last 12 bytes. */
2614 static const u8 bluetooth_base_uuid[] = {
2615 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2616 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if it is not based on the Bluetooth Base
 * UUID it is a true 128-bit UUID; otherwise the 32-bit value at offset
 * 12 decides between a 16- and a 32-bit UUID.
 * NOTE(review): the return statements are elided in this listing. */
2619 static u8 get_uuid_size(const u8 *uuid)
2623 if (memcmp(uuid, bluetooth_base_uuid, 12))
2626 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for UUID/class commands: reply with the mapped
 * status and the current 3-byte class of device, then free the
 * pending command. */
2633 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2635 struct mgmt_pending_cmd *cmd = data;
2637 bt_dev_dbg(hdev, "err %d", err);
2639 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2640 mgmt_status(err), hdev->dev_class, 3);
2642 mgmt_pending_free(cmd);
/* hci_sync work callback for ADD_UUID: refresh class of device, then
 * (if that succeeded) the EIR data. */
2645 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2649 err = hci_update_class_sync(hdev);
2653 return hci_update_eir_sync(hdev);
/* MGMT_OP_ADD_UUID handler: reject while another EIR/class-affecting
 * command is pending, append the UUID (with service hint and computed
 * size) to hdev->uuids, and queue add_uuid_sync() completed by
 * mgmt_class_complete().
 * NOTE(review): lock/NULL-check/goto lines are elided in this listing.
 */
2656 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2658 struct mgmt_cp_add_uuid *cp = data;
2659 struct mgmt_pending_cmd *cmd;
2660 struct bt_uuid *uuid;
2663 bt_dev_dbg(hdev, "sock %p", sk);
2667 if (pending_eir_or_class(hdev)) {
2668 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2673 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2679 memcpy(uuid->uuid, cp->uuid, 16);
2680 uuid->svc_hint = cp->svc_hint;
2681 uuid->size = get_uuid_size(cp->uuid);
2683 list_add_tail(&uuid->list, &hdev->uuids);
2685 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2691 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2693 mgmt_pending_free(cmd);
2698 hci_dev_unlock(hdev);
/* Arm the service-cache mechanism when powered: setting
 * HCI_SERVICE_CACHE defers class/EIR updates via the delayed
 * service_cache work. Returns whether caching took effect (the return
 * statements are elided in this listing). */
2702 static bool enable_service_cache(struct hci_dev *hdev)
2704 if (!hdev_is_powered(hdev))
2707 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2708 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_sync work callback for REMOVE_UUID: refresh class of device,
 * then (if that succeeded) the EIR data. */
2716 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2720 err = hci_update_class_sync(hdev);
2724 return hci_update_eir_sync(hdev);
/* Handle MGMT_OP_REMOVE_UUID: remove a single UUID from the list, or —
 * when the all-zero wildcard UUID is given — clear the whole list.
 * Unknown UUIDs yield MGMT_STATUS_INVALID_PARAMS; a pending EIR/class
 * command yields MGMT_STATUS_BUSY.
 */
2727 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2730 struct mgmt_cp_remove_uuid *cp = data;
2731 struct mgmt_pending_cmd *cmd;
2732 struct bt_uuid *match, *tmp;
/* All-zero UUID acts as a wildcard meaning "remove everything". */
2733 static const u8 bt_uuid_any[] = {
2734 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2738 bt_dev_dbg(hdev, "sock %p", sk);
2742 if (pending_eir_or_class(hdev)) {
2743 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2748 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2749 hci_uuids_clear(hdev);
/* If the service cache got armed, reply immediately; the delayed
 * work will push the class/EIR update later. */
2751 if (enable_service_cache(hdev)) {
2752 err = mgmt_cmd_complete(sk, hdev->id,
2753 MGMT_OP_REMOVE_UUID,
2754 0, hdev->dev_class, 3);
/* Otherwise remove only the entries matching the given UUID. */
2763 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2764 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2767 list_del(&match->list);
/* Presumably reached when no entry matched — reject the request. */
2773 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2774 MGMT_STATUS_INVALID_PARAMS);
2779 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2785 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2786 mgmt_class_complete);
2788 mgmt_pending_free(cmd);
2791 hci_dev_unlock(hdev);
/* hci_cmd_sync work for MGMT_OP_SET_DEV_CLASS: if the service cache
 * was armed, disarm it and flush EIR first, then write the new Class
 * of Device.
 */
2795 static int set_class_sync(struct hci_dev *hdev, void *data)
2799 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
/* Cancel the delayed flush; we are updating synchronously now. */
2800 cancel_delayed_work_sync(&hdev->service_cache);
2801 err = hci_update_eir_sync(hdev);
2807 return hci_update_class_sync(hdev);
/* Handle MGMT_OP_SET_DEV_CLASS: validate and store the major/minor
 * device class and queue the synchronous class update. BR/EDR-only;
 * when unpowered the values are just cached and the command completes
 * immediately.
 */
2810 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2813 struct mgmt_cp_set_dev_class *cp = data;
2814 struct mgmt_pending_cmd *cmd;
2817 bt_dev_dbg(hdev, "sock %p", sk);
2819 if (!lmp_bredr_capable(hdev))
2820 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2821 MGMT_STATUS_NOT_SUPPORTED);
2825 if (pending_eir_or_class(hdev)) {
2826 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low 2 bits of minor and high 3 bits of major are reserved. */
2831 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2832 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2833 MGMT_STATUS_INVALID_PARAMS);
2837 hdev->major_class = cp->major;
2838 hdev->minor_class = cp->minor;
/* Powered off: nothing to send to the controller yet. */
2840 if (!hdev_is_powered(hdev)) {
2841 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2842 hdev->dev_class, 3);
2846 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2852 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2853 mgmt_class_complete);
2855 mgmt_pending_free(cmd);
2858 hci_dev_unlock(hdev);
/* Handle MGMT_OP_LOAD_LINK_KEYS: replace the controller's stored
 * BR/EDR link keys with the list supplied by userspace and update the
 * debug-keys policy. Validates key_count against both the u16 range
 * and the actual packet length before touching any state.
 */
2862 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2865 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so struct_size() below cannot overflow a u16. */
2866 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2867 sizeof(struct mgmt_link_key_info));
2868 u16 key_count, expected_len;
2872 bt_dev_dbg(hdev, "sock %p", sk);
2874 if (!lmp_bredr_capable(hdev))
2875 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2876 MGMT_STATUS_NOT_SUPPORTED);
2878 key_count = __le16_to_cpu(cp->key_count);
2879 if (key_count > max_key_count) {
2880 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2882 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2883 MGMT_STATUS_INVALID_PARAMS);
/* The packet length must match the declared key count exactly. */
2886 expected_len = struct_size(cp, keys, key_count);
2887 if (expected_len != len) {
2888 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2890 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2891 MGMT_STATUS_INVALID_PARAMS);
2894 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2895 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2896 MGMT_STATUS_INVALID_PARAMS);
2898 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* Validate every key type before clearing the existing keys. */
2901 for (i = 0; i < key_count; i++) {
2902 struct mgmt_link_key_info *key = &cp->keys[i];
2904 /* Considering SMP over BREDR/LE, there is no need to check addr_type */
2905 if (key->type > 0x08)
2906 return mgmt_cmd_status(sk, hdev->id,
2907 MGMT_OP_LOAD_LINK_KEYS,
2908 MGMT_STATUS_INVALID_PARAMS);
2913 hci_link_keys_clear(hdev);
/* Track whether HCI_KEEP_DEBUG_KEYS actually changed so a settings
 * event is only emitted on a real transition. */
2916 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2918 changed = hci_dev_test_and_clear_flag(hdev,
2919 HCI_KEEP_DEBUG_KEYS);
2922 new_settings(hdev, NULL);
2924 for (i = 0; i < key_count; i++) {
2925 struct mgmt_link_key_info *key = &cp->keys[i];
/* Skip keys on the administratively blocked list. */
2927 if (hci_is_blocked_key(hdev,
2928 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2930 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2935 /* Always ignore debug keys and require a new pairing if
2936 * the user wants to use them.
2938 if (key->type == HCI_LK_DEBUG_COMBINATION)
2941 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2942 key->type, key->pin_len, NULL);
2945 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2947 hci_dev_unlock(hdev);
/* Emit an MGMT_EV_DEVICE_UNPAIRED event for @bdaddr/@addr_type to all
 * mgmt sockets except @skip_sk (typically the requester, who gets a
 * command reply instead).
 */
2952 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2953 u8 addr_type, struct sock *skip_sk)
2955 struct mgmt_ev_device_unpaired ev;
2957 bacpy(&ev.addr.bdaddr, bdaddr);
2958 ev.addr.type = addr_type;
2960 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Completion callback for the queued unpair work: broadcast the
 * unpaired event, finish the mgmt command via cmd_complete and free
 * the pending entry.
 */
2964 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2966 struct mgmt_pending_cmd *cmd = data;
2967 struct mgmt_cp_unpair_device *cp = cmd->param;
2970 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2972 cmd->cmd_complete(cmd, err);
2973 mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_UNPAIR_DEVICE with disconnect
 * requested: look up the live connection (ACL for BR/EDR, LE
 * otherwise) and abort it with the "remote user terminated" reason.
 * NOTE(review): the !conn early-return is not visible in this view.
 */
2976 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2978 struct mgmt_pending_cmd *cmd = data;
2979 struct mgmt_cp_unpair_device *cp = cmd->param;
2980 struct hci_conn *conn;
2982 if (cp->addr.type == BDADDR_BREDR)
2983 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2986 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2987 le_addr_type(cp->addr.type));
2992 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* Handle MGMT_OP_UNPAIR_DEVICE: delete the pairing material for the
 * given address (link key for BR/EDR, SMP LTK/IRK for LE), optionally
 * terminating an existing connection when cp->disconnect is set.
 */
2995 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2998 struct mgmt_cp_unpair_device *cp = data;
2999 struct mgmt_rp_unpair_device rp;
3000 struct hci_conn_params *params;
3001 struct mgmt_pending_cmd *cmd;
3002 struct hci_conn *conn;
/* The reply always echoes the address being unpaired. */
3006 memset(&rp, 0, sizeof(rp));
3007 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3008 rp.addr.type = cp->addr.type;
3010 if (!bdaddr_type_is_valid(cp->addr.type))
3011 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3012 MGMT_STATUS_INVALID_PARAMS,
3015 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3016 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3017 MGMT_STATUS_INVALID_PARAMS,
3022 if (!hdev_is_powered(hdev)) {
3023 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3024 MGMT_STATUS_NOT_POWERED, &rp,
3029 if (cp->addr.type == BDADDR_BREDR) {
3030 /* If disconnection is requested, then look up the
3031 * connection. If the remote device is connected, it
3032 * will be later used to terminate the link.
3034 * Setting it to NULL explicitly will cause no
3035 * termination of the link.
3038 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3043 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3045 err = mgmt_cmd_complete(sk, hdev->id,
3046 MGMT_OP_UNPAIR_DEVICE,
3047 MGMT_STATUS_NOT_PAIRED, &rp,
3055 /* LE address type */
3056 addr_type = le_addr_type(cp->addr.type);
3058 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3059 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3061 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3062 MGMT_STATUS_NOT_PAIRED, &rp,
3067 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* No live LE connection: drop the stored connection params now. */
3069 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3074 /* Defer clearing up the connection parameters until closing to
3075 * give a chance of keeping them if a repairing happens.
3077 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3079 /* Disable auto-connection parameters if present */
3080 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3082 if (params->explicit_connect)
3083 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3085 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3088 /* If disconnection is not requested, then clear the connection
3089 * variable so that the link is not terminated.
3091 if (!cp->disconnect)
3095 /* If the connection variable is set, then termination of the
3096 * link is requested.
3099 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3101 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* A live link must be torn down asynchronously via cmd_sync. */
3105 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3112 cmd->cmd_complete = addr_cmd_complete;
3114 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3115 unpair_device_complete);
3117 mgmt_pending_free(cmd);
3120 hci_dev_unlock(hdev);
/* Handle MGMT_OP_DISCONNECT: terminate the connection to the given
 * address. Fails with NOT_POWERED, BUSY (a disconnect already
 * pending), or NOT_CONNECTED as appropriate; on success the reply is
 * sent from the pending command's completion path.
 */
3124 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3127 struct mgmt_cp_disconnect *cp = data;
3128 struct mgmt_rp_disconnect rp;
3129 struct mgmt_pending_cmd *cmd;
3130 struct hci_conn *conn;
3133 bt_dev_dbg(hdev, "sock %p", sk);
3135 memset(&rp, 0, sizeof(rp));
3136 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3137 rp.addr.type = cp->addr.type;
3139 if (!bdaddr_type_is_valid(cp->addr.type))
3140 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3141 MGMT_STATUS_INVALID_PARAMS,
3146 if (!test_bit(HCI_UP, &hdev->flags)) {
3147 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3148 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect may be outstanding per controller. */
3153 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3154 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3155 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3159 if (cp->addr.type == BDADDR_BREDR)
3160 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3163 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3164 le_addr_type(cp->addr.type));
3166 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3167 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3168 MGMT_STATUS_NOT_CONNECTED, &rp,
3173 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3179 cmd->cmd_complete = generic_cmd_complete;
3181 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
/* Presumably on hci_disconnect() failure only — drop the pending cmd. */
3183 mgmt_pending_remove(cmd);
3186 hci_dev_unlock(hdev);
/* Map an HCI link type + address type pair onto the mgmt BDADDR_*
 * address-type constants used in management events and replies.
 */
3190 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3192 switch (link_type) {
3194 switch (addr_type) {
3195 case ADDR_LE_DEV_PUBLIC:
3196 return BDADDR_LE_PUBLIC;
3199 /* Fallback to LE Random address type */
3200 return BDADDR_LE_RANDOM;
3204 /* Fallback to BR/EDR type */
3205 return BDADDR_BREDR;
/* Handle MGMT_OP_GET_CONNECTIONS: report all currently connected
 * devices that reached the MGMT_CONNECTED state, excluding SCO/eSCO
 * links (audio links are not exposed over the mgmt interface).
 */
3209 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3212 struct mgmt_rp_get_connections *rp;
3217 bt_dev_dbg(hdev, "sock %p", sk);
3221 if (!hdev_is_powered(hdev)) {
3222 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3223 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply. */
3228 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3229 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3233 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in the address list, skipping SCO/eSCO. */
3240 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3241 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3243 bacpy(&rp->addr[i].bdaddr, &c->dst);
3244 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3245 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3250 rp->conn_count = cpu_to_le16(i);
3252 /* Recalculate length in case of filtered SCO connections, etc */
3253 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3254 struct_size(rp, addr, i));
3259 hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply toward the controller on behalf of
 * @sk, tracking it as a pending MGMT_OP_PIN_CODE_NEG_REPLY command.
 * Caller must hold hdev lock (called from pin_code_reply path).
 */
3263 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3264 struct mgmt_cp_pin_code_neg_reply *cp)
3266 struct mgmt_pending_cmd *cmd;
3269 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3274 cmd->cmd_complete = addr_cmd_complete;
3276 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3277 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
/* Presumably only on hci_send_cmd() failure. */
3279 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PIN_CODE_REPLY: forward the user-supplied PIN code
 * to the controller for the pending BR/EDR pairing. If the link
 * requires a 16-digit PIN (security HIGH) and the supplied one is
 * shorter, convert the reply into a negative reply instead.
 */
3284 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3287 struct hci_conn *conn;
3288 struct mgmt_cp_pin_code_reply *cp = data;
3289 struct hci_cp_pin_code_reply reply;
3290 struct mgmt_pending_cmd *cmd;
3293 bt_dev_dbg(hdev, "sock %p", sk);
3297 if (!hdev_is_powered(hdev)) {
3298 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3299 MGMT_STATUS_NOT_POWERED);
3303 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3305 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3306 MGMT_STATUS_NOT_CONNECTED);
/* Secure (16 digit) PIN required but not provided: reject pairing. */
3310 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3311 struct mgmt_cp_pin_code_neg_reply ncp;
3313 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3315 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3317 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3319 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3320 MGMT_STATUS_INVALID_PARAMS);
3325 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3331 cmd->cmd_complete = addr_cmd_complete;
3333 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3334 reply.pin_len = cp->pin_len;
3335 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3337 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
/* Presumably only on hci_send_cmd() failure. */
3339 mgmt_pending_remove(cmd);
3342 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_IO_CAPABILITY: store the IO capability used for
 * future pairing attempts. Purely local state; always completes
 * immediately.
 */
3346 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3349 struct mgmt_cp_set_io_capability *cp = data;
3351 bt_dev_dbg(hdev, "sock %p", sk);
/* KeyboardDisplay (0x04) is the highest defined capability value. */
3353 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3354 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3355 MGMT_STATUS_INVALID_PARAMS);
3359 hdev->io_capability = cp->io_capability;
3361 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3363 hci_dev_unlock(hdev);
3365 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending MGMT_OP_PAIR_DEVICE command whose user_data points
 * at @conn, or NULL when this connection has no pairing in flight.
 */
3369 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3371 struct hci_dev *hdev = conn->hdev;
3372 struct mgmt_pending_cmd *cmd;
3374 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3375 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3378 if (cmd->user_data != conn)
/* cmd_complete handler for MGMT_OP_PAIR_DEVICE: send the final reply,
 * detach all pairing callbacks from the connection and drop the
 * reference taken when pairing started.
 */
3387 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3389 struct mgmt_rp_pair_device rp;
3390 struct hci_conn *conn = cmd->user_data;
3393 bacpy(&rp.addr.bdaddr, &conn->dst);
3394 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3396 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3397 status, &rp, sizeof(rp));
3399 /* So we don't get further callbacks for this connection */
3400 conn->connect_cfm_cb = NULL;
3401 conn->security_cfm_cb = NULL;
3402 conn->disconn_cfm_cb = NULL;
3404 hci_conn_drop(conn);
3406 /* The device is paired so there is no need to remove
3407 * its connection parameters anymore.
3409 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by the SMP layer when pairing over SMP finishes: resolve any
 * pending Pair Device command for @conn with SUCCESS or FAILED.
 */
3416 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3418 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3419 struct mgmt_pending_cmd *cmd;
3421 cmd = find_pairing(conn);
3423 cmd->cmd_complete(cmd, status);
3424 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback while pairing:
 * translate the HCI status and finish the pending Pair Device command.
 */
3428 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3430 struct mgmt_pending_cmd *cmd;
3432 BT_DBG("status %u", status);
3434 cmd = find_pairing(conn);
3436 BT_DBG("Unable to find a pending command");
3440 cmd->cmd_complete(cmd, mgmt_status(status));
3441 mgmt_pending_remove(cmd);
/* LE counterpart of pairing_complete_cb(). For LE the SMP layer
 * normally finishes pairing via mgmt_smp_complete(); this presumably
 * only acts on failure statuses (the guard is elided in this view).
 */
3444 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3446 struct mgmt_pending_cmd *cmd;
3448 BT_DBG("status %u", status);
3453 cmd = find_pairing(conn);
3455 BT_DBG("Unable to find a pending command");
3459 cmd->cmd_complete(cmd, mgmt_status(status));
3460 mgmt_pending_remove(cmd);
/* Handle MGMT_OP_PAIR_DEVICE: initiate dedicated bonding with a
 * remote device. Creates the ACL (BR/EDR) or LE connection, installs
 * pairing callbacks and tracks the operation as a pending command;
 * the reply is sent when pairing finishes or fails.
 */
3463 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3466 struct mgmt_cp_pair_device *cp = data;
3467 struct mgmt_rp_pair_device rp;
3468 struct mgmt_pending_cmd *cmd;
3469 u8 sec_level, auth_type;
3470 struct hci_conn *conn;
3473 bt_dev_dbg(hdev, "sock %p", sk);
3475 memset(&rp, 0, sizeof(rp));
3476 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3477 rp.addr.type = cp->addr.type;
3479 if (!bdaddr_type_is_valid(cp->addr.type))
3480 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3481 MGMT_STATUS_INVALID_PARAMS,
3484 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3485 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3486 MGMT_STATUS_INVALID_PARAMS,
3491 if (!hdev_is_powered(hdev)) {
3492 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3493 MGMT_STATUS_NOT_POWERED, &rp,
3498 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3499 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3500 MGMT_STATUS_ALREADY_PAIRED, &rp,
3505 sec_level = BT_SECURITY_MEDIUM;
3506 auth_type = HCI_AT_DEDICATED_BONDING;
3508 if (cp->addr.type == BDADDR_BREDR) {
3509 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3510 auth_type, CONN_REASON_PAIR_DEVICE);
3512 u8 addr_type = le_addr_type(cp->addr.type);
3513 struct hci_conn_params *p;
3515 /* When pairing a new device, it is expected to remember
3516 * this device for future connections. Adding the connection
3517 * parameter information ahead of time allows tracking
3518 * of the peripheral preferred values and will speed up any
3519 * further connection establishment.
3521 * If connection parameters already exist, then they
3522 * will be kept and this function does nothing.
3524 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3526 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3527 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3529 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3530 sec_level, HCI_LE_CONN_TIMEOUT,
3531 CONN_REASON_PAIR_DEVICE);
/* Map connect-attempt errors onto mgmt status codes. */
3537 if (PTR_ERR(conn) == -EBUSY)
3538 status = MGMT_STATUS_BUSY;
3539 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3540 status = MGMT_STATUS_NOT_SUPPORTED;
3541 else if (PTR_ERR(conn) == -ECONNREFUSED)
3542 status = MGMT_STATUS_REJECTED;
3544 status = MGMT_STATUS_CONNECT_FAILED;
3546 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3547 status, &rp, sizeof(rp));
/* Callbacks already installed means another pairing is in flight. */
3551 if (conn->connect_cfm_cb) {
3552 hci_conn_drop(conn);
3553 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3554 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3558 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3561 hci_conn_drop(conn);
3565 cmd->cmd_complete = pairing_complete;
3567 /* For LE, just connecting isn't a proof that the pairing finished */
3568 if (cp->addr.type == BDADDR_BREDR) {
3569 conn->connect_cfm_cb = pairing_complete_cb;
3570 conn->security_cfm_cb = pairing_complete_cb;
3571 conn->disconn_cfm_cb = pairing_complete_cb;
3573 conn->connect_cfm_cb = le_pairing_complete_cb;
3574 conn->security_cfm_cb = le_pairing_complete_cb;
3575 conn->disconn_cfm_cb = le_pairing_complete_cb;
3578 conn->io_capability = cp->io_cap;
/* Hold a reference for the lifetime of the pending command. */
3579 cmd->user_data = hci_conn_get(conn);
/* Already connected and secure enough: finish immediately. */
3581 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3582 hci_conn_security(conn, sec_level, auth_type, true)) {
3583 cmd->cmd_complete(cmd, 0);
3584 mgmt_pending_remove(cmd);
3590 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CANCEL_PAIR_DEVICE: abort the pending Pair Device
 * command for the given address, remove any partially created pairing
 * material and terminate the link if it exists only because of the
 * pairing attempt.
 */
3594 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3597 struct mgmt_addr_info *addr = data;
3598 struct mgmt_pending_cmd *cmd;
3599 struct hci_conn *conn;
3602 bt_dev_dbg(hdev, "sock %p", sk);
3606 if (!hdev_is_powered(hdev)) {
3607 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3608 MGMT_STATUS_NOT_POWERED);
3612 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3614 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3615 MGMT_STATUS_INVALID_PARAMS);
3619 conn = cmd->user_data;
/* The address must match the pairing actually in progress. */
3621 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3622 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3623 MGMT_STATUS_INVALID_PARAMS);
3627 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3628 mgmt_pending_remove(cmd);
3630 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3631 addr, sizeof(*addr));
3633 /* Since user doesn't want to proceed with the connection, abort any
3634 * ongoing pairing and then terminate the link if it was created
3635 * because of the pair device action.
3637 if (addr->type == BDADDR_BREDR)
3638 hci_remove_link_key(hdev, &addr->bdaddr);
3640 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3641 le_addr_type(addr->type));
3643 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3644 hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
3647 hci_dev_unlock(hdev);
/* Common worker for the user pairing responses (PIN neg reply, user
 * confirm/passkey pos+neg replies). For LE addresses the answer is
 * routed to SMP; for BR/EDR the corresponding HCI command @hci_op is
 * sent and tracked as a pending mgmt command @mgmt_op.
 */
3651 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3652 struct mgmt_addr_info *addr, u16 mgmt_op,
3653 u16 hci_op, __le32 passkey)
3655 struct mgmt_pending_cmd *cmd;
3656 struct hci_conn *conn;
3661 if (!hdev_is_powered(hdev)) {
3662 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3663 MGMT_STATUS_NOT_POWERED, addr,
3668 if (addr->type == BDADDR_BREDR)
3669 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3671 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3672 le_addr_type(addr->type));
3675 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3676 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled by the SMP layer directly. */
3681 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3682 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3684 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3685 MGMT_STATUS_SUCCESS, addr,
3688 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3689 MGMT_STATUS_FAILED, addr,
3695 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3701 cmd->cmd_complete = addr_cmd_complete;
3703 /* Continue with pairing via HCI */
3704 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3705 struct hci_cp_user_passkey_reply cp;
3707 bacpy(&cp.bdaddr, &addr->bdaddr);
3708 cp.passkey = passkey;
3709 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies carry only the bdaddr as HCI parameter. */
3711 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3715 mgmt_pending_remove(cmd);
3718 hci_dev_unlock(hdev);
/* Handle MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper that rejects a PIN
 * request via the common user_pairing_resp() path.
 */
3722 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3723 void *data, u16 len)
3725 struct mgmt_cp_pin_code_neg_reply *cp = data;
3727 bt_dev_dbg(hdev, "sock %p", sk);
3729 return user_pairing_resp(sk, hdev, &cp->addr,
3730 MGMT_OP_PIN_CODE_NEG_REPLY,
3731 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_REPLY: accept a numeric-comparison
 * confirmation. Unlike its siblings this one validates the parameter
 * length explicitly before delegating to user_pairing_resp().
 */
3734 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3737 struct mgmt_cp_user_confirm_reply *cp = data;
3739 bt_dev_dbg(hdev, "sock %p", sk);
3741 if (len != sizeof(*cp))
3742 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3743 MGMT_STATUS_INVALID_PARAMS);
3745 return user_pairing_resp(sk, hdev, &cp->addr,
3746 MGMT_OP_USER_CONFIRM_REPLY,
3747 HCI_OP_USER_CONFIRM_REPLY, 0);
/* Handle MGMT_OP_USER_CONFIRM_NEG_REPLY: reject a numeric-comparison
 * confirmation via the common user_pairing_resp() path.
 */
3750 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3751 void *data, u16 len)
3753 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3755 bt_dev_dbg(hdev, "sock %p", sk);
3757 return user_pairing_resp(sk, hdev, &cp->addr,
3758 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3759 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* Handle MGMT_OP_USER_PASSKEY_REPLY: supply the user-entered passkey
 * via the common user_pairing_resp() path.
 */
3762 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3765 struct mgmt_cp_user_passkey_reply *cp = data;
3767 bt_dev_dbg(hdev, "sock %p", sk);
3769 return user_pairing_resp(sk, hdev, &cp->addr,
3770 MGMT_OP_USER_PASSKEY_REPLY,
3771 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey request via
 * the common user_pairing_resp() path.
 */
3774 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3775 void *data, u16 len)
3777 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3779 bt_dev_dbg(hdev, "sock %p", sk);
3781 return user_pairing_resp(sk, hdev, &cp->addr,
3782 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3783 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance advertises data affected by
 * @flags (e.g. local name or appearance just changed), expire it:
 * cancel its timeout and reschedule so the new data takes effect.
 */
3786 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3788 struct adv_info *adv_instance;
3790 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3794 /* stop if current instance doesn't need to be changed */
3795 if (!(adv_instance->flags & flags))
3798 cancel_adv_timeout(hdev);
3800 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3804 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
/* hci_cmd_sync work: refresh advertising instances that include the
 * local name after a name change.
 */
3809 static int name_changed_sync(struct hci_dev *hdev, void *data)
3811 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
/* Completion callback for MGMT_OP_SET_LOCAL_NAME: report status (or
 * the accepted name on success), kick an advertising refresh if LE
 * advertising is active, and drop the pending command.
 */
3814 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3816 struct mgmt_pending_cmd *cmd = data;
3817 struct mgmt_cp_set_local_name *cp = cmd->param;
3818 u8 status = mgmt_status(err);
3820 bt_dev_dbg(hdev, "err %d", err);
/* Bail if the command has already been taken off the pending list. */
3822 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3826 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3829 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3832 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3833 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3836 mgmt_pending_remove(cmd);
/* hci_cmd_sync work for MGMT_OP_SET_LOCAL_NAME: push the new name to
 * the controller (BR/EDR name + EIR) and refresh the LE scan response
 * data when advertising is enabled.
 */
3839 static int set_name_sync(struct hci_dev *hdev, void *data)
3841 if (lmp_bredr_capable(hdev)) {
3842 hci_update_name_sync(hdev);
3843 hci_update_eir_sync(hdev);
3846 /* The name is stored in the scan response data and so
3847 * no need to update the advertising data here.
3849 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3850 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
/* Handle MGMT_OP_SET_LOCAL_NAME: store the new complete and short
 * local names. No-op reply when unchanged; when powered off only the
 * cached values are updated and a Local Name Changed event is
 * broadcast; otherwise the update is queued to the controller.
 */
3855 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3858 struct mgmt_cp_set_local_name *cp = data;
3859 struct mgmt_pending_cmd *cmd;
3862 bt_dev_dbg(hdev, "sock %p", sk);
3866 /* If the old values are the same as the new ones just return a
3867 * direct command complete event.
3869 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3870 !memcmp(hdev->short_name, cp->short_name,
3871 sizeof(hdev->short_name))) {
3872 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3877 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3879 if (!hdev_is_powered(hdev)) {
3880 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3882 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Inform other mgmt sockets of the name change while powered off. */
3887 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3888 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3889 ext_info_changed(hdev, sk);
3894 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3898 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3902 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3903 MGMT_STATUS_FAILED);
3906 mgmt_pending_remove(cmd);
/* Queued successfully: commit the new name to the local copy. */
3911 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3914 hci_dev_unlock(hdev);
/* hci_cmd_sync work: refresh advertising instances that include the
 * appearance value after it changed.
 */
3918 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3920 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
/* Handle MGMT_OP_SET_APPEARANCE: store the LE appearance value and,
 * if it changed while advertising, queue a refresh of affected
 * advertising instances. LE-only command.
 */
3923 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3926 struct mgmt_cp_set_appearance *cp = data;
3930 bt_dev_dbg(hdev, "sock %p", sk);
3932 if (!lmp_le_capable(hdev))
3933 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3934 MGMT_STATUS_NOT_SUPPORTED);
3936 appearance = le16_to_cpu(cp->appearance);
3940 if (hdev->appearance != appearance) {
3941 hdev->appearance = appearance;
3943 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3944 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3947 ext_info_changed(hdev, sk);
3950 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3953 hci_dev_unlock(hdev);
/* Handle MGMT_OP_GET_PHY_CONFIGURATION: report the supported,
 * configurable and currently selected PHYs of the controller.
 */
3958 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3959 void *data, u16 len)
3961 struct mgmt_rp_get_phy_configuration rp;
3963 bt_dev_dbg(hdev, "sock %p", sk);
3967 memset(&rp, 0, sizeof(rp));
3969 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3970 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3971 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3973 hci_dev_unlock(hdev);
3975 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast an MGMT_EV_PHY_CONFIGURATION_CHANGED event carrying the
 * newly selected PHYs to all mgmt sockets except @skip.
 */
3979 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3981 struct mgmt_ev_phy_configuration_changed ev;
3983 memset(&ev, 0, sizeof(ev));
3985 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3987 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion callback for the LE Set Default PHY command: derive the
 * mgmt status from the queue error, the skb error pointer, or the HCI
 * status byte in the response, reply accordingly and broadcast the
 * PHY change event on success.
 */
3991 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3993 struct mgmt_pending_cmd *cmd = data;
3994 struct sk_buff *skb = cmd->skb;
3995 u8 status = mgmt_status(err);
3997 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4002 status = MGMT_STATUS_FAILED;
4003 else if (IS_ERR(skb))
4004 status = mgmt_status(PTR_ERR(skb));
/* First byte of the command-complete payload is the HCI status. */
4006 status = mgmt_status(skb->data[0]);
4009 bt_dev_dbg(hdev, "status %d", status);
4012 mgmt_cmd_status(cmd->sk, hdev->id,
4013 MGMT_OP_SET_PHY_CONFIGURATION, status);
4015 mgmt_cmd_complete(cmd->sk, hdev->id,
4016 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4019 mgmt_phy_configuration_changed(hdev, cmd->sk);
4022 if (skb && !IS_ERR(skb))
4025 mgmt_pending_remove(cmd);
/* hci_cmd_sync work: translate the mgmt PHY selection bitmask into an
 * HCI LE Set Default PHY command and issue it synchronously, storing
 * the response skb on the pending command for the completion handler.
 */
4028 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4030 struct mgmt_pending_cmd *cmd = data;
4031 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4032 struct hci_cp_le_set_default_phy cp_phy;
4033 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4035 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction. */
4037 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4038 cp_phy.all_phys |= 0x01;
4040 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4041 cp_phy.all_phys |= 0x02;
4043 if (selected_phys & MGMT_PHY_LE_1M_TX)
4044 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4046 if (selected_phys & MGMT_PHY_LE_2M_TX)
4047 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4049 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4050 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4052 if (selected_phys & MGMT_PHY_LE_1M_RX)
4053 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4055 if (selected_phys & MGMT_PHY_LE_2M_RX)
4056 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4058 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4059 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4061 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4062 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
/* Handle MGMT_OP_SET_PHY_CONFIGURATION: validate the requested PHY
 * selection, apply the BR/EDR part immediately by recomputing the ACL
 * packet-type mask, and queue an LE Set Default PHY command when the
 * LE part changed.
 */
4067 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4068 void *data, u16 len)
4070 struct mgmt_cp_set_phy_configuration *cp = data;
4071 struct mgmt_pending_cmd *cmd;
4072 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
/* DH1/DM1 are always allowed; other packet types toggle below. */
4073 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4074 bool changed = false;
4077 bt_dev_dbg(hdev, "sock %p", sk);
4079 configurable_phys = get_configurable_phys(hdev);
4080 supported_phys = get_supported_phys(hdev);
4081 selected_phys = __le32_to_cpu(cp->selected_phys);
4083 if (selected_phys & ~supported_phys)
4084 return mgmt_cmd_status(sk, hdev->id,
4085 MGMT_OP_SET_PHY_CONFIGURATION,
4086 MGMT_STATUS_INVALID_PARAMS);
/* Mandatory (non-configurable) PHYs must remain selected. */
4088 unconfigure_phys = supported_phys & ~configurable_phys;
4090 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4091 return mgmt_cmd_status(sk, hdev->id,
4092 MGMT_OP_SET_PHY_CONFIGURATION,
4093 MGMT_STATUS_INVALID_PARAMS);
4095 if (selected_phys == get_selected_phys(hdev))
4096 return mgmt_cmd_complete(sk, hdev->id,
4097 MGMT_OP_SET_PHY_CONFIGURATION,
4102 if (!hdev_is_powered(hdev)) {
4103 err = mgmt_cmd_status(sk, hdev->id,
4104 MGMT_OP_SET_PHY_CONFIGURATION,
4105 MGMT_STATUS_REJECTED);
4109 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4110 err = mgmt_cmd_status(sk, hdev->id,
4111 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR 1M multi-slot packets enable DH3/DM3 and DH5/DM5. */
4116 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4117 pkt_type |= (HCI_DH3 | HCI_DM3);
4119 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4121 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4122 pkt_type |= (HCI_DH5 | HCI_DM5);
4124 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* EDR bits are inverted on the wire: set means "do not use". */
4126 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4127 pkt_type &= ~HCI_2DH1;
4129 pkt_type |= HCI_2DH1;
4131 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4132 pkt_type &= ~HCI_2DH3;
4134 pkt_type |= HCI_2DH3;
4136 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4137 pkt_type &= ~HCI_2DH5;
4139 pkt_type |= HCI_2DH5;
4141 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4142 pkt_type &= ~HCI_3DH1;
4144 pkt_type |= HCI_3DH1;
4146 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4147 pkt_type &= ~HCI_3DH3;
4149 pkt_type |= HCI_3DH3;
4151 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4152 pkt_type &= ~HCI_3DH5;
4154 pkt_type |= HCI_3DH5;
4156 if (pkt_type != hdev->pkt_type) {
4157 hdev->pkt_type = pkt_type;
/* LE selection unchanged: reply now, broadcast if BR/EDR changed. */
4161 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4162 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4164 mgmt_phy_configuration_changed(hdev, sk);
4166 err = mgmt_cmd_complete(sk, hdev->id,
4167 MGMT_OP_SET_PHY_CONFIGURATION,
4173 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4178 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4179 set_default_phy_complete);
4182 err = mgmt_cmd_status(sk, hdev->id,
4183 MGMT_OP_SET_PHY_CONFIGURATION,
4184 MGMT_STATUS_FAILED);
4187 mgmt_pending_remove(cmd);
4191 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS: replace the controller's blocked-key list with
 * the key entries supplied in the command payload.  Validates the declared
 * key_count against both the u16 payload limit and the actual command
 * length before touching hdev state.
 * NOTE(review): this listing has elided lines (locking, loop error path);
 * comments describe only what is visible here.
 */
4196 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4199 int err = MGMT_STATUS_SUCCESS;
4200 struct mgmt_cp_set_blocked_keys *keys = data;
/* Largest key_count that still fits in a u16-sized payload. */
4201 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4202 sizeof(struct mgmt_blocked_key_info));
4203 u16 key_count, expected_len;
4206 bt_dev_dbg(hdev, "sock %p", sk);
4208 key_count = __le16_to_cpu(keys->key_count);
4209 if (key_count > max_key_count) {
4210 bt_dev_err(hdev, "too big key_count value %u", key_count);
4211 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4212 MGMT_STATUS_INVALID_PARAMS);
/* The command length must match the declared key count exactly. */
4215 expected_len = struct_size(keys, keys, key_count);
4216 if (expected_len != len) {
4217 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4219 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4220 MGMT_STATUS_INVALID_PARAMS);
/* Drop the old list, then rebuild it from the payload. */
4225 hci_blocked_keys_clear(hdev);
4227 for (i = 0; i < key_count; ++i) {
4228 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4231 err = MGMT_STATUS_NO_RESOURCES;
4235 b->type = keys->keys[i].type;
4236 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
/* RCU insert: readers may traverse blocked_keys without the lock. */
4237 list_add_rcu(&b->list, &hdev->blocked_keys);
4239 hci_dev_unlock(hdev);
4241 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH: toggle the HCI_WIDEBAND_SPEECH_ENABLED
 * device flag.  Only permitted on controllers that declare the
 * HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED quirk, and the value may not be
 * changed while the controller is powered.
 */
4245 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4246 void *data, u16 len)
4248 struct mgmt_mode *cp = data;
4250 bool changed = false;
4252 bt_dev_dbg(hdev, "sock %p", sk);
4254 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4255 return mgmt_cmd_status(sk, hdev->id,
4256 MGMT_OP_SET_WIDEBAND_SPEECH,
4257 MGMT_STATUS_NOT_SUPPORTED);
/* Only boolean on/off values are valid. */
4259 if (cp->val != 0x00 && cp->val != 0x01)
4260 return mgmt_cmd_status(sk, hdev->id,
4261 MGMT_OP_SET_WIDEBAND_SPEECH,
4262 MGMT_STATUS_INVALID_PARAMS);
/* Reject a change of state while powered on. */
4266 if (hdev_is_powered(hdev) &&
4267 !!cp->val != hci_dev_test_flag(hdev,
4268 HCI_WIDEBAND_SPEECH_ENABLED)) {
4269 err = mgmt_cmd_status(sk, hdev->id,
4270 MGMT_OP_SET_WIDEBAND_SPEECH,
4271 MGMT_STATUS_REJECTED);
/* test-and-set/clear report whether the flag actually changed. */
4276 changed = !hci_dev_test_and_set_flag(hdev,
4277 HCI_WIDEBAND_SPEECH_ENABLED);
4279 changed = hci_dev_test_and_clear_flag(hdev,
4280 HCI_WIDEBAND_SPEECH_ENABLED);
4282 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
/* Broadcast New Settings only when the flag really flipped. */
4287 err = new_settings(hdev, sk);
4290 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP: build an EIR-encoded list of controller
 * capabilities (security flags, max encryption key sizes, LE TX power
 * range) and return it to userspace.
 */
4294 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4295 void *data, u16 data_len)
4298 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4301 u8 tx_power_range[2];
4303 bt_dev_dbg(hdev, "sock %p", sk);
4305 memset(&buf, 0, sizeof(buf));
4309 /* When the Read Simple Pairing Options command is supported, then
4310 * the remote public key validation is supported.
4312 * Alternatively, when Microsoft extensions are available, they can
4313 * indicate support for public key validation as well.
/* hdev->commands[] is the supported-commands bitmask from the controller. */
4315 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4316 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4318 flags |= 0x02; /* Remote public key validation (LE) */
4320 /* When the Read Encryption Key Size command is supported, then the
4321 * encryption key size is enforced.
4323 if (hdev->commands[20] & 0x10)
4324 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4326 flags |= 0x08; /* Encryption key size enforcement (LE) */
4328 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4331 /* When the Read Simple Pairing Options command is supported, then
4332 * also max encryption key size information is provided.
4334 if (hdev->commands[41] & 0x08)
4335 cap_len = eir_append_le16(rp->cap, cap_len,
4336 MGMT_CAP_MAX_ENC_KEY_SIZE,
4337 hdev->max_enc_key_size)
4339 cap_len = eir_append_le16(rp->cap, cap_len,
4340 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4341 SMP_MAX_ENC_KEY_SIZE);
4343 /* Append the min/max LE tx power parameters if we were able to fetch
4344 * it from the controller
/* commands[38] bit 7: LE Read Transmit Power supported. */
4346 if (hdev->commands[38] & 0x80) {
4347 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4348 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4349 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4353 rp->cap_len = cpu_to_le16(cap_len);
4355 hci_dev_unlock(hdev);
4357 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4358 rp, sizeof(*rp) + cap_len);
4361 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Experimental-feature UUIDs.  Each table stores the UUID shown in the
 * comment above it in reversed (wire/little-endian) byte order, matching
 * how mgmt transfers 128-bit UUIDs.
 */
4362 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4363 static const u8 debug_uuid[16] = {
4364 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4365 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4369 /* 330859bc-7506-492d-9370-9a6f0614037f */
4370 static const u8 quality_report_uuid[16] = {
4371 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4372 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4375 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4376 static const u8 offload_codecs_uuid[16] = {
4377 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4378 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4381 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4382 static const u8 le_simultaneous_roles_uuid[16] = {
4383 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4384 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4387 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4388 static const u8 rpa_resolution_uuid[16] = {
4389 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4390 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4393 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4394 static const u8 iso_socket_uuid[16] = {
4395 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4396 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4399 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4400 static const u8 mgmt_mesh_uuid[16] = {
4401 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4402 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
/* MGMT_OP_READ_EXP_FEATURES_INFO: report the experimental features this
 * kernel/controller combination exposes, each as a UUID plus flag bits
 * (BIT(0) = enabled).  May be called without a controller index (hdev
 * may be NULL), in which case only index-independent features appear.
 */
4405 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4406 void *data, u16 data_len)
4408 struct mgmt_rp_read_exp_features_info *rp;
4414 bt_dev_dbg(hdev, "sock %p", sk);
4416 /* Enough space for 7 features */
4417 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4418 rp = kzalloc(len, GFP_KERNEL);
4422 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Debug feature is global (no controller index needed). */
4424 flags = bt_dbg_get() ? BIT(0) : 0;
4426 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4427 rp->features[idx].flags = cpu_to_le32(flags);
4432 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4433 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4438 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4439 rp->features[idx].flags = cpu_to_le32(flags);
4443 if (hdev && ll_privacy_capable(hdev)) {
/* BIT(1) marks that toggling this feature changes supported settings. */
4444 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4445 flags = BIT(0) | BIT(1);
4449 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4450 rp->features[idx].flags = cpu_to_le32(flags);
/* Quality report: available via AOSP extension or a driver hook. */
4454 if (hdev && (aosp_has_quality_report(hdev) ||
4455 hdev->set_quality_report)) {
4456 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4461 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4462 rp->features[idx].flags = cpu_to_le32(flags);
4466 if (hdev && hdev->get_data_path_id) {
4467 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4472 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4473 rp->features[idx].flags = cpu_to_le32(flags);
4477 if (IS_ENABLED(CONFIG_BT_LE)) {
4478 flags = iso_enabled() ? BIT(0) : 0;
4479 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4480 rp->features[idx].flags = cpu_to_le32(flags);
4484 if (hdev && lmp_le_capable(hdev)) {
4485 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4490 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4491 rp->features[idx].flags = cpu_to_le32(flags);
4495 rp->feature_count = cpu_to_le16(idx);
4497 /* After reading the experimental features information, enable
4498 * the events to update client on any future change.
4500 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4502 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4503 MGMT_OP_READ_EXP_FEATURES_INFO,
4504 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL-privacy (RPA
 * resolution) feature and mirror the new state into hdev->conn_flags so
 * the DEVICE_PRIVACY connection flag tracks feature availability.
 */
4510 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4513 struct mgmt_ev_exp_feature_changed ev;
4515 memset(&ev, 0, sizeof(ev));
4516 memcpy(ev.uuid, rpa_resolution_uuid, 16);
/* BIT(1): supported settings changed alongside the enable bit. */
4517 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4519 // Do we need to be atomic with the conn_flags?
4520 if (enabled && privacy_mode_capable(hdev))
4521 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4523 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
/* Only sockets that opted into exp-feature events receive this. */
4525 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4527 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Generic Experimental Feature Changed notification: broadcast the given
 * UUID with BIT(0) reflecting the new enabled state, skipping the socket
 * that triggered the change.
 */
4531 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4532 bool enabled, struct sock *skip)
4534 struct mgmt_ev_exp_feature_changed ev;
4536 memset(&ev, 0, sizeof(ev));
4537 memcpy(ev.uuid, uuid, 16);
4538 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4540 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4542 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Table-entry helper for exp_features[]: pairs a feature UUID with its
 * set_func handler.
 */
4545 #define EXP_FEAT(_uuid, _set_func) \
4548 .set_func = _set_func, \
4551 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Handler for the all-zero UUID: acts as a "disable experimental
 * features" request, clearing debug mode and LL privacy where possible
 * and emitting the corresponding change events.
 */
4552 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4553 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4555 struct mgmt_rp_set_exp_feature rp;
4557 memset(rp.uuid, 0, 16);
4558 rp.flags = cpu_to_le32(0);
4560 #ifdef CONFIG_BT_FEATURE_DEBUG
4562 bool changed = bt_dbg_get();
4567 exp_feature_changed(NULL, ZERO_KEY, false, sk);
/* LL privacy may only be cleared while the controller is powered off. */
4571 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4574 changed = hci_dev_test_and_clear_flag(hdev,
4575 HCI_ENABLE_LL_PRIVACY);
4577 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4581 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4583 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4584 MGMT_OP_SET_EXP_FEATURE, 0,
4588 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Experimental-feature handler: toggle kernel Bluetooth debug output
 * (bt_dbg).  Global feature, so it must be addressed without a
 * controller index.
 */
4589 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4590 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4592 struct mgmt_rp_set_exp_feature rp;
4597 /* Command requires to use the non-controller index */
4599 return mgmt_cmd_status(sk, hdev->id,
4600 MGMT_OP_SET_EXP_FEATURE,
4601 MGMT_STATUS_INVALID_INDEX);
4603 /* Parameters are limited to a single octet */
4604 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4605 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4606 MGMT_OP_SET_EXP_FEATURE,
4607 MGMT_STATUS_INVALID_PARAMS);
4609 /* Only boolean on/off is supported */
4610 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4611 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4612 MGMT_OP_SET_EXP_FEATURE,
4613 MGMT_STATUS_INVALID_PARAMS);
4615 val = !!cp->param[0];
/* changed is true only when the requested state differs from current. */
4616 changed = val ? !bt_dbg_get() : bt_dbg_get();
4619 memcpy(rp.uuid, debug_uuid, 16);
4620 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4622 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4624 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4625 MGMT_OP_SET_EXP_FEATURE, 0,
4629 exp_feature_changed(hdev, debug_uuid, val, sk);
/* Experimental-feature handler: enable/disable the experimental Mesh
 * support flag on a specific controller.  Disabling also clears the
 * active HCI_MESH flag.
 */
4635 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4636 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4638 struct mgmt_rp_set_exp_feature rp;
4642 /* Command requires to use the controller index */
4644 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4645 MGMT_OP_SET_EXP_FEATURE,
4646 MGMT_STATUS_INVALID_INDEX);
4648 /* Parameters are limited to a single octet */
4649 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4650 return mgmt_cmd_status(sk, hdev->id,
4651 MGMT_OP_SET_EXP_FEATURE,
4652 MGMT_STATUS_INVALID_PARAMS);
4654 /* Only boolean on/off is supported */
4655 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4656 return mgmt_cmd_status(sk, hdev->id,
4657 MGMT_OP_SET_EXP_FEATURE,
4658 MGMT_STATUS_INVALID_PARAMS);
4660 val = !!cp->param[0];
4663 changed = !hci_dev_test_and_set_flag(hdev,
4664 HCI_MESH_EXPERIMENTAL);
/* On disable, also turn off active mesh operation. */
4666 hci_dev_clear_flag(hdev, HCI_MESH);
4667 changed = hci_dev_test_and_clear_flag(hdev,
4668 HCI_MESH_EXPERIMENTAL);
4671 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4672 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4674 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4676 err = mgmt_cmd_complete(sk, hdev->id,
4677 MGMT_OP_SET_EXP_FEATURE, 0,
4681 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
/* Experimental-feature handler: enable/disable LL privacy (controller
 * RPA resolution).  Only allowed while the controller is powered off;
 * enabling also clears HCI_ADVERTISING since the two interact.
 */
4686 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4687 struct mgmt_cp_set_exp_feature *cp,
4690 struct mgmt_rp_set_exp_feature rp;
4695 /* Command requires to use the controller index */
4697 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4698 MGMT_OP_SET_EXP_FEATURE,
4699 MGMT_STATUS_INVALID_INDEX);
4701 /* Changes can only be made when controller is powered down */
4702 if (hdev_is_powered(hdev))
4703 return mgmt_cmd_status(sk, hdev->id,
4704 MGMT_OP_SET_EXP_FEATURE,
4705 MGMT_STATUS_REJECTED);
4707 /* Parameters are limited to a single octet */
4708 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4709 return mgmt_cmd_status(sk, hdev->id,
4710 MGMT_OP_SET_EXP_FEATURE,
4711 MGMT_STATUS_INVALID_PARAMS);
4713 /* Only boolean on/off is supported */
4714 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4715 return mgmt_cmd_status(sk, hdev->id,
4716 MGMT_OP_SET_EXP_FEATURE,
4717 MGMT_STATUS_INVALID_PARAMS);
4719 val = !!cp->param[0];
4722 changed = !hci_dev_test_and_set_flag(hdev,
4723 HCI_ENABLE_LL_PRIVACY);
4724 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4726 /* Enable LL privacy + supported settings changed */
4727 flags = BIT(0) | BIT(1);
4729 changed = hci_dev_test_and_clear_flag(hdev,
4730 HCI_ENABLE_LL_PRIVACY);
4732 /* Disable LL privacy + supported settings changed */
4736 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4737 rp.flags = cpu_to_le32(flags);
4739 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4741 err = mgmt_cmd_complete(sk, hdev->id,
4742 MGMT_OP_SET_EXP_FEATURE, 0,
/* Notify other sockets and sync conn_flags with the new state. */
4746 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Experimental-feature handler: enable/disable controller quality
 * reports, via either a driver hook (hdev->set_quality_report) or the
 * AOSP vendor extension.  Runs under hci_req_sync_lock because the
 * enable path issues HCI traffic.
 */
4751 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4752 struct mgmt_cp_set_exp_feature *cp,
4755 struct mgmt_rp_set_exp_feature rp;
4759 /* Command requires to use a valid controller index */
4761 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4762 MGMT_OP_SET_EXP_FEATURE,
4763 MGMT_STATUS_INVALID_INDEX);
4765 /* Parameters are limited to a single octet */
4766 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4767 return mgmt_cmd_status(sk, hdev->id,
4768 MGMT_OP_SET_EXP_FEATURE,
4769 MGMT_STATUS_INVALID_PARAMS);
4771 /* Only boolean on/off is supported */
4772 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4773 return mgmt_cmd_status(sk, hdev->id,
4774 MGMT_OP_SET_EXP_FEATURE,
4775 MGMT_STATUS_INVALID_PARAMS);
4777 hci_req_sync_lock(hdev);
4779 val = !!cp->param[0];
4780 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4782 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4783 err = mgmt_cmd_status(sk, hdev->id,
4784 MGMT_OP_SET_EXP_FEATURE,
4785 MGMT_STATUS_NOT_SUPPORTED);
4786 goto unlock_quality_report;
/* Driver hook takes precedence over the AOSP extension. */
4790 if (hdev->set_quality_report)
4791 err = hdev->set_quality_report(hdev, val);
4793 err = aosp_set_quality_report(hdev, val);
4796 err = mgmt_cmd_status(sk, hdev->id,
4797 MGMT_OP_SET_EXP_FEATURE,
4798 MGMT_STATUS_FAILED);
4799 goto unlock_quality_report;
4803 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4805 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4808 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4810 memcpy(rp.uuid, quality_report_uuid, 16);
4811 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4812 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4814 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4818 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4820 unlock_quality_report:
4821 hci_req_sync_unlock(hdev);
/* Experimental-feature handler: toggle use of offloaded (controller-side)
 * codecs.  Requires the driver to implement get_data_path_id; only the
 * HCI_OFFLOAD_CODECS_ENABLED flag is changed here.
 */
4825 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4826 struct mgmt_cp_set_exp_feature *cp,
4831 struct mgmt_rp_set_exp_feature rp;
4833 /* Command requires to use a valid controller index */
4835 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4836 MGMT_OP_SET_EXP_FEATURE,
4837 MGMT_STATUS_INVALID_INDEX);
4839 /* Parameters are limited to a single octet */
4840 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4841 return mgmt_cmd_status(sk, hdev->id,
4842 MGMT_OP_SET_EXP_FEATURE,
4843 MGMT_STATUS_INVALID_PARAMS);
4845 /* Only boolean on/off is supported */
4846 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4847 return mgmt_cmd_status(sk, hdev->id,
4848 MGMT_OP_SET_EXP_FEATURE,
4849 MGMT_STATUS_INVALID_PARAMS);
4851 val = !!cp->param[0];
4852 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4854 if (!hdev->get_data_path_id) {
4855 return mgmt_cmd_status(sk, hdev->id,
4856 MGMT_OP_SET_EXP_FEATURE,
4857 MGMT_STATUS_NOT_SUPPORTED);
4862 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4864 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4867 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4870 memcpy(rp.uuid, offload_codecs_uuid, 16);
4871 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4872 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4873 err = mgmt_cmd_complete(sk, hdev->id,
4874 MGMT_OP_SET_EXP_FEATURE, 0,
4878 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* Experimental-feature handler: toggle simultaneous LE central+peripheral
 * roles.  Requires controller support (hci_dev_le_state_simultaneous);
 * only the HCI_LE_SIMULTANEOUS_ROLES flag is changed here.
 */
4883 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4884 struct mgmt_cp_set_exp_feature *cp,
4889 struct mgmt_rp_set_exp_feature rp;
4891 /* Command requires to use a valid controller index */
4893 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4894 MGMT_OP_SET_EXP_FEATURE,
4895 MGMT_STATUS_INVALID_INDEX);
4897 /* Parameters are limited to a single octet */
4898 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4899 return mgmt_cmd_status(sk, hdev->id,
4900 MGMT_OP_SET_EXP_FEATURE,
4901 MGMT_STATUS_INVALID_PARAMS);
4903 /* Only boolean on/off is supported */
4904 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4905 return mgmt_cmd_status(sk, hdev->id,
4906 MGMT_OP_SET_EXP_FEATURE,
4907 MGMT_STATUS_INVALID_PARAMS);
4909 val = !!cp->param[0];
4910 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4912 if (!hci_dev_le_state_simultaneous(hdev)) {
4913 return mgmt_cmd_status(sk, hdev->id,
4914 MGMT_OP_SET_EXP_FEATURE,
4915 MGMT_STATUS_NOT_SUPPORTED);
4920 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4922 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4925 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4928 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4929 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4930 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4931 err = mgmt_cmd_complete(sk, hdev->id,
4932 MGMT_OP_SET_EXP_FEATURE, 0,
4936 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* Experimental-feature handler: toggle ISO socket support.  This is a
 * global (non-controller) feature, so it must be addressed without a
 * controller index.
 */
4942 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4943 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4945 struct mgmt_rp_set_exp_feature rp;
4946 bool val, changed = false;
4949 /* Command requires to use the non-controller index */
4951 return mgmt_cmd_status(sk, hdev->id,
4952 MGMT_OP_SET_EXP_FEATURE,
4953 MGMT_STATUS_INVALID_INDEX);
4955 /* Parameters are limited to a single octet */
4956 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4957 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4958 MGMT_OP_SET_EXP_FEATURE,
4959 MGMT_STATUS_INVALID_PARAMS);
4961 /* Only boolean on/off is supported */
4962 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4963 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4964 MGMT_OP_SET_EXP_FEATURE,
4965 MGMT_STATUS_INVALID_PARAMS);
4967 val = cp->param[0] ? true : false;
4976 memcpy(rp.uuid, iso_socket_uuid, 16);
4977 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4979 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4981 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4982 MGMT_OP_SET_EXP_FEATURE, 0,
4986 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
/* Dispatch table mapping experimental-feature UUIDs to their setter
 * handlers; terminated by a NULL entry.  set_exp_feature() walks this.
 */
4992 static const struct mgmt_exp_feature {
4994 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4995 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4996 } exp_features[] = {
4997 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4998 #ifdef CONFIG_BT_FEATURE_DEBUG
4999 EXP_FEAT(debug_uuid, set_debug_func),
5001 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5002 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5003 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5004 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5005 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5007 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5010 /* end with a null feature */
5011 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE entry point: look the requested UUID up in
 * exp_features[] and delegate to its handler; unknown UUIDs return
 * NOT_SUPPORTED.  hdev may be NULL for index-independent features.
 */
5014 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5015 void *data, u16 data_len)
5017 struct mgmt_cp_set_exp_feature *cp = data;
5020 bt_dev_dbg(hdev, "sock %p", sk);
5022 for (i = 0; exp_features[i].uuid; i++) {
5023 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5024 return exp_features[i].set_func(sk, hdev, cp, data_len);
5027 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5028 MGMT_OP_SET_EXP_FEATURE,
5029 MGMT_STATUS_NOT_SUPPORTED);
/* Compute the supported device-flag mask for a given connection-params
 * entry, starting from hdev->conn_flags and masking out REMOTE_WAKEUP
 * for RPA-using devices when LL privacy is not in use.
 */
5032 static u32 get_params_flags(struct hci_dev *hdev,
5033 struct hci_conn_params *params)
5035 u32 flags = hdev->conn_flags;
5037 /* Devices using RPAs can only be programmed in the acceptlist if
5038 * LL Privacy has been enable otherwise they cannot mark
5039 * HCI_CONN_FLAG_REMOTE_WAKEUP.
/* An IRK on file implies the device uses resolvable private addresses. */
5041 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5042 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5043 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
/* MGMT_OP_GET_DEVICE_FLAGS: report the supported and current device
 * flags for one address.  BR/EDR addresses are looked up in the accept
 * list; LE addresses in the connection-parameter list.
 */
5048 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5051 struct mgmt_cp_get_device_flags *cp = data;
5052 struct mgmt_rp_get_device_flags rp;
5053 struct bdaddr_list_with_flags *br_params;
5054 struct hci_conn_params *params;
5055 u32 supported_flags;
5056 u32 current_flags = 0;
/* Default status; overwritten on a successful lookup below. */
5057 u8 status = MGMT_STATUS_INVALID_PARAMS;
5059 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5060 &cp->addr.bdaddr, cp->addr.type);
5064 supported_flags = hdev->conn_flags;
5066 memset(&rp, 0, sizeof(rp));
5068 if (cp->addr.type == BDADDR_BREDR) {
5069 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5075 current_flags = br_params->flags;
5077 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5078 le_addr_type(cp->addr.type));
/* For LE, the supported mask may be narrowed per-device. */
5082 supported_flags = get_params_flags(hdev, params);
5083 current_flags = params->flags;
5086 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5087 rp.addr.type = cp->addr.type;
5088 rp.supported_flags = cpu_to_le32(supported_flags);
5089 rp.current_flags = cpu_to_le32(current_flags);
5091 status = MGMT_STATUS_SUCCESS;
5094 hci_dev_unlock(hdev);
5096 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast a Device Flags Changed event for the given address, skipping
 * the socket that initiated the change.
 */
5100 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5101 bdaddr_t *bdaddr, u8 bdaddr_type,
5102 u32 supported_flags, u32 current_flags)
5104 struct mgmt_ev_device_flags_changed ev;
5106 bacpy(&ev.addr.bdaddr, bdaddr);
5107 ev.addr.type = bdaddr_type;
5108 ev.supported_flags = cpu_to_le32(supported_flags);
5109 ev.current_flags = cpu_to_le32(current_flags);
5111 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS: update the per-device flag word for one
 * address after validating it against the supported mask (globally and,
 * for LE, per-device).  On success the change is broadcast to other
 * mgmt sockets and passive scan is refreshed if DEVICE_PRIVACY is set.
 */
5114 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5117 struct mgmt_cp_set_device_flags *cp = data;
5118 struct bdaddr_list_with_flags *br_params;
5119 struct hci_conn_params *params;
5120 u8 status = MGMT_STATUS_INVALID_PARAMS;
5121 u32 supported_flags;
5122 u32 current_flags = __le32_to_cpu(cp->current_flags);
5124 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5125 &cp->addr.bdaddr, cp->addr.type, current_flags);
5127 // We should take hci_dev_lock() early, I think.. conn_flags can change
5128 supported_flags = hdev->conn_flags;
/* Reject any flag bit outside the globally supported mask. */
5130 if ((supported_flags | current_flags) != supported_flags) {
5131 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5132 current_flags, supported_flags);
5138 if (cp->addr.type == BDADDR_BREDR) {
5139 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5144 br_params->flags = current_flags;
5145 status = MGMT_STATUS_SUCCESS;
5147 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5148 &cp->addr.bdaddr, cp->addr.type);
5154 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5155 le_addr_type(cp->addr.type));
5157 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5158 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
/* Re-check against the per-device supported mask for LE entries. */
5162 supported_flags = get_params_flags(hdev, params);
5164 if ((supported_flags | current_flags) != supported_flags) {
5165 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5166 current_flags, supported_flags);
/* WRITE_ONCE: flags word may be read concurrently without the lock. */
5170 WRITE_ONCE(params->flags, current_flags);
5171 status = MGMT_STATUS_SUCCESS;
5173 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5176 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5177 hci_update_passive_scan(hdev);
5180 hci_dev_unlock(hdev);
5183 if (status == MGMT_STATUS_SUCCESS)
5184 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5185 supported_flags, current_flags);
5187 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5188 &cp->addr, sizeof(cp->addr));
/* Broadcast an Advertisement Monitor Added event for the given handle,
 * skipping the socket that registered it.
 */
5191 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5194 struct mgmt_ev_adv_monitor_added ev;
5196 ev.monitor_handle = cpu_to_le16(handle);
5198 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast an Advertisement Monitor Removed event.  If a Remove Adv
 * Monitor command for this handle is pending, its originating socket is
 * skipped so that it only gets the command reply, not the event.
 */
5201 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5203 struct mgmt_ev_adv_monitor_removed ev;
5204 struct mgmt_pending_cmd *cmd;
5205 struct sock *sk_skip = NULL;
5206 struct mgmt_cp_remove_adv_monitor *cp;
5208 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5212 if (cp->monitor_handle)
5216 ev.monitor_handle = cpu_to_le16(handle);
5218 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled monitor
 * features, the handle/pattern limits, and the handles of all currently
 * registered advertisement monitors.
 */
5221 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5222 void *data, u16 len)
5224 struct adv_monitor *monitor = NULL;
5225 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5228 __u32 supported = 0;
5230 __u16 num_handles = 0;
5231 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5233 BT_DBG("request for %s", hdev->name);
/* OR-pattern monitors are only available via the MSFT extension. */
5237 if (msft_monitor_supported(hdev))
5238 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
/* Snapshot registered handles under the dev lock. */
5240 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5241 handles[num_handles++] = monitor->handle;
5243 hci_dev_unlock(hdev);
5245 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5246 rp = kmalloc(rp_size, GFP_KERNEL);
5250 /* All supported features are currently enabled */
5251 enabled = supported;
5253 rp->supported_features = cpu_to_le32(supported);
5254 rp->enabled_features = cpu_to_le32(enabled);
5255 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5256 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5257 rp->num_handles = cpu_to_le16(num_handles);
5259 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5261 err = mgmt_cmd_complete(sk, hdev->id,
5262 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5263 MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for Add Adv Patterns Monitor (runs from the
 * hci_cmd_sync queue).  On success, announces the new monitor, bumps the
 * monitor count, marks it registered and refreshes passive scanning;
 * then replies to the pending command.
 */
5270 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5271 void *data, int status)
5273 struct mgmt_rp_add_adv_patterns_monitor rp;
5274 struct mgmt_pending_cmd *cmd = data;
5275 struct adv_monitor *monitor = cmd->user_data;
5279 rp.monitor_handle = cpu_to_le16(monitor->handle);
5282 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5283 hdev->adv_monitors_cnt++;
5284 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5285 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5286 hci_update_passive_scan(hdev);
5289 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5290 mgmt_status(status), &rp, sizeof(rp));
5291 mgmt_pending_remove(cmd);
5293 hci_dev_unlock(hdev);
5294 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5295 rp.monitor_handle, status);
/* hci_cmd_sync work function: register the monitor carried in the
 * pending command's user_data with the controller.
 */
5298 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5300 struct mgmt_pending_cmd *cmd = data;
5301 struct adv_monitor *monitor = cmd->user_data;
5303 return hci_add_adv_monitor(hdev, monitor);
/* Shared tail of the Add Adv Patterns Monitor commands (with and without
 * RSSI parameters): rejects the request while conflicting commands are
 * pending, queues the monitor registration on the cmd_sync queue, and on
 * any failure frees the monitor and returns a command status.
 */
5306 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5307 struct adv_monitor *m, u8 status,
5308 void *data, u16 len, u16 op)
5310 struct mgmt_pending_cmd *cmd;
/* Busy while LE toggling or another monitor add/remove is in flight. */
5318 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5319 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5320 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5321 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5322 status = MGMT_STATUS_BUSY;
5326 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5328 status = MGMT_STATUS_NO_RESOURCES;
5333 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5334 mgmt_add_adv_patterns_monitor_complete);
5337 status = MGMT_STATUS_NO_RESOURCES;
5339 status = MGMT_STATUS_FAILED;
5344 hci_dev_unlock(hdev);
/* Error path: the monitor is ours to free. */
5349 hci_free_adv_monitor(hdev, m);
5350 hci_dev_unlock(hdev);
5351 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Fill in a monitor's RSSI parameters from the command payload, or apply
 * permissive defaults when no RSSI block was supplied (rssi == NULL).
 */
5354 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5355 struct mgmt_adv_rssi_thresholds *rssi)
5358 m->rssi.low_threshold = rssi->low_threshold;
5359 m->rssi.low_threshold_timeout =
5360 __le16_to_cpu(rssi->low_threshold_timeout);
5361 m->rssi.high_threshold = rssi->high_threshold;
5362 m->rssi.high_threshold_timeout =
5363 __le16_to_cpu(rssi->high_threshold_timeout);
5364 m->rssi.sampling_period = rssi->sampling_period;
5366 /* Default values. These numbers are the least constricting
5367 * parameters for MSFT API to work, so it behaves as if there
5368 * are no rssi parameter to consider. May need to be changed
5369 * if other API are to be supported.
5371 m->rssi.low_threshold = -127;
5372 m->rssi.low_threshold_timeout = 60;
5373 m->rssi.high_threshold = -127;
5374 m->rssi.high_threshold_timeout = 0;
5375 m->rssi.sampling_period = 0;
/* Validate and copy the pattern array from the command payload into the
 * monitor's pattern list.  Returns an MGMT status code: INVALID_PARAMS
 * on an out-of-range offset/length, NO_RESOURCES on allocation failure,
 * SUCCESS otherwise.
 */
5379 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5380 struct mgmt_adv_pattern *patterns)
5382 u8 offset = 0, length = 0;
5383 struct adv_pattern *p = NULL;
5386 for (i = 0; i < pattern_count; i++) {
5387 offset = patterns[i].offset;
5388 length = patterns[i].length;
/* offset + length must stay within the extended AD payload. */
5389 if (offset >= HCI_MAX_EXT_AD_LENGTH ||
5390 length > HCI_MAX_EXT_AD_LENGTH ||
5391 (offset + length) > HCI_MAX_EXT_AD_LENGTH)
5392 return MGMT_STATUS_INVALID_PARAMS;
5394 p = kmalloc(sizeof(*p), GFP_KERNEL);
5396 return MGMT_STATUS_NO_RESOURCES;
5398 p->ad_type = patterns[i].ad_type;
5399 p->offset = patterns[i].offset;
5400 p->length = patterns[i].length;
5401 memcpy(p->value, patterns[i].value, p->length);
5403 INIT_LIST_HEAD(&p->list);
5404 list_add(&p->list, &m->patterns);
5407 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR: register a pattern-based advertising
 * monitor with default (no-op) RSSI parameters.  Validates the payload
 * length against pattern_count, then hands off to
 * __add_adv_patterns_monitor() which owns the monitor from there.
 */
5410 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5411 void *data, u16 len)
5413 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5414 struct adv_monitor *m = NULL;
5415 u8 status = MGMT_STATUS_SUCCESS;
5416 size_t expected_size = sizeof(*cp);
5418 BT_DBG("request for %s", hdev->name);
/* At least one pattern must follow the fixed header. */
5420 if (len <= sizeof(*cp)) {
5421 status = MGMT_STATUS_INVALID_PARAMS;
5425 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5426 if (len != expected_size) {
5427 status = MGMT_STATUS_INVALID_PARAMS;
5431 m = kzalloc(sizeof(*m), GFP_KERNEL);
5433 status = MGMT_STATUS_NO_RESOURCES;
5437 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi selects the permissive defaults. */
5439 parse_adv_monitor_rssi(m, NULL);
5440 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5443 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5444 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: same as
 * add_adv_patterns_monitor() but with caller-supplied RSSI thresholds
 * taken from the command payload.
 */
5447 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5448 void *data, u16 len)
5450 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5451 struct adv_monitor *m = NULL;
5452 u8 status = MGMT_STATUS_SUCCESS;
5453 size_t expected_size = sizeof(*cp);
5455 BT_DBG("request for %s", hdev->name);
5457 if (len <= sizeof(*cp)) {
5458 status = MGMT_STATUS_INVALID_PARAMS;
5462 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5463 if (len != expected_size) {
5464 status = MGMT_STATUS_INVALID_PARAMS;
5468 m = kzalloc(sizeof(*m), GFP_KERNEL);
5470 status = MGMT_STATUS_NO_RESOURCES;
5474 INIT_LIST_HEAD(&m->patterns);
5476 parse_adv_monitor_rssi(m, &cp->rssi);
5477 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5480 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5481 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for the Remove Advertising Monitor command: echoes
 * the requested monitor_handle back, refreshes passive scanning, sends
 * the mgmt reply and drops the pending command.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5484 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5485 void *data, int status)
5487 struct mgmt_rp_remove_adv_monitor rp;
5488 struct mgmt_pending_cmd *cmd = data;
5489 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5493 rp.monitor_handle = cp->monitor_handle;
/* Monitor set changed — re-evaluate whether passive scan is needed. */
5496 hci_update_passive_scan(hdev);
5498 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5499 mgmt_status(status), &rp, sizeof(rp));
5500 mgmt_pending_remove(cmd);
5502 hci_dev_unlock(hdev);
5503 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5504 rp.monitor_handle, status);
/* hci_cmd_sync work: remove either every monitor or a single one by
 * handle.  The selecting condition between the two return paths is on an
 * elided line — presumably handle == 0 means "remove all"; verify against
 * the full source.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5507 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5509 struct mgmt_pending_cmd *cmd = data;
5510 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5511 u16 handle = __le16_to_cpu(cp->monitor_handle);
5514 return hci_remove_all_adv_monitor(hdev);
5516 return hci_remove_single_adv_monitor(hdev, handle);
/* Handler for MGMT_OP_REMOVE_ADV_MONITOR: rejects the request while any
 * conflicting LE/monitor command is still pending, otherwise queues
 * mgmt_remove_adv_monitor_sync() on the cmd_sync machinery with
 * mgmt_remove_adv_monitor_complete() as its completion.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5519 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5520 void *data, u16 len)
5522 struct mgmt_pending_cmd *cmd;
/* Serialize against other operations that touch the monitor/LE state. */
5527 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5528 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5529 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5530 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5531 status = MGMT_STATUS_BUSY;
5535 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5537 status = MGMT_STATUS_NO_RESOURCES;
5541 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5542 mgmt_remove_adv_monitor_complete);
/* Queueing failed — undo the pending entry and map err to a status. */
5545 mgmt_pending_remove(cmd);
5548 status = MGMT_STATUS_NO_RESOURCES;
5550 status = MGMT_STATUS_FAILED;
5555 hci_dev_unlock(hdev);
5560 hci_dev_unlock(hdev);
5561 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion handler for Read Local OOB Data: maps the HCI reply skb into
 * a mgmt_rp_read_local_oob_data response.  Legacy (non-SC) controllers
 * supply only the P-192 hash/rand, in which case the P-256 fields are
 * trimmed from the reply size; SC-capable controllers supply both pairs.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5565 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5567 struct mgmt_rp_read_local_oob_data mgmt_rp;
5568 size_t rp_size = sizeof(mgmt_rp);
5569 struct mgmt_pending_cmd *cmd = data;
5570 struct sk_buff *skb = cmd->skb;
5571 u8 status = mgmt_status(err);
/* Derive a status from the skb when err alone is inconclusive. */
5575 status = MGMT_STATUS_FAILED;
5576 else if (IS_ERR(skb))
5577 status = mgmt_status(PTR_ERR(skb));
5579 status = mgmt_status(skb->data[0]);
5582 bt_dev_dbg(hdev, "status %d", status);
5585 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5589 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5591 if (!bredr_sc_enabled(hdev)) {
5592 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
/* Guard against a short HCI reply before touching rp fields. */
5594 if (skb->len < sizeof(*rp)) {
5595 mgmt_cmd_status(cmd->sk, hdev->id,
5596 MGMT_OP_READ_LOCAL_OOB_DATA,
5597 MGMT_STATUS_FAILED);
5601 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5602 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* No P-256 data on legacy controllers — shrink the response. */
5604 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5606 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5608 if (skb->len < sizeof(*rp)) {
5609 mgmt_cmd_status(cmd->sk, hdev->id,
5610 MGMT_OP_READ_LOCAL_OOB_DATA,
5611 MGMT_STATUS_FAILED);
5615 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5616 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5618 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5619 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5622 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5623 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5626 if (skb && !IS_ERR(skb))
5629 mgmt_pending_free(cmd);
/* hci_cmd_sync work: issue the (extended when BR/EDR SC is enabled) Read
 * Local OOB Data HCI command and stash the reply skb on the pending cmd.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5632 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5634 struct mgmt_pending_cmd *cmd = data;
5636 if (bredr_sc_enabled(hdev))
5637 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5639 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5641 if (IS_ERR(cmd->skb))
5642 return PTR_ERR(cmd->skb);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA: requires a powered, SSP-capable
 * controller, then queues read_local_oob_data_sync() with
 * read_local_oob_data_complete() as its completion callback.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5647 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5648 void *data, u16 data_len)
5650 struct mgmt_pending_cmd *cmd;
5653 bt_dev_dbg(hdev, "sock %p", sk);
5657 if (!hdev_is_powered(hdev)) {
5658 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5659 MGMT_STATUS_NOT_POWERED);
5663 if (!lmp_ssp_capable(hdev)) {
5664 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5665 MGMT_STATUS_NOT_SUPPORTED);
5669 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5673 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5674 read_local_oob_data_complete);
5677 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5678 MGMT_STATUS_FAILED);
5681 mgmt_pending_free(cmd);
5685 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA.  The command has two valid
 * sizes: the short form carries only P-192 hash/rand (BR/EDR only), the
 * extended form carries both P-192 and P-256 pairs.  Zero-valued key
 * halves disable the corresponding OOB data; for LE addresses the P-192
 * half must be all-zero since legacy SMP OOB isn't implemented.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5689 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5690 void *data, u16 len)
5692 struct mgmt_addr_info *addr = data;
5695 bt_dev_dbg(hdev, "sock %p", sk);
5697 if (!bdaddr_type_is_valid(addr->type))
5698 return mgmt_cmd_complete(sk, hdev->id,
5699 MGMT_OP_ADD_REMOTE_OOB_DATA,
5700 MGMT_STATUS_INVALID_PARAMS,
5701 addr, sizeof(*addr));
/* Short (P-192 only) form — BR/EDR addresses only. */
5705 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5706 struct mgmt_cp_add_remote_oob_data *cp = data;
5709 if (cp->addr.type != BDADDR_BREDR) {
5710 err = mgmt_cmd_complete(sk, hdev->id,
5711 MGMT_OP_ADD_REMOTE_OOB_DATA,
5712 MGMT_STATUS_INVALID_PARAMS,
5713 &cp->addr, sizeof(cp->addr));
5717 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5718 cp->addr.type, cp->hash,
5719 cp->rand, NULL, NULL);
5721 status = MGMT_STATUS_FAILED;
5723 status = MGMT_STATUS_SUCCESS;
5725 err = mgmt_cmd_complete(sk, hdev->id,
5726 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5727 &cp->addr, sizeof(cp->addr));
5728 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5729 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5730 u8 *rand192, *hash192, *rand256, *hash256;
5733 if (bdaddr_type_is_le(cp->addr.type)) {
5734 /* Enforce zero-valued 192-bit parameters as
5735 * long as legacy SMP OOB isn't implemented.
5737 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5738 memcmp(cp->hash192, ZERO_KEY, 16)) {
5739 err = mgmt_cmd_complete(sk, hdev->id,
5740 MGMT_OP_ADD_REMOTE_OOB_DATA,
5741 MGMT_STATUS_INVALID_PARAMS,
5742 addr, sizeof(*addr));
5749 /* In case one of the P-192 values is set to zero,
5750 * then just disable OOB data for P-192.
5752 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5753 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5757 rand192 = cp->rand192;
5758 hash192 = cp->hash192;
5762 /* In case one of the P-256 values is set to zero, then just
5763 * disable OOB data for P-256.
5765 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5766 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5770 rand256 = cp->rand256;
5771 hash256 = cp->hash256;
5774 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5775 cp->addr.type, hash192, rand192,
5778 status = MGMT_STATUS_FAILED;
5780 status = MGMT_STATUS_SUCCESS;
5782 err = mgmt_cmd_complete(sk, hdev->id,
5783 MGMT_OP_ADD_REMOTE_OOB_DATA,
5784 status, &cp->addr, sizeof(cp->addr));
/* Neither recognized size — reject. */
5786 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5788 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5789 MGMT_STATUS_INVALID_PARAMS);
5793 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: BDADDR_ANY clears all
 * stored remote OOB data, otherwise the single entry for the given
 * BR/EDR address is removed.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5797 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5798 void *data, u16 len)
5800 struct mgmt_cp_remove_remote_oob_data *cp = data;
5804 bt_dev_dbg(hdev, "sock %p", sk);
5806 if (cp->addr.type != BDADDR_BREDR)
5807 return mgmt_cmd_complete(sk, hdev->id,
5808 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5809 MGMT_STATUS_INVALID_PARAMS,
5810 &cp->addr, sizeof(cp->addr));
/* BDADDR_ANY acts as a wildcard: wipe the whole OOB store. */
5814 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5815 hci_remote_oob_data_clear(hdev);
5816 status = MGMT_STATUS_SUCCESS;
5820 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5822 status = MGMT_STATUS_INVALID_PARAMS;
5824 status = MGMT_STATUS_SUCCESS;
5827 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5828 status, &cp->addr, sizeof(cp->addr));
5830 hci_dev_unlock(hdev);
/* Complete whichever start-discovery variant is pending (regular,
 * service, or limited) with the given HCI status.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5834 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5836 struct mgmt_pending_cmd *cmd;
5838 bt_dev_dbg(hdev, "status %u", status);
/* The three variants are mutually exclusive; find the active one. */
5842 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5844 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5847 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5850 cmd->cmd_complete(cmd, mgmt_status(status));
5851 mgmt_pending_remove(cmd);
5854 hci_dev_unlock(hdev);
/* Check whether the requested discovery type is usable on this
 * controller: LE/interleaved need LE support, BR/EDR needs BR/EDR
 * support; anything else is invalid.  Writes the failure reason into
 * *mgmt_status and (per the visible structure) returns a bool verdict.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5857 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5858 uint8_t *mgmt_status)
5861 case DISCOV_TYPE_LE:
5862 *mgmt_status = mgmt_le_support(hdev);
5866 case DISCOV_TYPE_INTERLEAVED:
5867 *mgmt_status = mgmt_le_support(hdev);
5871 case DISCOV_TYPE_BREDR:
5872 *mgmt_status = mgmt_bredr_support(hdev);
5877 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* cmd_sync completion for start_discovery_sync(): replies to userspace
 * and transitions the discovery state machine — back to STOPPED on error,
 * otherwise onward (target state on the elided tail of line 5899).
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5884 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5886 struct mgmt_pending_cmd *cmd = data;
/* Bail out if this cmd is no longer the pending one (it may have been
 * cancelled and replaced). */
5888 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5889 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5890 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5893 bt_dev_dbg(hdev, "err %d", err);
5895 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5897 mgmt_pending_remove(cmd);
5899 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* Thin cmd_sync adapter around hci_start_discovery_sync(). */
5903 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5905 return hci_start_discovery_sync(hdev);
/* Shared implementation behind Start Discovery and Start Limited
 * Discovery ('op' selects which).  Validates power state, busy/paused
 * conditions and the discovery type, resets the discovery filter, then
 * queues start_discovery_sync() and moves the state to STARTING.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5908 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5909 u16 op, void *data, u16 len)
5911 struct mgmt_cp_start_discovery *cp = data;
5912 struct mgmt_pending_cmd *cmd;
5916 bt_dev_dbg(hdev, "sock %p", sk);
5920 if (!hdev_is_powered(hdev)) {
5921 err = mgmt_cmd_complete(sk, hdev->id, op,
5922 MGMT_STATUS_NOT_POWERED,
5923 &cp->type, sizeof(cp->type));
5927 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5928 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5929 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5930 &cp->type, sizeof(cp->type));
5934 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5935 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5936 &cp->type, sizeof(cp->type));
5940 /* Can't start discovery when it is paused */
5941 if (hdev->discovery_paused) {
5942 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5943 &cp->type, sizeof(cp->type));
5947 /* Clear the discovery filter first to free any previously
5948 * allocated memory for the UUID list.
5950 hci_discovery_filter_clear(hdev);
5952 hdev->discovery.type = cp->type;
5953 hdev->discovery.report_invalid_rssi = false;
5954 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5955 hdev->discovery.limited = true;
5957 hdev->discovery.limited = false;
5959 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5965 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5966 start_discovery_complete);
5968 mgmt_pending_remove(cmd);
5972 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5975 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY entry point — delegates to the shared helper. */
5979 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5980 void *data, u16 len)
5982 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY entry point — shared helper with the
 * limited-discovery opcode. */
5986 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5987 void *data, u16 len)
5989 return start_discovery_internal(sk, hdev,
5990 MGMT_OP_START_LIMITED_DISCOVERY,
/* Handler for MGMT_OP_START_SERVICE_DISCOVERY: like start_discovery but
 * with result filtering — an RSSI threshold and an optional UUID list
 * (16 bytes per UUID) copied into hdev->discovery.  Validates the
 * variable-length payload (uuid_count bounded so the byte count fits in
 * u16) before queueing start_discovery_sync().
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
5994 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5995 void *data, u16 len)
5997 struct mgmt_cp_start_service_discovery *cp = data;
5998 struct mgmt_pending_cmd *cmd;
/* Upper bound keeps sizeof(*cp) + uuid_count*16 within u16 range. */
5999 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
6000 u16 uuid_count, expected_len;
6004 bt_dev_dbg(hdev, "sock %p", sk);
6008 if (!hdev_is_powered(hdev)) {
6009 err = mgmt_cmd_complete(sk, hdev->id,
6010 MGMT_OP_START_SERVICE_DISCOVERY,
6011 MGMT_STATUS_NOT_POWERED,
6012 &cp->type, sizeof(cp->type));
6016 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6017 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6018 err = mgmt_cmd_complete(sk, hdev->id,
6019 MGMT_OP_START_SERVICE_DISCOVERY,
6020 MGMT_STATUS_BUSY, &cp->type,
6025 if (hdev->discovery_paused) {
6026 err = mgmt_cmd_complete(sk, hdev->id,
6027 MGMT_OP_START_SERVICE_DISCOVERY,
6028 MGMT_STATUS_BUSY, &cp->type,
6033 uuid_count = __le16_to_cpu(cp->uuid_count);
6034 if (uuid_count > max_uuid_count) {
6035 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6037 err = mgmt_cmd_complete(sk, hdev->id,
6038 MGMT_OP_START_SERVICE_DISCOVERY,
6039 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6044 expected_len = sizeof(*cp) + uuid_count * 16;
6045 if (expected_len != len) {
6046 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6048 err = mgmt_cmd_complete(sk, hdev->id,
6049 MGMT_OP_START_SERVICE_DISCOVERY,
6050 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6055 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6056 err = mgmt_cmd_complete(sk, hdev->id,
6057 MGMT_OP_START_SERVICE_DISCOVERY,
6058 status, &cp->type, sizeof(cp->type));
6062 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6069 /* Clear the discovery filter first to free any previously
6070 * allocated memory for the UUID list.
6072 hci_discovery_filter_clear(hdev);
6074 hdev->discovery.result_filtering = true;
6075 hdev->discovery.type = cp->type;
6076 hdev->discovery.rssi = cp->rssi;
6077 hdev->discovery.uuid_count = uuid_count;
6079 if (uuid_count > 0) {
6080 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6082 if (!hdev->discovery.uuids) {
6083 err = mgmt_cmd_complete(sk, hdev->id,
6084 MGMT_OP_START_SERVICE_DISCOVERY,
6086 &cp->type, sizeof(cp->type));
6087 mgmt_pending_remove(cmd);
6092 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6093 start_discovery_complete);
6095 mgmt_pending_remove(cmd);
6099 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6102 hci_dev_unlock(hdev);
/* Complete a pending Stop Discovery command with the given HCI status. */
6106 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6108 struct mgmt_pending_cmd *cmd;
6110 bt_dev_dbg(hdev, "status %u", status);
6114 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6116 cmd->cmd_complete(cmd, mgmt_status(status));
6117 mgmt_pending_remove(cmd);
6120 hci_dev_unlock(hdev);
/* cmd_sync completion for stop_discovery_sync(): replies to userspace and
 * marks discovery STOPPED.  Skips everything if this cmd was superseded.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6123 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6125 struct mgmt_pending_cmd *cmd = data;
6127 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6130 bt_dev_dbg(hdev, "err %d", err);
6132 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6134 mgmt_pending_remove(cmd);
6137 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* Thin cmd_sync adapter around hci_stop_discovery_sync(). */
6140 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6142 return hci_stop_discovery_sync(hdev);
/* Handler for MGMT_OP_STOP_DISCOVERY: rejects when no discovery is
 * active or when the requested type doesn't match the running one, then
 * queues stop_discovery_sync() and moves the state to STOPPING.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6145 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6148 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6149 struct mgmt_pending_cmd *cmd;
6152 bt_dev_dbg(hdev, "sock %p", sk);
6156 if (!hci_discovery_active(hdev)) {
6157 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6158 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6159 sizeof(mgmt_cp->type));
6163 if (hdev->discovery.type != mgmt_cp->type) {
6164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6165 MGMT_STATUS_INVALID_PARAMS,
6166 &mgmt_cp->type, sizeof(mgmt_cp->type));
6170 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6176 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6177 stop_discovery_complete);
6179 mgmt_pending_remove(cmd);
6183 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6186 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME: userspace tells us whether the name
 * of a discovered device is already known.  Known -> mark the inquiry
 * cache entry NAME_KNOWN; unknown -> NAME_NEEDED and schedule resolution
 * via hci_inquiry_cache_update_resolve().
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6190 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6193 struct mgmt_cp_confirm_name *cp = data;
6194 struct inquiry_entry *e;
6197 bt_dev_dbg(hdev, "sock %p", sk);
6201 if (!hci_discovery_active(hdev)) {
6202 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6203 MGMT_STATUS_FAILED, &cp->addr,
6208 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6210 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6211 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6216 if (cp->name_known) {
6217 e->name_state = NAME_KNOWN;
6220 e->name_state = NAME_NEEDED;
6221 hci_inquiry_cache_update_resolve(hdev, e);
6224 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6225 &cp->addr, sizeof(cp->addr));
6228 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE: adds the address to the controller's
 * reject list and emits MGMT_EV_DEVICE_BLOCKED on success.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6232 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6235 struct mgmt_cp_block_device *cp = data;
6239 bt_dev_dbg(hdev, "sock %p", sk);
6241 if (!bdaddr_type_is_valid(cp->addr.type))
6242 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6243 MGMT_STATUS_INVALID_PARAMS,
6244 &cp->addr, sizeof(cp->addr));
6248 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6251 status = MGMT_STATUS_FAILED;
6255 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6257 status = MGMT_STATUS_SUCCESS;
6260 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6261 &cp->addr, sizeof(cp->addr));
6263 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE: mirror of block_device() — removes
 * the address from the reject list and emits MGMT_EV_DEVICE_UNBLOCKED.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6268 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6271 struct mgmt_cp_unblock_device *cp = data;
6275 bt_dev_dbg(hdev, "sock %p", sk);
6277 if (!bdaddr_type_is_valid(cp->addr.type))
6278 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6279 MGMT_STATUS_INVALID_PARAMS,
6280 &cp->addr, sizeof(cp->addr));
6284 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
/* Entry not found maps to INVALID_PARAMS rather than FAILED. */
6287 status = MGMT_STATUS_INVALID_PARAMS;
6291 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6293 status = MGMT_STATUS_SUCCESS;
6296 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6297 &cp->addr, sizeof(cp->addr));
6299 hci_dev_unlock(hdev);
/* cmd_sync work for Set Device ID: the DI record lives in the EIR, so
 * just regenerate it. */
6304 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6306 return hci_update_eir_sync(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID: stores the Device ID quadruple
 * (source must be 0x0000-0x0002) and schedules an EIR update.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6309 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6312 struct mgmt_cp_set_device_id *cp = data;
6316 bt_dev_dbg(hdev, "sock %p", sk);
6318 source = __le16_to_cpu(cp->source);
6320 if (source > 0x0002)
6321 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6322 MGMT_STATUS_INVALID_PARAMS);
6326 hdev->devid_source = source;
6327 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6328 hdev->devid_product = __le16_to_cpu(cp->product);
6329 hdev->devid_version = __le16_to_cpu(cp->version);
6331 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6334 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6336 hci_dev_unlock(hdev);
/* Log-only completion for re-enabling an advertising instance. */
6341 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6344 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6346 bt_dev_dbg(hdev, "status %d", err);
/* Completion for Set Advertising: on error, fail all pending commands;
 * on success, sync HCI_ADVERTISING to the controller's HCI_LE_ADV state,
 * answer the pending commands and broadcast new settings.  If the global
 * setting was just disabled while instances exist, re-arm multi-instance
 * advertising.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6349 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6351 struct cmd_lookup match = { NULL, hdev };
6353 struct adv_info *adv_instance;
6354 u8 status = mgmt_status(err);
6357 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6358 cmd_status_rsp, &status);
6362 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6363 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6365 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6367 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6370 new_settings(hdev, match.sk);
6375 /* If "Set Advertising" was just disabled and instance advertising was
6376 * set up earlier, then re-enable multi-instance advertising.
6378 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6379 list_empty(&hdev->adv_instances))
6382 instance = hdev->cur_adv_instance;
/* No current instance — fall back to the first configured one. */
6384 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6385 struct adv_info, list);
6389 instance = adv_instance->instance;
6392 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6394 enable_advertising_instance(hdev, err);
/* cmd_sync work for Set Advertising: record the connectable variant
 * (val == 0x02) in HCI_ADVERTISING_CONNECTABLE, cancel any instance
 * timeout, switch to instance 0 and (re)start or stop advertising —
 * extended or legacy depending on controller capability.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6397 static int set_adv_sync(struct hci_dev *hdev, void *data)
6399 struct mgmt_pending_cmd *cmd = data;
6400 struct mgmt_mode *cp = cmd->param;
6403 if (cp->val == 0x02)
6404 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6406 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6408 cancel_adv_timeout(hdev);
6411 /* Switch to instance "0" for the Set Advertising setting.
6412 * We cannot use update_[adv|scan_rsp]_data() here as the
6413 * HCI_ADVERTISING flag is not yet set.
6415 hdev->cur_adv_instance = 0x00;
6417 if (ext_adv_capable(hdev)) {
6418 hci_start_ext_adv_sync(hdev, 0x00);
6420 hci_update_adv_data_sync(hdev, 0x00);
6421 hci_update_scan_rsp_data_sync(hdev, 0x00);
6422 hci_enable_advertising_sync(hdev);
6425 hci_disable_advertising_sync(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING (val: 0x00 off, 0x01 on,
 * 0x02 connectable).  Requires LE support and no advertising pause.
 * Several conditions (powered off, no effective change, mesh mode, LE
 * connections, active LE scan) are handled purely by flag toggling plus a
 * direct response; otherwise the change is queued via set_adv_sync().
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6431 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6434 struct mgmt_mode *cp = data;
6435 struct mgmt_pending_cmd *cmd;
6439 bt_dev_dbg(hdev, "sock %p", sk);
6441 status = mgmt_le_support(hdev);
6443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6448 MGMT_STATUS_INVALID_PARAMS);
6450 if (hdev->advertising_paused)
6451 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6458 /* The following conditions are ones which mean that we should
6459 * not do any HCI communication but directly send a mgmt
6460 * response to user space (after toggling the flag if
6463 if (!hdev_is_powered(hdev) ||
6464 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6465 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6466 hci_dev_test_flag(hdev, HCI_MESH) ||
6467 hci_conn_num(hdev, LE_LINK) > 0 ||
6468 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6469 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6473 hdev->cur_adv_instance = 0x00;
6474 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6475 if (cp->val == 0x02)
6476 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6478 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6480 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6481 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6484 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6489 err = new_settings(hdev, sk);
6494 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6495 pending_find(MGMT_OP_SET_LE, hdev)) {
6496 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6501 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6505 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6506 set_advertising_complete);
6509 mgmt_pending_remove(cmd);
6512 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: only accepted while powered
 * off on an LE-capable controller.  A non-ANY address must not be
 * BDADDR_NONE and must have both top bits set (random static address
 * format per the Core spec).
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6516 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6517 void *data, u16 len)
6519 struct mgmt_cp_set_static_address *cp = data;
6522 bt_dev_dbg(hdev, "sock %p", sk);
6524 if (!lmp_le_capable(hdev))
6525 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6526 MGMT_STATUS_NOT_SUPPORTED);
6528 if (hdev_is_powered(hdev))
6529 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6530 MGMT_STATUS_REJECTED);
6532 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6533 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6534 return mgmt_cmd_status(sk, hdev->id,
6535 MGMT_OP_SET_STATIC_ADDRESS,
6536 MGMT_STATUS_INVALID_PARAMS);
6538 /* Two most significant bits shall be set */
6539 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6540 return mgmt_cmd_status(sk, hdev->id,
6541 MGMT_OP_SET_STATIC_ADDRESS,
6542 MGMT_STATUS_INVALID_PARAMS);
6547 bacpy(&hdev->static_addr, &cp->bdaddr);
6549 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6553 err = new_settings(hdev, sk);
6556 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: stores LE scan interval/window
 * (each 0x0004-0x4000, window <= interval) and restarts passive scanning
 * if it is currently running so the new values take effect.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6560 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6561 void *data, u16 len)
6563 struct mgmt_cp_set_scan_params *cp = data;
6564 __u16 interval, window;
6567 bt_dev_dbg(hdev, "sock %p", sk);
6569 if (!lmp_le_capable(hdev))
6570 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6571 MGMT_STATUS_NOT_SUPPORTED);
6573 interval = __le16_to_cpu(cp->interval);
6575 if (interval < 0x0004 || interval > 0x4000)
6576 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6577 MGMT_STATUS_INVALID_PARAMS);
6579 window = __le16_to_cpu(cp->window);
6581 if (window < 0x0004 || window > 0x4000)
6582 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6583 MGMT_STATUS_INVALID_PARAMS);
6585 if (window > interval)
6586 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6587 MGMT_STATUS_INVALID_PARAMS);
6591 hdev->le_scan_interval = interval;
6592 hdev->le_scan_window = window;
6594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6597 /* If background scan is running, restart it so new parameters are
6600 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6601 hdev->discovery.state == DISCOVERY_STOPPED)
6602 hci_update_passive_scan(hdev);
6604 hci_dev_unlock(hdev);
/* Completion for Set Fast Connectable: on error send a status reply,
 * otherwise sync the HCI_FAST_CONNECTABLE flag to the requested value
 * and broadcast the updated settings.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6609 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6611 struct mgmt_pending_cmd *cmd = data;
6613 bt_dev_dbg(hdev, "err %d", err);
6616 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6619 struct mgmt_mode *cp = cmd->param;
6622 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6624 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6626 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6627 new_settings(hdev, cmd->sk);
6630 mgmt_pending_free(cmd);
/* cmd_sync work: forward the requested fast-connectable mode to the
 * controller. */
6633 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6635 struct mgmt_pending_cmd *cmd = data;
6636 struct mgmt_mode *cp = cmd->param;
6638 return hci_write_fast_connectable_sync(hdev, cp->val);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: needs BR/EDR enabled and
 * Bluetooth 1.2+.  No-op if the flag already matches; while powered off
 * only the flag is toggled; otherwise the write is queued via
 * write_fast_connectable_sync().
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6641 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6642 void *data, u16 len)
6644 struct mgmt_mode *cp = data;
6645 struct mgmt_pending_cmd *cmd;
6648 bt_dev_dbg(hdev, "sock %p", sk);
6650 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6651 hdev->hci_ver < BLUETOOTH_VER_1_2)
6652 return mgmt_cmd_status(sk, hdev->id,
6653 MGMT_OP_SET_FAST_CONNECTABLE,
6654 MGMT_STATUS_NOT_SUPPORTED);
6656 if (cp->val != 0x00 && cp->val != 0x01)
6657 return mgmt_cmd_status(sk, hdev->id,
6658 MGMT_OP_SET_FAST_CONNECTABLE,
6659 MGMT_STATUS_INVALID_PARAMS);
6663 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6664 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6668 if (!hdev_is_powered(hdev)) {
6669 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6670 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6671 new_settings(hdev, sk);
6675 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6680 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6681 fast_connectable_complete);
6684 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6685 MGMT_STATUS_FAILED);
6688 mgmt_pending_free(cmd);
6692 hci_dev_unlock(hdev);
/* Completion for Set BR/EDR: on failure roll the optimistically-set
 * HCI_BREDR_ENABLED flag back and report the error; on success confirm
 * the new settings to the requester and broadcast them.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6697 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6699 struct mgmt_pending_cmd *cmd = data;
6701 bt_dev_dbg(hdev, "err %d", err);
6704 u8 mgmt_err = mgmt_status(err);
6706 /* We need to restore the flag if related HCI commands
6709 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6711 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6713 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6714 new_settings(hdev, cmd->sk);
6717 mgmt_pending_free(cmd);
/* cmd_sync work for Set BR/EDR: disable fast connectable, refresh page/
 * inquiry scan, and update the advertising data flags (scan response is
 * untouched since only the AD flags change).
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6720 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6724 status = hci_write_fast_connectable_sync(hdev, false);
6727 status = hci_update_scan_sync(hdev);
6729 /* Since only the advertising data flags will change, there
6730 * is no need to update the scan response data.
6733 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* Handler for MGMT_OP_SET_BREDR on dual-mode controllers.  Requires both
 * transports and LE enabled.  Powered off: just toggle flags (disabling
 * BR/EDR also clears its dependent flags).  Powered on: disabling is
 * rejected, and re-enabling is rejected when a static address or Secure
 * Connections is in use (see the inline rationale).  Otherwise the
 * HCI_BREDR_ENABLED flag is set optimistically and set_bredr_sync() is
 * queued; set_bredr_complete() rolls back on failure.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6738 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6740 struct mgmt_mode *cp = data;
6741 struct mgmt_pending_cmd *cmd;
6744 bt_dev_dbg(hdev, "sock %p", sk);
6746 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6747 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6748 MGMT_STATUS_NOT_SUPPORTED);
6750 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6751 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6752 MGMT_STATUS_REJECTED);
6754 if (cp->val != 0x00 && cp->val != 0x01)
6755 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6756 MGMT_STATUS_INVALID_PARAMS);
6760 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6761 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6765 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR also clears every flag that depends on it. */
6767 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6768 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6769 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6770 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6771 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6774 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6776 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6780 err = new_settings(hdev, sk);
6784 /* Reject disabling when powered on */
6786 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6787 MGMT_STATUS_REJECTED);
6790 /* When configuring a dual-mode controller to operate
6791 * with LE only and using a static address, then switching
6792 * BR/EDR back on is not allowed.
6794 * Dual-mode controllers shall operate with the public
6795 * address as its identity address for BR/EDR and LE. So
6796 * reject the attempt to create an invalid configuration.
6798 * The same restrictions applies when secure connections
6799 * has been enabled. For BR/EDR this is a controller feature
6800 * while for LE it is a host stack feature. This means that
6801 * switching BR/EDR back on when secure connections has been
6802 * enabled is not a supported transaction.
6804 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6805 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6806 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6807 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6808 MGMT_STATUS_REJECTED);
6813 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6817 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6818 set_bredr_complete);
6821 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6822 MGMT_STATUS_FAILED);
6824 mgmt_pending_free(cmd);
6829 /* We need to flip the bit already here so that
6830 * hci_req_update_adv_data generates the correct flags.
6832 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6835 hci_dev_unlock(hdev);
/* Completion for Set Secure Connections: on error report it; on success
 * translate cp->val (0x00 off / 0x01 on / 0x02 SC-only) into the
 * HCI_SC_ENABLED / HCI_SC_ONLY flag pair and broadcast new settings.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6839 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6841 struct mgmt_pending_cmd *cmd = data;
6842 struct mgmt_mode *cp;
6844 bt_dev_dbg(hdev, "err %d", err);
6847 u8 mgmt_err = mgmt_status(err);
6849 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6857 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6858 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6861 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6862 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6865 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6866 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6870 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6871 new_settings(hdev, cmd->sk);
6874 mgmt_pending_free(cmd);
/* cmd_sync work for Set Secure Connections: temporarily force
 * HCI_SC_ENABLED so the support write is actually issued, then write the
 * requested SC support value to the controller.
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6877 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6879 struct mgmt_pending_cmd *cmd = data;
6880 struct mgmt_mode *cp = cmd->param;
6883 /* Force write of val */
6884 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6886 return hci_write_sc_support_sync(hdev, val);
/* Handler for MGMT_OP_SET_SECURE_CONN (val: 0x00 off, 0x01 on,
 * 0x02 SC-only).  Needs SC or LE capability; with BR/EDR active SSP must
 * be enabled too.  When no HCI write is possible/needed (powered off,
 * not SC-capable, BR/EDR disabled) only the flags are toggled; an
 * already-matching state short-circuits; otherwise the write is queued
 * via set_secure_conn_sync().
 * NOTE(review): fragmentary listing (elided lines); comments only.
 */
6889 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6890 void *data, u16 len)
6892 struct mgmt_mode *cp = data;
6893 struct mgmt_pending_cmd *cmd;
6897 bt_dev_dbg(hdev, "sock %p", sk);
6899 if (!lmp_sc_capable(hdev) &&
6900 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6901 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6902 MGMT_STATUS_NOT_SUPPORTED);
6904 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6905 lmp_sc_capable(hdev) &&
6906 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6908 MGMT_STATUS_REJECTED);
6910 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6911 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6912 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: nothing to write to the controller. */
6916 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6917 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6921 changed = !hci_dev_test_and_set_flag(hdev,
6923 if (cp->val == 0x02)
6924 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6926 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6928 changed = hci_dev_test_and_clear_flag(hdev,
6930 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6933 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6938 err = new_settings(hdev, sk);
6945 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6946 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6947 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6951 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6955 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6956 set_secure_conn_complete);
6959 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6960 MGMT_STATUS_FAILED);
6962 mgmt_pending_free(cmd);
6966 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEBUG_KEYS handler.
 *
 * val 0x00 clears, 0x01 keeps, 0x02 keeps AND uses debug keys:
 * HCI_KEEP_DEBUG_KEYS tracks retention, HCI_USE_DEBUG_KEYS tracks active
 * use.  If the adapter is powered with SSP enabled and the "use" state
 * changed, Write SSP Debug Mode is sent to the controller.  Finishes with
 * a settings response and, on change, a New Settings event.
 * NOTE(review): interior lines elided (hci_dev_lock and the surrounding
 * if/else structure are not visible); code kept verbatim.
 */
6970 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6971 void *data, u16 len)
6973 struct mgmt_mode *cp = data;
6974 bool changed, use_changed;
6977 bt_dev_dbg(hdev, "sock %p", sk);
6979 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6980 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6981 MGMT_STATUS_INVALID_PARAMS);
6986 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6988 changed = hci_dev_test_and_clear_flag(hdev,
6989 HCI_KEEP_DEBUG_KEYS);
6991 if (cp->val == 0x02)
6992 use_changed = !hci_dev_test_and_set_flag(hdev,
6993 HCI_USE_DEBUG_KEYS);
6995 use_changed = hci_dev_test_and_clear_flag(hdev,
6996 HCI_USE_DEBUG_KEYS);
6998 if (hdev_is_powered(hdev) && use_changed &&
6999 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7000 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
7001 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7002 sizeof(mode), &mode);
7005 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7010 err = new_settings(hdev, sk);
7013 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PRIVACY handler.
 *
 * Requires LE support; privacy must be 0x00 (off), 0x01 (on) or 0x02
 * (limited privacy).  Enabling stores the caller-supplied IRK, marks the
 * RPA expired so a fresh one is generated, and flags all adv instances
 * for RPA regeneration; disabling zeroes the IRK and clears those flags.
 * HCI_RPA_RESOLVING is set unconditionally since userspace supporting
 * this command is expected to handle IRKs.  Note the Tizen patch has
 * commented out the upstream powered-adapter rejection (see in-body
 * comment at 7035).
 * NOTE(review): interior lines elided (the #if/comment delimiters around
 * the disabled rejection and hci_dev_lock are not visible); code verbatim.
 */
7017 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7020 struct mgmt_cp_set_privacy *cp = cp_data;
7024 bt_dev_dbg(hdev, "sock %p", sk);
7026 if (!lmp_le_capable(hdev))
7027 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7028 MGMT_STATUS_NOT_SUPPORTED);
7030 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7031 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7032 MGMT_STATUS_INVALID_PARAMS);
7035 /* commenting out since set privacy command is always rejected
7036 * if this condition is enabled.
7038 if (hdev_is_powered(hdev))
7039 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7040 MGMT_STATUS_REJECTED);
7045 /* If user space supports this command it is also expected to
7046 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7048 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7051 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7052 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7053 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7054 hci_adv_instances_set_rpa_expired(hdev, true);
7055 if (cp->privacy == 0x02)
7056 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7058 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7060 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7061 memset(hdev->irk, 0, sizeof(hdev->irk));
7062 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7063 hci_adv_instances_set_rpa_expired(hdev, false);
7064 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7067 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7072 err = new_settings(hdev, sk);
7075 hci_dev_unlock(hdev);
/* Validate an IRK entry's address type: LE public is accepted as-is;
 * an LE random address must be static (top two bits of the MSB set,
 * per BT Core Spec Vol 6 Part B 1.3.2.1).
 * NOTE(review): return statements are elided in this extract.
 */
7079 static bool irk_is_valid(struct mgmt_irk_info *irk)
7081 switch (irk->addr.type) {
7082 case BDADDR_LE_PUBLIC:
7085 case BDADDR_LE_RANDOM:
7086 /* Two most significant bits shall be set */
7087 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_IRKS handler.
 *
 * Replaces the SMP IRK store wholesale: validates the count against the
 * u16 payload bound and the exact struct_size() length, checks each entry
 * with irk_is_valid(), clears the existing IRK list, then adds every
 * non-blocked key (skipping those matching the blocked-key list).  Keys
 * loaded over BR/EDR keep BDADDR_BREDR as their address type.  Finally
 * sets HCI_RPA_RESOLVING and completes the command with success.
 * NOTE(review): interior lines elided (hci_dev_lock, continue on blocked
 * key, trailing irk->val arg split are not fully visible); code verbatim.
 */
7095 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7098 struct mgmt_cp_load_irks *cp = cp_data;
7099 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7100 sizeof(struct mgmt_irk_info));
7101 u16 irk_count, expected_len;
7104 bt_dev_dbg(hdev, "sock %p", sk);
7106 if (!lmp_le_capable(hdev))
7107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7108 MGMT_STATUS_NOT_SUPPORTED);
7110 irk_count = __le16_to_cpu(cp->irk_count);
7111 if (irk_count > max_irk_count) {
7112 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7115 MGMT_STATUS_INVALID_PARAMS);
7118 expected_len = struct_size(cp, irks, irk_count);
7119 if (expected_len != len) {
7120 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7122 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7123 MGMT_STATUS_INVALID_PARAMS);
7126 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7128 for (i = 0; i < irk_count; i++) {
7129 struct mgmt_irk_info *key = &cp->irks[i];
7131 if (!irk_is_valid(key))
7132 return mgmt_cmd_status(sk, hdev->id,
7134 MGMT_STATUS_INVALID_PARAMS);
7139 hci_smp_irks_clear(hdev);
7141 for (i = 0; i < irk_count; i++) {
7142 struct mgmt_irk_info *irk = &cp->irks[i];
7143 u8 addr_type = le_addr_type(irk->addr.type);
7145 if (hci_is_blocked_key(hdev,
7146 HCI_BLOCKED_KEY_TYPE_IRK,
7148 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7153 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
7154 if (irk->addr.type == BDADDR_BREDR)
7155 addr_type = BDADDR_BREDR;
7157 hci_add_irk(hdev, &irk->addr.bdaddr,
7158 addr_type, irk->val,
7162 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7164 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7166 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_ADVERTISING_PARAMS handler.
 *
 * Stores LE advertising interval (0x0020..0x4000, min <= max, per the
 * HCI LE Set Advertising Parameters spec), filter policy and adv type in
 * hdev for later use.  Rejected while HCI_ADVERTISING is active and when
 * the controller lacks LE.  Completes immediately with success.
 * NOTE(review): interior lines elided (the rejection status at 7189 and
 * hci_dev_lock are not visible); code kept verbatim.
 */
7172 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7173 void *data, u16 len)
7175 struct mgmt_cp_set_advertising_params *cp = data;
7180 BT_DBG("%s", hdev->name);
7182 if (!lmp_le_capable(hdev))
7183 return mgmt_cmd_status(sk, hdev->id,
7184 MGMT_OP_SET_ADVERTISING_PARAMS,
7185 MGMT_STATUS_NOT_SUPPORTED);
7187 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7188 return mgmt_cmd_status(sk, hdev->id,
7189 MGMT_OP_SET_ADVERTISING_PARAMS,
7192 min_interval = __le16_to_cpu(cp->interval_min);
7193 max_interval = __le16_to_cpu(cp->interval_max);
7195 if (min_interval > max_interval ||
7196 min_interval < 0x0020 || max_interval > 0x4000)
7197 return mgmt_cmd_status(sk, hdev->id,
7198 MGMT_OP_SET_ADVERTISING_PARAMS,
7199 MGMT_STATUS_INVALID_PARAMS);
7203 hdev->le_adv_min_interval = min_interval;
7204 hdev->le_adv_max_interval = max_interval;
7205 hdev->adv_filter_policy = cp->filter_policy;
7206 hdev->adv_type = cp->type;
7208 err = mgmt_cmd_complete(sk, hdev->id,
7209 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7211 hci_dev_unlock(hdev);
/* hci_request completion for Set Advertising Data: looks up the pending
 * mgmt command and replies with a status on HCI error or a success
 * completion otherwise, then removes the pending entry.
 * NOTE(review): interior lines elided (lock, NULL-cmd bailout, the
 * success-path payload args); code kept verbatim.
 */
7216 static void set_advertising_data_complete(struct hci_dev *hdev,
7217 u8 status, u16 opcode)
7219 struct mgmt_cp_set_advertising_data *cp;
7220 struct mgmt_pending_cmd *cmd;
7222 BT_DBG("status 0x%02x", status);
7226 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7233 mgmt_cmd_status(cmd->sk, hdev->id,
7234 MGMT_OP_SET_ADVERTISING_DATA,
7235 mgmt_status(status));
7237 mgmt_cmd_complete(cmd->sk, hdev->id,
7238 MGMT_OP_SET_ADVERTISING_DATA, 0,
7241 mgmt_pending_remove(cmd);
7244 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_ADVERTISING_DATA handler.
 *
 * Validates LE support, rejects when a same-op command is already
 * pending, bounds the payload to HCI_MAX_AD_LENGTH, then queues an
 * HCI LE Set Advertising Data request whose completion is handled in
 * set_advertising_data_complete.  The raw mgmt payload is copied
 * directly into the HCI command's data field.
 * NOTE(review): interior lines elided (lock, busy status, cmd NULL check,
 * adv.length assignment); code kept verbatim.
 */
7247 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7248 void *data, u16 len)
7250 struct mgmt_pending_cmd *cmd;
7251 struct hci_request req;
7252 struct mgmt_cp_set_advertising_data *cp = data;
7253 struct hci_cp_le_set_adv_data adv;
7256 BT_DBG("%s", hdev->name);
7258 if (!lmp_le_capable(hdev)) {
7259 return mgmt_cmd_status(sk, hdev->id,
7260 MGMT_OP_SET_ADVERTISING_DATA,
7261 MGMT_STATUS_NOT_SUPPORTED);
7266 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7267 err = mgmt_cmd_status(sk, hdev->id,
7268 MGMT_OP_SET_ADVERTISING_DATA,
7273 if (len > HCI_MAX_AD_LENGTH) {
7274 err = mgmt_cmd_status(sk, hdev->id,
7275 MGMT_OP_SET_ADVERTISING_DATA,
7276 MGMT_STATUS_INVALID_PARAMS);
7280 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7287 hci_req_init(&req, hdev);
7289 memset(&adv, 0, sizeof(adv));
7290 memcpy(adv.data, cp->data, len);
7293 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7295 err = hci_req_run(&req, set_advertising_data_complete);
7297 mgmt_pending_remove(cmd);
7300 hci_dev_unlock(hdev);
7305 /* Adv White List feature */
/* hci_request completion for Add Device White List: replies to the
 * pending mgmt command with the HCI status (echoing the request params
 * on success) and removes the pending entry.
 * NOTE(review): interior lines elided (lock, NULL-cmd check, cp
 * assignment from cmd->param); code kept verbatim.
 */
7306 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7308 struct mgmt_cp_add_dev_white_list *cp;
7309 struct mgmt_pending_cmd *cmd;
7311 BT_DBG("status 0x%02x", status);
7315 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7322 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7323 mgmt_status(status));
7325 mgmt_cmd_complete(cmd->sk, hdev->id,
7326 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7328 mgmt_pending_remove(cmd);
7331 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_ADD_DEV_WHITE_LIST handler: forwards the mgmt payload
 * straight to HCI LE Add Device To Accept List (formerly "white list").
 * Requires LE support and a powered adapter; rejects duplicates while a
 * same-op command is pending.  Completion in add_white_list_complete.
 * NOTE(review): interior lines elided (lock, busy status, cmd NULL
 * check); code kept verbatim.
 */
7334 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7335 void *data, u16 len)
7337 struct mgmt_pending_cmd *cmd;
7338 struct mgmt_cp_add_dev_white_list *cp = data;
7339 struct hci_request req;
7342 BT_DBG("%s", hdev->name);
7344 if (!lmp_le_capable(hdev))
7345 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7346 MGMT_STATUS_NOT_SUPPORTED);
7348 if (!hdev_is_powered(hdev))
7349 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7350 MGMT_STATUS_REJECTED);
7354 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7355 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7360 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7366 hci_req_init(&req, hdev);
7368 hci_req_add(&req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(*cp), cp);
7370 err = hci_req_run(&req, add_white_list_complete);
7372 mgmt_pending_remove(cmd);
7377 hci_dev_unlock(hdev);
/* hci_request completion for Remove Device From White List: mirrors
 * add_white_list_complete — status reply on HCI error, success
 * completion otherwise, then drop the pending entry.
 * NOTE(review): interior lines elided; code kept verbatim.
 */
7382 static void remove_from_white_list_complete(struct hci_dev *hdev,
7383 u8 status, u16 opcode)
7385 struct mgmt_cp_remove_dev_from_white_list *cp;
7386 struct mgmt_pending_cmd *cmd;
7388 BT_DBG("status 0x%02x", status);
7392 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7399 mgmt_cmd_status(cmd->sk, hdev->id,
7400 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7401 mgmt_status(status));
7403 mgmt_cmd_complete(cmd->sk, hdev->id,
7404 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7407 mgmt_pending_remove(cmd);
7410 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST handler: forwards the payload
 * to HCI LE Remove Device From Accept List.  Same precondition pattern as
 * add_white_list (LE capable, powered, no same-op pending); completion
 * in remove_from_white_list_complete.
 * NOTE(review): interior lines elided (lock, busy status, cmd NULL
 * check); code kept verbatim.
 */
7413 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7414 void *data, u16 len)
7416 struct mgmt_pending_cmd *cmd;
7417 struct mgmt_cp_remove_dev_from_white_list *cp = data;
7418 struct hci_request req;
7421 BT_DBG("%s", hdev->name);
7423 if (!lmp_le_capable(hdev))
7424 return mgmt_cmd_status(sk, hdev->id,
7425 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7426 MGMT_STATUS_NOT_SUPPORTED);
7428 if (!hdev_is_powered(hdev))
7429 return mgmt_cmd_status(sk, hdev->id,
7430 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7431 MGMT_STATUS_REJECTED);
7435 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7436 err = mgmt_cmd_status(sk, hdev->id,
7437 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7442 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7449 hci_req_init(&req, hdev);
7451 hci_req_add(&req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(*cp), cp);
7453 err = hci_req_run(&req, remove_from_white_list_complete);
7455 mgmt_pending_remove(cmd);
7460 hci_dev_unlock(hdev);
/* hci_request completion for Clear Device White List: status reply on
 * HCI error, empty success completion otherwise, then drop the pending
 * entry.  NOTE(review): interior lines elided; code kept verbatim.
 */
7465 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7468 struct mgmt_pending_cmd *cmd;
7470 BT_DBG("status 0x%02x", status);
7474 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7479 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7480 mgmt_status(status));
7482 mgmt_cmd_complete(cmd->sk, hdev->id,
7483 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7486 mgmt_pending_remove(cmd);
7489 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_CLEAR_DEV_WHITE_LIST handler: issues HCI LE Clear Accept
 * List (no parameters).  Same precondition pattern as the other white-
 * list ops; completion in clear_white_list_complete.
 * NOTE(review): interior lines elided (lock, busy status, cmd NULL
 * check); code kept verbatim.
 */
7492 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7493 void *data, u16 len)
7495 struct mgmt_pending_cmd *cmd;
7496 struct hci_request req;
7499 BT_DBG("%s", hdev->name);
7501 if (!lmp_le_capable(hdev))
7502 return mgmt_cmd_status(sk, hdev->id,
7503 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7504 MGMT_STATUS_NOT_SUPPORTED);
7506 if (!hdev_is_powered(hdev))
7507 return mgmt_cmd_status(sk, hdev->id,
7508 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7509 MGMT_STATUS_REJECTED);
7513 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7514 err = mgmt_cmd_status(sk, hdev->id,
7515 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7520 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7527 hci_req_init(&req, hdev);
7529 hci_req_add(&req, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL);
7531 err = hci_req_run(&req, clear_white_list_complete);
7533 mgmt_pending_remove(cmd);
7538 hci_dev_unlock(hdev);
/* hci_request completion for Set Scan Response Data: status reply on
 * HCI error, success completion otherwise, then drop the pending entry.
 * NOTE(review): interior lines elided; code kept verbatim.
 */
7543 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7546 struct mgmt_cp_set_scan_rsp_data *cp;
7547 struct mgmt_pending_cmd *cmd;
7549 BT_DBG("status 0x%02x", status);
7553 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7560 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7561 mgmt_status(status));
7563 mgmt_cmd_complete(cmd->sk, hdev->id,
7564 MGMT_OP_SET_SCAN_RSP_DATA, 0,
7567 mgmt_pending_remove(cmd);
7570 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_SCAN_RSP_DATA handler: mirrors set_advertising_data
 * but targets HCI LE Set Scan Response Data.  LE support required,
 * duplicate pending requests rejected, payload bounded to
 * HCI_MAX_AD_LENGTH; completion in set_scan_rsp_data_complete.
 * NOTE(review): interior lines elided (lock, busy status, cmd NULL
 * check, rsp.length assignment); code kept verbatim.
 */
7573 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7576 struct mgmt_pending_cmd *cmd;
7577 struct hci_request req;
7578 struct mgmt_cp_set_scan_rsp_data *cp = data;
7579 struct hci_cp_le_set_scan_rsp_data rsp;
7582 BT_DBG("%s", hdev->name);
7584 if (!lmp_le_capable(hdev))
7585 return mgmt_cmd_status(sk, hdev->id,
7586 MGMT_OP_SET_SCAN_RSP_DATA,
7587 MGMT_STATUS_NOT_SUPPORTED);
7591 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7592 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7597 if (len > HCI_MAX_AD_LENGTH) {
7598 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7599 MGMT_STATUS_INVALID_PARAMS);
7603 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7609 hci_req_init(&req, hdev);
7611 memset(&rsp, 0, sizeof(rsp));
7612 memcpy(rsp.data, cp->data, len);
7615 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7617 err = hci_req_run(&req, set_scan_rsp_data_complete);
7619 mgmt_pending_remove(cmd);
7622 hci_dev_unlock(hdev);
/* hci_request completion for the vendor RSSI-threshold command issued by
 * mgmt_set_rssi_threshold (tracked under MGMT_OP_SET_RSSI_ENABLE):
 * status reply on HCI error, success completion otherwise.
 * NOTE(review): interior lines elided; code kept verbatim.
 */
7627 static void set_rssi_threshold_complete(struct hci_dev *hdev,
7628 u8 status, u16 opcode)
7630 struct mgmt_pending_cmd *cmd;
7632 BT_DBG("status 0x%02x", status);
7636 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7641 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7642 mgmt_status(status));
7644 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7647 mgmt_pending_remove(cmd);
7650 hci_dev_unlock(hdev);
/* hci_request completion for the vendor RSSI-disable command
 * (MGMT_OP_SET_RSSI_DISABLE): status reply on HCI error, success
 * completion otherwise.  NOTE(review): interior lines elided.
 */
7653 static void set_rssi_disable_complete(struct hci_dev *hdev,
7654 u8 status, u16 opcode)
7656 struct mgmt_pending_cmd *cmd;
7658 BT_DBG("status 0x%02x", status);
7662 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7667 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7668 mgmt_status(status));
7670 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7673 mgmt_pending_remove(cmd);
7676 hci_dev_unlock(hdev);
/* Program vendor RSSI alert thresholds for an existing LE/ACL link
 * (Tizen extension; second stage of MGMT_OP_SET_RSSI_ENABLE).
 *
 * Expects a pending MGMT_OP_SET_RSSI_ENABLE command to already exist
 * (presumably added by set_enable_rssi / mgmt_enable_rssi_cc — TODO
 * confirm against callers).  Resolves the connection by bdaddr + link
 * type (0x01 => LE, else ACL), fills the vendor threshold command
 * (le_ext_opcode 0x0B, alert_mask 0x07 enabling low/in-range/high
 * alerts) and runs it; completion in set_rssi_threshold_complete.
 * Returns 0 or a negative errno.
 * NOTE(review): interior lines elided (lock, goto labels, the !cmd /
 * !conn branch structure); code kept verbatim.
 */
7679 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
7680 void *data, u16 len)
7683 struct hci_cp_set_rssi_threshold th = { 0, };
7684 struct mgmt_cp_set_enable_rssi *cp = data;
7685 struct hci_conn *conn;
7686 struct mgmt_pending_cmd *cmd;
7687 struct hci_request req;
7692 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7694 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7695 MGMT_STATUS_FAILED);
7699 if (!lmp_le_capable(hdev)) {
7700 mgmt_pending_remove(cmd);
7701 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7702 MGMT_STATUS_NOT_SUPPORTED);
7706 if (!hdev_is_powered(hdev)) {
7707 BT_DBG("%s", hdev->name);
7708 mgmt_pending_remove(cmd);
7709 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7710 MGMT_STATUS_NOT_POWERED);
7714 if (cp->link_type == 0x01)
7715 dest_type = LE_LINK;
7717 dest_type = ACL_LINK;
7719 /* Get LE/ACL link handle info */
7720 conn = hci_conn_hash_lookup_ba(hdev,
7721 dest_type, &cp->bdaddr);
7724 err = mgmt_cmd_complete(sk, hdev->id,
7725 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
7726 mgmt_pending_remove(cmd);
7730 hci_req_init(&req, hdev);
7732 th.hci_le_ext_opcode = 0x0B;
7734 th.conn_handle = conn->handle;
7735 th.alert_mask = 0x07;
7736 th.low_th = cp->low_th;
7737 th.in_range_th = cp->in_range_th;
7738 th.high_th = cp->high_th;
7740 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7741 err = hci_req_run(&req, set_rssi_threshold_complete);
7744 mgmt_pending_remove(cmd);
7745 BT_ERR("Error in requesting hci_req_run");
7750 hci_dev_unlock(hdev);
/* Report successful vendor RSSI enable to userspace: completes the mgmt
 * command with the controller's status/opcode plus the original
 * bdaddr/link type, emits MGMT_EV_RSSI_ENABLED, then (on success —
 * the 'success' branch structure is elided here) resets per-link RSSI
 * state for the link type and marks this connection as monitored.
 * Finally removes any still-pending MGMT_OP_SET_RSSI_ENABLE entry.
 * NOTE(review): interior lines elided; code kept verbatim.
 */
7754 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
7755 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7757 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
7758 struct mgmt_cp_set_enable_rssi *cp = data;
7759 struct mgmt_pending_cmd *cmd;
7764 mgmt_rp.status = rp->status;
7765 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7766 mgmt_rp.bt_address = cp->bdaddr;
7767 mgmt_rp.link_type = cp->link_type;
7769 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7770 MGMT_STATUS_SUCCESS, &mgmt_rp,
7771 sizeof(struct mgmt_cc_rsp_enable_rssi));
7773 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
7774 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7776 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
7777 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7778 &mgmt_rp.bt_address, true);
7782 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7784 mgmt_pending_remove(cmd);
7786 hci_dev_unlock(hdev);
/* Report successful vendor RSSI disable to userspace: completes the
 * mgmt command, emits MGMT_EV_RSSI_DISABLED, clears the per-link RSSI
 * monitoring state for this address, and removes any still-pending
 * MGMT_OP_SET_RSSI_DISABLE entry.
 * NOTE(review): the completion/event payloads use
 * sizeof(struct mgmt_cc_rsp_enable_rssi) although mgmt_rp is a
 * mgmt_cc_rp_disable_rssi — flagged for upstream review, kept verbatim
 * here since the struct definitions are not visible in this extract.
 */
7789 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
7790 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7792 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
7793 struct mgmt_cp_disable_rssi *cp = data;
7794 struct mgmt_pending_cmd *cmd;
7799 mgmt_rp.status = rp->status;
7800 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7801 mgmt_rp.bt_address = cp->bdaddr;
7802 mgmt_rp.link_type = cp->link_type;
7804 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7805 MGMT_STATUS_SUCCESS, &mgmt_rp,
7806 sizeof(struct mgmt_cc_rsp_enable_rssi));
7808 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
7809 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7811 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7812 &mgmt_rp.bt_address, false);
7816 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7818 mgmt_pending_remove(cmd);
7820 hci_dev_unlock(hdev);
/* Issue the vendor RSSI module disable command (le_ext_opcode 0x01 with
 * CS-features byte 0x00 = off).  Expects a pending
 * MGMT_OP_SET_RSSI_DISABLE command to already exist; validates LE
 * support and powered state, then runs the request with completion in
 * set_rssi_disable_complete.  Returns 0 or a negative errno.
 * NOTE(review): interior lines elided (lock, goto labels, !cmd branch);
 * code kept verbatim.
 */
7823 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
7824 void *data, u16 len)
7826 struct mgmt_pending_cmd *cmd;
7827 struct hci_request req;
7828 struct hci_cp_set_enable_rssi cp_en = { 0, };
7831 BT_DBG("Set Disable RSSI.");
7833 cp_en.hci_le_ext_opcode = 0x01;
7834 cp_en.le_enable_cs_Features = 0x00;
7835 cp_en.data[0] = 0x00;
7836 cp_en.data[1] = 0x00;
7837 cp_en.data[2] = 0x00;
7841 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7844 MGMT_STATUS_FAILED);
7848 if (!lmp_le_capable(hdev)) {
7849 mgmt_pending_remove(cmd);
7850 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7851 MGMT_STATUS_NOT_SUPPORTED);
7855 if (!hdev_is_powered(hdev)) {
7856 BT_DBG("%s", hdev->name);
7857 mgmt_pending_remove(cmd);
7858 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7859 MGMT_STATUS_NOT_POWERED);
7863 hci_req_init(&req, hdev);
7865 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7866 sizeof(struct hci_cp_set_enable_rssi),
7867 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7868 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7870 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7871 err = hci_req_run(&req, set_rssi_disable_complete);
7874 mgmt_pending_remove(cmd);
7875 BT_ERR("Error in requesting hci_req_run");
7880 hci_dev_unlock(hdev);
/* Command-complete dispatcher for the vendor RSSI HCI command.
 *
 * Routes the controller's response by which mgmt command is pending:
 * - enable pending: after the module-enable stage, chain into
 *   mgmt_set_rssi_threshold; after the threshold stage, report success
 *   via mgmt_rssi_enable_success (switch on rp->le_ext_opcode — case
 *   labels are elided in this extract).
 * - disable pending: report success directly, or if other links are
 *   still monitored only unset this link's threshold; otherwise chain
 *   into mgmt_set_disable_rssi to turn the module off.
 * NOTE(review): hdev is unlocked immediately after the pending lookups
 * (7895) — the cmd pointers are then used unlocked; flagged for review,
 * kept verbatim.
 */
7884 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
7886 struct hci_cc_rsp_enable_rssi *rp = response;
7887 struct mgmt_pending_cmd *cmd_enable = NULL;
7888 struct mgmt_pending_cmd *cmd_disable = NULL;
7889 struct mgmt_cp_set_enable_rssi *cp_en;
7890 struct mgmt_cp_disable_rssi *cp_dis;
7893 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7894 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7895 hci_dev_unlock(hdev);
7898 BT_DBG("Enable Request");
7901 BT_DBG("Disable Request");
7904 cp_en = cmd_enable->param;
7909 switch (rp->le_ext_opcode) {
7911 BT_DBG("RSSI enabled.. Setting Threshold...");
7912 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
7913 cp_en, sizeof(*cp_en));
7917 BT_DBG("Sending RSSI enable success");
7918 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
7919 cp_en, rp, rp->status);
7923 } else if (cmd_disable) {
7924 cp_dis = cmd_disable->param;
7929 switch (rp->le_ext_opcode) {
7931 BT_DBG("Sending RSSI disable success");
7932 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7933 cp_dis, rp, rp->status);
7938 * Only unset RSSI Threshold values for the Link if
7939 * RSSI is monitored for other BREDR or LE Links
7941 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
7942 BT_DBG("Unset Threshold. Other links being monitored");
7943 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7944 cp_dis, rp, rp->status);
7946 BT_DBG("Unset Threshold. Disabling...");
7947 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
7948 cp_dis, sizeof(*cp_dis));
/* hci_request completion for the module-enable stage of
 * MGMT_OP_SET_RSSI_ENABLE: status reply on HCI error, success completion
 * otherwise.  NOTE(review): interior lines elided; code kept verbatim.
 */
7955 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7958 struct mgmt_pending_cmd *cmd;
7960 BT_DBG("status 0x%02x", status);
7964 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7969 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7970 mgmt_status(status));
7972 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7975 mgmt_pending_remove(cmd);
7978 hci_dev_unlock(hdev);
/* MGMT_OP_SET_RSSI_ENABLE entry point (Tizen extension).
 *
 * First stage of enabling RSSI monitoring: validates LE support,
 * powered state and no duplicate pending op, registers the pending
 * command, then either (a) skips straight to threshold programming via
 * mgmt_set_rssi_threshold when the RSSI module is already enabled for
 * some link, or (b) sends the vendor module-enable command
 * (le_ext_opcode 0x01, cs_Features 0x04) whose completion is
 * set_rssi_enable_complete — the threshold stage then follows via
 * mgmt_enable_rssi_cc.  Returns 0 or a negative errno.
 * NOTE(review): interior lines elided (lock, busy status, !cmd branch,
 * goto labels); code kept verbatim.
 */
7981 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7982 void *data, u16 len)
7984 struct mgmt_pending_cmd *cmd;
7985 struct hci_request req;
7986 struct mgmt_cp_set_enable_rssi *cp = data;
7987 struct hci_cp_set_enable_rssi cp_en = { 0, };
7990 BT_DBG("Set Enable RSSI.");
7992 cp_en.hci_le_ext_opcode = 0x01;
7993 cp_en.le_enable_cs_Features = 0x04;
7994 cp_en.data[0] = 0x00;
7995 cp_en.data[1] = 0x00;
7996 cp_en.data[2] = 0x00;
8000 if (!lmp_le_capable(hdev)) {
8001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8002 MGMT_STATUS_NOT_SUPPORTED);
8006 if (!hdev_is_powered(hdev)) {
8007 BT_DBG("%s", hdev->name);
8008 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8009 MGMT_STATUS_NOT_POWERED);
8013 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
8014 BT_DBG("%s", hdev->name);
8015 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8020 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
8023 BT_DBG("%s", hdev->name);
8028 /* If RSSI is already enabled directly set Threshold values */
8029 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
8030 hci_dev_unlock(hdev);
8031 BT_DBG("RSSI Enabled. Directly set Threshold");
8032 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
8036 hci_req_init(&req, hdev);
8038 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
8039 sizeof(struct hci_cp_set_enable_rssi),
8040 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
8041 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
8043 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
8044 err = hci_req_run(&req, set_rssi_enable_complete);
8047 mgmt_pending_remove(cmd);
8048 BT_ERR("Error in requesting hci_req_run");
8053 hci_dev_unlock(hdev);
/* hci_request completion for Get Raw RSSI: completes the pending mgmt
 * command with MGMT_STATUS_SUCCESS carrying the raw HCI status byte.
 * The actual RSSI value is delivered separately via
 * mgmt_raw_rssi_response.  NOTE(review): interior lines elided.
 */
8058 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8060 struct mgmt_pending_cmd *cmd;
8062 BT_DBG("status 0x%02x", status);
8066 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
8070 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8071 MGMT_STATUS_SUCCESS, &status, 1);
8073 mgmt_pending_remove(cmd);
8076 hci_dev_unlock(hdev);
/* MGMT_OP_GET_RAW_RSSI handler (Tizen extension).
 *
 * Resolves the connection from bt_address + link type (0x01 => LE, else
 * ACL), then issues the vendor Get Raw RSSI HCI command for that
 * connection handle.  Fails with NOT_SUPPORTED (no LE), NOT_CONNECTED
 * (no matching link), NOT_POWERED, or BUSY-style rejection when a
 * same-op command is pending.  Completion in get_raw_rssi_complete;
 * the RSSI value itself arrives via mgmt_raw_rssi_response.
 * NOTE(review): interior lines elided (lock, busy status, !conn / !cmd
 * branches, goto labels); code kept verbatim.
 */
8079 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
8082 struct mgmt_pending_cmd *cmd;
8083 struct hci_request req;
8084 struct mgmt_cp_get_raw_rssi *cp = data;
8085 struct hci_cp_get_raw_rssi hci_cp;
8087 struct hci_conn *conn;
8091 BT_DBG("Get Raw RSSI.");
8095 if (!lmp_le_capable(hdev)) {
8096 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8097 MGMT_STATUS_NOT_SUPPORTED);
8101 if (cp->link_type == 0x01)
8102 dest_type = LE_LINK;
8104 dest_type = ACL_LINK;
8106 /* Get LE/BREDR link handle info */
8107 conn = hci_conn_hash_lookup_ba(hdev,
8108 dest_type, &cp->bt_address);
8110 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8111 MGMT_STATUS_NOT_CONNECTED);
8114 hci_cp.conn_handle = conn->handle;
8116 if (!hdev_is_powered(hdev)) {
8117 BT_DBG("%s", hdev->name);
8118 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8119 MGMT_STATUS_NOT_POWERED);
8123 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
8124 BT_DBG("%s", hdev->name);
8125 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8130 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
8132 BT_DBG("%s", hdev->name);
8137 hci_req_init(&req, hdev);
8139 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
8140 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
8141 err = hci_req_run(&req, get_raw_rssi_complete);
8144 mgmt_pending_remove(cmd);
8145 BT_ERR("Error in requesting hci_req_run");
8149 hci_dev_unlock(hdev);
/* Forward a raw-RSSI command-complete from the controller to userspace:
 * resolves the connection by handle to recover bdaddr and link type
 * (LE => 0x01, else 0x00) and emits MGMT_EV_RAW_RSSI with
 * status/rssi_dbm.  NOTE(review): the !conn bailout between 8163 and
 * 8167 is elided in this extract; code kept verbatim.
 */
8154 void mgmt_raw_rssi_response(struct hci_dev *hdev,
8155 struct hci_cc_rp_get_raw_rssi *rp, int success)
8157 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
8158 struct hci_conn *conn;
8160 mgmt_rp.status = rp->status;
8161 mgmt_rp.rssi_dbm = rp->rssi_dbm;
8163 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
8167 bacpy(&mgmt_rp.bt_address, &conn->dst);
8168 if (conn->type == LE_LINK)
8169 mgmt_rp.link_type = 0x01;
8171 mgmt_rp.link_type = 0x00;
8173 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
8174 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* hci_request completion for the per-link threshold-unset command
 * (tracked under MGMT_OP_SET_RSSI_DISABLE): completes with SUCCESS
 * carrying the raw HCI status byte.  NOTE(review): interior lines
 * elided; code kept verbatim.
 */
8177 static void set_disable_threshold_complete(struct hci_dev *hdev,
8178 u8 status, u16 opcode)
8180 struct mgmt_pending_cmd *cmd;
8182 BT_DBG("status 0x%02x", status);
8186 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
8190 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8191 MGMT_STATUS_SUCCESS, &status, 1);
8193 mgmt_pending_remove(cmd);
8196 hci_dev_unlock(hdev);
8199 /* Remove RSSI monitoring for a single link.
 *
 * Clears the vendor alert thresholds for the connection matching
 * bdaddr + link type (le_ext_opcode 0x0B with alert_mask 0x00), after
 * the usual LE-support / connected / powered / not-busy checks.
 * Registers a MGMT_OP_SET_RSSI_DISABLE pending command; completion in
 * set_disable_threshold_complete.  Returns 0 or a negative errno.
 * NOTE(review): interior lines elided (lock, !conn branch, threshold
 * field zeroing at 8238/8240, goto labels); code kept verbatim.
 */
8200 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
8201 void *data, u16 len)
8204 struct hci_cp_set_rssi_threshold th = { 0, };
8205 struct mgmt_cp_disable_rssi *cp = data;
8206 struct hci_conn *conn;
8207 struct mgmt_pending_cmd *cmd;
8208 struct hci_request req;
8211 BT_DBG("Set Disable RSSI.");
8215 if (!lmp_le_capable(hdev)) {
8216 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8217 MGMT_STATUS_NOT_SUPPORTED);
8221 /* Get LE/ACL link handle info*/
8222 if (cp->link_type == 0x01)
8223 dest_type = LE_LINK;
8225 dest_type = ACL_LINK;
8227 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
8229 err = mgmt_cmd_complete(sk, hdev->id,
8230 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
8234 th.hci_le_ext_opcode = 0x0B;
8236 th.conn_handle = conn->handle;
8237 th.alert_mask = 0x00;
8239 th.in_range_th = 0x00;
8242 if (!hdev_is_powered(hdev)) {
8243 BT_DBG("%s", hdev->name);
8244 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8249 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
8250 BT_DBG("%s", hdev->name);
8251 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8256 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
8259 BT_DBG("%s", hdev->name);
8264 hci_req_init(&req, hdev);
8266 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
8267 err = hci_req_run(&req, set_disable_threshold_complete);
8269 mgmt_pending_remove(cmd);
8270 BT_ERR("Error in requesting hci_req_run");
8275 hci_dev_unlock(hdev);
/* Translate a vendor RSSI-alert HCI event into
 * MGMT_EV_RSSI_ALERT: resolves the connection handle to a bdaddr and
 * link type (LE => 0x01, else 0x00), logs and bails if the handle is
 * unknown, then broadcasts the alert type and dBm value to userspace.
 * NOTE(review): the return after the error log (8292) is elided in this
 * extract; code kept verbatim.
 */
8280 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
8281 s8 alert_type, s8 rssi_dbm)
8283 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
8284 struct hci_conn *conn;
8286 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
8287 conn_handle, alert_type, rssi_dbm);
8289 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
8292 BT_ERR("RSSI alert Error: Device not found for handle");
8295 bacpy(&mgmt_ev.bdaddr, &conn->dst);
8297 if (conn->type == LE_LINK)
8298 mgmt_ev.link_type = 0x01;
8300 mgmt_ev.link_type = 0x00;
8302 mgmt_ev.alert_type = alert_type;
8303 mgmt_ev.rssi_dbm = rssi_dbm;
8305 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
8306 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Abort a failed LE discovery start: reset discovery state to STOPPED
 * and, if a MGMT_OP_START_LE_DISCOVERY command is pending, complete it
 * with the translated HCI status and the discovery type, then remove it.
 * NOTE(review): interior lines elided (lock/unlock, !cmd bailout);
 * code kept verbatim.
 */
8310 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
8312 struct mgmt_pending_cmd *cmd;
8316 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8318 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8322 type = hdev->le_discovery.type;
8324 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8325 mgmt_status(status), &type, sizeof(type));
8326 mgmt_pending_remove(cmd);
/* hci_request completion for Start LE Discovery: on HCI error unwind via
 * mgmt_start_le_discovery_failed; on success move to DISCOVERY_FINDING
 * and schedule the delayed le_scan_disable work (timeout derivation is
 * elided in this extract — it is 0 unless set between 8334 and 8355).
 * NOTE(review): interior lines elided; code kept verbatim.
 */
8331 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
8334 unsigned long timeout = 0;
8336 BT_DBG("status %d", status);
8340 mgmt_start_le_discovery_failed(hdev, status);
8341 hci_dev_unlock(hdev);
8346 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
8347 hci_dev_unlock(hdev);
8349 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
8350 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
8355 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* MGMT_OP_START_LE_DISCOVERY handler (Tizen extension).
 *
 * Validates powered state, DISCOVERY_STOPPED, and type == DISCOV_TYPE_LE,
 * then builds one hci_request that (1) disables any running background
 * LE scan, (2) sets scan parameters using a freshly chosen own-address
 * type (resolvable private address when HCI_PRIVACY is set, otherwise
 * unresolvable — via hci_update_random_address_sync), and (3) enables
 * scanning without duplicate filtering.  On successful submission the
 * state moves to DISCOVERY_STARTING; completion is handled in
 * start_le_discovery_complete.  Returns 0 or a negative errno.
 * NOTE(review): interior lines elided (lock, busy status at 8378, !cmd
 * branch, the err<0 check around 8420-8422, goto labels); code verbatim.
 * NOTE(review): mixing hci_update_random_address_sync (hci_sync API)
 * into a hci_request flow looks like a port artifact — confirm against
 * the full tree.
 */
8358 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
8359 void *data, u16 len)
8361 struct mgmt_cp_start_le_discovery *cp = data;
8362 struct mgmt_pending_cmd *cmd;
8363 struct hci_cp_le_set_scan_param param_cp;
8364 struct hci_cp_le_set_scan_enable enable_cp;
8365 struct hci_request req;
8366 u8 status, own_addr_type;
8369 BT_DBG("%s", hdev->name);
8371 if (!hdev_is_powered(hdev)) {
8372 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8373 MGMT_STATUS_NOT_POWERED);
8377 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
8378 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8383 if (cp->type != DISCOV_TYPE_LE) {
8384 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8385 MGMT_STATUS_INVALID_PARAMS);
8389 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
8395 hdev->le_discovery.type = cp->type;
8397 hci_req_init(&req, hdev);
8399 status = mgmt_le_support(hdev);
8401 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8403 mgmt_pending_remove(cmd);
8407 /* If controller is scanning, it means the background scanning
8408 * is running. Thus, we should temporarily stop it in order to
8409 * set the discovery scanning parameters.
8411 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
8412 hci_req_add_le_scan_disable(&req, false);
8414 memset(&param_cp, 0, sizeof(param_cp));
8416 /* All active scans will be done with either a resolvable
8417 * private address (when privacy feature has been enabled)
8418 * or unresolvable private address.
8420 err = hci_update_random_address_sync(hdev, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
8422 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8423 MGMT_STATUS_FAILED);
8424 mgmt_pending_remove(cmd);
8428 param_cp.type = hdev->le_scan_type;
8429 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
8430 param_cp.window = cpu_to_le16(hdev->le_scan_window);
8431 param_cp.own_address_type = own_addr_type;
8432 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
8435 memset(&enable_cp, 0, sizeof(enable_cp));
8436 enable_cp.enable = LE_SCAN_ENABLE;
8437 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
8439 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
8442 err = hci_req_run(&req, start_le_discovery_complete);
8444 mgmt_pending_remove(cmd);
8446 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
/* Reply to a pending Stop LE Discovery command with a failure status,
 * echoing the discovery type back in the response payload, and drop
 * the pending command entry.
 */
8452 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
8454 struct mgmt_pending_cmd *cmd;
8457 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8461 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8462 mgmt_status(status), &hdev->le_discovery.type,
8463 sizeof(hdev->le_discovery.type));
8464 mgmt_pending_remove(cmd);
/* HCI request completion callback for stop_le_discovery(): on failure
 * it fails the pending MGMT command, otherwise it marks LE discovery
 * as stopped. Runs with hdev->lock held (unlocked at the end).
 */
8469 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
8472 BT_DBG("status %d", status);
8477 mgmt_stop_le_discovery_failed(hdev, status);
8481 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8484 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_STOP_LE_DISCOVERY handler: rejects the request if no
 * LE discovery is active or the type does not match the running one,
 * otherwise queues an HCI request that disables LE scanning and moves
 * the state to DISCOVERY_STOPPING. All replies echo the requested
 * discovery type back to the caller.
 */
8487 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
8488 void *data, u16 len)
8490 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
8491 struct mgmt_pending_cmd *cmd;
8492 struct hci_request req;
8495 BT_DBG("%s", hdev->name);
8499 if (!hci_le_discovery_active(hdev)) {
8500 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8501 MGMT_STATUS_REJECTED, &mgmt_cp->type,
8502 sizeof(mgmt_cp->type));
8506 if (hdev->le_discovery.type != mgmt_cp->type) {
8507 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8508 MGMT_STATUS_INVALID_PARAMS,
8509 &mgmt_cp->type, sizeof(mgmt_cp->type));
8513 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
8519 hci_req_init(&req, hdev);
/* Only the FINDING state is expected here; anything else is treated
 * as an internal error and the command fails.
 */
8521 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
8522 BT_DBG("unknown le discovery state %u",
8523 hdev->le_discovery.state);
8525 mgmt_pending_remove(cmd);
8526 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8527 MGMT_STATUS_FAILED, &mgmt_cp->type,
8528 sizeof(mgmt_cp->type));
/* Cancel the pending scan-timeout work before disabling the scan. */
8532 cancel_delayed_work(&hdev->le_scan_disable);
8533 hci_req_add_le_scan_disable(&req, false);
8535 err = hci_req_run(&req, stop_le_discovery_complete);
8537 mgmt_pending_remove(cmd);
8539 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
8542 hci_dev_unlock(hdev);
8546 /* Separate LE discovery */
/* Notify userspace that LE discovery started/stopped: complete any
 * pending start/stop command (echoing the discovery type) and emit a
 * MGMT_EV_DISCOVERING event.
 */
8547 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
8549 struct mgmt_ev_discovering ev;
8550 struct mgmt_pending_cmd *cmd;
8552 BT_DBG("%s le discovering %u", hdev->name, discovering);
/* Prefer the start command; fall back to a pending stop command. */
8555 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8557 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8560 u8 type = hdev->le_discovery.type;
8562 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
8564 mgmt_pending_remove(cmd);
8567 memset(&ev, 0, sizeof(ev));
8568 ev.type = hdev->le_discovery.type;
8569 ev.discovering = discovering;
8571 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Tizen handler that cancels an in-progress LE connection attempt by
 * sending HCI LE Create Connection Cancel; only logs on send failure.
 */
8574 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
8575 void *data, u16 len)
8579 BT_DBG("%s", hdev->name);
8583 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
8585 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
8587 hci_dev_unlock(hdev);
/* Validate LE connection-update parameters against the ranges from the
 * Bluetooth Core spec: interval 6..3200 (presumably 1.25 ms units —
 * confirm), supervision timeout 10..3200 (10 ms units), latency <= 499,
 * and timeout large enough for the chosen interval/latency.
 */
8592 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
8597 if (min > max || min < 6 || max > 3200)
8600 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must exceed the maximum interval, otherwise the link would
 * drop before a single connection event could be missed.
 */
8603 if (max >= to_multiplier * 8)
8606 max_latency = (to_multiplier * 8 / max) - 1;
8608 if (latency > 499 || latency > max_latency)
/* Tizen MGMT_OP_LE_CONN_UPDATE handler: validates the requested
 * connection parameters, looks up the LE connection by address, and
 * issues the connection parameter update via hci_le_conn_update().
 */
8614 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
8617 struct mgmt_cp_le_conn_update *cp = data;
8619 struct hci_conn *conn;
8620 u16 min, max, latency, supervision_timeout;
8623 if (!hdev_is_powered(hdev))
8624 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8625 MGMT_STATUS_NOT_POWERED);
8627 min = __le16_to_cpu(cp->conn_interval_min);
8628 max = __le16_to_cpu(cp->conn_interval_max);
8629 latency = __le16_to_cpu(cp->conn_latency);
8630 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
8632 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
8633 min, max, latency, supervision_timeout);
8635 err = check_le_conn_update_param(min, max, latency,
8636 supervision_timeout);
8639 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8640 MGMT_STATUS_INVALID_PARAMS);
8644 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
8646 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8647 MGMT_STATUS_NOT_CONNECTED);
8648 hci_dev_unlock(hdev);
/* Drop the lock before issuing the update; hci_le_conn_update() is
 * called outside hdev->lock here.
 */
8652 hci_dev_unlock(hdev);
8654 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
8656 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
/* HCI completion callback for set_manufacturer_data(): translates the
 * HCI status into a MGMT status/complete reply for the pending command
 * and removes it. Runs under hdev->lock (unlocked at the end).
 */
8660 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
8663 struct mgmt_cp_set_manufacturer_data *cp;
8664 struct mgmt_pending_cmd *cmd;
8666 BT_DBG("status 0x%02x", status);
8670 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
8677 mgmt_cmd_status(cmd->sk, hdev->id,
8678 MGMT_OP_SET_MANUFACTURER_DATA,
8679 mgmt_status(status));
8681 mgmt_cmd_complete(cmd->sk, hdev->id,
8682 MGMT_OP_SET_MANUFACTURER_DATA, 0,
8685 mgmt_pending_remove(cmd);
8688 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_MANUFACTURER_DATA handler: stores new EIR
 * manufacturer-specific data on the controller and refreshes the EIR.
 * cp->data is laid out EIR-style: data[0] = length byte, data[1] = AD
 * type (must be 0xFF, manufacturer specific), data[2..] = payload of
 * data[0]-1 bytes. On failure the previous data is restored from the
 * old_data/old_len snapshot.
 */
8691 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
8692 void *data, u16 len)
8694 struct mgmt_pending_cmd *cmd;
8695 struct hci_request req;
8696 struct mgmt_cp_set_manufacturer_data *cp = data;
8697 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
8701 BT_DBG("%s", hdev->name);
8703 if (!lmp_bredr_capable(hdev))
8704 return mgmt_cmd_status(sk, hdev->id,
8705 MGMT_OP_SET_MANUFACTURER_DATA,
8706 MGMT_STATUS_NOT_SUPPORTED);
/* Reject an empty length byte or a payload larger than the storage. */
8708 if (cp->data[0] == 0 ||
8709 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
8710 return mgmt_cmd_status(sk, hdev->id,
8711 MGMT_OP_SET_MANUFACTURER_DATA,
8712 MGMT_STATUS_INVALID_PARAMS);
/* Only the manufacturer-specific AD type (0xFF) is accepted. */
8714 if (cp->data[1] != 0xFF)
8715 return mgmt_cmd_status(sk, hdev->id,
8716 MGMT_OP_SET_MANUFACTURER_DATA,
8717 MGMT_STATUS_NOT_SUPPORTED);
8721 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
8722 err = mgmt_cmd_status(sk, hdev->id,
8723 MGMT_OP_SET_MANUFACTURER_DATA,
8728 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
8735 hci_req_init(&req, hdev);
8737 /* if new data is same as previous data then return command
8740 if (hdev->manufacturer_len == cp->data[0] - 1 &&
8741 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
8742 mgmt_pending_remove(cmd);
8743 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
8744 0, cp, sizeof(*cp));
/* Snapshot the current data so it can be restored on failure. */
8749 old_len = hdev->manufacturer_len;
8751 memcpy(old_data, hdev->manufacturer_data, old_len);
8753 hdev->manufacturer_len = cp->data[0] - 1;
8754 if (hdev->manufacturer_len > 0)
8755 memcpy(hdev->manufacturer_data, cp->data + 2,
8756 hdev->manufacturer_len);
8758 hci_update_eir_sync(hdev);
8760 err = hci_req_run(&req, set_manufacturer_data_complete);
8762 mgmt_pending_remove(cmd);
8767 hci_dev_unlock(hdev);
/* Failure path: roll back to the previous manufacturer data. */
8772 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
8773 hdev->manufacturer_len = old_len;
8774 if (hdev->manufacturer_len > 0)
8775 memcpy(hdev->manufacturer_data, old_data,
8776 hdev->manufacturer_len);
8777 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_LE_SET_SCAN_PARAMS handler: validates and stores the
 * LE scan type/interval/window on the controller. Interval and window
 * must be within 0x0004..0x4000 and window <= interval, matching the
 * HCI LE Set Scan Parameters ranges. If background (passive) scanning
 * is currently running and no discovery is active, the scan is
 * restarted so the new parameters take effect immediately.
 */
8781 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
8782 void *data, u16 len)
8784 struct mgmt_cp_le_set_scan_params *cp = data;
8785 __u16 interval, window;
8788 BT_DBG("%s", hdev->name);
8790 if (!lmp_le_capable(hdev))
8791 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8792 MGMT_STATUS_NOT_SUPPORTED);
8794 interval = __le16_to_cpu(cp->interval);
8796 if (interval < 0x0004 || interval > 0x4000)
8797 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8798 MGMT_STATUS_INVALID_PARAMS);
8800 window = __le16_to_cpu(cp->window);
8802 if (window < 0x0004 || window > 0x4000)
8803 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8804 MGMT_STATUS_INVALID_PARAMS);
8806 if (window > interval)
8807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8808 MGMT_STATUS_INVALID_PARAMS);
8812 hdev->le_scan_type = cp->type;
8813 hdev->le_scan_interval = interval;
8814 hdev->le_scan_window = window;
8816 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
8819 /* If background scan is running, restart it so new parameters are
8822 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
8823 hdev->discovery.state == DISCOVERY_STOPPED) {
8824 struct hci_request req;
8826 hci_req_init(&req, hdev);
8828 hci_req_add_le_scan_disable(&req, false);
8829 hci_req_add_le_passive_scan(&req);
8831 hci_req_run(&req, NULL);
8834 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_SET_VOICE_SETTING handler: records the voice setting
 * and SCO role on the ACL connection for the given address and selects
 * wideband (voice_setting 0x0063, presumably mSBC/transparent — confirm)
 * or narrowband SCO codec configuration for either the handsfree or
 * gateway role.
 */
8839 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
8840 void *data, u16 len)
8842 struct mgmt_cp_set_voice_setting *cp = data;
8843 struct hci_conn *conn;
8844 struct hci_conn *sco_conn;
8848 BT_DBG("%s", hdev->name);
8850 if (!lmp_bredr_capable(hdev)) {
8851 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
8852 MGMT_STATUS_NOT_SUPPORTED);
8857 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
8859 err = mgmt_cmd_complete(sk, hdev->id,
8860 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
8864 conn->voice_setting = cp->voice_setting;
8865 conn->sco_role = cp->sco_role;
/* Skip codec reconfiguration when a SCO link to another peer exists. */
8867 sco_conn = hci_conn_hash_lookup_sco(hdev);
8868 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
8869 BT_ERR("There is other SCO connection.");
8873 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
8874 if (conn->voice_setting == 0x0063)
8875 sco_connect_set_wbc(hdev);
8877 sco_connect_set_nbc(hdev);
8879 if (conn->voice_setting == 0x0063)
8880 sco_connect_set_gw_wbc(hdev);
8882 sco_connect_set_gw_nbc(hdev);
8886 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
8890 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_GET_ADV_TX_POWER handler: returns the cached
 * advertising TX power (hdev->adv_tx_power) in a kmalloc'd reply.
 */
8894 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
8895 void *data, u16 len)
8897 struct mgmt_rp_get_adv_tx_power *rp;
8901 BT_DBG("%s", hdev->name);
8905 rp_len = sizeof(*rp);
8906 rp = kmalloc(rp_len, GFP_KERNEL);
8912 rp->adv_tx_power = hdev->adv_tx_power;
8914 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
8920 hci_dev_unlock(hdev);
/* Broadcast a controller hardware error code to userspace via the
 * Tizen MGMT_EV_HARDWARE_ERROR event.
 */
8925 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
8927 struct mgmt_ev_hardware_error ev;
8929 ev.error_code = err_code;
8930 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a (payload-less) HCI TX timeout event to userspace. */
8933 void mgmt_tx_timeout_error(struct hci_dev *hdev)
8935 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
/* Emit the vendor-specific multi-advertising state-changed event for a
 * given advertising instance, carrying the reason and the connection
 * handle the state change relates to.
 */
8938 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
8939 u8 state_change_reason, u16 connection_handle)
8941 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
8943 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
8944 adv_instance, state_change_reason, connection_handle);
8946 mgmt_ev.adv_instance = adv_instance;
8947 mgmt_ev.state_change_reason = state_change_reason;
8948 mgmt_ev.connection_handle = connection_handle;
8950 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
8951 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
/* Tizen MGMT_OP_ENABLE_6LOWPAN handler: toggles the Bluetooth 6LoWPAN
 * subsystem on or off after checking that the controller is powered
 * and LE-capable.
 */
8955 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8956 void *data, u16 len)
8959 struct mgmt_cp_enable_6lowpan *cp = data;
8961 BT_DBG("%s", hdev->name);
8965 if (!hdev_is_powered(hdev)) {
8966 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8967 MGMT_STATUS_NOT_POWERED);
8971 if (!lmp_le_capable(hdev)) {
8972 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8973 MGMT_STATUS_NOT_SUPPORTED);
8977 if (cp->enable_6lowpan)
8978 bt_6lowpan_enable();
8980 bt_6lowpan_disable();
8982 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8983 MGMT_STATUS_SUCCESS, NULL, 0);
8985 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_CONNECT_6LOWPAN handler: validates the LE address type
 * and initiates a 6LoWPAN connection to the peer via
 * _bt_6lowpan_connect(). The hdev lock is dropped before the connect
 * call (which may sleep/block — presumably; confirm against the
 * 6lowpan implementation).
 */
8989 static int connect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8990 void *data, u16 len)
8992 struct mgmt_cp_connect_6lowpan *cp = data;
8993 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8996 BT_DBG("%s", hdev->name);
9000 if (!lmp_le_capable(hdev)) {
9001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9002 MGMT_STATUS_NOT_SUPPORTED);
9006 if (!hdev_is_powered(hdev)) {
9007 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9008 MGMT_STATUS_REJECTED);
9012 if (bdaddr_type_is_le(cp->addr.type)) {
9013 if (cp->addr.type == BDADDR_LE_PUBLIC)
9014 addr_type = ADDR_LE_DEV_PUBLIC;
9016 addr_type = ADDR_LE_DEV_RANDOM;
9018 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9019 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9023 hci_dev_unlock(hdev);
9025 /* 6lowpan Connect */
9026 err = _bt_6lowpan_connect(&cp->addr.bdaddr, cp->addr.type);
9031 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9032 MGMT_STATUS_REJECTED, NULL, 0);
9037 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
9040 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_DISCONNECT_6LOWPAN handler: validates the LE address,
 * checks an LE connection of the right address type exists and is in
 * BT_CONNECTED state, then tears down the 6LoWPAN channel via
 * _bt_6lowpan_disconnect().
 *
 * Fix: the success reply used MGMT_OP_CONNECT_6LOWPAN — a copy-paste
 * from connect_bt_6lowpan() — so userspace waiting on the DISCONNECT
 * command never got its completion. Reply with
 * MGMT_OP_DISCONNECT_6LOWPAN, matching every other reply in this
 * function.
 */
9044 static int disconnect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
9045 void *data, u16 len)
9047 struct mgmt_cp_disconnect_6lowpan *cp = data;
9048 struct hci_conn *conn = NULL;
9049 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
9052 BT_DBG("%s", hdev->name);
9056 if (!lmp_le_capable(hdev)) {
9057 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
9058 MGMT_STATUS_NOT_SUPPORTED);
9062 if (!hdev_is_powered(hdev)) {
9063 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
9064 MGMT_STATUS_REJECTED);
9068 if (bdaddr_type_is_le(cp->addr.type)) {
9069 if (cp->addr.type == BDADDR_LE_PUBLIC)
9070 addr_type = ADDR_LE_DEV_PUBLIC;
9072 addr_type = ADDR_LE_DEV_RANDOM;
9074 err = mgmt_cmd_complete(sk, hdev->id,
9075 MGMT_OP_DISCONNECT_6LOWPAN,
9076 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9080 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9082 err = mgmt_cmd_complete(sk, hdev->id,
9083 MGMT_OP_DISCONNECT_6LOWPAN,
9084 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
9088 if (conn->dst_type != addr_type) {
9089 err = mgmt_cmd_complete(sk, hdev->id,
9090 MGMT_OP_DISCONNECT_6LOWPAN,
9091 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9095 if (conn->state != BT_CONNECTED) {
9096 err = mgmt_cmd_complete(sk, hdev->id,
9097 MGMT_OP_DISCONNECT_6LOWPAN,
9098 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
9102 /* 6lowpan Disconnect */
9103 err = _bt_6lowpan_disconnect(conn->l2cap_data, cp->addr.type);
9105 err = mgmt_cmd_complete(sk, hdev->id,
9106 MGMT_OP_DISCONNECT_6LOWPAN,
9107 MGMT_STATUS_REJECTED, NULL, 0);
9111 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN, 0,
9115 hci_dev_unlock(hdev);
/* Emit MGMT_EV_6LOWPAN_CONN_STATE_CHANGED to userspace carrying the
 * peer address/type, connected flag, and the 16-byte network interface
 * name of the 6LoWPAN link.
 */
9119 void mgmt_6lowpan_conn_changed(struct hci_dev *hdev, char if_name[16],
9120 bdaddr_t *bdaddr, u8 addr_type, bool connected)
9123 struct mgmt_ev_6lowpan_conn_state_changed *ev = (void *)buf;
9126 memset(buf, 0, sizeof(buf));
9127 bacpy(&ev->addr.bdaddr, bdaddr);
9128 ev->addr.type = addr_type;
9129 ev->connected = connected;
9130 memcpy(ev->ifname, (__u8 *)if_name, 16);
9132 ev_size = sizeof(*ev);
9134 mgmt_event(MGMT_EV_6LOWPAN_CONN_STATE_CHANGED, hdev, ev, ev_size, NULL);
/* Completion handler for HCI LE Read Maximum Data Length: replies to
 * the pending MGMT command with the controller's max TX/RX octets and
 * times cached on hdev, or with an error status on failure.
 */
9137 void mgmt_le_read_maximum_data_length_complete(struct hci_dev *hdev, u8 status)
9139 struct mgmt_pending_cmd *cmd;
9140 struct mgmt_rp_le_read_maximum_data_length rp;
9142 BT_DBG("%s status %u", hdev->name, status);
9144 cmd = pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev);
9149 mgmt_cmd_status(cmd->sk, hdev->id,
9150 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9151 mgmt_status(status));
9153 memset(&rp, 0, sizeof(rp));
9155 rp.max_tx_octets = cpu_to_le16(hdev->le_max_tx_len);
9156 rp.max_tx_time = cpu_to_le16(hdev->le_max_tx_time);
9157 rp.max_rx_octets = cpu_to_le16(hdev->le_max_rx_len);
9158 rp.max_rx_time = cpu_to_le16(hdev->le_max_rx_time);
9160 mgmt_cmd_complete(cmd->sk, hdev->id,
9161 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, 0,
9164 mgmt_pending_remove(cmd);
/* Tizen MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH handler: queues the HCI
 * LE Read Maximum Data Length command; the reply is delivered later by
 * mgmt_le_read_maximum_data_length_complete().
 */
9167 static int read_maximum_le_data_length(struct sock *sk,
9168 struct hci_dev *hdev, void *data, u16 len)
9170 struct mgmt_pending_cmd *cmd;
9173 BT_DBG("read_maximum_le_data_length %s", hdev->name);
9177 if (!hdev_is_powered(hdev)) {
9178 err = mgmt_cmd_status(sk, hdev->id,
9179 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9180 MGMT_STATUS_NOT_POWERED);
9184 if (!lmp_le_capable(hdev)) {
9185 err = mgmt_cmd_status(sk, hdev->id,
9186 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9187 MGMT_STATUS_NOT_SUPPORTED);
9191 if (pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev)) {
9192 err = mgmt_cmd_status(sk, hdev->id,
9193 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9198 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9205 err = hci_send_cmd(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
9207 mgmt_pending_remove(cmd);
9210 hci_dev_unlock(hdev);
/* Completion handler for HCI LE Write Suggested Default Data Length:
 * completes (or fails) the pending MGMT command and removes it. Runs
 * under hdev->lock (unlocked at the end).
 */
9214 void mgmt_le_write_host_suggested_data_length_complete(struct hci_dev *hdev,
9217 struct mgmt_pending_cmd *cmd;
9219 BT_DBG("status 0x%02x", status);
9223 cmd = pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev);
9225 BT_ERR("cmd not found in the pending list");
9230 mgmt_cmd_status(cmd->sk, hdev->id,
9231 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9232 mgmt_status(status));
9234 mgmt_cmd_complete(cmd->sk, hdev->id,
9235 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9238 mgmt_pending_remove(cmd);
9241 hci_dev_unlock(hdev);
/* Tizen MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH handler: forwards
 * the suggested default TX octets/time to the controller via HCI LE
 * Write Suggested Default Data Length; completion is reported by
 * mgmt_le_write_host_suggested_data_length_complete().
 */
9244 static int write_host_suggested_le_data_length(struct sock *sk,
9245 struct hci_dev *hdev, void *data, u16 len)
9247 struct mgmt_pending_cmd *cmd;
9248 struct mgmt_cp_le_write_host_suggested_data_length *cp = data;
9249 struct hci_cp_le_write_def_data_len hci_data;
9252 BT_DBG("Write host suggested data length request for %s", hdev->name);
9256 if (!hdev_is_powered(hdev)) {
9257 err = mgmt_cmd_status(sk, hdev->id,
9258 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9259 MGMT_STATUS_NOT_POWERED);
9263 if (!lmp_le_capable(hdev)) {
9264 err = mgmt_cmd_status(sk, hdev->id,
9265 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9266 MGMT_STATUS_NOT_SUPPORTED);
9270 if (pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
9271 err = mgmt_cmd_status(sk, hdev->id,
9272 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9277 cmd = mgmt_pending_add(sk, MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
/* Fields are already little-endian in the MGMT command payload —
 * presumably; confirm against struct definitions in mgmt_tizen.h.
 */
9284 hci_data.tx_len = cp->def_tx_octets;
9285 hci_data.tx_time = cp->def_tx_time;
9287 err = hci_send_cmd(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
9288 sizeof(hci_data), &hci_data);
9290 mgmt_pending_remove(cmd);
9293 hci_dev_unlock(hdev);
9297 #endif /* TIZEN_BT */
/* Validate a single LTK entry from Load Long Term Keys: initiator must
 * be 0/1 and, for static random addresses, the two most significant
 * bits of the address must be set per the Core spec.
 */
9299 static bool ltk_is_valid(struct mgmt_ltk_info *key)
9301 if (key->initiator != 0x00 && key->initiator != 0x01)
9304 switch (key->addr.type) {
9305 case BDADDR_LE_PUBLIC:
9308 case BDADDR_LE_RANDOM:
9309 /* Two most significant bits shall be set */
9310 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler: validates the key count against
 * the payload length, validates every LTK entry, then replaces the
 * entire SMP LTK store with the supplied keys (skipping any key on the
 * blocked-key list).
 */
9318 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
9319 void *cp_data, u16 len)
9321 struct mgmt_cp_load_long_term_keys *cp = cp_data;
9322 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
9323 sizeof(struct mgmt_ltk_info));
9324 u16 key_count, expected_len;
9327 bt_dev_dbg(hdev, "sock %p", sk);
9329 if (!lmp_le_capable(hdev))
9330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9331 MGMT_STATUS_NOT_SUPPORTED);
9333 key_count = __le16_to_cpu(cp->key_count);
9334 if (key_count > max_key_count) {
9335 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
9337 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9338 MGMT_STATUS_INVALID_PARAMS);
/* The payload length must match the declared key count exactly. */
9341 expected_len = struct_size(cp, keys, key_count);
9342 if (expected_len != len) {
9343 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
9345 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9346 MGMT_STATUS_INVALID_PARAMS);
9349 bt_dev_dbg(hdev, "key_count %u", key_count);
/* First pass: validate every entry before touching the key store. */
9351 for (i = 0; i < key_count; i++) {
9352 struct mgmt_ltk_info *key = &cp->keys[i];
9354 if (!ltk_is_valid(key))
9355 return mgmt_cmd_status(sk, hdev->id,
9356 MGMT_OP_LOAD_LONG_TERM_KEYS,
9357 MGMT_STATUS_INVALID_PARAMS);
/* Second pass: clear the existing store and add the new keys. */
9362 hci_smp_ltks_clear(hdev);
9364 for (i = 0; i < key_count; i++) {
9365 struct mgmt_ltk_info *key = &cp->keys[i];
9366 u8 type, authenticated;
9367 u8 addr_type = le_addr_type(key->addr.type);
9369 if (hci_is_blocked_key(hdev,
9370 HCI_BLOCKED_KEY_TYPE_LTK,
9372 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map the MGMT key type onto SMP key type + authenticated flag. */
9377 switch (key->type) {
9378 case MGMT_LTK_UNAUTHENTICATED:
9379 authenticated = 0x00;
9380 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9382 case MGMT_LTK_AUTHENTICATED:
9383 authenticated = 0x01;
9384 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9386 case MGMT_LTK_P256_UNAUTH:
9387 authenticated = 0x00;
9388 type = SMP_LTK_P256;
9390 case MGMT_LTK_P256_AUTH:
9391 authenticated = 0x01;
9392 type = SMP_LTK_P256;
9394 case MGMT_LTK_P256_DEBUG:
9395 authenticated = 0x00;
9396 type = SMP_LTK_P256_DEBUG;
9402 /* When using SMP over BR/EDR, the addr type should be set to BREDR */
9403 if (key->addr.type == BDADDR_BREDR)
9404 addr_type = BDADDR_BREDR;
9406 hci_add_ltk(hdev, &key->addr.bdaddr,
9407 addr_type, type, authenticated,
9408 key->val, key->enc_size, key->ediv, key->rand);
9411 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
9414 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for Get Connection Information: on success,
 * reply with the RSSI / TX power / max TX power cached on the hci_conn
 * (stored in cmd->user_data by get_conn_info_sync); on error, reply
 * with "invalid" sentinel values. Frees the pending command.
 */
9419 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
9421 struct mgmt_pending_cmd *cmd = data;
9422 struct hci_conn *conn = cmd->user_data;
9423 struct mgmt_cp_get_conn_info *cp = cmd->param;
9424 struct mgmt_rp_get_conn_info rp;
9427 bt_dev_dbg(hdev, "err %d", err);
9429 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
9431 status = mgmt_status(err);
9432 if (status == MGMT_STATUS_SUCCESS) {
9433 rp.rssi = conn->rssi;
9434 rp.tx_power = conn->tx_power;
9435 rp.max_tx_power = conn->max_tx_power;
9437 rp.rssi = HCI_RSSI_INVALID;
9438 rp.tx_power = HCI_TX_POWER_INVALID;
9439 rp.max_tx_power = HCI_TX_POWER_INVALID;
9442 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
9445 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Get Connection Information: re-resolves the
 * connection (it may have dropped since the request was queued), then
 * refreshes RSSI and — only when still unknown or on BR/EDR — the TX
 * power and max TX power via synchronous HCI reads.
 */
9448 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
9450 struct mgmt_pending_cmd *cmd = data;
9451 struct mgmt_cp_get_conn_info *cp = cmd->param;
9452 struct hci_conn *conn;
9456 /* Make sure we are still connected */
9457 if (cp->addr.type == BDADDR_BREDR)
9458 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9461 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9463 if (!conn || conn->state != BT_CONNECTED)
9464 return MGMT_STATUS_NOT_CONNECTED;
/* Stash the conn so the completion callback can read cached values. */
9466 cmd->user_data = conn;
9467 handle = cpu_to_le16(conn->handle);
9469 /* Refresh RSSI each time */
9470 err = hci_read_rssi_sync(hdev, handle);
9472 /* For LE links TX power does not change thus we don't need to
9473 * query for it once value is known.
9475 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
9476 conn->tx_power == HCI_TX_POWER_INVALID))
9477 err = hci_read_tx_power_sync(hdev, handle, 0x00);
9479 /* Max TX power needs to be read only once per connection */
9480 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
9481 err = hci_read_tx_power_sync(hdev, handle, 0x01);
/* MGMT_OP_GET_CONN_INFO handler: returns RSSI/TX-power for an active
 * connection. Cached values are returned directly while fresh enough;
 * otherwise a sync HCI query (get_conn_info_sync) is queued and the
 * reply is sent from get_conn_info_complete().
 */
9487 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
9489 struct mgmt_cp_get_conn_info *cp = data;
9490 struct mgmt_rp_get_conn_info rp;
9491 struct hci_conn *conn;
9492 unsigned long conn_info_age;
9495 bt_dev_dbg(hdev, "sock %p", sk);
9497 memset(&rp, 0, sizeof(rp));
9498 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9499 rp.addr.type = cp->addr.type;
9501 if (!bdaddr_type_is_valid(cp->addr.type))
9502 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9503 MGMT_STATUS_INVALID_PARAMS,
9508 if (!hdev_is_powered(hdev)) {
9509 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9510 MGMT_STATUS_NOT_POWERED, &rp,
9515 if (cp->addr.type == BDADDR_BREDR)
9516 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9519 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9521 if (!conn || conn->state != BT_CONNECTED) {
9522 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9523 MGMT_STATUS_NOT_CONNECTED, &rp,
9528 /* To avoid client trying to guess when to poll again for information we
9529 * calculate conn info age as random value between min/max set in hdev.
9531 conn_info_age = get_random_u32_inclusive(hdev->conn_info_min_age,
9532 hdev->conn_info_max_age - 1);
9534 /* Query controller to refresh cached values if they are too old or were
9537 if (time_after(jiffies, conn->conn_info_timestamp +
9538 msecs_to_jiffies(conn_info_age)) ||
9539 !conn->conn_info_timestamp) {
9540 struct mgmt_pending_cmd *cmd;
9542 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
9547 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
9548 cmd, get_conn_info_complete);
9552 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9553 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9556 mgmt_pending_free(cmd);
9561 conn->conn_info_timestamp = jiffies;
9563 /* Cache is valid, just reply with values cached in hci_conn */
9564 rp.rssi = conn->rssi;
9565 rp.tx_power = conn->tx_power;
9566 rp.max_tx_power = conn->max_tx_power;
9568 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9569 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9573 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for Get Clock Information: builds the reply
 * from the local clock cached on hdev and, when a connection was
 * resolved, the piconet clock/accuracy cached on the hci_conn. Frees
 * the pending command.
 */
9577 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
9579 struct mgmt_pending_cmd *cmd = data;
9580 struct mgmt_cp_get_clock_info *cp = cmd->param;
9581 struct mgmt_rp_get_clock_info rp;
9582 struct hci_conn *conn = cmd->user_data;
9583 u8 status = mgmt_status(err);
9585 bt_dev_dbg(hdev, "err %d", err);
9587 memset(&rp, 0, sizeof(rp));
9588 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9589 rp.addr.type = cp->addr.type;
9594 rp.local_clock = cpu_to_le32(hdev->clock);
9597 rp.piconet_clock = cpu_to_le32(conn->clock);
9598 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
9602 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
9605 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Get Clock Information: first reads the local
 * clock (zeroed hci_cp => which = 0x00), then, if the connection still
 * exists, reads the piconet clock for its handle.
 */
9608 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
9610 struct mgmt_pending_cmd *cmd = data;
9611 struct mgmt_cp_get_clock_info *cp = cmd->param;
9612 struct hci_cp_read_clock hci_cp;
9613 struct hci_conn *conn;
9615 memset(&hci_cp, 0, sizeof(hci_cp));
9616 hci_read_clock_sync(hdev, &hci_cp);
9618 /* Make sure connection still exists */
9619 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
9620 if (!conn || conn->state != BT_CONNECTED)
9621 return MGMT_STATUS_NOT_CONNECTED;
/* Stash the conn for the completion callback's piconet fields. */
9623 cmd->user_data = conn;
9624 hci_cp.handle = cpu_to_le16(conn->handle);
9625 hci_cp.which = 0x01; /* Piconet clock */
9627 return hci_read_clock_sync(hdev, &hci_cp);
/* MGMT_OP_GET_CLOCK_INFO handler: BR/EDR only. Optionally resolves the
 * ACL connection (when a non-ANY address is given), then queues
 * get_clock_info_sync; the reply is sent by get_clock_info_complete().
 */
9630 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
9633 struct mgmt_cp_get_clock_info *cp = data;
9634 struct mgmt_rp_get_clock_info rp;
9635 struct mgmt_pending_cmd *cmd;
9636 struct hci_conn *conn;
9639 bt_dev_dbg(hdev, "sock %p", sk);
9641 memset(&rp, 0, sizeof(rp));
9642 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9643 rp.addr.type = cp->addr.type;
9645 if (cp->addr.type != BDADDR_BREDR)
9646 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9647 MGMT_STATUS_INVALID_PARAMS,
9652 if (!hdev_is_powered(hdev)) {
9653 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9654 MGMT_STATUS_NOT_POWERED, &rp,
/* BDADDR_ANY means "local clock only" — no connection lookup needed. */
9659 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9660 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9662 if (!conn || conn->state != BT_CONNECTED) {
9663 err = mgmt_cmd_complete(sk, hdev->id,
9664 MGMT_OP_GET_CLOCK_INFO,
9665 MGMT_STATUS_NOT_CONNECTED,
9673 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
9677 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
9678 get_clock_info_complete);
9681 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9682 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9685 mgmt_pending_free(cmd);
9690 hci_dev_unlock(hdev);
/* Return whether an LE connection to addr with the given address type
 * exists and is in the BT_CONNECTED state.
 */
9694 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
9696 struct hci_conn *conn;
9698 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
9702 if (conn->dst_type != type)
9705 if (conn->state != BT_CONNECTED)
9711 /* This function requires the caller holds hdev->lock */
/* Create (or update) the connection parameters for addr and move the
 * entry onto the pending-connection or pending-report list that
 * matches the requested auto_connect policy.
 */
9712 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
9713 u8 addr_type, u8 auto_connect)
9715 struct hci_conn_params *params;
9717 params = hci_conn_params_add(hdev, addr, addr_type);
9721 if (params->auto_connect == auto_connect)
/* Detach from any previous pending list before re-filing the entry. */
9724 hci_pend_le_list_del_init(params);
9726 switch (auto_connect) {
9727 case HCI_AUTO_CONN_DISABLED:
9728 case HCI_AUTO_CONN_LINK_LOSS:
9729 /* If auto connect is being disabled when we're trying to
9730 * connect to device, keep connecting.
9732 if (params->explicit_connect)
9733 hci_pend_le_list_add(params, &hdev->pend_le_conns)
9735 case HCI_AUTO_CONN_REPORT:
9736 if (params->explicit_connect)
9737 hci_pend_le_list_add(params, &hdev->pend_le_conns);
9739 hci_pend_le_list_add(params, &hdev->pend_le_reports);
9741 case HCI_AUTO_CONN_DIRECT:
9742 case HCI_AUTO_CONN_ALWAYS:
/* Only queue for (re)connection when not already connected. */
9743 if (!is_connected(hdev, addr, addr_type))
9744 hci_pend_le_list_add(params, &hdev->pend_le_conns);
9748 params->auto_connect = auto_connect;
9750 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
9751 addr, addr_type, auto_connect);
/* Emit MGMT_EV_DEVICE_ADDED to all sockets except the originator. */
9756 static void device_added(struct sock *sk, struct hci_dev *hdev,
9757 bdaddr_t *bdaddr, u8 type, u8 action)
9759 struct mgmt_ev_device_added ev;
9761 bacpy(&ev.addr.bdaddr, bdaddr);
9762 ev.addr.type = type;
9765 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync work: refresh passive scanning after a device was added. */
9768 static int add_device_sync(struct hci_dev *hdev, void *data)
9770 return hci_update_passive_scan_sync(hdev);
/* MGMT_OP_ADD_DEVICE handler. Action: 0x00 = background scan/report,
 * 0x01 = allow incoming connection (BR/EDR) / direct connect (LE),
 * 0x02 = auto-connect. BR/EDR devices go on the accept list; LE
 * devices get connection parameters with the matching auto-connect
 * policy, after which passive scanning is refreshed.
 */
9773 static int add_device(struct sock *sk, struct hci_dev *hdev,
9774 void *data, u16 len)
9776 struct mgmt_cp_add_device *cp = data;
9777 u8 auto_conn, addr_type;
9778 struct hci_conn_params *params;
9780 u32 current_flags = 0;
9781 u32 supported_flags;
9783 bt_dev_dbg(hdev, "sock %p", sk);
9785 if (!bdaddr_type_is_valid(cp->addr.type) ||
9786 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
9787 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9788 MGMT_STATUS_INVALID_PARAMS,
9789 &cp->addr, sizeof(cp->addr));
9791 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
9792 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9793 MGMT_STATUS_INVALID_PARAMS,
9794 &cp->addr, sizeof(cp->addr));
9798 if (cp->addr.type == BDADDR_BREDR) {
9799 /* Only incoming connections action is supported for now */
9800 if (cp->action != 0x01) {
9801 err = mgmt_cmd_complete(sk, hdev->id,
9803 MGMT_STATUS_INVALID_PARAMS,
9804 &cp->addr, sizeof(cp->addr));
9808 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
9814 hci_update_scan(hdev);
/* LE path: map the action onto the auto-connect policy. */
9819 addr_type = le_addr_type(cp->addr.type);
9821 if (cp->action == 0x02)
9822 auto_conn = HCI_AUTO_CONN_ALWAYS;
9823 else if (cp->action == 0x01)
9824 auto_conn = HCI_AUTO_CONN_DIRECT;
9826 auto_conn = HCI_AUTO_CONN_REPORT;
9828 /* Kernel internally uses conn_params with resolvable private
9829 * address, but Add Device allows only identity addresses.
9830 * Make sure it is enforced before calling
9831 * hci_conn_params_lookup.
9833 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9834 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9835 MGMT_STATUS_INVALID_PARAMS,
9836 &cp->addr, sizeof(cp->addr));
9840 /* If the connection parameters don't exist for this device,
9841 * they will be created and configured with defaults.
9843 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
9845 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9846 MGMT_STATUS_FAILED, &cp->addr,
9850 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9853 current_flags = params->flags;
9856 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
/* Notify other MGMT clients of the addition and the device flags. */
9861 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
9862 supported_flags = hdev->conn_flags;
9863 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
9864 supported_flags, current_flags);
9866 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9867 MGMT_STATUS_SUCCESS, &cp->addr,
9871 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_REMOVED for the given address/type pair.
 * The event is sent via mgmt_event() with @sk as the skip-socket, so the
 * command originator does not receive its own notification.
 * NOTE(review): extract is lossy — braces/blank lines are elided here.
 */
9875 static void device_removed(struct sock *sk, struct hci_dev *hdev,
9876 bdaddr_t *bdaddr, u8 type)
9878 struct mgmt_ev_device_removed ev;
9880 bacpy(&ev.addr.bdaddr, bdaddr);
9881 ev.addr.type = type;
9883 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync callback for Remove Device: refresh the passive scan
 * (accept list / background scan) state after parameters were removed.
 */
9886 static int remove_device_sync(struct hci_dev *hdev, void *data)
9888 return hci_update_passive_scan_sync(hdev);
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * Two modes, selected by the supplied address:
 *  - specific address: delete the matching accept-list entry (BR/EDR) or
 *    connection parameters (LE), rejecting invalid address types and
 *    auto-connect states that were never added via Add Device;
 *  - BDADDR_ANY: flush the whole accept list and all LE conn_params
 *    (except disabled/explicit-connect entries, see below).
 *
 * NOTE(review): this extract is lossy — several interior lines (locking,
 * gotos, closing braces) are elided; comments describe only the visible
 * calls.
 */
9891 static int remove_device(struct sock *sk, struct hci_dev *hdev,
9892 void *data, u16 len)
9894 struct mgmt_cp_remove_device *cp = data;
9897 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-zero bdaddr: remove one specific device. */
9901 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9902 struct hci_conn_params *params;
9905 if (!bdaddr_type_is_valid(cp->addr.type)) {
9906 err = mgmt_cmd_complete(sk, hdev->id,
9907 MGMT_OP_REMOVE_DEVICE,
9908 MGMT_STATUS_INVALID_PARAMS,
9909 &cp->addr, sizeof(cp->addr));
/* BR/EDR: the device lives on the accept list, not in conn_params. */
9913 if (cp->addr.type == BDADDR_BREDR) {
9914 err = hci_bdaddr_list_del(&hdev->accept_list,
9918 err = mgmt_cmd_complete(sk, hdev->id,
9919 MGMT_OP_REMOVE_DEVICE,
9920 MGMT_STATUS_INVALID_PARAMS,
9926 hci_update_scan(hdev);
9928 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path from here on. */
9933 addr_type = le_addr_type(cp->addr.type);
9935 /* Kernel internally uses conn_params with resolvable private
9936 * address, but Remove Device allows only identity addresses.
9937 * Make sure it is enforced before calling
9938 * hci_conn_params_lookup.
9940 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9941 err = mgmt_cmd_complete(sk, hdev->id,
9942 MGMT_OP_REMOVE_DEVICE,
9943 MGMT_STATUS_INVALID_PARAMS,
9944 &cp->addr, sizeof(cp->addr));
9948 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9951 err = mgmt_cmd_complete(sk, hdev->id,
9952 MGMT_OP_REMOVE_DEVICE,
9953 MGMT_STATUS_INVALID_PARAMS,
9954 &cp->addr, sizeof(cp->addr));
/* DISABLED/EXPLICIT entries were not created by Add Device, so
 * refusing them keeps Add/Remove Device symmetric.
 */
9958 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
9959 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
9960 err = mgmt_cmd_complete(sk, hdev->id,
9961 MGMT_OP_REMOVE_DEVICE,
9962 MGMT_STATUS_INVALID_PARAMS,
9963 &cp->addr, sizeof(cp->addr));
9967 hci_conn_params_free(params);
9969 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: wildcard removal of everything. */
9971 struct hci_conn_params *p, *tmp;
9972 struct bdaddr_list *b, *btmp;
/* Wildcard requires addr.type == 0 (BR/EDR) per mgmt API. */
9974 if (cp->addr.type) {
9975 err = mgmt_cmd_complete(sk, hdev->id,
9976 MGMT_OP_REMOVE_DEVICE,
9977 MGMT_STATUS_INVALID_PARAMS,
9978 &cp->addr, sizeof(cp->addr));
9982 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
9983 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
9988 hci_update_scan(hdev);
9990 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
9991 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
9993 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Explicit-connect entries are demoted, not freed: an in-flight
 * connect attempt still references them.
 */
9994 if (p->explicit_connect) {
9995 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
9998 hci_conn_params_free(p);
10001 bt_dev_dbg(hdev, "All LE connection parameters were removed");
/* Re-sync passive scanning with the now-reduced parameter set. */
10004 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
10007 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
10008 MGMT_STATUS_SUCCESS, &cp->addr,
10011 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: bulk-load LE connection parameters.
 *
 * Validates the variable-length payload (param_count bounded so the
 * struct_size() computation cannot overflow u16, and total length must
 * match exactly), clears previously disabled params, then adds each
 * entry that passes hci_check_conn_params(). Invalid entries are logged
 * and skipped rather than failing the whole command.
 * NOTE(review): extract is lossy — some interior lines are elided.
 */
10015 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
10018 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeps expected_len within u16 below. */
10019 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
10020 sizeof(struct mgmt_conn_param));
10021 u16 param_count, expected_len;
10024 if (!lmp_le_capable(hdev))
10025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10026 MGMT_STATUS_NOT_SUPPORTED);
10028 param_count = __le16_to_cpu(cp->param_count);
10029 if (param_count > max_param_count) {
10030 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
10032 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10033 MGMT_STATUS_INVALID_PARAMS);
/* Exact-length check: header plus param_count trailing entries. */
10036 expected_len = struct_size(cp, params, param_count);
10037 if (expected_len != len) {
10038 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
10039 expected_len, len);
10040 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10041 MGMT_STATUS_INVALID_PARAMS);
10044 bt_dev_dbg(hdev, "param_count %u", param_count);
10046 hci_dev_lock(hdev);
10048 hci_conn_params_clear_disabled(hdev);
10050 for (i = 0; i < param_count; i++) {
10051 struct mgmt_conn_param *param = &cp->params[i];
10052 struct hci_conn_params *hci_param;
10053 u16 min, max, latency, timeout;
10056 bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
/* Only LE identity address types are accepted per entry. */
10059 if (param->addr.type == BDADDR_LE_PUBLIC) {
10060 addr_type = ADDR_LE_DEV_PUBLIC;
10061 } else if (param->addr.type == BDADDR_LE_RANDOM) {
10062 addr_type = ADDR_LE_DEV_RANDOM;
10064 bt_dev_err(hdev, "ignoring invalid connection parameters");
10068 min = le16_to_cpu(param->min_interval);
10069 max = le16_to_cpu(param->max_interval);
10070 latency = le16_to_cpu(param->latency);
10071 timeout = le16_to_cpu(param->timeout);
10073 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
10074 min, max, latency, timeout);
10076 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
10077 bt_dev_err(hdev, "ignoring invalid connection parameters");
10081 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
10084 bt_dev_err(hdev, "failed to add connection parameters");
10088 hci_param->conn_min_interval = min;
10089 hci_param->conn_max_interval = max;
10090 hci_param->conn_latency = latency;
10091 hci_param->supervision_timeout = timeout;
10094 hci_dev_unlock(hdev);
10096 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler.
 *
 * Toggles HCI_EXT_CONFIGURED on a powered-off controller that has the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk. When the change flips the device
 * between configured and unconfigured state, the mgmt index is removed
 * and re-added accordingly, and an auto-power-on is scheduled.
 * NOTE(review): extract is lossy — the branch structure between the
 * visible lines is partially elided.
 */
10100 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
10101 void *data, u16 len)
10103 struct mgmt_cp_set_external_config *cp = data;
10107 bt_dev_dbg(hdev, "sock %p", sk);
10109 if (hdev_is_powered(hdev))
10110 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10111 MGMT_STATUS_REJECTED);
/* config is a strict boolean on the wire. */
10113 if (cp->config != 0x00 && cp->config != 0x01)
10114 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10115 MGMT_STATUS_INVALID_PARAMS);
10117 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
10118 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10119 MGMT_STATUS_NOT_SUPPORTED);
10121 hci_dev_lock(hdev);
10124 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
10126 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
10128 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
10135 err = new_options(hdev, sk);
/* Configured-state transition: swap which index list this device
 * appears on (configured vs. unconfigured).
 */
10137 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
10138 mgmt_index_removed(hdev);
10140 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
10141 hci_dev_set_flag(hdev, HCI_CONFIG);
10142 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
10144 queue_work(hdev->req_workqueue, &hdev->power_on);
10146 set_bit(HCI_RAW, &hdev->flags);
10147 mgmt_index_added(hdev);
10152 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler.
 *
 * Stores a new public bdaddr for an unpowered controller whose driver
 * provides a set_bdaddr() hook. If the address change completes the
 * configuration of an unconfigured controller, the index moves to the
 * configured list and a power-on cycle is scheduled.
 * NOTE(review): extract is lossy — some interior lines are elided.
 */
10156 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
10157 void *data, u16 len)
10159 struct mgmt_cp_set_public_address *cp = data;
10163 bt_dev_dbg(hdev, "sock %p", sk);
10165 if (hdev_is_powered(hdev))
10166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10167 MGMT_STATUS_REJECTED);
/* BDADDR_ANY is not a valid public address. */
10169 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
10170 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10171 MGMT_STATUS_INVALID_PARAMS);
/* Only meaningful when the driver can program the address. */
10173 if (!hdev->set_bdaddr)
10174 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10175 MGMT_STATUS_NOT_SUPPORTED);
10177 hci_dev_lock(hdev);
10179 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
10180 bacpy(&hdev->public_addr, &cp->bdaddr);
10182 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
10189 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
10190 err = new_options(hdev, sk);
10192 if (is_configured(hdev)) {
10193 mgmt_index_removed(hdev);
10195 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
10197 hci_dev_set_flag(hdev, HCI_CONFIG);
10198 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
10200 queue_work(hdev->req_workqueue, &hdev->power_on);
10204 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_NAME_UPDATE with the remote device's name packed
 * as an EIR_NAME_COMPLETE field. Address type is fixed to BDADDR_BREDR.
 * NOTE(review): extract is lossy — the buffer declaration and part of
 * the parameter list are elided from this view.
 */
10209 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
10213 struct mgmt_ev_device_name_update *ev = (void *)buf;
10219 bacpy(&ev->addr.bdaddr, bdaddr);
10220 ev->addr.type = BDADDR_BREDR;
10222 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
10225 ev->eir_len = cpu_to_le16(eir_len);
10227 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
10228 sizeof(*ev) + eir_len, NULL);
/* Emit MGMT_EV_CONN_UPDATE_FAILED carrying the peer address (converted
 * from HCI link/addr types) and the failing HCI status. Part of the
 * Tizen mgmt extensions (see mgmt_tizen.h in the includes).
 */
10231 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10232 u8 link_type, u8 addr_type, u8 status)
10234 struct mgmt_ev_conn_update_failed ev;
10236 bacpy(&ev.addr.bdaddr, bdaddr);
10237 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10238 ev.status = status;
10240 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
10241 &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_CONN_UPDATED with the new LE connection parameters
 * (interval/latency/supervision timeout, converted to little endian).
 * Counterpart to mgmt_le_conn_update_failed() for the success path.
 */
10244 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
10245 u8 link_type, u8 addr_type, u16 conn_interval,
10246 u16 conn_latency, u16 supervision_timeout)
10248 struct mgmt_ev_conn_updated ev;
10250 bacpy(&ev.addr.bdaddr, bdaddr);
10251 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10252 ev.conn_interval = cpu_to_le16(conn_interval);
10253 ev.conn_latency = cpu_to_le16(conn_latency);
10254 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
10256 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
10257 &ev, sizeof(ev), NULL);
10260 /* le device found event - Pass adv type */
/* Emit MGMT_EV_LE_DEVICE_FOUND for a scan result, combining EIR data,
 * an optional Class-of-Device field (appended only when the EIR does
 * not already contain one), and the scan response. Dropped silently
 * when no (LE) discovery is active or the assembled event would not
 * fit the stack buffer.
 * NOTE(review): extract is lossy — the buf declaration and early
 * returns are elided from this view.
 */
10261 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10262 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
10263 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
10266 struct mgmt_ev_le_device_found *ev = (void *)buf;
10269 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
10272 /* Make sure that the buffer is big enough. The 5 extra bytes
10273 * are for the potential CoD field.
10275 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
10278 memset(buf, 0, sizeof(buf));
10280 bacpy(&ev->addr.bdaddr, bdaddr);
10281 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10283 ev->flags = cpu_to_le32(flags);
10284 ev->adv_type = adv_type;
10287 memcpy(ev->eir, eir, eir_len);
/* Append CoD only if the advertiser did not include one itself. */
10289 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
10290 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10293 if (scan_rsp_len > 0)
10294 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
10296 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10297 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
10299 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Completion callback for Read Local OOB Extended Data.
 *
 * Translates the HCI reply skb (legacy or secure-connections variant,
 * chosen by bredr_sc_enabled()) into a mgmt response containing the
 * device class plus the C192/R192 and, for SC, C256/R256 hash/randomizer
 * EIR fields, then mirrors the data to other sockets via
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED.
 * NOTE(review): extract is lossy — several branches, gotos and the
 * h192/r192/h256/r256 NULL-initialization paths are elided; comments
 * describe only the visible lines.
 */
10303 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
10306 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
10307 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
10308 u8 *h192, *r192, *h256, *r256;
10309 struct mgmt_pending_cmd *cmd = data;
10310 struct sk_buff *skb = cmd->skb;
10311 u8 status = mgmt_status(err);
/* Bail out if this command is no longer the pending one. */
10314 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
10319 status = MGMT_STATUS_FAILED;
10320 else if (IS_ERR(skb))
10321 status = mgmt_status(PTR_ERR(skb));
10323 status = mgmt_status(skb->data[0]);
10326 bt_dev_dbg(hdev, "status %u", status);
10328 mgmt_cp = cmd->param;
10331 status = mgmt_status(status);
/* Legacy (non-SC) controller: only the 192-bit hash/randomizer. */
10338 } else if (!bredr_sc_enabled(hdev)) {
10339 struct hci_rp_read_local_oob_data *rp;
10341 if (skb->len != sizeof(*rp)) {
10342 status = MGMT_STATUS_FAILED;
10345 status = MGMT_STATUS_SUCCESS;
10346 rp = (void *)skb->data;
/* 5 bytes CoD field + 18 bytes each for hash and randomizer. */
10348 eir_len = 5 + 18 + 18;
10355 struct hci_rp_read_local_oob_ext_data *rp;
10357 if (skb->len != sizeof(*rp)) {
10358 status = MGMT_STATUS_FAILED;
10361 status = MGMT_STATUS_SUCCESS;
10362 rp = (void *)skb->data;
/* SC-only mode drops the legacy 192-bit values. */
10364 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
10365 eir_len = 5 + 18 + 18;
10369 eir_len = 5 + 18 + 18 + 18 + 18;
10370 h192 = rp->hash192;
10371 r192 = rp->rand192;
10374 h256 = rp->hash256;
10375 r256 = rp->rand256;
10379 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
10386 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
10387 hdev->dev_class, 3);
10389 if (h192 && r192) {
10390 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10391 EIR_SSP_HASH_C192, h192, 16);
10392 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10393 EIR_SSP_RAND_R192, r192, 16);
10396 if (h256 && r256) {
10397 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10398 EIR_SSP_HASH_C256, h256, 16);
10399 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10400 EIR_SSP_RAND_R256, r256, 16);
10404 mgmt_rp->type = mgmt_cp->type;
10405 mgmt_rp->eir_len = cpu_to_le16(eir_len);
10407 err = mgmt_cmd_complete(cmd->sk, hdev->id,
10408 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
10409 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
10410 if (err < 0 || status)
/* Broadcast the refreshed OOB data to subscribed sockets, skipping
 * the requester (cmd->sk).
 */
10413 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
10415 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10416 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
10417 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
10419 if (skb && !IS_ERR(skb))
10423 mgmt_pending_remove(cmd);
/* Queue an async Read Local OOB Data request for the SSP (BR/EDR) case.
 * Registers a pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command and hands
 * it to the hci_sync machinery; the pending entry is removed again if
 * queueing fails.
 * NOTE(review): extract is lossy — the cmd NULL-check and error-path
 * lines are partially elided.
 */
10426 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
10427 struct mgmt_cp_read_local_oob_ext_data *cp)
10429 struct mgmt_pending_cmd *cmd;
10432 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
10437 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
10438 read_local_oob_ext_data_complete);
10441 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * For BR/EDR (with SSP enabled) this defers to the asynchronous HCI
 * request via read_local_ssp_oob_req(). For LE it synthesizes the OOB
 * EIR blob inline: local address + role, optional SC confirm/random
 * values from smp_generate_oob(), and AD flags. Privacy mode is
 * rejected because the active RPA cannot be reported (see comment in
 * the body). On success the data is also broadcast as
 * MGMT_EV_LOCAL_OOB_DATA_UPDATED.
 * NOTE(review): extract is lossy — gotos, labels and several interior
 * lines are elided; comments describe only the visible calls.
 */
10448 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
10449 void *data, u16 data_len)
10451 struct mgmt_cp_read_local_oob_ext_data *cp = data;
10452 struct mgmt_rp_read_local_oob_ext_data *rp;
10455 u8 status, flags, role, addr[7], hash[16], rand[16];
10458 bt_dev_dbg(hdev, "sock %p", sk);
10460 if (hdev_is_powered(hdev)) {
/* cp->type is a bitmask of BDADDR_* transports. */
10461 switch (cp->type) {
10462 case BIT(BDADDR_BREDR):
10463 status = mgmt_bredr_support(hdev);
10469 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10470 status = mgmt_le_support(hdev);
/* LE blob: addr(9) + role(3) + confirm(18) + random(18) + flags(3). */
10474 eir_len = 9 + 3 + 18 + 18 + 3;
10477 status = MGMT_STATUS_INVALID_PARAMS;
10482 status = MGMT_STATUS_NOT_POWERED;
10486 rp_len = sizeof(*rp) + eir_len;
10487 rp = kmalloc(rp_len, GFP_ATOMIC);
10491 if (!status && !lmp_ssp_capable(hdev)) {
10492 status = MGMT_STATUS_NOT_SUPPORTED;
10499 hci_dev_lock(hdev);
10502 switch (cp->type) {
10503 case BIT(BDADDR_BREDR):
/* SSP path is asynchronous; reply comes from the completion cb. */
10504 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10505 err = read_local_ssp_oob_req(hdev, sk, cp);
10506 hci_dev_unlock(hdev);
10510 status = MGMT_STATUS_FAILED;
10513 eir_len = eir_append_data(rp->eir, eir_len,
10515 hdev->dev_class, 3);
10518 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10519 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
10520 smp_generate_oob(hdev, hash, rand) < 0) {
10521 hci_dev_unlock(hdev);
10522 status = MGMT_STATUS_FAILED;
10526 /* This should return the active RPA, but since the RPA
10527 * is only programmed on demand, it is really hard to fill
10528 * this in at the moment. For now disallow retrieving
10529 * local out-of-band data when privacy is in use.
10531 * Returning the identity address will not help here since
10532 * pairing happens before the identity resolving key is
10533 * known and thus the connection establishment happens
10534 * based on the RPA and not the identity address.
10536 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
10537 hci_dev_unlock(hdev);
10538 status = MGMT_STATUS_REJECTED;
/* Pick static vs. public address with the usual LE rules. */
10542 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
10543 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
10544 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
10545 bacmp(&hdev->static_addr, BDADDR_ANY))) {
10546 memcpy(addr, &hdev->static_addr, 6);
10549 memcpy(addr, &hdev->bdaddr, 6);
10553 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
10554 addr, sizeof(addr));
10556 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
10561 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
10562 &role, sizeof(role));
10564 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
10565 eir_len = eir_append_data(rp->eir, eir_len,
10567 hash, sizeof(hash));
10569 eir_len = eir_append_data(rp->eir, eir_len,
10571 rand, sizeof(rand));
10574 flags = mgmt_get_adv_discov_flags(hdev);
10576 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
10577 flags |= LE_AD_NO_BREDR;
10579 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
10580 &flags, sizeof(flags));
10584 hci_dev_unlock(hdev);
10586 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
10588 status = MGMT_STATUS_SUCCESS;
10591 rp->type = cp->type;
10592 rp->eir_len = cpu_to_le16(eir_len);
10594 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
10595 status, rp, sizeof(*rp) + eir_len);
10596 if (err < 0 || status)
10599 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10600 rp, sizeof(*rp) + eir_len,
10601 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the MGMT_ADV_* capability bitmask advertised to userspace.
 * Base flags are always offered; TX-power reporting is added when the
 * controller reported a valid adv TX power or supports extended
 * advertising, and the SEC_*/HW offload flags depend on extended-adv,
 * 2M and Coded PHY support.
 */
10609 static u32 get_supported_adv_flags(struct hci_dev *hdev)
10613 flags |= MGMT_ADV_FLAG_CONNECTABLE;
10614 flags |= MGMT_ADV_FLAG_DISCOV;
10615 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
10616 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
10617 flags |= MGMT_ADV_FLAG_APPEARANCE;
10618 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
10619 flags |= MGMT_ADV_PARAM_DURATION;
10620 flags |= MGMT_ADV_PARAM_TIMEOUT;
10621 flags |= MGMT_ADV_PARAM_INTERVALS;
10622 flags |= MGMT_ADV_PARAM_TX_POWER;
10623 flags |= MGMT_ADV_PARAM_SCAN_RSP;
10625 /* In extended adv TX_POWER returned from Set Adv Param
10626 * will be always valid.
10628 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
10629 flags |= MGMT_ADV_FLAG_TX_POWER;
10631 if (ext_adv_capable(hdev)) {
10632 flags |= MGMT_ADV_FLAG_SEC_1M;
10633 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
10634 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
10636 if (le_2m_capable(hdev))
10637 flags |= MGMT_ADV_FLAG_SEC_2M;
10639 if (le_coded_capable(hdev))
10640 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: report supported adv flags,
 * data/scan-rsp size limits, instance limits and the list of currently
 * registered instance numbers (only those within the externally visible
 * range). Requires LE capability.
 * NOTE(review): extract is lossy — return statements and error paths
 * are partially elided.
 */
10646 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
10647 void *data, u16 data_len)
10649 struct mgmt_rp_read_adv_features *rp;
10652 struct adv_info *adv_instance;
10653 u32 supported_flags;
10656 bt_dev_dbg(hdev, "sock %p", sk);
10658 if (!lmp_le_capable(hdev))
10659 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10660 MGMT_STATUS_REJECTED);
10662 hci_dev_lock(hdev);
/* One trailing byte per instance for the instance-number array. */
10664 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
10665 rp = kmalloc(rp_len, GFP_ATOMIC);
10667 hci_dev_unlock(hdev);
10671 supported_flags = get_supported_adv_flags(hdev);
10673 rp->supported_flags = cpu_to_le32(supported_flags);
10674 rp->max_adv_data_len = max_adv_len(hdev);
10675 rp->max_scan_rsp_len = max_adv_len(hdev);
10676 rp->max_instances = hdev->le_num_of_adv_sets;
10677 rp->num_instances = hdev->adv_instance_cnt;
10679 instance = rp->instance;
10680 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
10681 /* Only instances 1-le_num_of_adv_sets are externally visible */
10682 if (adv_instance->instance <= hdev->adv_instance_cnt) {
10683 *instance = adv_instance->instance;
10686 rp->num_instances--;
10691 hci_dev_unlock(hdev);
10693 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10694 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Length the local name occupies when encoded as an EIR field
 * (computed by dry-running eir_append_local_name into a scratch buf).
 */
10701 static u8 calculate_name_len(struct hci_dev *hdev)
10703 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
10705 return eir_append_local_name(hdev, buf, 0);
/* Maximum TLV payload userspace may supply for adv data / scan rsp,
 * after subtracting space reserved for kernel-managed fields (flags,
 * TX power, local name, appearance) selected by @adv_flags.
 * NOTE(review): extract is lossy — the per-field byte deductions are
 * elided from this view.
 */
10708 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
10711 u8 max_len = max_adv_len(hdev);
10714 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
10715 MGMT_ADV_FLAG_LIMITED_DISCOV |
10716 MGMT_ADV_FLAG_MANAGED_FLAGS))
10719 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
10722 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
10723 max_len -= calculate_name_len(hdev);
10725 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True when the kernel manages the AD Flags field for these adv flags,
 * so userspace must not supply its own EIR_FLAGS entry.
 */
10732 static bool flags_managed(u32 adv_flags)
10734 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
10735 MGMT_ADV_FLAG_LIMITED_DISCOV |
10736 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True when the kernel manages the TX power EIR field. */
10739 static bool tx_power_managed(u32 adv_flags)
10741 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True when the kernel manages the local-name EIR field. */
10744 static bool name_managed(u32 adv_flags)
10746 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True when the kernel manages the appearance EIR field. */
10749 static bool appearance_managed(u32 adv_flags)
10751 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate userspace-supplied adv/scan-rsp TLV data: total length must
 * fit tlv_data_max_len(), each field must not run past the buffer, and
 * fields the kernel manages (flags, TX power, names, appearance) must
 * not be present.
 * NOTE(review): extract is lossy — the length check against max_len,
 * cur_len assignment and return statements are elided from this view.
 */
10754 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
10755 u8 len, bool is_adv_data)
10760 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
10765 /* Make sure that the data is correctly formatted. */
10766 for (i = 0; i < len; i += (cur_len + 1)) {
10772 if (data[i + 1] == EIR_FLAGS &&
10773 (!is_adv_data || flags_managed(adv_flags)))
10776 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
10779 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
10782 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
10785 if (data[i + 1] == EIR_APPEARANCE &&
10786 appearance_managed(adv_flags))
10789 /* If the current field length would exceed the total data
10790 * length, then it's invalid.
10792 if (i + cur_len >= len)
/* Reject adv flags outside the supported set, and reject more than one
 * SEC_* PHY bit (phy_flags ^ (phy_flags & -phy_flags) is non-zero iff
 * more than one bit of the SEC mask is set).
 */
10799 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
10801 u32 supported_flags, phy_flags;
10803 /* The current implementation only supports a subset of the specified
10804 * flags. Also need to check mutual exclusiveness of sec flags.
10806 supported_flags = get_supported_adv_flags(hdev);
10807 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
10808 if (adv_flags & ~supported_flags ||
10809 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True while a conflicting MGMT_OP_SET_LE command is still pending;
 * advertising changes are rejected with MGMT_STATUS_BUSY meanwhile.
 */
10815 static bool adv_busy(struct hci_dev *hdev)
10817 return pending_find(MGMT_OP_SET_LE, hdev);
/* Shared completion helper for Add Advertising / Add Ext Adv Data: on
 * error, tear down the pending (or the indicated) instance — cancel its
 * timeout if it is the current one, remove it and notify userspace via
 * mgmt_advertising_removed().
 * NOTE(review): extract is lossy — the error check and loop-matching
 * conditions are partially elided.
 */
10820 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
10823 struct adv_info *adv, *n;
10825 bt_dev_dbg(hdev, "err %d", err);
10827 hci_dev_lock(hdev);
10829 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
10836 adv->pending = false;
10840 instance = adv->instance;
10842 if (hdev->cur_adv_instance == instance)
10843 cancel_adv_timeout(hdev);
10845 hci_remove_adv_instance(hdev, instance);
10846 mgmt_advertising_removed(sk, hdev, instance);
10849 hci_dev_unlock(hdev);
/* Completion callback for Add Advertising: reply to the requester with
 * status (error) or the instance number (success), run the common
 * error-cleanup in add_adv_complete(), then free the pending cmd.
 */
10852 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
10854 struct mgmt_pending_cmd *cmd = data;
10855 struct mgmt_cp_add_advertising *cp = cmd->param;
10856 struct mgmt_rp_add_advertising rp;
10858 memset(&rp, 0, sizeof(rp));
10860 rp.instance = cp->instance;
10863 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10866 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10867 mgmt_status(err), &rp, sizeof(rp));
10869 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10871 mgmt_pending_free(cmd);
/* hci_cmd_sync callback: schedule the requested adv instance (force
 * flag set so it replaces the currently running one).
 */
10874 static int add_advertising_sync(struct hci_dev *hdev, void *data)
10876 struct mgmt_pending_cmd *cmd = data;
10877 struct mgmt_cp_add_advertising *cp = cmd->param;
10879 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Validates instance number, payload length, flags and TLV contents,
 * registers/updates the adv instance, decides which instance to
 * schedule next (the new one immediately, or the next in rotation if
 * the current instance was replaced), and queues the HCI work via
 * add_advertising_sync(). When nothing needs to reach the controller
 * (unpowered, HCI_ADVERTISING set, or nothing to schedule) the command
 * completes immediately with success.
 * NOTE(review): extract is lossy — gotos, labels, some error checks
 * and closing braces are elided; comments describe only the visible
 * lines.
 */
10882 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10883 void *data, u16 data_len)
10885 struct mgmt_cp_add_advertising *cp = data;
10886 struct mgmt_rp_add_advertising rp;
10889 u16 timeout, duration;
10890 unsigned int prev_instance_cnt;
10891 u8 schedule_instance = 0;
10892 struct adv_info *adv, *next_instance;
10894 struct mgmt_pending_cmd *cmd;
10896 bt_dev_dbg(hdev, "sock %p", sk);
10898 status = mgmt_le_support(hdev);
10900 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Instance numbers are 1-based and bounded by the adv set count. */
10903 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10904 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10905 MGMT_STATUS_INVALID_PARAMS);
10907 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10908 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10909 MGMT_STATUS_INVALID_PARAMS);
10911 flags = __le32_to_cpu(cp->flags);
10912 timeout = __le16_to_cpu(cp->timeout);
10913 duration = __le16_to_cpu(cp->duration);
10915 if (!requested_adv_flags_are_valid(hdev, flags))
10916 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10917 MGMT_STATUS_INVALID_PARAMS);
10919 hci_dev_lock(hdev);
/* A timeout needs a running controller to be armed. */
10921 if (timeout && !hdev_is_powered(hdev)) {
10922 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10923 MGMT_STATUS_REJECTED);
10927 if (adv_busy(hdev)) {
10928 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Adv data and scan-rsp share one buffer: scan rsp follows adv data. */
10933 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
10934 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
10935 cp->scan_rsp_len, false)) {
10936 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10937 MGMT_STATUS_INVALID_PARAMS);
10941 prev_instance_cnt = hdev->adv_instance_cnt;
10943 adv = hci_add_adv_instance(hdev, cp->instance, flags,
10944 cp->adv_data_len, cp->data,
10946 cp->data + cp->adv_data_len,
10948 HCI_ADV_TX_POWER_NO_PREFERENCE,
10949 hdev->le_adv_min_interval,
10950 hdev->le_adv_max_interval, 0);
10952 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10953 MGMT_STATUS_FAILED);
10957 /* Only trigger an advertising added event if a new instance was
10960 if (hdev->adv_instance_cnt > prev_instance_cnt)
10961 mgmt_advertising_added(sk, hdev, cp->instance);
10963 if (hdev->cur_adv_instance == cp->instance) {
10964 /* If the currently advertised instance is being changed then
10965 * cancel the current advertising and schedule the next
10966 * instance. If there is only one instance then the overridden
10967 * advertising data will be visible right away.
10969 cancel_adv_timeout(hdev);
10971 next_instance = hci_get_next_instance(hdev, cp->instance);
10973 schedule_instance = next_instance->instance;
10974 } else if (!hdev->adv_instance_timeout) {
10975 /* Immediately advertise the new instance if no other
10976 * instance is currently being advertised.
10978 schedule_instance = cp->instance;
10981 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
10982 * there is no instance to be advertised then we have no HCI
10983 * communication to make. Simply return.
10985 if (!hdev_is_powered(hdev) ||
10986 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10987 !schedule_instance) {
10988 rp.instance = cp->instance;
10989 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10990 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10994 /* We're good to go, update advertising data, parameters, and start
10997 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
/* Reply carries the instance actually scheduled, which may differ
 * from the one supplied (rotation case above).
 */
11004 cp->instance = schedule_instance;
11006 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
11007 add_advertising_complete);
11009 mgmt_pending_free(cmd);
11012 hci_dev_unlock(hdev);
/* Completion callback for Add Ext Adv Params: report back the selected
 * TX power and the data/scan-rsp space available under the requested
 * flags; on error, remove the half-created instance and notify
 * userspace before replying with the error status.
 * NOTE(review): extract is lossy — the success/error branch structure
 * between the visible lines is partially elided.
 */
11017 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
11020 struct mgmt_pending_cmd *cmd = data;
11021 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
11022 struct mgmt_rp_add_ext_adv_params rp;
11023 struct adv_info *adv;
11026 BT_DBG("%s", hdev->name);
11028 hci_dev_lock(hdev);
11030 adv = hci_find_adv_instance(hdev, cp->instance);
11034 rp.instance = cp->instance;
11035 rp.tx_power = adv->tx_power;
11037 /* While we're at it, inform userspace of the available space for this
11038 * advertisement, given the flags that will be used.
11040 flags = __le32_to_cpu(cp->flags);
11041 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11042 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11045 /* If this advertisement was previously advertising and we
11046 * failed to update it, we signal that it has been removed and
11047 * delete its structure
11050 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
11052 hci_remove_adv_instance(hdev, cp->instance);
11054 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11057 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11058 mgmt_status(err), &rp, sizeof(rp));
11063 mgmt_pending_free(cmd);
11065 hci_dev_unlock(hdev);
/* hci_cmd_sync callback: program the extended advertising parameters
 * for the pending instance on the controller.
 */
11068 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
11070 struct mgmt_pending_cmd *cmd = data;
11071 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
11073 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler (phase 1 of the two-call extended
 * advertising interface; data follows via Add Ext Adv Data).
 *
 * Validates instance/flags, requires a powered controller, fills in
 * defaults for any parameter whose MGMT_ADV_PARAM_* flag is absent,
 * creates a data-less adv instance, then either queues the HCI
 * parameter setup (extended-adv controllers) or completes immediately
 * with advertised limits (legacy controllers).
 * NOTE(review): extract is lossy — gotos, labels, the tx_power source
 * field and some error checks are elided; comments describe only the
 * visible lines.
 */
11076 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
11077 void *data, u16 data_len)
11079 struct mgmt_cp_add_ext_adv_params *cp = data;
11080 struct mgmt_rp_add_ext_adv_params rp;
11081 struct mgmt_pending_cmd *cmd = NULL;
11082 struct adv_info *adv;
11083 u32 flags, min_interval, max_interval;
11084 u16 timeout, duration;
11089 BT_DBG("%s", hdev->name);
11091 status = mgmt_le_support(hdev);
11093 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11096 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11097 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11098 MGMT_STATUS_INVALID_PARAMS);
11100 /* The purpose of breaking add_advertising into two separate MGMT calls
11101 * for params and data is to allow more parameters to be added to this
11102 * structure in the future. For this reason, we verify that we have the
11103 * bare minimum structure we know of when the interface was defined. Any
11104 * extra parameters we don't know about will be ignored in this request.
11106 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
11107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11108 MGMT_STATUS_INVALID_PARAMS);
11110 flags = __le32_to_cpu(cp->flags);
11112 if (!requested_adv_flags_are_valid(hdev, flags))
11113 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11114 MGMT_STATUS_INVALID_PARAMS);
11116 hci_dev_lock(hdev);
11118 /* In new interface, we require that we are powered to register */
11119 if (!hdev_is_powered(hdev)) {
11120 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11121 MGMT_STATUS_REJECTED);
11125 if (adv_busy(hdev)) {
11126 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11131 /* Parse defined parameters from request, use defaults otherwise */
11132 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
11133 __le16_to_cpu(cp->timeout) : 0;
11135 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
11136 __le16_to_cpu(cp->duration) :
11137 hdev->def_multi_adv_rotation_duration;
11139 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
11140 __le32_to_cpu(cp->min_interval) :
11141 hdev->le_adv_min_interval;
11143 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
11144 __le32_to_cpu(cp->max_interval) :
11145 hdev->le_adv_max_interval;
11147 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
11149 HCI_ADV_TX_POWER_NO_PREFERENCE;
11151 /* Create advertising instance with no advertising or response data */
11152 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
11153 timeout, duration, tx_power, min_interval,
11157 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11158 MGMT_STATUS_FAILED);
11162 /* Submit request for advertising params if ext adv available */
11163 if (ext_adv_capable(hdev)) {
11164 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
/* Allocation failure: undo the instance created above. */
11168 hci_remove_adv_instance(hdev, cp->instance);
11172 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
11173 add_ext_adv_params_complete);
11175 mgmt_pending_free(cmd);
/* Legacy controller: no HCI work needed, reply right away. */
11177 rp.instance = cp->instance;
11178 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
11179 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11180 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11181 err = mgmt_cmd_complete(sk, hdev->id,
11182 MGMT_OP_ADD_EXT_ADV_PARAMS,
11183 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11187 hci_dev_unlock(hdev);
/* Completion callback for Add Ext Adv Data: run the shared
 * error-cleanup (add_adv_complete), reply with status or the instance
 * number, and free the pending command.
 */
11192 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
11194 struct mgmt_pending_cmd *cmd = data;
11195 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
11196 struct mgmt_rp_add_advertising rp;
11198 add_adv_complete(hdev, cmd->sk, cp->instance, err);
11200 memset(&rp, 0, sizeof(rp));
11202 rp.instance = cp->instance;
11205 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11208 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11209 mgmt_status(err), &rp, sizeof(rp));
11211 mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_ADD_EXT_ADV_DATA. On controllers with
 * extended advertising support it pushes the adv data and scan response
 * data for the instance and then enables extended advertising; otherwise
 * it falls back to (re)scheduling the instance through the legacy
 * software-rotation path.
 * NOTE(review): error-check lines between the update calls appear to have
 * been dropped by extraction (numbering jumps 11221->11225) - verify.
 */
11214 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
11216 struct mgmt_pending_cmd *cmd = data;
11217 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
11220 if (ext_adv_capable(hdev)) {
11221 err = hci_update_adv_data_sync(hdev, cp->instance);
11225 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
11229 return hci_enable_ext_advertising_sync(hdev, cp->instance);
/* Legacy controller: schedule the instance (force = true). */
11232 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* Handler for MGMT_OP_ADD_EXT_ADV_DATA. Second half of the two-step
 * extended-advertising registration: the instance must already exist
 * (created by MGMT_OP_ADD_EXT_ADV_PARAMS). Validates the supplied adv /
 * scan-response TLV data, stores it in the instance, and either answers
 * immediately (no HCI traffic needed) or queues add_ext_adv_data_sync.
 * On any validation/queueing failure the half-created instance is torn
 * down again at the clear_new_instance label.
 */
11235 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
11238 struct mgmt_cp_add_ext_adv_data *cp = data;
11239 struct mgmt_rp_add_ext_adv_data rp;
11240 u8 schedule_instance = 0;
11241 struct adv_info *next_instance;
11242 struct adv_info *adv_instance;
11244 struct mgmt_pending_cmd *cmd;
11246 BT_DBG("%s", hdev->name);
11248 hci_dev_lock(hdev);
11250 adv_instance = hci_find_adv_instance(hdev, cp->instance);
/* Data for an instance that was never registered is invalid. */
11252 if (!adv_instance) {
11253 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11254 MGMT_STATUS_INVALID_PARAMS);
11258 /* In new interface, we require that we are powered to register */
11259 if (!hdev_is_powered(hdev)) {
11260 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11261 MGMT_STATUS_REJECTED);
11262 goto clear_new_instance;
/* Reject while another advertising operation is still pending. */
11265 if (adv_busy(hdev)) {
11266 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11268 goto clear_new_instance;
11271 /* Validate new data */
11272 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
11273 cp->adv_data_len, true) ||
11274 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
11275 cp->adv_data_len, cp->scan_rsp_len, false)) {
11276 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11277 MGMT_STATUS_INVALID_PARAMS);
11278 goto clear_new_instance;
11281 /* Set the data in the advertising instance */
11282 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
11283 cp->data, cp->scan_rsp_len,
11284 cp->data + cp->adv_data_len);
11286 /* If using software rotation, determine next instance to use */
11287 if (hdev->cur_adv_instance == cp->instance) {
11288 /* If the currently advertised instance is being changed
11289 * then cancel the current advertising and schedule the
11290 * next instance. If there is only one instance then the
11291 * overridden advertising data will be visible right
11294 cancel_adv_timeout(hdev);
11296 next_instance = hci_get_next_instance(hdev, cp->instance);
11298 schedule_instance = next_instance->instance;
11299 } else if (!hdev->adv_instance_timeout) {
11300 /* Immediately advertise the new instance if no other
11301 * instance is currently being advertised.
11303 schedule_instance = cp->instance;
11306 /* If the HCI_ADVERTISING flag is set or there is no instance to
11307 * be advertised then we have no HCI communication to make.
11310 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
11311 if (adv_instance->pending) {
11312 mgmt_advertising_added(sk, hdev, cp->instance);
11313 adv_instance->pending = false;
11315 rp.instance = cp->instance;
11316 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11317 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11321 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
11325 goto clear_new_instance;
11328 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
11329 add_ext_adv_data_complete);
/* Queueing failed: the completion callback will not run, free here. */
11331 mgmt_pending_free(cmd);
11332 goto clear_new_instance;
11335 /* We were successful in updating data, so trigger advertising_added
11336 * event if this is an instance that wasn't previously advertising. If
11337 * a failure occurs in the requests we initiated, we will remove the
11338 * instance again in add_advertising_complete
11340 if (adv_instance->pending)
11341 mgmt_advertising_added(sk, hdev, cp->instance);
11345 clear_new_instance:
11346 hci_remove_adv_instance(hdev, cp->instance);
11349 hci_dev_unlock(hdev);
/* Completion callback for MGMT_OP_REMOVE_ADVERTISING: relays the result of
 * remove_advertising_sync to the issuing socket (status on error, response
 * with the removed instance number on success) and frees the pending cmd.
 */
11354 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
11357 struct mgmt_pending_cmd *cmd = data;
11358 struct mgmt_cp_remove_advertising *cp = cmd->param;
11359 struct mgmt_rp_remove_advertising rp;
11361 bt_dev_dbg(hdev, "err %d", err);
11363 memset(&rp, 0, sizeof(rp));
11364 rp.instance = cp->instance;
/* Error path: translated status only. */
11367 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
/* Success path: full response with the instance that was removed. */
11370 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11371 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11373 mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_REMOVE_ADVERTISING: removes the requested
 * instance (instance 0 = all, per the management API) and, if no instances
 * remain afterwards, disables advertising on the controller entirely.
 */
11376 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
11378 struct mgmt_pending_cmd *cmd = data;
11379 struct mgmt_cp_remove_advertising *cp = cmd->param;
11382 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
11386 if (list_empty(&hdev->adv_instances))
11387 err = hci_disable_advertising_sync(hdev);
/* Handler for MGMT_OP_REMOVE_ADVERTISING. Validates that the named
 * instance exists (cp->instance == 0 means "remove all"), that no SET_LE
 * command is racing with us, and that there is anything to remove at all,
 * then queues remove_advertising_sync under the device lock.
 */
11392 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
11393 void *data, u16 data_len)
11395 struct mgmt_cp_remove_advertising *cp = data;
11396 struct mgmt_pending_cmd *cmd;
11399 bt_dev_dbg(hdev, "sock %p", sk);
11401 hci_dev_lock(hdev);
/* Non-zero instance must refer to an existing registration. */
11403 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
11404 err = mgmt_cmd_status(sk, hdev->id,
11405 MGMT_OP_REMOVE_ADVERTISING,
11406 MGMT_STATUS_INVALID_PARAMS);
/* A pending SET_LE could toggle advertising under us; refuse. */
11410 if (pending_find(MGMT_OP_SET_LE, hdev)) {
11411 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
/* Nothing registered at all -> nothing to remove. */
11416 if (list_empty(&hdev->adv_instances)) {
11417 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11418 MGMT_STATUS_INVALID_PARAMS);
11422 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
11429 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
11430 remove_advertising_complete);
/* Queueing failed: completion callback will not run, free here. */
11432 mgmt_pending_free(cmd);
11435 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_ADV_SIZE_INFO. Purely informational: for a valid
 * instance number and a supported flag combination, reports the maximum
 * advertising-data and scan-response payload sizes the controller allows.
 * Requires LE support but not a powered adapter; makes no HCI calls.
 */
11440 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
11441 void *data, u16 data_len)
11443 struct mgmt_cp_get_adv_size_info *cp = data;
11444 struct mgmt_rp_get_adv_size_info rp;
11445 u32 flags, supported_flags;
11447 bt_dev_dbg(hdev, "sock %p", sk);
11449 if (!lmp_le_capable(hdev))
11450 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11451 MGMT_STATUS_REJECTED);
/* Instance numbers are 1..le_num_of_adv_sets. */
11453 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11454 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11455 MGMT_STATUS_INVALID_PARAMS);
11457 flags = __le32_to_cpu(cp->flags);
11459 /* The current implementation only supports a subset of the specified
11462 supported_flags = get_supported_adv_flags(hdev);
11463 if (flags & ~supported_flags)
11464 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11465 MGMT_STATUS_INVALID_PARAMS);
11467 rp.instance = cp->instance;
11468 rp.flags = cp->flags;
11469 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11470 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11472 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11473 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for standard MGMT opcodes, indexed by opcode value
 * (slot 0 is unused). Each entry pairs a handler with its fixed request
 * size; HCI_MGMT_VAR_LEN marks variable-length requests (size is the
 * minimum), HCI_MGMT_UNTRUSTED allows non-privileged sockets, and
 * HCI_MGMT_UNCONFIGURED / HCI_MGMT_HDEV_OPTIONAL relax the usual
 * configured-controller requirement. Order must match the opcode
 * numbering in mgmt.h - append only, never reorder.
 */
11476 static const struct hci_mgmt_handler mgmt_handlers[] = {
11477 { NULL }, /* 0x0000 (no command) */
11478 { read_version, MGMT_READ_VERSION_SIZE,
11480 HCI_MGMT_UNTRUSTED },
11481 { read_commands, MGMT_READ_COMMANDS_SIZE,
11483 HCI_MGMT_UNTRUSTED },
11484 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
11486 HCI_MGMT_UNTRUSTED },
11487 { read_controller_info, MGMT_READ_INFO_SIZE,
11488 HCI_MGMT_UNTRUSTED },
11489 { set_powered, MGMT_SETTING_SIZE },
11490 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
11491 { set_connectable, MGMT_SETTING_SIZE },
11492 { set_fast_connectable, MGMT_SETTING_SIZE },
11493 { set_bondable, MGMT_SETTING_SIZE },
11494 { set_link_security, MGMT_SETTING_SIZE },
11495 { set_ssp, MGMT_SETTING_SIZE },
11496 { set_hs, MGMT_SETTING_SIZE },
11497 { set_le, MGMT_SETTING_SIZE },
11498 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
11499 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
11500 { add_uuid, MGMT_ADD_UUID_SIZE },
11501 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
11502 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
11503 HCI_MGMT_VAR_LEN },
11504 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
11505 HCI_MGMT_VAR_LEN },
11506 { disconnect, MGMT_DISCONNECT_SIZE },
11507 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
11508 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
11509 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
11510 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
11511 { pair_device, MGMT_PAIR_DEVICE_SIZE },
11512 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
11513 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
11514 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
11515 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
11516 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
11517 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
11518 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
11519 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
11520 HCI_MGMT_VAR_LEN },
11521 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
11522 { start_discovery, MGMT_START_DISCOVERY_SIZE },
11523 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
11524 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
11525 { block_device, MGMT_BLOCK_DEVICE_SIZE },
11526 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
11527 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
11528 { set_advertising, MGMT_SETTING_SIZE },
11529 { set_bredr, MGMT_SETTING_SIZE },
11530 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
11531 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
11532 { set_secure_conn, MGMT_SETTING_SIZE },
11533 { set_debug_keys, MGMT_SETTING_SIZE },
11534 { set_privacy, MGMT_SET_PRIVACY_SIZE },
11535 { load_irks, MGMT_LOAD_IRKS_SIZE,
11536 HCI_MGMT_VAR_LEN },
11537 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
11538 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
11539 { add_device, MGMT_ADD_DEVICE_SIZE },
11540 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
11541 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
11542 HCI_MGMT_VAR_LEN },
11543 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
11545 HCI_MGMT_UNTRUSTED },
11546 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
11547 HCI_MGMT_UNCONFIGURED |
11548 HCI_MGMT_UNTRUSTED },
11549 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
11550 HCI_MGMT_UNCONFIGURED },
11551 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
11552 HCI_MGMT_UNCONFIGURED },
11553 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
11554 HCI_MGMT_VAR_LEN },
11555 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
11556 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
11558 HCI_MGMT_UNTRUSTED },
11559 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
11560 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
11561 HCI_MGMT_VAR_LEN },
11562 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
11563 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
11564 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
11565 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
11566 HCI_MGMT_UNTRUSTED },
11567 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
11568 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
11569 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
11570 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
11571 HCI_MGMT_VAR_LEN },
11572 { set_wideband_speech, MGMT_SETTING_SIZE },
11573 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
11574 HCI_MGMT_UNTRUSTED },
11575 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
11576 HCI_MGMT_UNTRUSTED |
11577 HCI_MGMT_HDEV_OPTIONAL },
11578 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
11580 HCI_MGMT_HDEV_OPTIONAL },
11581 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
11582 HCI_MGMT_UNTRUSTED },
11583 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
11584 HCI_MGMT_VAR_LEN },
11585 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
11586 HCI_MGMT_UNTRUSTED },
11587 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
11588 HCI_MGMT_VAR_LEN },
11589 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
11590 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
11591 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
11592 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
11593 HCI_MGMT_VAR_LEN },
11594 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
11595 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
11596 HCI_MGMT_VAR_LEN },
11597 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
11598 HCI_MGMT_VAR_LEN },
11599 { add_adv_patterns_monitor_rssi,
11600 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
11601 HCI_MGMT_VAR_LEN },
11602 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
11603 HCI_MGMT_VAR_LEN },
11604 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
11605 { mesh_send, MGMT_MESH_SEND_SIZE,
11606 HCI_MGMT_VAR_LEN },
11607 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
/* Vendor dispatch table for the Tizen MGMT extensions declared in
 * mgmt_tizen.h (white-list management, RSSI monitoring, LE discovery,
 * 6LoWPAN, LE data length, etc.). Same slot layout rules as
 * mgmt_handlers[]: indexed by opcode, slot 0 unused, append only.
 */
11611 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
11612 { NULL }, /* 0x0000 (no command) */
11613 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
11614 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
11615 HCI_MGMT_VAR_LEN },
11616 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
11617 HCI_MGMT_VAR_LEN },
11618 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
11619 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
11620 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
11621 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
11622 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
11623 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
11624 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
11625 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
11626 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
11627 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
11628 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
11629 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
11630 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
11631 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
11632 { enable_bt_6lowpan, MGMT_ENABLE_BT_6LOWPAN_SIZE },
11633 { connect_bt_6lowpan, MGMT_CONNECT_6LOWPAN_SIZE },
11634 { disconnect_bt_6lowpan, MGMT_DISCONNECT_6LOWPAN_SIZE },
11635 { read_maximum_le_data_length,
11636 MGMT_LE_READ_MAXIMUM_DATA_LENGTH_SIZE },
11637 { write_host_suggested_le_data_length,
11638 MGMT_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH_SIZE },
/* Broadcast controller-registration events to management sockets. Raw
 * (userchannel/diag) devices are invisible to mgmt. For primary devices,
 * emits UNCONF_INDEX_ADDED or INDEX_ADDED depending on the UNCONFIGURED
 * flag, then always emits the richer EXT_INDEX_ADDED carrying bus/type.
 * NOTE(review): the switch cases and ev.type assignment are missing from
 * this extraction (numbering jumps 11657->11668) - confirm upstream.
 */
11642 void mgmt_index_added(struct hci_dev *hdev)
11644 struct mgmt_ev_ext_index ev;
11646 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11649 switch (hdev->dev_type) {
11651 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11652 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
11653 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11656 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
11657 HCI_MGMT_INDEX_EVENTS);
11668 ev.bus = hdev->bus;
11670 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
11671 HCI_MGMT_EXT_INDEX_EVENTS);
/* Counterpart of mgmt_index_added() for controller unregistration.
 * Fails all still-pending mgmt commands with INVALID_INDEX, emits the
 * (UNCONF_)INDEX_REMOVED and EXT_INDEX_REMOVED events, and cancels the
 * delayed works that only run while the controller is mgmt-managed.
 */
11674 void mgmt_index_removed(struct hci_dev *hdev)
11676 struct mgmt_ev_ext_index ev;
11677 u8 status = MGMT_STATUS_INVALID_INDEX;
11679 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11682 switch (hdev->dev_type) {
/* opcode 0 = iterate every pending command regardless of opcode. */
11684 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11686 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11687 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
11688 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11691 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
11692 HCI_MGMT_INDEX_EVENTS);
11703 ev.bus = hdev->bus;
11705 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
11706 HCI_MGMT_EXT_INDEX_EVENTS);
11708 /* Cancel any remaining timed work */
11709 if (!hci_dev_test_flag(hdev, HCI_MGMT))
11711 cancel_delayed_work_sync(&hdev->discov_off);
11712 cancel_delayed_work_sync(&hdev->service_cache);
11713 cancel_delayed_work_sync(&hdev->rpa_expired);
/* Called when a power-on attempt finishes. On success (err presumably 0 -
 * the guard line is elided in this extraction) restarts stored LE actions
 * and passive scanning, then completes any pending SET_POWERED commands
 * and broadcasts the new settings to all sockets except the issuer's.
 */
11716 void mgmt_power_on(struct hci_dev *hdev, int err)
11718 struct cmd_lookup match = { NULL, hdev };
11720 bt_dev_dbg(hdev, "err %d", err);
11722 hci_dev_lock(hdev);
11725 restart_le_actions(hdev);
11726 hci_update_passive_scan(hdev);
11729 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11731 new_settings(hdev, match.sk);
/* settings_rsp took a reference on the first matching socket. */
11734 sock_put(match.sk);
11736 hci_dev_unlock(hdev);
/* Power-off bookkeeping (caller holds the relevant locks, hence the __
 * prefix): completes pending SET_POWERED commands, fails every other
 * pending command with INVALID_INDEX or NOT_POWERED, clears the class of
 * device towards listeners if it was non-zero, and announces the new
 * settings.
 */
11739 void __mgmt_power_off(struct hci_dev *hdev)
11741 struct cmd_lookup match = { NULL, hdev };
11742 u8 status, zero_cod[] = { 0, 0, 0 };
11744 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11746 /* If the power off is because of hdev unregistration let
11747 * use the appropriate INVALID_INDEX status. Otherwise use
11748 * NOT_POWERED. We cover both scenarios here since later in
11749 * mgmt_index_removed() any hci_conn callbacks will have already
11750 * been triggered, potentially causing misleading DISCONNECTED
11751 * status responses.
11753 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
11754 status = MGMT_STATUS_INVALID_INDEX;
11756 status = MGMT_STATUS_NOT_POWERED;
11758 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
/* Only announce a cleared class if it was actually set before. */
11760 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
11761 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
11762 zero_cod, sizeof(zero_cod),
11763 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11764 ext_info_changed(hdev, NULL);
11767 new_settings(hdev, match.sk);
11770 sock_put(match.sk);
/* Report a failed power-on to the socket that issued SET_POWERED.
 * -ERFKILL maps to the dedicated RFKILLED status, anything else to
 * FAILED. No-op if no SET_POWERED command is pending.
 */
11773 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
11775 struct mgmt_pending_cmd *cmd;
11778 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11782 if (err == -ERFKILL)
11783 status = MGMT_STATUS_RFKILLED;
11785 status = MGMT_STATUS_FAILED;
11787 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11789 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key so
 * userspace can persist it; store_hint tells the daemon whether the key
 * should survive across restarts.
 */
11792 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11795 struct mgmt_ev_new_link_key ev;
11797 memset(&ev, 0, sizeof(ev));
11799 ev.store_hint = persistent;
11800 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11801 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
11802 ev.key.type = key->type;
11803 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11804 ev.key.pin_len = key->pin_len;
11806 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's internal type/authentication pair onto the MGMT_LTK_*
 * wire values used by the NEW_LONG_TERM_KEY event. Unrecognized types
 * fall through to MGMT_LTK_UNAUTHENTICATED as the safest default.
 * NOTE(review): some case labels are elided in this extraction
 * (e.g. the P256 case before 11818) - confirm against upstream.
 */
11809 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11811 switch (ltk->type) {
11813 case SMP_LTK_RESPONDER:
11814 if (ltk->authenticated)
11815 return MGMT_LTK_AUTHENTICATED;
11816 return MGMT_LTK_UNAUTHENTICATED;
11818 if (ltk->authenticated)
11819 return MGMT_LTK_P256_AUTH;
11820 return MGMT_LTK_P256_UNAUTH;
11821 case SMP_LTK_P256_DEBUG:
11822 return MGMT_LTK_P256_DEBUG;
11825 return MGMT_LTK_UNAUTHENTICATED;
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new LE long term key. Keys bound
 * to non-identity random addresses (neither static random 0xC0-prefixed
 * nor public) get store_hint 0 since the peer address will change.
 * Only key->enc_size significant bytes of the key value are copied; the
 * remainder of the value field is explicitly zeroed.
 */
11828 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11830 struct mgmt_ev_new_long_term_key ev;
11832 memset(&ev, 0, sizeof(ev));
11834 /* Devices using resolvable or non-resolvable random addresses
11835 * without providing an identity resolving key don't require
11836 * to store long term keys. Their addresses will change the
11837 * next time around.
11839 * Only when a remote device provides an identity address
11840 * make sure the long term key is stored. If the remote
11841 * identity is known, the long term keys are internally
11842 * mapped to the identity address. So allow static random
11843 * and public addresses here.
11845 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11846 (key->bdaddr.b[5] & 0xc0) != 0xc0)
11847 ev.store_hint = 0x00;
11849 ev.store_hint = persistent;
11851 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11852 ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type);
11853 ev.key.type = mgmt_ltk_type(key);
11854 ev.key.enc_size = key->enc_size;
11855 ev.key.ediv = key->ediv;
11856 ev.key.rand = key->rand;
/* SMP_LTK (as opposed to SMP_LTK_RESPONDER) marks the initiator role. */
11858 if (key->type == SMP_LTK)
11859 ev.key.initiator = 1;
11861 /* Make sure we copy only the significant bytes based on the
11862 * encryption key size, and set the rest of the value to zeroes.
11864 memcpy(ev.key.val, key->val, key->enc_size);
11865 memset(ev.key.val + key->enc_size, 0,
11866 sizeof(ev.key.val) - key->enc_size);
11868 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key, including the
 * resolvable private address (rpa) it was learned from, so userspace can
 * persist the identity mapping.
 */
11871 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11873 struct mgmt_ev_new_irk ev;
11875 memset(&ev, 0, sizeof(ev));
11877 ev.store_hint = persistent;
11879 bacpy(&ev.rpa, &irk->rpa);
11880 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11881 ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type);
11882 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11884 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CSRK for a new connection signature resolving key.
 * Same identity-address rule as mgmt_new_ltk(): keys tied to non-identity
 * random addresses are flagged as not worth storing.
 */
11887 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11890 struct mgmt_ev_new_csrk ev;
11892 memset(&ev, 0, sizeof(ev));
11894 /* Devices using resolvable or non-resolvable random addresses
11895 * without providing an identity resolving key don't require
11896 * to store signature resolving keys. Their addresses will change
11897 * the next time around.
11899 * Only when a remote device provides an identity address
11900 * make sure the signature resolving key is stored. So allow
11901 * static random and public addresses here.
11903 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11904 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11905 ev.store_hint = 0x00;
11907 ev.store_hint = persistent;
11909 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11910 ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type);
11911 ev.key.type = csrk->type;
11912 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11914 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_CONN_PARAM so userspace can persist connection
 * parameters a peer requested. Dropped silently for non-identity
 * addresses, since the parameters could not be re-associated later.
 * All 16-bit fields are converted to little-endian wire order.
 */
11917 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11918 u8 bdaddr_type, u8 store_hint, u16 min_interval,
11919 u16 max_interval, u16 latency, u16 timeout)
11921 struct mgmt_ev_new_conn_param ev;
11923 if (!hci_is_identity_address(bdaddr, bdaddr_type))
11926 memset(&ev, 0, sizeof(ev));
11927 bacpy(&ev.addr.bdaddr, bdaddr);
11928 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
11929 ev.store_hint = store_hint;
11930 ev.min_interval = cpu_to_le16(min_interval);
11931 ev.max_interval = cpu_to_le16(max_interval);
11932 ev.latency = cpu_to_le16(latency);
11933 ev.timeout = cpu_to_le16(timeout);
11935 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_CONNECTED with attached EIR data. For LE links the
 * stored advertising data is forwarded verbatim; for BR/EDR the name and
 * (if non-zero) class of device are packed as EIR fields instead. The
 * event skb is sized up front for whichever variant applies.
 * NOTE(review): the skb allocation failure check and the flags setup
 * lines are elided in this extraction - confirm against upstream.
 */
11938 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
11939 u8 *name, u8 name_len)
11941 struct sk_buff *skb;
11942 struct mgmt_ev_device_connected *ev;
11946 /* allocate buff for LE or BR/EDR adv */
11947 if (conn->le_adv_data_len > 0)
11948 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11949 sizeof(*ev) + conn->le_adv_data_len);
11951 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11952 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
11953 eir_precalc_len(sizeof(conn->dev_class)));
11955 ev = skb_put(skb, sizeof(*ev));
11956 bacpy(&ev->addr.bdaddr, &conn->dst);
11957 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11960 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
11962 ev->flags = __cpu_to_le32(flags);
11964 /* We must ensure that the EIR Data fields are ordered and
11965 * unique. Keep it simple for now and avoid the problem by not
11966 * adding any BR/EDR data to the LE adv.
11968 if (conn->le_adv_data_len > 0) {
11969 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
11970 eir_len = conn->le_adv_data_len;
11973 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
11975 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
11976 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
11977 conn->dev_class, sizeof(conn->dev_class));
11980 ev->eir_len = cpu_to_le16(eir_len);
11982 mgmt_event_skb(skb, NULL);
/* mgmt_pending_foreach callback: completes a pending DISCONNECT command
 * with success and records its socket in *data so the caller can exclude
 * that socket from the subsequent DEVICE_DISCONNECTED broadcast.
 */
11985 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11987 struct sock **sk = data;
11989 cmd->cmd_complete(cmd, 0);
11994 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: finishes a pending UNPAIR_DEVICE by
 * emitting the DEVICE_UNPAIRED event and completing the command with
 * success.
 */
11997 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11999 struct hci_dev *hdev = data;
12000 struct mgmt_cp_unpair_device *cp = cmd->param;
12002 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
12004 cmd->cmd_complete(cmd, 0);
12005 mgmt_pending_remove(cmd);
/* Return whether a power-down is currently in progress, judged from a
 * pending SET_POWERED command.
 * NOTE(review): the body after pending_find() (checking cp->val and the
 * return statements) is elided in this extraction - confirm upstream.
 */
12008 bool mgmt_powering_down(struct hci_dev *hdev)
12010 struct mgmt_pending_cmd *cmd;
12011 struct mgmt_mode *cp;
12013 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Emit MGMT_EV_DEVICE_DISCONNECTED and complete any pending DISCONNECT /
 * UNPAIR_DEVICE commands for the connection. If this was the last
 * connection during a power-down, the deferred power_off work is kicked
 * immediately. Suspend-triggered disconnects get their reason rewritten
 * to LOCAL_HOST_SUSPEND.
 */
12024 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
12025 u8 link_type, u8 addr_type, u8 reason,
12026 bool mgmt_connected)
12028 struct mgmt_ev_device_disconnected ev;
12029 struct sock *sk = NULL;
12031 /* The connection is still in hci_conn_hash so test for 1
12032 * instead of 0 to know if this is the last one.
12034 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
12035 cancel_delayed_work(&hdev->power_off);
12036 queue_work(hdev->req_workqueue, &hdev->power_off.work);
12039 if (!mgmt_connected)
/* Only ACL and LE links are reported over the mgmt interface. */
12042 if (link_type != ACL_LINK && link_type != LE_LINK)
/* disconnect_rsp stores the issuer's socket in sk for exclusion. */
12045 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
12047 bacpy(&ev.addr.bdaddr, bdaddr);
12048 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12049 ev.reason = reason;
12051 /* Report disconnects due to suspend */
12052 if (hdev->suspended)
12053 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
12055 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
12060 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Report a failed disconnect attempt: still completes pending
 * UNPAIR_DEVICE commands, then finds the matching pending DISCONNECT
 * (same address and address type) and completes it with the translated
 * HCI status.
 */
12064 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
12065 u8 link_type, u8 addr_type, u8 status)
12067 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
12068 struct mgmt_cp_disconnect *cp;
12069 struct mgmt_pending_cmd *cmd;
12071 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
12074 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
/* The pending command must target the same peer address... */
12080 if (bacmp(bdaddr, &cp->addr.bdaddr))
/* ...and the same address type, else it is for another device. */
12083 if (cp->addr.type != bdaddr_type)
12086 cmd->cmd_complete(cmd, mgmt_status(status));
12087 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED with the translated HCI status. Mirrors
 * the power-down fast path of mgmt_device_disconnected(): if this was
 * the last tracked connection while powering down, run the deferred
 * power_off work immediately.
 */
12090 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12091 u8 addr_type, u8 status)
12093 struct mgmt_ev_connect_failed ev;
12095 /* The connection is still in hci_conn_hash so test for 1
12096 * instead of 0 to know if this is the last one.
12098 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
12099 cancel_delayed_work(&hdev->power_off);
12100 queue_work(hdev->req_workqueue, &hdev->power_off.work);
12103 bacpy(&ev.addr.bdaddr, bdaddr);
12104 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12105 ev.status = mgmt_status(status);
12107 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_PIN_CODE_REQUEST asking userspace for a PIN; secure
 * indicates a 16-digit PIN is required. BR/EDR only (address type is
 * hard-coded to BDADDR_BREDR).
 */
12110 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
12112 struct mgmt_ev_pin_code_request ev;
12114 bacpy(&ev.addr.bdaddr, bdaddr);
12115 ev.addr.type = BDADDR_BREDR;
12116 ev.secure = secure;
12118 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY with the translated HCI status;
 * no-op when none is pending.
 */
12121 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12124 struct mgmt_pending_cmd *cmd;
12126 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
12130 cmd->cmd_complete(cmd, mgmt_status(status));
12131 mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY with the translated HCI status;
 * no-op when none is pending.
 */
12134 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12137 struct mgmt_pending_cmd *cmd;
12139 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
12143 cmd->cmd_complete(cmd, mgmt_status(status));
12144 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_USER_CONFIRM_REQUEST so userspace can show the numeric
 * comparison value; confirm_hint tells the agent whether a simple
 * yes/no (no displayed value) suffices.
 */
12147 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
12148 u8 link_type, u8 addr_type, u32 value,
12151 struct mgmt_ev_user_confirm_request ev;
12153 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12155 bacpy(&ev.addr.bdaddr, bdaddr);
12156 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12157 ev.confirm_hint = confirm_hint;
12158 ev.value = cpu_to_le32(value);
12160 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit MGMT_EV_USER_PASSKEY_REQUEST asking userspace to collect a
 * passkey from the user for the given peer.
 */
12164 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
12165 u8 link_type, u8 addr_type)
12167 struct mgmt_ev_user_passkey_request ev;
12169 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12171 bacpy(&ev.addr.bdaddr, bdaddr);
12172 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12174 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared helper behind the four mgmt_user_*_reply_complete() wrappers:
 * finds the pending command for the given opcode and completes it with
 * the translated HCI status.
 */
12178 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12179 u8 link_type, u8 addr_type, u8 status,
12182 struct mgmt_pending_cmd *cmd;
12184 cmd = pending_find(opcode, hdev);
12188 cmd->cmd_complete(cmd, mgmt_status(status));
12189 mgmt_pending_remove(cmd);
/* Completion for USER_CONFIRM_REPLY; thin wrapper around
 * user_pairing_resp_complete().
 */
12194 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12195 u8 link_type, u8 addr_type, u8 status)
12197 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12198 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Completion for USER_CONFIRM_NEG_REPLY; thin wrapper around
 * user_pairing_resp_complete().
 */
12201 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12202 u8 link_type, u8 addr_type, u8 status)
12204 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12206 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Completion for USER_PASSKEY_REPLY; thin wrapper around
 * user_pairing_resp_complete().
 */
12209 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12210 u8 link_type, u8 addr_type, u8 status)
12212 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12213 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Completion for USER_PASSKEY_NEG_REPLY; thin wrapper around
 * user_pairing_resp_complete().
 */
12216 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12217 u8 link_type, u8 addr_type, u8 status)
12219 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12221 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey the
 * remote side must enter; entered counts digits typed so far on the
 * remote keyboard.
 */
12224 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
12225 u8 link_type, u8 addr_type, u32 passkey,
12228 struct mgmt_ev_passkey_notify ev;
12230 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12232 bacpy(&ev.addr.bdaddr, bdaddr);
12233 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12234 ev.passkey = __cpu_to_le32(passkey);
12235 ev.entered = entered;
12237 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure: broadcasts MGMT_EV_AUTH_FAILED
 * (excluding the pairing initiator's socket, if any) and, when a pairing
 * command is pending for this connection, completes and removes it.
 */
12240 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
12242 struct mgmt_ev_auth_failed ev;
12243 struct mgmt_pending_cmd *cmd;
12244 u8 status = mgmt_status(hci_status);
12246 bacpy(&ev.addr.bdaddr, &conn->dst);
12247 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
12248 ev.status = status;
12250 cmd = find_pairing(conn);
/* The initiating socket gets its answer via cmd_complete, not the event. */
12252 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
12253 cmd ? cmd->sk : NULL);
12256 cmd->cmd_complete(cmd, status);
12257 mgmt_pending_remove(cmd);
/* Completion for the link-security (HCI auth enable) change. On failure,
 * all pending SET_LINK_SECURITY commands get the translated error; on
 * success the HCI_LINK_SECURITY flag is synced to the HCI_AUTH hardware
 * flag and, if anything changed, new settings are broadcast.
 */
12261 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
12263 struct cmd_lookup match = { NULL, hdev };
12267 u8 mgmt_err = mgmt_status(status);
12268 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
12269 cmd_status_rsp, &mgmt_err);
/* test_and_{set,clear} return the prior state, so "changed" is exact. */
12273 if (test_bit(HCI_AUTH, &hdev->flags))
12274 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
12276 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
12278 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
12282 new_settings(hdev, match.sk);
12285 sock_put(match.sk);
/* mgmt_pending_foreach callback: captures the first pending command's
 * socket into the cmd_lookup, taking a reference the caller must drop
 * with sock_put().
 */
12288 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
12290 struct cmd_lookup *match = data;
12292 if (match->sk == NULL) {
12293 match->sk = cmd->sk;
12294 sock_hold(match->sk);
/* Completion for a class-of-device update (from SET_DEV_CLASS, ADD_UUID
 * or REMOVE_UUID). Locates the issuing socket among the three pending
 * opcodes, then broadcasts CLASS_OF_DEV_CHANGED (3-byte CoD) and the
 * extended-info change.
 */
12298 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
12301 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
12303 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
12304 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
12305 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
12308 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
12309 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
12310 ext_info_changed(hdev, NULL);
12314 sock_put(match.sk);
/* Completion for a local-name change. Stores the new name in hdev,
 * broadcasts LOCAL_NAME_CHANGED (excluding the issuer's socket) plus the
 * extended-info change - unless the change is part of the power-on
 * sequence, in which case no events are sent.
 */
12317 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
12319 struct mgmt_cp_set_local_name ev;
12320 struct mgmt_pending_cmd *cmd;
12325 memset(&ev, 0, sizeof(ev));
12326 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
12327 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
12329 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
12331 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
12333 /* If this is a HCI command related to powering on the
12334 * HCI dev don't send any mgmt signals.
12336 if (pending_find(MGMT_OP_SET_POWERED, hdev))
12340 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
12341 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
12342 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
12345 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
12349 for (i = 0; i < uuid_count; i++) {
12350 if (!memcmp(uuid, uuids[i], 16))
12357 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
12361 while (parsed < eir_len) {
12362 u8 field_len = eir[0];
12366 if (field_len == 0)
12369 if (eir_len - parsed < field_len + 1)
12373 case EIR_UUID16_ALL:
12374 case EIR_UUID16_SOME:
12375 for (i = 0; i + 3 <= field_len; i += 2) {
12376 memcpy(uuid, bluetooth_base_uuid, 16);
12377 uuid[13] = eir[i + 3];
12378 uuid[12] = eir[i + 2];
12379 if (has_uuid(uuid, uuid_count, uuids))
12383 case EIR_UUID32_ALL:
12384 case EIR_UUID32_SOME:
12385 for (i = 0; i + 5 <= field_len; i += 4) {
12386 memcpy(uuid, bluetooth_base_uuid, 16);
12387 uuid[15] = eir[i + 5];
12388 uuid[14] = eir[i + 4];
12389 uuid[13] = eir[i + 3];
12390 uuid[12] = eir[i + 2];
12391 if (has_uuid(uuid, uuid_count, uuids))
12395 case EIR_UUID128_ALL:
12396 case EIR_UUID128_SOME:
12397 for (i = 0; i + 17 <= field_len; i += 16) {
12398 memcpy(uuid, eir + i + 2, 16);
12399 if (has_uuid(uuid, uuid_count, uuids))
12405 parsed += field_len + 1;
12406 eir += field_len + 1;
12412 static void restart_le_scan(struct hci_dev *hdev)
12414 /* If controller is not scanning we are done. */
12415 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
12418 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
12419 hdev->discovery.scan_start +
12420 hdev->discovery.scan_duration))
12423 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
12424 DISCOV_LE_RESTART_DELAY);
12427 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
12428 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
12430 /* If a RSSI threshold has been specified, and
12431 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
12432 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
12433 * is set, let it through for further processing, as we might need to
12434 * restart the scan.
12436 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
12437 * the results are also dropped.
12439 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12440 (rssi == HCI_RSSI_INVALID ||
12441 (rssi < hdev->discovery.rssi &&
12442 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
12445 if (hdev->discovery.uuid_count != 0) {
12446 /* If a list of UUIDs is provided in filter, results with no
12447 * matching UUID should be dropped.
12449 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
12450 hdev->discovery.uuids) &&
12451 !eir_has_uuids(scan_rsp, scan_rsp_len,
12452 hdev->discovery.uuid_count,
12453 hdev->discovery.uuids))
12457 /* If duplicate filtering does not report RSSI changes, then restart
12458 * scanning to ensure updated result with updated RSSI values.
12460 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
12461 restart_le_scan(hdev);
12463 /* Validate RSSI value against the RSSI threshold once more. */
12464 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12465 rssi < hdev->discovery.rssi)
12472 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
12473 bdaddr_t *bdaddr, u8 addr_type)
12475 struct mgmt_ev_adv_monitor_device_lost ev;
12477 ev.monitor_handle = cpu_to_le16(handle);
12478 bacpy(&ev.addr.bdaddr, bdaddr);
12479 ev.addr.type = addr_type;
12481 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
12485 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
12486 struct sk_buff *skb,
12487 struct sock *skip_sk,
12490 struct sk_buff *advmon_skb;
12491 size_t advmon_skb_len;
12492 __le16 *monitor_handle;
12497 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
12498 sizeof(struct mgmt_ev_device_found)) + skb->len;
12499 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
12504 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
12505 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
12506 * store monitor_handle of the matched monitor.
12508 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
12509 *monitor_handle = cpu_to_le16(handle);
12510 skb_put_data(advmon_skb, skb->data, skb->len);
12512 mgmt_event_skb(advmon_skb, skip_sk);
12515 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
12516 bdaddr_t *bdaddr, bool report_device,
12517 struct sk_buff *skb,
12518 struct sock *skip_sk)
12520 struct monitored_device *dev, *tmp;
12521 bool matched = false;
12522 bool notified = false;
12524 /* We have received the Advertisement Report because:
12525 * 1. the kernel has initiated active discovery
12526 * 2. if not, we have pend_le_reports > 0 in which case we are doing
12528 * 3. if none of the above is true, we have one or more active
12529 * Advertisement Monitor
12531 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
12532 * and report ONLY one advertisement per device for the matched Monitor
12533 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12535 * For case 3, since we are not active scanning and all advertisements
12536 * received are due to a matched Advertisement Monitor, report all
12537 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12539 if (report_device && !hdev->advmon_pend_notify) {
12540 mgmt_event_skb(skb, skip_sk);
12544 hdev->advmon_pend_notify = false;
12546 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
12547 if (!bacmp(&dev->bdaddr, bdaddr)) {
12550 if (!dev->notified) {
12551 mgmt_send_adv_monitor_device_found(hdev, skb,
12555 dev->notified = true;
12559 if (!dev->notified)
12560 hdev->advmon_pend_notify = true;
12563 if (!report_device &&
12564 ((matched && !notified) || !msft_monitor_supported(hdev))) {
12565 /* Handle 0 indicates that we are not active scanning and this
12566 * is a subsequent advertisement report for an already matched
12567 * Advertisement Monitor or the controller offloading support
12568 * is not available.
12570 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
12574 mgmt_event_skb(skb, skip_sk);
12579 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
12580 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
12581 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12584 struct sk_buff *skb;
12585 struct mgmt_ev_mesh_device_found *ev;
12588 if (!hdev->mesh_ad_types[0])
12591 /* Scan for requested AD types */
12593 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
12594 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12595 if (!hdev->mesh_ad_types[j])
12598 if (hdev->mesh_ad_types[j] == eir[i + 1])
12604 if (scan_rsp_len > 0) {
12605 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
12606 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12607 if (!hdev->mesh_ad_types[j])
12610 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
12619 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
12620 sizeof(*ev) + eir_len + scan_rsp_len);
12624 ev = skb_put(skb, sizeof(*ev));
12626 bacpy(&ev->addr.bdaddr, bdaddr);
12627 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
12629 ev->flags = cpu_to_le32(flags);
12630 ev->instant = cpu_to_le64(instant);
12633 /* Copy EIR or advertising data into event */
12634 skb_put_data(skb, eir, eir_len);
12636 if (scan_rsp_len > 0)
12637 /* Append scan response data to event */
12638 skb_put_data(skb, scan_rsp, scan_rsp_len);
12640 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12642 mgmt_event_skb(skb, NULL);
12645 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12646 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
12647 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12650 struct sk_buff *skb;
12651 struct mgmt_ev_device_found *ev;
12652 bool report_device = hci_discovery_active(hdev);
12654 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
12655 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
12656 eir, eir_len, scan_rsp, scan_rsp_len,
12659 /* Don't send events for a non-kernel initiated discovery. With
12660 * LE one exception is if we have pend_le_reports > 0 in which
12661 * case we're doing passive scanning and want these events.
12663 if (!hci_discovery_active(hdev)) {
12664 if (link_type == ACL_LINK)
12666 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
12667 report_device = true;
12668 else if (!hci_is_adv_monitoring(hdev))
12672 if (hdev->discovery.result_filtering) {
12673 /* We are using service discovery */
12674 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
12679 if (hdev->discovery.limited) {
12680 /* Check for limited discoverable bit */
12682 if (!(dev_class[1] & 0x20))
12685 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
12686 if (!flags || !(flags[0] & LE_AD_LIMITED))
12691 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
12692 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12693 sizeof(*ev) + eir_len + scan_rsp_len + 5);
12697 ev = skb_put(skb, sizeof(*ev));
12699 /* In case of device discovery with BR/EDR devices (pre 1.2), the
12700 * RSSI value was reported as 0 when not available. This behavior
12701 * is kept when using device discovery. This is required for full
12702 * backwards compatibility with the API.
12704 * However when using service discovery, the value 127 will be
12705 * returned when the RSSI is not available.
12707 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
12708 link_type == ACL_LINK)
12711 bacpy(&ev->addr.bdaddr, bdaddr);
12712 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12714 ev->flags = cpu_to_le32(flags);
12717 /* Copy EIR or advertising data into event */
12718 skb_put_data(skb, eir, eir_len);
12720 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
12723 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
12725 skb_put_data(skb, eir_cod, sizeof(eir_cod));
12728 if (scan_rsp_len > 0)
12729 /* Append scan response data to event */
12730 skb_put_data(skb, scan_rsp, scan_rsp_len);
12732 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12734 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
12737 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12738 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
12740 struct sk_buff *skb;
12741 struct mgmt_ev_device_found *ev;
12745 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12746 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
12748 ev = skb_put(skb, sizeof(*ev));
12749 bacpy(&ev->addr.bdaddr, bdaddr);
12750 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12754 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12756 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
12758 ev->eir_len = cpu_to_le16(eir_len);
12759 ev->flags = cpu_to_le32(flags);
12761 mgmt_event_skb(skb, NULL);
12764 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
12766 struct mgmt_ev_discovering ev;
12768 bt_dev_dbg(hdev, "discovering %u", discovering);
12770 memset(&ev, 0, sizeof(ev));
12771 ev.type = hdev->discovery.type;
12772 ev.discovering = discovering;
12774 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
12777 void mgmt_suspending(struct hci_dev *hdev, u8 state)
12779 struct mgmt_ev_controller_suspend ev;
12781 ev.suspend_state = state;
12782 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
12785 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
12788 struct mgmt_ev_controller_resume ev;
12790 ev.wake_reason = reason;
12792 bacpy(&ev.addr.bdaddr, bdaddr);
12793 ev.addr.type = addr_type;
12795 memset(&ev.addr, 0, sizeof(ev.addr));
12798 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
12801 static struct hci_mgmt_chan chan = {
12802 .channel = HCI_CHANNEL_CONTROL,
12803 .handler_count = ARRAY_SIZE(mgmt_handlers),
12804 .handlers = mgmt_handlers,
12806 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
12807 .tizen_handlers = tizen_mgmt_handlers,
12809 .hdev_init = mgmt_init_hdev,
12812 int mgmt_init(void)
12814 return hci_mgmt_chan_register(&chan);
12817 void mgmt_exit(void)
12819 hci_mgmt_chan_unregister(&chan);
12822 void mgmt_cleanup(struct sock *sk)
12824 struct mgmt_mesh_tx *mesh_tx;
12825 struct hci_dev *hdev;
12827 read_lock(&hci_dev_list_lock);
12829 list_for_each_entry(hdev, &hci_dev_list, list) {
12831 mesh_tx = mgmt_mesh_next(hdev, sk);
12834 mesh_send_complete(hdev, mesh_tx, true);
12838 read_unlock(&hci_dev_list_lock);