2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
48 #define MGMT_VERSION 1
49 #define MGMT_REVISION 22
51 static const u16 mgmt_commands[] = {
52 MGMT_OP_READ_INDEX_LIST,
55 MGMT_OP_SET_DISCOVERABLE,
56 MGMT_OP_SET_CONNECTABLE,
57 MGMT_OP_SET_FAST_CONNECTABLE,
59 MGMT_OP_SET_LINK_SECURITY,
63 MGMT_OP_SET_DEV_CLASS,
64 MGMT_OP_SET_LOCAL_NAME,
67 MGMT_OP_LOAD_LINK_KEYS,
68 MGMT_OP_LOAD_LONG_TERM_KEYS,
70 MGMT_OP_GET_CONNECTIONS,
71 MGMT_OP_PIN_CODE_REPLY,
72 MGMT_OP_PIN_CODE_NEG_REPLY,
73 MGMT_OP_SET_IO_CAPABILITY,
75 MGMT_OP_CANCEL_PAIR_DEVICE,
76 MGMT_OP_UNPAIR_DEVICE,
77 MGMT_OP_USER_CONFIRM_REPLY,
78 MGMT_OP_USER_CONFIRM_NEG_REPLY,
79 MGMT_OP_USER_PASSKEY_REPLY,
80 MGMT_OP_USER_PASSKEY_NEG_REPLY,
81 MGMT_OP_READ_LOCAL_OOB_DATA,
82 MGMT_OP_ADD_REMOTE_OOB_DATA,
83 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
84 MGMT_OP_START_DISCOVERY,
85 MGMT_OP_STOP_DISCOVERY,
88 MGMT_OP_UNBLOCK_DEVICE,
89 MGMT_OP_SET_DEVICE_ID,
90 MGMT_OP_SET_ADVERTISING,
92 MGMT_OP_SET_STATIC_ADDRESS,
93 MGMT_OP_SET_SCAN_PARAMS,
94 MGMT_OP_SET_SECURE_CONN,
95 MGMT_OP_SET_DEBUG_KEYS,
98 MGMT_OP_GET_CONN_INFO,
99 MGMT_OP_GET_CLOCK_INFO,
101 MGMT_OP_REMOVE_DEVICE,
102 MGMT_OP_LOAD_CONN_PARAM,
103 MGMT_OP_READ_UNCONF_INDEX_LIST,
104 MGMT_OP_READ_CONFIG_INFO,
105 MGMT_OP_SET_EXTERNAL_CONFIG,
106 MGMT_OP_SET_PUBLIC_ADDRESS,
107 MGMT_OP_START_SERVICE_DISCOVERY,
108 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
109 MGMT_OP_READ_EXT_INDEX_LIST,
110 MGMT_OP_READ_ADV_FEATURES,
111 MGMT_OP_ADD_ADVERTISING,
112 MGMT_OP_REMOVE_ADVERTISING,
113 MGMT_OP_GET_ADV_SIZE_INFO,
114 MGMT_OP_START_LIMITED_DISCOVERY,
115 MGMT_OP_READ_EXT_INFO,
116 MGMT_OP_SET_APPEARANCE,
117 MGMT_OP_GET_PHY_CONFIGURATION,
118 MGMT_OP_SET_PHY_CONFIGURATION,
119 MGMT_OP_SET_BLOCKED_KEYS,
120 MGMT_OP_SET_WIDEBAND_SPEECH,
121 MGMT_OP_READ_CONTROLLER_CAP,
122 MGMT_OP_READ_EXP_FEATURES_INFO,
123 MGMT_OP_SET_EXP_FEATURE,
124 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
125 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
126 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
127 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
128 MGMT_OP_GET_DEVICE_FLAGS,
129 MGMT_OP_SET_DEVICE_FLAGS,
130 MGMT_OP_READ_ADV_MONITOR_FEATURES,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
132 MGMT_OP_REMOVE_ADV_MONITOR,
133 MGMT_OP_ADD_EXT_ADV_PARAMS,
134 MGMT_OP_ADD_EXT_ADV_DATA,
135 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 MGMT_OP_SET_MESH_RECEIVER,
137 MGMT_OP_MESH_READ_FEATURES,
139 MGMT_OP_MESH_SEND_CANCEL,
142 static const u16 mgmt_events[] = {
143 MGMT_EV_CONTROLLER_ERROR,
145 MGMT_EV_INDEX_REMOVED,
146 MGMT_EV_NEW_SETTINGS,
147 MGMT_EV_CLASS_OF_DEV_CHANGED,
148 MGMT_EV_LOCAL_NAME_CHANGED,
149 MGMT_EV_NEW_LINK_KEY,
150 MGMT_EV_NEW_LONG_TERM_KEY,
151 MGMT_EV_DEVICE_CONNECTED,
152 MGMT_EV_DEVICE_DISCONNECTED,
153 MGMT_EV_CONNECT_FAILED,
154 MGMT_EV_PIN_CODE_REQUEST,
155 MGMT_EV_USER_CONFIRM_REQUEST,
156 MGMT_EV_USER_PASSKEY_REQUEST,
158 MGMT_EV_DEVICE_FOUND,
160 MGMT_EV_DEVICE_BLOCKED,
161 MGMT_EV_DEVICE_UNBLOCKED,
162 MGMT_EV_DEVICE_UNPAIRED,
163 MGMT_EV_PASSKEY_NOTIFY,
166 MGMT_EV_DEVICE_ADDED,
167 MGMT_EV_DEVICE_REMOVED,
168 MGMT_EV_NEW_CONN_PARAM,
169 MGMT_EV_UNCONF_INDEX_ADDED,
170 MGMT_EV_UNCONF_INDEX_REMOVED,
171 MGMT_EV_NEW_CONFIG_OPTIONS,
172 MGMT_EV_EXT_INDEX_ADDED,
173 MGMT_EV_EXT_INDEX_REMOVED,
174 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
175 MGMT_EV_ADVERTISING_ADDED,
176 MGMT_EV_ADVERTISING_REMOVED,
177 MGMT_EV_EXT_INFO_CHANGED,
178 MGMT_EV_PHY_CONFIGURATION_CHANGED,
179 MGMT_EV_EXP_FEATURE_CHANGED,
180 MGMT_EV_DEVICE_FLAGS_CHANGED,
181 MGMT_EV_ADV_MONITOR_ADDED,
182 MGMT_EV_ADV_MONITOR_REMOVED,
183 MGMT_EV_CONTROLLER_SUSPEND,
184 MGMT_EV_CONTROLLER_RESUME,
185 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
186 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
189 static const u16 mgmt_untrusted_commands[] = {
190 MGMT_OP_READ_INDEX_LIST,
192 MGMT_OP_READ_UNCONF_INDEX_LIST,
193 MGMT_OP_READ_CONFIG_INFO,
194 MGMT_OP_READ_EXT_INDEX_LIST,
195 MGMT_OP_READ_EXT_INFO,
196 MGMT_OP_READ_CONTROLLER_CAP,
197 MGMT_OP_READ_EXP_FEATURES_INFO,
198 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
199 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
202 static const u16 mgmt_untrusted_events[] = {
204 MGMT_EV_INDEX_REMOVED,
205 MGMT_EV_NEW_SETTINGS,
206 MGMT_EV_CLASS_OF_DEV_CHANGED,
207 MGMT_EV_LOCAL_NAME_CHANGED,
208 MGMT_EV_UNCONF_INDEX_ADDED,
209 MGMT_EV_UNCONF_INDEX_REMOVED,
210 MGMT_EV_NEW_CONFIG_OPTIONS,
211 MGMT_EV_EXT_INDEX_ADDED,
212 MGMT_EV_EXT_INDEX_REMOVED,
213 MGMT_EV_EXT_INFO_CHANGED,
214 MGMT_EV_EXP_FEATURE_CHANGED,
217 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
219 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
220 "\x00\x00\x00\x00\x00\x00\x00\x00"
222 /* HCI to MGMT error code conversion table */
223 static const u8 mgmt_status_table[] = {
225 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
226 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
227 MGMT_STATUS_FAILED, /* Hardware Failure */
228 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
229 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
230 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
231 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
232 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
233 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
234 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
235 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
236 MGMT_STATUS_BUSY, /* Command Disallowed */
237 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
238 MGMT_STATUS_REJECTED, /* Rejected Security */
239 MGMT_STATUS_REJECTED, /* Rejected Personal */
240 MGMT_STATUS_TIMEOUT, /* Host Timeout */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
242 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
243 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
244 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
245 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
246 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
247 MGMT_STATUS_BUSY, /* Repeated Attempts */
248 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
249 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
250 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
251 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
252 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
253 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
254 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
255 MGMT_STATUS_FAILED, /* Unspecified Error */
256 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
257 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
258 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
259 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
260 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
261 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
262 MGMT_STATUS_FAILED, /* Unit Link Key Used */
263 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
264 MGMT_STATUS_TIMEOUT, /* Instant Passed */
265 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
266 MGMT_STATUS_FAILED, /* Transaction Collision */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
269 MGMT_STATUS_REJECTED, /* QoS Rejected */
270 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
271 MGMT_STATUS_REJECTED, /* Insufficient Security */
272 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
273 MGMT_STATUS_FAILED, /* Reserved for future use */
274 MGMT_STATUS_BUSY, /* Role Switch Pending */
275 MGMT_STATUS_FAILED, /* Reserved for future use */
276 MGMT_STATUS_FAILED, /* Slot Violation */
277 MGMT_STATUS_FAILED, /* Role Switch Failed */
278 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
279 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
280 MGMT_STATUS_BUSY, /* Host Busy Pairing */
281 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
282 MGMT_STATUS_BUSY, /* Controller Busy */
283 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
284 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
285 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
286 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
287 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
290 static u8 mgmt_errno_status(int err)
294 return MGMT_STATUS_SUCCESS;
296 return MGMT_STATUS_REJECTED;
298 return MGMT_STATUS_INVALID_PARAMS;
300 return MGMT_STATUS_NOT_SUPPORTED;
302 return MGMT_STATUS_BUSY;
304 return MGMT_STATUS_AUTH_FAILED;
306 return MGMT_STATUS_NO_RESOURCES;
308 return MGMT_STATUS_ALREADY_CONNECTED;
310 return MGMT_STATUS_DISCONNECTED;
313 return MGMT_STATUS_FAILED;
316 static u8 mgmt_status(int err)
319 return mgmt_errno_status(err);
321 if (err < ARRAY_SIZE(mgmt_status_table))
322 return mgmt_status_table[err];
324 return MGMT_STATUS_FAILED;
327 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
330 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
335 u16 len, int flag, struct sock *skip_sk)
337 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
342 struct sock *skip_sk)
344 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
345 HCI_SOCK_TRUSTED, skip_sk);
348 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
350 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
354 static u8 le_addr_type(u8 mgmt_addr_type)
356 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
357 return ADDR_LE_DEV_PUBLIC;
359 return ADDR_LE_DEV_RANDOM;
362 void mgmt_fill_version_info(void *ver)
364 struct mgmt_rp_read_version *rp = ver;
366 rp->version = MGMT_VERSION;
367 rp->revision = cpu_to_le16(MGMT_REVISION);
370 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
373 struct mgmt_rp_read_version rp;
375 bt_dev_dbg(hdev, "sock %p", sk);
377 mgmt_fill_version_info(&rp);
379 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
383 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
386 struct mgmt_rp_read_commands *rp;
387 u16 num_commands, num_events;
391 bt_dev_dbg(hdev, "sock %p", sk);
393 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
394 num_commands = ARRAY_SIZE(mgmt_commands);
395 num_events = ARRAY_SIZE(mgmt_events);
397 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
398 num_events = ARRAY_SIZE(mgmt_untrusted_events);
401 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
403 rp = kmalloc(rp_size, GFP_KERNEL);
407 rp->num_commands = cpu_to_le16(num_commands);
408 rp->num_events = cpu_to_le16(num_events);
410 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
411 __le16 *opcode = rp->opcodes;
413 for (i = 0; i < num_commands; i++, opcode++)
414 put_unaligned_le16(mgmt_commands[i], opcode);
416 for (i = 0; i < num_events; i++, opcode++)
417 put_unaligned_le16(mgmt_events[i], opcode);
419 __le16 *opcode = rp->opcodes;
421 for (i = 0; i < num_commands; i++, opcode++)
422 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
424 for (i = 0; i < num_events; i++, opcode++)
425 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
428 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
435 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
438 struct mgmt_rp_read_index_list *rp;
444 bt_dev_dbg(hdev, "sock %p", sk);
446 read_lock(&hci_dev_list_lock);
449 list_for_each_entry(d, &hci_dev_list, list) {
450 if (d->dev_type == HCI_PRIMARY &&
451 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
455 rp_len = sizeof(*rp) + (2 * count);
456 rp = kmalloc(rp_len, GFP_ATOMIC);
458 read_unlock(&hci_dev_list_lock);
463 list_for_each_entry(d, &hci_dev_list, list) {
464 if (hci_dev_test_flag(d, HCI_SETUP) ||
465 hci_dev_test_flag(d, HCI_CONFIG) ||
466 hci_dev_test_flag(d, HCI_USER_CHANNEL))
469 /* Devices marked as raw-only are neither configured
470 * nor unconfigured controllers.
472 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
475 if (d->dev_type == HCI_PRIMARY &&
476 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
477 rp->index[count++] = cpu_to_le16(d->id);
478 bt_dev_dbg(hdev, "Added hci%u", d->id);
482 rp->num_controllers = cpu_to_le16(count);
483 rp_len = sizeof(*rp) + (2 * count);
485 read_unlock(&hci_dev_list_lock);
487 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
495 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
496 void *data, u16 data_len)
498 struct mgmt_rp_read_unconf_index_list *rp;
504 bt_dev_dbg(hdev, "sock %p", sk);
506 read_lock(&hci_dev_list_lock);
509 list_for_each_entry(d, &hci_dev_list, list) {
510 if (d->dev_type == HCI_PRIMARY &&
511 hci_dev_test_flag(d, HCI_UNCONFIGURED))
515 rp_len = sizeof(*rp) + (2 * count);
516 rp = kmalloc(rp_len, GFP_ATOMIC);
518 read_unlock(&hci_dev_list_lock);
523 list_for_each_entry(d, &hci_dev_list, list) {
524 if (hci_dev_test_flag(d, HCI_SETUP) ||
525 hci_dev_test_flag(d, HCI_CONFIG) ||
526 hci_dev_test_flag(d, HCI_USER_CHANNEL))
529 /* Devices marked as raw-only are neither configured
530 * nor unconfigured controllers.
532 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
535 if (d->dev_type == HCI_PRIMARY &&
536 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
537 rp->index[count++] = cpu_to_le16(d->id);
538 bt_dev_dbg(hdev, "Added hci%u", d->id);
542 rp->num_controllers = cpu_to_le16(count);
543 rp_len = sizeof(*rp) + (2 * count);
545 read_unlock(&hci_dev_list_lock);
547 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
548 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
555 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
556 void *data, u16 data_len)
558 struct mgmt_rp_read_ext_index_list *rp;
563 bt_dev_dbg(hdev, "sock %p", sk);
565 read_lock(&hci_dev_list_lock);
568 list_for_each_entry(d, &hci_dev_list, list) {
569 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
573 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
575 read_unlock(&hci_dev_list_lock);
580 list_for_each_entry(d, &hci_dev_list, list) {
581 if (hci_dev_test_flag(d, HCI_SETUP) ||
582 hci_dev_test_flag(d, HCI_CONFIG) ||
583 hci_dev_test_flag(d, HCI_USER_CHANNEL))
586 /* Devices marked as raw-only are neither configured
587 * nor unconfigured controllers.
589 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
592 if (d->dev_type == HCI_PRIMARY) {
593 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
594 rp->entry[count].type = 0x01;
596 rp->entry[count].type = 0x00;
597 } else if (d->dev_type == HCI_AMP) {
598 rp->entry[count].type = 0x02;
603 rp->entry[count].bus = d->bus;
604 rp->entry[count++].index = cpu_to_le16(d->id);
605 bt_dev_dbg(hdev, "Added hci%u", d->id);
608 rp->num_controllers = cpu_to_le16(count);
610 read_unlock(&hci_dev_list_lock);
612 /* If this command is called at least once, then all the
613 * default index and unconfigured index events are disabled
614 * and from now on only extended index events are used.
616 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
617 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
618 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
620 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
621 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
622 struct_size(rp, entry, count));
629 static bool is_configured(struct hci_dev *hdev)
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
643 static __le32 get_missing_options(struct hci_dev *hdev)
647 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
648 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
649 options |= MGMT_OPTION_EXTERNAL_CONFIG;
651 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
652 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
653 !bacmp(&hdev->public_addr, BDADDR_ANY))
654 options |= MGMT_OPTION_PUBLIC_ADDRESS;
656 return cpu_to_le32(options);
659 static int new_options(struct hci_dev *hdev, struct sock *skip)
661 __le32 options = get_missing_options(hdev);
663 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
664 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
667 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
669 __le32 options = get_missing_options(hdev);
671 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
675 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
676 void *data, u16 data_len)
678 struct mgmt_rp_read_config_info rp;
681 bt_dev_dbg(hdev, "sock %p", sk);
685 memset(&rp, 0, sizeof(rp));
686 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
688 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
689 options |= MGMT_OPTION_EXTERNAL_CONFIG;
691 if (hdev->set_bdaddr)
692 options |= MGMT_OPTION_PUBLIC_ADDRESS;
694 rp.supported_options = cpu_to_le32(options);
695 rp.missing_options = get_missing_options(hdev);
697 hci_dev_unlock(hdev);
699 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
703 static u32 get_supported_phys(struct hci_dev *hdev)
705 u32 supported_phys = 0;
707 if (lmp_bredr_capable(hdev)) {
708 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
710 if (hdev->features[0][0] & LMP_3SLOT)
711 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
713 if (hdev->features[0][0] & LMP_5SLOT)
714 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
716 if (lmp_edr_2m_capable(hdev)) {
717 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
719 if (lmp_edr_3slot_capable(hdev))
720 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
722 if (lmp_edr_5slot_capable(hdev))
723 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
725 if (lmp_edr_3m_capable(hdev)) {
726 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev))
729 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
731 if (lmp_edr_5slot_capable(hdev))
732 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
737 if (lmp_le_capable(hdev)) {
738 supported_phys |= MGMT_PHY_LE_1M_TX;
739 supported_phys |= MGMT_PHY_LE_1M_RX;
741 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
742 supported_phys |= MGMT_PHY_LE_2M_TX;
743 supported_phys |= MGMT_PHY_LE_2M_RX;
746 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
747 supported_phys |= MGMT_PHY_LE_CODED_TX;
748 supported_phys |= MGMT_PHY_LE_CODED_RX;
752 return supported_phys;
755 static u32 get_selected_phys(struct hci_dev *hdev)
757 u32 selected_phys = 0;
759 if (lmp_bredr_capable(hdev)) {
760 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
762 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
763 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
765 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
766 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
768 if (lmp_edr_2m_capable(hdev)) {
769 if (!(hdev->pkt_type & HCI_2DH1))
770 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
772 if (lmp_edr_3slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH3))
774 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
776 if (lmp_edr_5slot_capable(hdev) &&
777 !(hdev->pkt_type & HCI_2DH5))
778 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
780 if (lmp_edr_3m_capable(hdev)) {
781 if (!(hdev->pkt_type & HCI_3DH1))
782 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
784 if (lmp_edr_3slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH3))
786 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
788 if (lmp_edr_5slot_capable(hdev) &&
789 !(hdev->pkt_type & HCI_3DH5))
790 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
795 if (lmp_le_capable(hdev)) {
796 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
797 selected_phys |= MGMT_PHY_LE_1M_TX;
799 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
800 selected_phys |= MGMT_PHY_LE_1M_RX;
802 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
803 selected_phys |= MGMT_PHY_LE_2M_TX;
805 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
806 selected_phys |= MGMT_PHY_LE_2M_RX;
808 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
809 selected_phys |= MGMT_PHY_LE_CODED_TX;
811 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
812 selected_phys |= MGMT_PHY_LE_CODED_RX;
815 return selected_phys;
818 static u32 get_configurable_phys(struct hci_dev *hdev)
820 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
821 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
824 static u32 get_supported_settings(struct hci_dev *hdev)
828 settings |= MGMT_SETTING_POWERED;
829 settings |= MGMT_SETTING_BONDABLE;
830 settings |= MGMT_SETTING_DEBUG_KEYS;
831 settings |= MGMT_SETTING_CONNECTABLE;
832 settings |= MGMT_SETTING_DISCOVERABLE;
834 if (lmp_bredr_capable(hdev)) {
835 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
836 settings |= MGMT_SETTING_FAST_CONNECTABLE;
837 settings |= MGMT_SETTING_BREDR;
838 settings |= MGMT_SETTING_LINK_SECURITY;
840 if (lmp_ssp_capable(hdev)) {
841 settings |= MGMT_SETTING_SSP;
842 if (IS_ENABLED(CONFIG_BT_HS))
843 settings |= MGMT_SETTING_HS;
846 if (lmp_sc_capable(hdev))
847 settings |= MGMT_SETTING_SECURE_CONN;
849 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
851 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
854 if (lmp_le_capable(hdev)) {
855 settings |= MGMT_SETTING_LE;
856 settings |= MGMT_SETTING_SECURE_CONN;
857 settings |= MGMT_SETTING_PRIVACY;
858 settings |= MGMT_SETTING_STATIC_ADDRESS;
859 settings |= MGMT_SETTING_ADVERTISING;
862 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
864 settings |= MGMT_SETTING_CONFIGURATION;
866 if (cis_central_capable(hdev))
867 settings |= MGMT_SETTING_CIS_CENTRAL;
869 if (cis_peripheral_capable(hdev))
870 settings |= MGMT_SETTING_CIS_PERIPHERAL;
872 settings |= MGMT_SETTING_PHY_CONFIGURATION;
877 static u32 get_current_settings(struct hci_dev *hdev)
881 if (hdev_is_powered(hdev))
882 settings |= MGMT_SETTING_POWERED;
884 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
885 settings |= MGMT_SETTING_CONNECTABLE;
887 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
888 settings |= MGMT_SETTING_FAST_CONNECTABLE;
890 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
891 settings |= MGMT_SETTING_DISCOVERABLE;
893 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
894 settings |= MGMT_SETTING_BONDABLE;
896 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
897 settings |= MGMT_SETTING_BREDR;
899 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
900 settings |= MGMT_SETTING_LE;
902 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
903 settings |= MGMT_SETTING_LINK_SECURITY;
905 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
906 settings |= MGMT_SETTING_SSP;
908 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
909 settings |= MGMT_SETTING_HS;
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
912 settings |= MGMT_SETTING_ADVERTISING;
914 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
915 settings |= MGMT_SETTING_SECURE_CONN;
917 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
918 settings |= MGMT_SETTING_DEBUG_KEYS;
920 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
921 settings |= MGMT_SETTING_PRIVACY;
923 /* The current setting for static address has two purposes. The
924 * first is to indicate if the static address will be used and
925 * the second is to indicate if it is actually set.
927 * This means if the static address is not configured, this flag
928 * will never be set. If the address is configured, then if the
929 * address is actually used decides if the flag is set or not.
931 * For single mode LE only controllers and dual-mode controllers
932 * with BR/EDR disabled, the existence of the static address will
935 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
936 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
937 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
938 if (bacmp(&hdev->static_addr, BDADDR_ANY))
939 settings |= MGMT_SETTING_STATIC_ADDRESS;
942 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
943 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
945 if (cis_central_capable(hdev))
946 settings |= MGMT_SETTING_CIS_CENTRAL;
948 if (cis_peripheral_capable(hdev))
949 settings |= MGMT_SETTING_CIS_PERIPHERAL;
954 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
956 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
959 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
961 struct mgmt_pending_cmd *cmd;
963 /* If there's a pending mgmt command the flags will not yet have
964 * their final values, so check for this first.
966 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
968 struct mgmt_mode *cp = cmd->param;
970 return LE_AD_GENERAL;
971 else if (cp->val == 0x02)
972 return LE_AD_LIMITED;
974 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
975 return LE_AD_LIMITED;
976 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
977 return LE_AD_GENERAL;
983 bool mgmt_get_connectable(struct hci_dev *hdev)
985 struct mgmt_pending_cmd *cmd;
987 /* If there's a pending mgmt command the flag will not yet have
988 * it's final value, so check for this first.
990 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
992 struct mgmt_mode *cp = cmd->param;
997 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1000 static int service_cache_sync(struct hci_dev *hdev, void *data)
1002 hci_update_eir_sync(hdev);
1003 hci_update_class_sync(hdev);
1008 static void service_cache_off(struct work_struct *work)
1010 struct hci_dev *hdev = container_of(work, struct hci_dev,
1011 service_cache.work);
1013 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1016 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1019 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1021 /* The generation of a new RPA and programming it into the
1022 * controller happens in the hci_req_enable_advertising()
1025 if (ext_adv_capable(hdev))
1026 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1028 return hci_enable_advertising_sync(hdev);
1031 static void rpa_expired(struct work_struct *work)
1033 struct hci_dev *hdev = container_of(work, struct hci_dev,
1036 bt_dev_dbg(hdev, "");
1038 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1040 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1043 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1046 static void discov_off(struct work_struct *work)
1048 struct hci_dev *hdev = container_of(work, struct hci_dev,
1051 bt_dev_dbg(hdev, "");
1055 /* When discoverable timeout triggers, then just make sure
1056 * the limited discoverable flag is cleared. Even in the case
1057 * of a timeout triggered from general discoverable, it is
1058 * safe to unconditionally clear the flag.
1060 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1061 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1062 hdev->discov_timeout = 0;
1064 hci_update_discoverable(hdev);
1066 mgmt_new_settings(hdev);
1068 hci_dev_unlock(hdev);
1071 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1073 static void mesh_send_complete(struct hci_dev *hdev,
1074 struct mgmt_mesh_tx *mesh_tx, bool silent)
1076 u8 handle = mesh_tx->handle;
1079 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1080 sizeof(handle), NULL);
1082 mgmt_mesh_remove(mesh_tx);
1085 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1087 struct mgmt_mesh_tx *mesh_tx;
1089 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1090 hci_disable_advertising_sync(hdev);
1091 mesh_tx = mgmt_mesh_next(hdev, NULL);
1094 mesh_send_complete(hdev, mesh_tx, false);
1099 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1100 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1101 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1103 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1108 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1109 mesh_send_start_complete);
1112 mesh_send_complete(hdev, mesh_tx, false);
1114 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1117 static void mesh_send_done(struct work_struct *work)
1119 struct hci_dev *hdev = container_of(work, struct hci_dev,
1120 mesh_send_done.work);
1122 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1125 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1128 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1130 if (hci_dev_test_flag(hdev, HCI_MGMT))
1133 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1135 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1136 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1137 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1138 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1140 /* Non-mgmt controlled devices get this bit set
1141 * implicitly so that pairing works for them, however
1142 * for mgmt we require user-space to explicitly enable
1145 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1147 hci_dev_set_flag(hdev, HCI_MGMT);
1150 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1151 void *data, u16 data_len)
1153 struct mgmt_rp_read_info rp;
1155 bt_dev_dbg(hdev, "sock %p", sk);
1159 memset(&rp, 0, sizeof(rp));
1161 bacpy(&rp.bdaddr, &hdev->bdaddr);
1163 rp.version = hdev->hci_ver;
1164 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1166 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1167 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1169 memcpy(rp.dev_class, hdev->dev_class, 3);
1171 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1172 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1174 hci_dev_unlock(hdev);
1176 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1180 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1185 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1186 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1187 hdev->dev_class, 3);
1189 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1190 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1193 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1194 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1195 hdev->dev_name, name_len);
1197 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1198 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1199 hdev->short_name, name_len);
1204 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1205 void *data, u16 data_len)
1208 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1211 bt_dev_dbg(hdev, "sock %p", sk);
1213 memset(&buf, 0, sizeof(buf));
1217 bacpy(&rp->bdaddr, &hdev->bdaddr);
1219 rp->version = hdev->hci_ver;
1220 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1222 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1223 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1226 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1227 rp->eir_len = cpu_to_le16(eir_len);
1229 hci_dev_unlock(hdev);
1231 /* If this command is called at least once, then the events
1232 * for class of device and local name changes are disabled
1233 * and only the new extended controller information event
1236 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1237 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1238 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1241 sizeof(*rp) + eir_len);
1244 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1247 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1250 memset(buf, 0, sizeof(buf));
1252 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1253 ev->eir_len = cpu_to_le16(eir_len);
1255 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1256 sizeof(*ev) + eir_len,
1257 HCI_MGMT_EXT_INFO_EVENTS, skip);
1260 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1262 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1264 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1268 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1270 struct mgmt_ev_advertising_added ev;
1272 ev.instance = instance;
1274 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1277 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1280 struct mgmt_ev_advertising_removed ev;
1282 ev.instance = instance;
1284 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1287 static void cancel_adv_timeout(struct hci_dev *hdev)
1289 if (hdev->adv_instance_timeout) {
1290 hdev->adv_instance_timeout = 0;
1291 cancel_delayed_work(&hdev->adv_instance_expire);
1295 /* This function requires the caller holds hdev->lock */
1296 static void restart_le_actions(struct hci_dev *hdev)
1298 struct hci_conn_params *p;
1300 list_for_each_entry(p, &hdev->le_conn_params, list) {
1301 /* Needed for AUTO_OFF case where might not "really"
1302 * have been powered off.
1304 list_del_init(&p->action);
1306 switch (p->auto_connect) {
1307 case HCI_AUTO_CONN_DIRECT:
1308 case HCI_AUTO_CONN_ALWAYS:
1309 list_add(&p->action, &hdev->pend_le_conns);
1311 case HCI_AUTO_CONN_REPORT:
1312 list_add(&p->action, &hdev->pend_le_reports);
1320 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1322 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1324 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1325 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1328 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1330 struct mgmt_pending_cmd *cmd = data;
1331 struct mgmt_mode *cp;
1333 /* Make sure cmd still outstanding. */
1334 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1339 bt_dev_dbg(hdev, "err %d", err);
1344 restart_le_actions(hdev);
1345 hci_update_passive_scan(hdev);
1346 hci_dev_unlock(hdev);
1349 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1351 /* Only call new_setting for power on as power off is deferred
1352 * to hdev->power_off work which does call hci_dev_do_close.
1355 new_settings(hdev, cmd->sk);
1357 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1361 mgmt_pending_remove(cmd);
1364 static int set_powered_sync(struct hci_dev *hdev, void *data)
1366 struct mgmt_pending_cmd *cmd = data;
1367 struct mgmt_mode *cp = cmd->param;
1369 BT_DBG("%s", hdev->name);
1371 return hci_set_powered_sync(hdev, cp->val);
1374 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1377 struct mgmt_mode *cp = data;
1378 struct mgmt_pending_cmd *cmd;
1381 bt_dev_dbg(hdev, "sock %p", sk);
1383 if (cp->val != 0x00 && cp->val != 0x01)
1384 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1385 MGMT_STATUS_INVALID_PARAMS);
1389 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1390 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1395 if (!!cp->val == hdev_is_powered(hdev)) {
1396 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1400 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1406 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1407 mgmt_set_powered_complete);
1410 mgmt_pending_remove(cmd);
1413 hci_dev_unlock(hdev);
1417 int mgmt_new_settings(struct hci_dev *hdev)
1419 return new_settings(hdev, NULL);
1424 struct hci_dev *hdev;
1428 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1430 struct cmd_lookup *match = data;
1432 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1434 list_del(&cmd->list);
1436 if (match->sk == NULL) {
1437 match->sk = cmd->sk;
1438 sock_hold(match->sk);
1441 mgmt_pending_free(cmd);
1444 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1448 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1449 mgmt_pending_remove(cmd);
1452 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1454 if (cmd->cmd_complete) {
1457 cmd->cmd_complete(cmd, *status);
1458 mgmt_pending_remove(cmd);
1463 cmd_status_rsp(cmd, data);
1466 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1468 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1469 cmd->param, cmd->param_len);
1472 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1474 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1475 cmd->param, sizeof(struct mgmt_addr_info));
1478 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1480 if (!lmp_bredr_capable(hdev))
1481 return MGMT_STATUS_NOT_SUPPORTED;
1482 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1483 return MGMT_STATUS_REJECTED;
1485 return MGMT_STATUS_SUCCESS;
1488 static u8 mgmt_le_support(struct hci_dev *hdev)
1490 if (!lmp_le_capable(hdev))
1491 return MGMT_STATUS_NOT_SUPPORTED;
1492 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1493 return MGMT_STATUS_REJECTED;
1495 return MGMT_STATUS_SUCCESS;
1498 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1501 struct mgmt_pending_cmd *cmd = data;
1503 bt_dev_dbg(hdev, "err %d", err);
1505 /* Make sure cmd still outstanding. */
1506 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1512 u8 mgmt_err = mgmt_status(err);
1513 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1514 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1518 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1519 hdev->discov_timeout > 0) {
1520 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1521 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1524 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1525 new_settings(hdev, cmd->sk);
1528 mgmt_pending_remove(cmd);
1529 hci_dev_unlock(hdev);
1532 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1534 BT_DBG("%s", hdev->name);
1536 return hci_update_discoverable_sync(hdev);
1539 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1542 struct mgmt_cp_set_discoverable *cp = data;
1543 struct mgmt_pending_cmd *cmd;
1547 bt_dev_dbg(hdev, "sock %p", sk);
1549 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1550 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1552 MGMT_STATUS_REJECTED);
1554 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1556 MGMT_STATUS_INVALID_PARAMS);
1558 timeout = __le16_to_cpu(cp->timeout);
1560 /* Disabling discoverable requires that no timeout is set,
1561 * and enabling limited discoverable requires a timeout.
1563 if ((cp->val == 0x00 && timeout > 0) ||
1564 (cp->val == 0x02 && timeout == 0))
1565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_INVALID_PARAMS);
1570 if (!hdev_is_powered(hdev) && timeout > 0) {
1571 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_NOT_POWERED);
1576 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1577 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1578 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1583 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1584 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1585 MGMT_STATUS_REJECTED);
1589 if (hdev->advertising_paused) {
1590 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 if (!hdev_is_powered(hdev)) {
1596 bool changed = false;
1598 /* Setting limited discoverable when powered off is
1599 * not a valid operation since it requires a timeout
1600 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1602 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1603 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1607 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1612 err = new_settings(hdev, sk);
1617 /* If the current mode is the same, then just update the timeout
1618 * value with the new value. And if only the timeout gets updated,
1619 * then no need for any HCI transactions.
1621 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1622 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1623 HCI_LIMITED_DISCOVERABLE)) {
1624 cancel_delayed_work(&hdev->discov_off);
1625 hdev->discov_timeout = timeout;
1627 if (cp->val && hdev->discov_timeout > 0) {
1628 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1629 queue_delayed_work(hdev->req_workqueue,
1630 &hdev->discov_off, to);
1633 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1637 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1643 /* Cancel any potential discoverable timeout that might be
1644 * still active and store new timeout value. The arming of
1645 * the timeout happens in the complete handler.
1647 cancel_delayed_work(&hdev->discov_off);
1648 hdev->discov_timeout = timeout;
1651 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1653 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1655 /* Limited discoverable mode */
1656 if (cp->val == 0x02)
1657 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1659 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1661 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1662 mgmt_set_discoverable_complete);
1665 mgmt_pending_remove(cmd);
1668 hci_dev_unlock(hdev);
1672 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1675 struct mgmt_pending_cmd *cmd = data;
1677 bt_dev_dbg(hdev, "err %d", err);
1679 /* Make sure cmd still outstanding. */
1680 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1686 u8 mgmt_err = mgmt_status(err);
1687 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1691 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1692 new_settings(hdev, cmd->sk);
1696 mgmt_pending_remove(cmd);
1698 hci_dev_unlock(hdev);
1701 static int set_connectable_update_settings(struct hci_dev *hdev,
1702 struct sock *sk, u8 val)
1704 bool changed = false;
1707 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1711 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1713 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1714 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1717 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1722 hci_update_scan(hdev);
1723 hci_update_passive_scan(hdev);
1724 return new_settings(hdev, sk);
1730 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1732 BT_DBG("%s", hdev->name);
1734 return hci_update_connectable_sync(hdev);
1737 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1740 struct mgmt_mode *cp = data;
1741 struct mgmt_pending_cmd *cmd;
1744 bt_dev_dbg(hdev, "sock %p", sk);
1746 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1747 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1749 MGMT_STATUS_REJECTED);
1751 if (cp->val != 0x00 && cp->val != 0x01)
1752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1753 MGMT_STATUS_INVALID_PARAMS);
1757 if (!hdev_is_powered(hdev)) {
1758 err = set_connectable_update_settings(hdev, sk, cp->val);
1762 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1763 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1764 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1769 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1776 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1778 if (hdev->discov_timeout > 0)
1779 cancel_delayed_work(&hdev->discov_off);
1781 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1782 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1783 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1786 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1787 mgmt_set_connectable_complete);
1790 mgmt_pending_remove(cmd);
1793 hci_dev_unlock(hdev);
1797 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1800 struct mgmt_mode *cp = data;
1804 bt_dev_dbg(hdev, "sock %p", sk);
1806 if (cp->val != 0x00 && cp->val != 0x01)
1807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1808 MGMT_STATUS_INVALID_PARAMS);
1813 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1815 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1817 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1822 /* In limited privacy mode the change of bondable mode
1823 * may affect the local advertising address.
1825 hci_update_discoverable(hdev);
1827 err = new_settings(hdev, sk);
1831 hci_dev_unlock(hdev);
1835 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1838 struct mgmt_mode *cp = data;
1839 struct mgmt_pending_cmd *cmd;
1843 bt_dev_dbg(hdev, "sock %p", sk);
1845 status = mgmt_bredr_support(hdev);
1847 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1850 if (cp->val != 0x00 && cp->val != 0x01)
1851 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1852 MGMT_STATUS_INVALID_PARAMS);
1856 if (!hdev_is_powered(hdev)) {
1857 bool changed = false;
1859 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1860 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1864 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1869 err = new_settings(hdev, sk);
1874 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1875 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1882 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1883 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1887 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1893 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1895 mgmt_pending_remove(cmd);
1900 hci_dev_unlock(hdev);
1904 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1906 struct cmd_lookup match = { NULL, hdev };
1907 struct mgmt_pending_cmd *cmd = data;
1908 struct mgmt_mode *cp = cmd->param;
1909 u8 enable = cp->val;
1912 /* Make sure cmd still outstanding. */
1913 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1917 u8 mgmt_err = mgmt_status(err);
1919 if (enable && hci_dev_test_and_clear_flag(hdev,
1921 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1922 new_settings(hdev, NULL);
1925 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1931 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1933 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1936 changed = hci_dev_test_and_clear_flag(hdev,
1939 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1942 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1945 new_settings(hdev, match.sk);
1950 hci_update_eir_sync(hdev);
1953 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1955 struct mgmt_pending_cmd *cmd = data;
1956 struct mgmt_mode *cp = cmd->param;
1957 bool changed = false;
1961 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1963 err = hci_write_ssp_mode_sync(hdev, cp->val);
1965 if (!err && changed)
1966 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1971 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1973 struct mgmt_mode *cp = data;
1974 struct mgmt_pending_cmd *cmd;
1978 bt_dev_dbg(hdev, "sock %p", sk);
1980 status = mgmt_bredr_support(hdev);
1982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1984 if (!lmp_ssp_capable(hdev))
1985 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1986 MGMT_STATUS_NOT_SUPPORTED);
1988 if (cp->val != 0x00 && cp->val != 0x01)
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1990 MGMT_STATUS_INVALID_PARAMS);
1994 if (!hdev_is_powered(hdev)) {
1998 changed = !hci_dev_test_and_set_flag(hdev,
2001 changed = hci_dev_test_and_clear_flag(hdev,
2004 changed = hci_dev_test_and_clear_flag(hdev,
2007 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2010 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2015 err = new_settings(hdev, sk);
2020 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2021 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2026 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2027 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2031 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2035 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2039 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2040 MGMT_STATUS_FAILED);
2043 mgmt_pending_remove(cmd);
2047 hci_dev_unlock(hdev);
2051 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2053 struct mgmt_mode *cp = data;
2058 bt_dev_dbg(hdev, "sock %p", sk);
2060 if (!IS_ENABLED(CONFIG_BT_HS))
2061 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2062 MGMT_STATUS_NOT_SUPPORTED);
2064 status = mgmt_bredr_support(hdev);
2066 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2068 if (!lmp_ssp_capable(hdev))
2069 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2070 MGMT_STATUS_NOT_SUPPORTED);
2072 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2073 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2074 MGMT_STATUS_REJECTED);
2076 if (cp->val != 0x00 && cp->val != 0x01)
2077 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2078 MGMT_STATUS_INVALID_PARAMS);
2082 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2083 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2089 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2091 if (hdev_is_powered(hdev)) {
2092 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2093 MGMT_STATUS_REJECTED);
2097 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2100 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2105 err = new_settings(hdev, sk);
2108 hci_dev_unlock(hdev);
2112 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2114 struct cmd_lookup match = { NULL, hdev };
2115 u8 status = mgmt_status(err);
2117 bt_dev_dbg(hdev, "err %d", err);
2120 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2125 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2127 new_settings(hdev, match.sk);
2133 static int set_le_sync(struct hci_dev *hdev, void *data)
2135 struct mgmt_pending_cmd *cmd = data;
2136 struct mgmt_mode *cp = cmd->param;
2141 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2143 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2144 hci_disable_advertising_sync(hdev);
2146 if (ext_adv_capable(hdev))
2147 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2149 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2152 err = hci_write_le_host_supported_sync(hdev, val, 0);
2154 /* Make sure the controller has a good default for
2155 * advertising data. Restrict the update to when LE
2156 * has actually been enabled. During power on, the
2157 * update in powered_update_hci will take care of it.
2159 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2160 if (ext_adv_capable(hdev)) {
2163 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2165 hci_update_scan_rsp_data_sync(hdev, 0x00);
2167 hci_update_adv_data_sync(hdev, 0x00);
2168 hci_update_scan_rsp_data_sync(hdev, 0x00);
2171 hci_update_passive_scan(hdev);
2177 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2179 struct mgmt_pending_cmd *cmd = data;
2180 u8 status = mgmt_status(err);
2181 struct sock *sk = cmd->sk;
2184 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2185 cmd_status_rsp, &status);
2189 mgmt_pending_remove(cmd);
2190 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2193 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2195 struct mgmt_pending_cmd *cmd = data;
2196 struct mgmt_cp_set_mesh *cp = cmd->param;
2197 size_t len = cmd->param_len;
2199 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2202 hci_dev_set_flag(hdev, HCI_MESH);
2204 hci_dev_clear_flag(hdev, HCI_MESH);
2208 /* If filters don't fit, forward all adv pkts */
2209 if (len <= sizeof(hdev->mesh_ad_types))
2210 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2212 hci_update_passive_scan_sync(hdev);
2216 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2218 struct mgmt_cp_set_mesh *cp = data;
2219 struct mgmt_pending_cmd *cmd;
2222 bt_dev_dbg(hdev, "sock %p", sk);
2224 if (!lmp_le_capable(hdev) ||
2225 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2226 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2227 MGMT_STATUS_NOT_SUPPORTED);
2229 if (cp->enable != 0x00 && cp->enable != 0x01)
2230 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2231 MGMT_STATUS_INVALID_PARAMS);
2235 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2239 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2243 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2244 MGMT_STATUS_FAILED);
2247 mgmt_pending_remove(cmd);
2250 hci_dev_unlock(hdev);
2254 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2256 struct mgmt_mesh_tx *mesh_tx = data;
2257 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2258 unsigned long mesh_send_interval;
2259 u8 mgmt_err = mgmt_status(err);
2261 /* Report any errors here, but don't report completion */
2264 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2265 /* Send Complete Error Code for handle */
2266 mesh_send_complete(hdev, mesh_tx, false);
2270 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2271 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2272 mesh_send_interval);
2275 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2277 struct mgmt_mesh_tx *mesh_tx = data;
2278 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2279 struct adv_info *adv, *next_instance;
2280 u8 instance = hdev->le_num_of_adv_sets + 1;
2281 u16 timeout, duration;
2284 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2285 return MGMT_STATUS_BUSY;
2288 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2289 adv = hci_add_adv_instance(hdev, instance, 0,
2290 send->adv_data_len, send->adv_data,
2293 HCI_ADV_TX_POWER_NO_PREFERENCE,
2294 hdev->le_adv_min_interval,
2295 hdev->le_adv_max_interval,
2299 mesh_tx->instance = instance;
2303 if (hdev->cur_adv_instance == instance) {
2304 /* If the currently advertised instance is being changed then
2305 * cancel the current advertising and schedule the next
2306 * instance. If there is only one instance then the overridden
2307 * advertising data will be visible right away.
2309 cancel_adv_timeout(hdev);
2311 next_instance = hci_get_next_instance(hdev, instance);
2313 instance = next_instance->instance;
2316 } else if (hdev->adv_instance_timeout) {
2317 /* Immediately advertise the new instance if no other, or
2318 * let it go naturally from queue if ADV is already happening
2324 return hci_schedule_adv_instance_sync(hdev, instance, true);
2329 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2331 struct mgmt_rp_mesh_read_features *rp = data;
2333 if (rp->used_handles >= rp->max_handles)
2336 rp->handles[rp->used_handles++] = mesh_tx->handle;
2339 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2340 void *data, u16 len)
2342 struct mgmt_rp_mesh_read_features rp;
2344 if (!lmp_le_capable(hdev) ||
2345 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2346 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2347 MGMT_STATUS_NOT_SUPPORTED);
2349 memset(&rp, 0, sizeof(rp));
2350 rp.index = cpu_to_le16(hdev->id);
2351 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2352 rp.max_handles = MESH_HANDLES_MAX;
2357 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2359 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2360 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2362 hci_dev_unlock(hdev);
2366 static int send_cancel(struct hci_dev *hdev, void *data)
2368 struct mgmt_pending_cmd *cmd = data;
2369 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2370 struct mgmt_mesh_tx *mesh_tx;
2372 if (!cancel->handle) {
2374 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2377 mesh_send_complete(hdev, mesh_tx, false);
2380 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2382 if (mesh_tx && mesh_tx->sk == cmd->sk)
2383 mesh_send_complete(hdev, mesh_tx, false);
2386 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2388 mgmt_pending_free(cmd);
2393 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2394 void *data, u16 len)
2396 struct mgmt_pending_cmd *cmd;
2399 if (!lmp_le_capable(hdev) ||
2400 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2401 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2402 MGMT_STATUS_NOT_SUPPORTED);
2404 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2405 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2406 MGMT_STATUS_REJECTED);
2409 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2413 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2416 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2417 MGMT_STATUS_FAILED);
2420 mgmt_pending_free(cmd);
2423 hci_dev_unlock(hdev);
2427 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2429 struct mgmt_mesh_tx *mesh_tx;
2430 struct mgmt_cp_mesh_send *send = data;
2431 struct mgmt_rp_mesh_read_features rp;
2435 if (!lmp_le_capable(hdev) ||
2436 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2437 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2438 MGMT_STATUS_NOT_SUPPORTED);
2439 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2440 len <= MGMT_MESH_SEND_SIZE ||
2441 len > (MGMT_MESH_SEND_SIZE + 31))
2442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2443 MGMT_STATUS_REJECTED);
2447 memset(&rp, 0, sizeof(rp));
2448 rp.max_handles = MESH_HANDLES_MAX;
2450 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2452 if (rp.max_handles <= rp.used_handles) {
2453 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2458 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2459 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2464 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2465 mesh_send_start_complete);
2468 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2469 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2470 MGMT_STATUS_FAILED);
2474 mgmt_mesh_remove(mesh_tx);
2477 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2479 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2480 &mesh_tx->handle, 1);
2484 hci_dev_unlock(hdev);
2488 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2490 struct mgmt_mode *cp = data;
2491 struct mgmt_pending_cmd *cmd;
2495 bt_dev_dbg(hdev, "sock %p", sk);
2497 if (!lmp_le_capable(hdev))
2498 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2499 MGMT_STATUS_NOT_SUPPORTED);
2501 if (cp->val != 0x00 && cp->val != 0x01)
2502 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2503 MGMT_STATUS_INVALID_PARAMS);
2505 /* Bluetooth single mode LE only controllers or dual-mode
2506 * controllers configured as LE only devices, do not allow
2507 * switching LE off. These have either LE enabled explicitly
2508 * or BR/EDR has been previously switched off.
2510 * When trying to enable an already enabled LE, then gracefully
2511 * send a positive response. Trying to disable it however will
2512 * result into rejection.
2514 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2515 if (cp->val == 0x01)
2516 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2518 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2519 MGMT_STATUS_REJECTED);
2525 enabled = lmp_host_le_capable(hdev);
2527 if (!hdev_is_powered(hdev) || val == enabled) {
2528 bool changed = false;
2530 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2531 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2535 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2536 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2540 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2545 err = new_settings(hdev, sk);
2550 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2551 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2552 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2557 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2561 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2565 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2566 MGMT_STATUS_FAILED);
2569 mgmt_pending_remove(cmd);
2573 hci_dev_unlock(hdev);
2577 /* This is a helper function to test for pending mgmt commands that can
2578 * cause CoD or EIR HCI commands. We can only allow one such pending
2579 * mgmt command at a time since otherwise we cannot easily track what
2580 * the current values are, will be, and based on that calculate if a new
2581 * HCI command needs to be sent and if yes with what value.
2583 static bool pending_eir_or_class(struct hci_dev *hdev)
2585 struct mgmt_pending_cmd *cmd;
2587 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2588 switch (cmd->opcode) {
2589 case MGMT_OP_ADD_UUID:
2590 case MGMT_OP_REMOVE_UUID:
2591 case MGMT_OP_SET_DEV_CLASS:
2592 case MGMT_OP_SET_POWERED:
2600 static const u8 bluetooth_base_uuid[] = {
2601 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2602 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2605 static u8 get_uuid_size(const u8 *uuid)
2609 if (memcmp(uuid, bluetooth_base_uuid, 12))
2612 val = get_unaligned_le32(&uuid[12]);
2619 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2621 struct mgmt_pending_cmd *cmd = data;
2623 bt_dev_dbg(hdev, "err %d", err);
2625 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2626 mgmt_status(err), hdev->dev_class, 3);
2628 mgmt_pending_free(cmd);
/* hci_cmd_sync work for Add UUID: refresh class of device, then EIR. */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2642 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2644 struct mgmt_cp_add_uuid *cp = data;
2645 struct mgmt_pending_cmd *cmd;
2646 struct bt_uuid *uuid;
2649 bt_dev_dbg(hdev, "sock %p", sk);
2653 if (pending_eir_or_class(hdev)) {
2654 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2659 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2665 memcpy(uuid->uuid, cp->uuid, 16);
2666 uuid->svc_hint = cp->svc_hint;
2667 uuid->size = get_uuid_size(cp->uuid);
2669 list_add_tail(&uuid->list, &hdev->uuids);
2671 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2677 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2679 mgmt_pending_free(cmd);
2684 hci_dev_unlock(hdev);
2688 static bool enable_service_cache(struct hci_dev *hdev)
2690 if (!hdev_is_powered(hdev))
2693 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2694 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_cmd_sync work for Remove UUID: refresh class of device, then EIR. */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err;

	err = hci_update_class_sync(hdev);
	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
/* MGMT_OP_REMOVE_UUID handler. The all-zero wildcard UUID clears every
 * registered UUID (optionally just arming the service cache instead of
 * touching the controller immediately); a specific UUID is looked up and
 * unlinked, failing with INVALID_PARAMS if not found. On success queues
 * remove_uuid_sync() to refresh class/EIR.
 */
2713 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2716 struct mgmt_cp_remove_uuid *cp = data;
2717 struct mgmt_pending_cmd *cmd;
2718 struct bt_uuid *match, *tmp;
/* 16 zero bytes: the "remove all UUIDs" wildcard. */
2719 static const u8 bt_uuid_any[] = {
2720 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2724 bt_dev_dbg(hdev, "sock %p", sk);
2728 if (pending_eir_or_class(hdev)) {
2729 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2734 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2735 hci_uuids_clear(hdev);
/* If caching got armed, reply immediately; HW update happens later. */
2737 if (enable_service_cache(hdev)) {
2738 err = mgmt_cmd_complete(sk, hdev->id,
2739 MGMT_OP_REMOVE_UUID,
2740 0, hdev->dev_class, 3);
/* Specific UUID: find and unlink every matching entry. */
2749 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2750 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2753 list_del(&match->list);
/* No entry matched (elided 'found' bookkeeping) -> invalid parameters. */
2759 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2760 MGMT_STATUS_INVALID_PARAMS);
2765 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2771 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2772 mgmt_class_complete);
2774 mgmt_pending_free(cmd);
2777 hci_dev_unlock(hdev);
/* hci_sync work for SET_DEV_CLASS: if the service cache was armed,
 * disarm it (cancel pending work) and flush EIR first, then push the
 * new class of device to the controller.
 */
2781 static int set_class_sync(struct hci_dev *hdev, void *data)
2785 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2786 cancel_delayed_work_sync(&hdev->service_cache);
2787 err = hci_update_eir_sync(hdev);
2793 return hci_update_class_sync(hdev);
/* MGMT_OP_SET_DEV_CLASS handler: validate and store the major/minor
 * class of device. Requires BR/EDR support; the low 2 bits of minor and
 * high 3 bits of major are format bits and must be zero. When powered
 * off only the cached values change and the reply is immediate.
 */
2796 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2799 struct mgmt_cp_set_dev_class *cp = data;
2800 struct mgmt_pending_cmd *cmd;
2803 bt_dev_dbg(hdev, "sock %p", sk);
2805 if (!lmp_bredr_capable(hdev))
2806 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2807 MGMT_STATUS_NOT_SUPPORTED);
2811 if (pending_eir_or_class(hdev)) {
2812 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved format bits must be clear per the CoD encoding. */
2817 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2818 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2819 MGMT_STATUS_INVALID_PARAMS);
2823 hdev->major_class = cp->major;
2824 hdev->minor_class = cp->minor;
2826 if (!hdev_is_powered(hdev)) {
2827 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2828 hdev->dev_class, 3);
2832 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2838 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2839 mgmt_class_complete);
2841 mgmt_pending_free(cmd);
2844 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: bulk-load BR/EDR link keys from
 * userspace. Validates key_count against the wire-format maximum and
 * the actual payload length, updates the HCI_KEEP_DEBUG_KEYS flag per
 * cp->debug_keys, then replaces the stored key list, skipping blocked
 * keys and debug-combination keys.
 */
2848 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2851 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so expected_len below cannot overflow u16. */
2852 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2853 sizeof(struct mgmt_link_key_info));
2854 u16 key_count, expected_len;
2858 bt_dev_dbg(hdev, "sock %p", sk);
2860 if (!lmp_bredr_capable(hdev))
2861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2862 MGMT_STATUS_NOT_SUPPORTED);
2864 key_count = __le16_to_cpu(cp->key_count);
2865 if (key_count > max_key_count) {
2866 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2868 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2869 MGMT_STATUS_INVALID_PARAMS);
/* Payload must exactly match header + key_count entries. */
2872 expected_len = struct_size(cp, keys, key_count);
2873 if (expected_len != len) {
2874 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2876 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2877 MGMT_STATUS_INVALID_PARAMS);
2880 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2881 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2882 MGMT_STATUS_INVALID_PARAMS);
2884 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before touching device state. */
2887 for (i = 0; i < key_count; i++) {
2888 struct mgmt_link_key_info *key = &cp->keys[i];
2890 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2891 return mgmt_cmd_status(sk, hdev->id,
2892 MGMT_OP_LOAD_LINK_KEYS,
2893 MGMT_STATUS_INVALID_PARAMS);
2898 hci_link_keys_clear(hdev);
2901 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2903 changed = hci_dev_test_and_clear_flag(hdev,
2904 HCI_KEEP_DEBUG_KEYS);
2907 new_settings(hdev, NULL);
/* Second pass: store the keys. */
2909 for (i = 0; i < key_count; i++) {
2910 struct mgmt_link_key_info *key = &cp->keys[i];
2912 if (hci_is_blocked_key(hdev,
2913 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2915 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2920 /* Always ignore debug keys and require a new pairing if
2921 * the user wants to use them.
2923 if (key->type == HCI_LK_DEBUG_COMBINATION)
2926 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2927 key->type, key->pin_len, NULL);
2930 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2932 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for the given address to all mgmt
 * sockets except @skip_sk (typically the requester, who gets a direct
 * command reply instead).
 */
2937 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2938 u8 addr_type, struct sock *skip_sk)
2940 struct mgmt_ev_device_unpaired ev;
2942 bacpy(&ev.addr.bdaddr, bdaddr);
2943 ev.addr.type = addr_type;
2945 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Completion for the queued unpair work: emit the DEVICE_UNPAIRED event
 * (skipping the requesting socket), complete the mgmt command via its
 * cmd_complete hook and free the pending entry.
 */
2949 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2951 struct mgmt_pending_cmd *cmd = data;
2952 struct mgmt_cp_unpair_device *cp = cmd->param;
2955 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2957 cmd->cmd_complete(cmd, err);
2958 mgmt_pending_free(cmd);
/* hci_sync work for UNPAIR_DEVICE: look up the live connection for the
 * address (ACL for BR/EDR, otherwise LE) and abort it with the
 * remote-user-terminated reason.
 */
2961 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2963 struct mgmt_pending_cmd *cmd = data;
2964 struct mgmt_cp_unpair_device *cp = cmd->param;
2965 struct hci_conn *conn;
2967 if (cp->addr.type == BDADDR_BREDR)
2968 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2971 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2972 le_addr_type(cp->addr.type));
2977 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* MGMT_OP_UNPAIR_DEVICE handler: remove stored pairing material for a
 * device (link key for BR/EDR; SMP pairing, LTK/IRK and connection
 * parameters for LE) and, if cp->disconnect is set and a connection
 * exists, queue termination of the link via unpair_device_sync().
 */
2980 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2983 struct mgmt_cp_unpair_device *cp = data;
2984 struct mgmt_rp_unpair_device rp;
2985 struct hci_conn_params *params;
2986 struct mgmt_pending_cmd *cmd;
2987 struct hci_conn *conn;
2991 memset(&rp, 0, sizeof(rp));
2992 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2993 rp.addr.type = cp->addr.type;
2995 if (!bdaddr_type_is_valid(cp->addr.type))
2996 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2997 MGMT_STATUS_INVALID_PARAMS,
3000 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3001 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3002 MGMT_STATUS_INVALID_PARAMS,
3007 if (!hdev_is_powered(hdev)) {
3008 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3009 MGMT_STATUS_NOT_POWERED, &rp,
3014 if (cp->addr.type == BDADDR_BREDR) {
3015 /* If disconnection is requested, then look up the
3016 * connection. If the remote device is connected, it
3017 * will be later used to terminate the link.
3019 * Setting it to NULL explicitly will cause no
3020 * termination of the link.
3023 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3028 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* No key stored for this address -> nothing was paired. */
3030 err = mgmt_cmd_complete(sk, hdev->id,
3031 MGMT_OP_UNPAIR_DEVICE,
3032 MGMT_STATUS_NOT_PAIRED, &rp,
3040 /* LE address type */
3041 addr_type = le_addr_type(cp->addr.type);
3043 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3044 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3046 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3047 MGMT_STATUS_NOT_PAIRED, &rp,
3052 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3054 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3059 /* Defer clearing up the connection parameters until closing to
3060 * give a chance of keeping them if a repairing happens.
3062 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3064 /* Disable auto-connection parameters if present */
3065 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3067 if (params->explicit_connect)
3068 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3070 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3073 /* If disconnection is not requested, then clear the connection
3074 * variable so that the link is not terminated.
3076 if (!cp->disconnect)
3080 /* If the connection variable is set, then termination of the
3081 * link is requested.
3084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3086 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3090 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3097 cmd->cmd_complete = addr_cmd_complete;
3099 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3100 unpair_device_complete);
3102 mgmt_pending_free(cmd);
3105 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: validate the address, require the device
 * to be up, allow only one DISCONNECT in flight, look up the connection
 * (ACL or LE) and issue hci_disconnect() with remote-user-terminated.
 * Uses mgmt_pending_add so the reply arrives when the disconnect
 * completes.
 */
3109 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3112 struct mgmt_cp_disconnect *cp = data;
3113 struct mgmt_rp_disconnect rp;
3114 struct mgmt_pending_cmd *cmd;
3115 struct hci_conn *conn;
3118 bt_dev_dbg(hdev, "sock %p", sk);
3120 memset(&rp, 0, sizeof(rp));
3121 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3122 rp.addr.type = cp->addr.type;
3124 if (!bdaddr_type_is_valid(cp->addr.type))
3125 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3126 MGMT_STATUS_INVALID_PARAMS,
3131 if (!test_bit(HCI_UP, &hdev->flags)) {
3132 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3133 MGMT_STATUS_NOT_POWERED, &rp,
3138 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3139 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3140 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3144 if (cp->addr.type == BDADDR_BREDR)
3145 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3148 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3149 le_addr_type(cp->addr.type));
/* BT_OPEN/BT_CLOSED means no established link to tear down. */
3151 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3152 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3153 MGMT_STATUS_NOT_CONNECTED, &rp,
3158 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3164 cmd->cmd_complete = generic_cmd_complete;
3166 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3168 mgmt_pending_remove(cmd);
3171 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type pair to the mgmt BDADDR_*
 * address-type constant. Non-LE links report BDADDR_BREDR; LE links
 * report public or (as fallback) random.
 * NOTE(review): the case labels (e.g. LE_LINK) are on elided lines.
 */
3175 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3177 switch (link_type) {
3179 switch (addr_type) {
3180 case ADDR_LE_DEV_PUBLIC:
3181 return BDADDR_LE_PUBLIC;
3184 /* Fallback to LE Random address type */
3185 return BDADDR_LE_RANDOM;
3189 /* Fallback to BR/EDR type */
3190 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report all mgmt-visible connections.
 * Walks the connection hash twice — once to count eligible entries for
 * the allocation, once to fill addresses — filtering out SCO/eSCO links
 * in the second pass, then replies with a length recomputed from the
 * actual fill count.
 */
3194 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3197 struct mgmt_rp_get_connections *rp;
3202 bt_dev_dbg(hdev, "sock %p", sk);
3206 if (!hdev_is_powered(hdev)) {
3207 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3208 MGMT_STATUS_NOT_POWERED);
/* Pass 1: count connections flagged as mgmt-connected. */
3213 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3214 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3218 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Pass 2: fill the reply, skipping SCO/eSCO audio links. */
3225 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3226 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3228 bacpy(&rp->addr[i].bdaddr, &c->dst);
3229 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3230 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3235 rp->conn_count = cpu_to_le16(i);
3237 /* Recalculate length in case of filtered SCO connections, etc */
3238 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3239 struct_size(rp, addr, i));
3244 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY mgmt command and send the
 * corresponding HCI negative reply for the given address; the pending
 * entry is removed again if the HCI send fails.
 */
3248 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3249 struct mgmt_cp_pin_code_neg_reply *cp)
3251 struct mgmt_pending_cmd *cmd;
3254 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3259 cmd->cmd_complete = addr_cmd_complete;
3261 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3262 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3264 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward a user-supplied PIN to the
 * controller. Requires power and an existing ACL connection. If the
 * pending security level demands a 16-digit PIN and the supplied one is
 * shorter, a negative reply is sent instead and the command fails with
 * INVALID_PARAMS.
 */
3269 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3272 struct hci_conn *conn;
3273 struct mgmt_cp_pin_code_reply *cp = data;
3274 struct hci_cp_pin_code_reply reply;
3275 struct mgmt_pending_cmd *cmd;
3278 bt_dev_dbg(hdev, "sock %p", sk);
3282 if (!hdev_is_powered(hdev)) {
3283 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3284 MGMT_STATUS_NOT_POWERED);
3288 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3290 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3291 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN. */
3295 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3296 struct mgmt_cp_pin_code_neg_reply ncp;
3298 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3300 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3302 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3304 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3305 MGMT_STATUS_INVALID_PARAMS);
3310 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3316 cmd->cmd_complete = addr_cmd_complete;
3318 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3319 reply.pin_len = cp->pin_len;
3320 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3322 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3324 mgmt_pending_remove(cmd);
3327 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: store the IO capability used for
 * subsequent pairings. Values above SMP_IO_KEYBOARD_DISPLAY are
 * rejected as invalid.
 */
3331 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3334 struct mgmt_cp_set_io_capability *cp = data;
3336 bt_dev_dbg(hdev, "sock %p", sk);
3338 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3339 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3340 MGMT_STATUS_INVALID_PARAMS);
3344 hdev->io_capability = cp->io_capability;
3346 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3348 hci_dev_unlock(hdev);
3350 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending PAIR_DEVICE command whose user_data points at the
 * given connection; returns NULL if none matches (return statements are
 * on elided lines).
 */
3354 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3356 struct hci_dev *hdev = conn->hdev;
3357 struct mgmt_pending_cmd *cmd;
3359 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3360 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3363 if (cmd->user_data != conn)
/* Finish a PAIR_DEVICE command: reply to the requester with the peer
 * address, detach all pairing callbacks from the connection, drop the
 * reference taken at pair time and clear the deferred-param-removal bit
 * since the device is now paired.
 */
3372 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3374 struct mgmt_rp_pair_device rp;
3375 struct hci_conn *conn = cmd->user_data;
3378 bacpy(&rp.addr.bdaddr, &conn->dst);
3379 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3381 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3382 status, &rp, sizeof(rp));
3384 /* So we don't get further callbacks for this connection */
3385 conn->connect_cfm_cb = NULL;
3386 conn->security_cfm_cb = NULL;
3387 conn->disconn_cfm_cb = NULL;
3389 hci_conn_drop(conn);
3391 /* The device is paired so there is no need to remove
3392 * its connection parameters anymore.
3394 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called from SMP when pairing finishes: complete any matching pending
 * PAIR_DEVICE command with SUCCESS or FAILED and remove it.
 */
3401 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3403 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3404 struct mgmt_pending_cmd *cmd;
3406 cmd = find_pairing(conn);
3408 cmd->cmd_complete(cmd, status);
3409 mgmt_pending_remove(cmd);
/* BR/EDR connection/security/disconnect callback used during pairing:
 * translate the HCI status and complete the pending PAIR_DEVICE command.
 */
3413 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3415 struct mgmt_pending_cmd *cmd;
3417 BT_DBG("status %u", status);
3419 cmd = find_pairing(conn);
3421 BT_DBG("Unable to find a pending command");
3425 cmd->cmd_complete(cmd, mgmt_status(status));
3426 mgmt_pending_remove(cmd);
/* LE variant of pairing_complete_cb; an elided early-return between
 * lines 3433 and 3438 presumably filters success statuses (for LE a
 * mere connection does not imply pairing completion) — confirm against
 * full source.
 */
3429 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3431 struct mgmt_pending_cmd *cmd;
3433 BT_DBG("status %u", status);
3438 cmd = find_pairing(conn);
3440 BT_DBG("Unable to find a pending command");
3444 cmd->cmd_complete(cmd, mgmt_status(status));
3445 mgmt_pending_remove(cmd);
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 * Validates address type and IO capability, rejects if unpowered or
 * already paired, creates the ACL (BR/EDR) or scan-based LE connection,
 * installs pairing callbacks, and either completes immediately when the
 * link is already secure or leaves the pending command to be finished
 * by the callbacks.
 */
3448 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3451 struct mgmt_cp_pair_device *cp = data;
3452 struct mgmt_rp_pair_device rp;
3453 struct mgmt_pending_cmd *cmd;
3454 u8 sec_level, auth_type;
3455 struct hci_conn *conn;
3458 bt_dev_dbg(hdev, "sock %p", sk);
3460 memset(&rp, 0, sizeof(rp));
3461 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3462 rp.addr.type = cp->addr.type;
3464 if (!bdaddr_type_is_valid(cp->addr.type))
3465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3466 MGMT_STATUS_INVALID_PARAMS,
3469 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3470 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3471 MGMT_STATUS_INVALID_PARAMS,
3476 if (!hdev_is_powered(hdev)) {
3477 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3478 MGMT_STATUS_NOT_POWERED, &rp,
3483 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3484 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3485 MGMT_STATUS_ALREADY_PAIRED, &rp,
3490 sec_level = BT_SECURITY_MEDIUM;
3491 auth_type = HCI_AT_DEDICATED_BONDING;
3493 if (cp->addr.type == BDADDR_BREDR) {
3494 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3495 auth_type, CONN_REASON_PAIR_DEVICE);
3497 u8 addr_type = le_addr_type(cp->addr.type);
3498 struct hci_conn_params *p;
3500 /* When pairing a new device, it is expected to remember
3501 * this device for future connections. Adding the connection
3502 * parameter information ahead of time allows tracking
3503 * of the peripheral preferred values and will speed up any
3504 * further connection establishment.
3506 * If connection parameters already exist, then they
3507 * will be kept and this function does nothing.
3509 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3511 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3512 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3514 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3515 sec_level, HCI_LE_CONN_TIMEOUT,
3516 CONN_REASON_PAIR_DEVICE);
/* Connection attempt failed: map the errno to a mgmt status. */
3522 if (PTR_ERR(conn) == -EBUSY)
3523 status = MGMT_STATUS_BUSY;
3524 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3525 status = MGMT_STATUS_NOT_SUPPORTED;
3526 else if (PTR_ERR(conn) == -ECONNREFUSED)
3527 status = MGMT_STATUS_REJECTED;
3529 status = MGMT_STATUS_CONNECT_FAILED;
3531 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3532 status, &rp, sizeof(rp));
/* Another pairing already owns this connection's callbacks. */
3536 if (conn->connect_cfm_cb) {
3537 hci_conn_drop(conn);
3538 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3539 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3543 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3546 hci_conn_drop(conn);
3550 cmd->cmd_complete = pairing_complete;
3552 /* For LE, just connecting isn't a proof that the pairing finished */
3553 if (cp->addr.type == BDADDR_BREDR) {
3554 conn->connect_cfm_cb = pairing_complete_cb;
3555 conn->security_cfm_cb = pairing_complete_cb;
3556 conn->disconn_cfm_cb = pairing_complete_cb;
3558 conn->connect_cfm_cb = le_pairing_complete_cb;
3559 conn->security_cfm_cb = le_pairing_complete_cb;
3560 conn->disconn_cfm_cb = le_pairing_complete_cb;
3563 conn->io_capability = cp->io_cap;
/* Hold a connection reference for the lifetime of the pending cmd. */
3564 cmd->user_data = hci_conn_get(conn);
3566 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3567 hci_conn_security(conn, sec_level, auth_type, true)) {
3568 cmd->cmd_complete(cmd, 0);
3569 mgmt_pending_remove(cmd);
3575 hci_dev_unlock(hdev);
/* hci_sync work: re-resolve a connection by handle (packed into the
 * data pointer via ERR_PTR by the caller) and abort it with the
 * remote-user-terminated reason. Handle-based lookup avoids holding a
 * possibly-stale hci_conn pointer across the async boundary.
 */
3579 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3581 struct hci_conn *conn;
3582 u16 handle = PTR_ERR(data);
3584 conn = hci_conn_hash_lookup_handle(hdev, handle);
3588 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress PAIR_DEVICE
 * for the given address. Completes the pending pairing with CANCELLED,
 * removes any stored key material for the address, and — if the
 * connection was created by the pair action — queues its termination.
 */
3591 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3594 struct mgmt_addr_info *addr = data;
3595 struct mgmt_pending_cmd *cmd;
3596 struct hci_conn *conn;
3599 bt_dev_dbg(hdev, "sock %p", sk);
3603 if (!hdev_is_powered(hdev)) {
3604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3605 MGMT_STATUS_NOT_POWERED);
3609 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3611 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3612 MGMT_STATUS_INVALID_PARAMS);
3616 conn = cmd->user_data;
/* The address must match the pairing actually in progress. */
3618 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3619 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3620 MGMT_STATUS_INVALID_PARAMS);
3624 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3625 mgmt_pending_remove(cmd);
3627 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3628 addr, sizeof(*addr));
3630 /* Since user doesn't want to proceed with the connection, abort any
3631 * ongoing pairing and then terminate the link if it was created
3632 * because of the pair device action.
3634 if (addr->type == BDADDR_BREDR)
3635 hci_remove_link_key(hdev, &addr->bdaddr);
3637 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3638 le_addr_type(addr->type));
3640 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
/* Pass the handle through the data pointer; see abort_conn_sync(). */
3641 hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3645 hci_dev_unlock(hdev);
/* Shared backend for the user confirmation / passkey / PIN (neg) reply
 * mgmt commands. For LE addresses the response is routed through SMP;
 * for BR/EDR a pending command is queued and the matching HCI reply
 * (with passkey payload when applicable) is sent to the controller.
 */
3649 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3650 struct mgmt_addr_info *addr, u16 mgmt_op,
3651 u16 hci_op, __le32 passkey)
3653 struct mgmt_pending_cmd *cmd;
3654 struct hci_conn *conn;
3659 if (!hdev_is_powered(hdev)) {
3660 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3661 MGMT_STATUS_NOT_POWERED, addr,
3666 if (addr->type == BDADDR_BREDR)
3667 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3669 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3670 le_addr_type(addr->type));
3673 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3674 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses are handled entirely by SMP. */
3679 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3680 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3682 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3683 MGMT_STATUS_SUCCESS, addr,
3686 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3687 MGMT_STATUS_FAILED, addr,
3693 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3699 cmd->cmd_complete = addr_cmd_complete;
3701 /* Continue with pairing via HCI */
3702 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3703 struct hci_cp_user_passkey_reply cp;
3705 bacpy(&cp.bdaddr, &addr->bdaddr);
3706 cp.passkey = passkey;
3707 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
/* All other replies carry just the peer address. */
3709 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3713 mgmt_pending_remove(cmd);
3716 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper over user_pairing_resp()
 * with no passkey payload.
 */
3720 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3721 void *data, u16 len)
3723 struct mgmt_cp_pin_code_neg_reply *cp = data;
3725 bt_dev_dbg(hdev, "sock %p", sk);
3727 return user_pairing_resp(sk, hdev, &cp->addr,
3728 MGMT_OP_PIN_CODE_NEG_REPLY,
3729 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: length-checked wrapper over
 * user_pairing_resp(); the payload is exactly the address info.
 */
3732 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3735 struct mgmt_cp_user_confirm_reply *cp = data;
3737 bt_dev_dbg(hdev, "sock %p", sk);
3739 if (len != sizeof(*cp))
3740 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3741 MGMT_STATUS_INVALID_PARAMS);
3743 return user_pairing_resp(sk, hdev, &cp->addr,
3744 MGMT_OP_USER_CONFIRM_REPLY,
3745 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: wrapper over user_pairing_resp(). */
3748 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3749 void *data, u16 len)
3751 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3753 bt_dev_dbg(hdev, "sock %p", sk);
3755 return user_pairing_resp(sk, hdev, &cp->addr,
3756 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3757 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: wrapper over user_pairing_resp(),
 * forwarding the user-entered passkey.
 */
3760 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3763 struct mgmt_cp_user_passkey_reply *cp = data;
3765 bt_dev_dbg(hdev, "sock %p", sk);
3767 return user_pairing_resp(sk, hdev, &cp->addr,
3768 MGMT_OP_USER_PASSKEY_REPLY,
3769 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: wrapper over user_pairing_resp(). */
3772 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3773 void *data, u16 len)
3775 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3777 bt_dev_dbg(hdev, "sock %p", sk);
3779 return user_pairing_resp(sk, hdev, &cp->addr,
3780 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3781 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* If the current advertising instance carries data affected by @flags
 * (e.g. local name or appearance just changed), cancel its timeout and
 * reschedule starting from the next instance so the stale data expires.
 */
3784 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3786 struct adv_info *adv_instance;
3788 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3792 /* stop if current instance doesn't need to be changed */
3793 if (!(adv_instance->flags & flags))
3796 cancel_adv_timeout(hdev);
3798 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3802 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
/* hci_sync hook: expire advertising instances that embed the local name. */
3807 static int name_changed_sync(struct hci_dev *hdev, void *data)
3809 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
/* Completion for set_name_sync(): bail out if the pending command was
 * already superseded, report status or success (with the name payload)
 * to the requester, and refresh advertising if LE advertising is live.
 */
3812 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3814 struct mgmt_pending_cmd *cmd = data;
3815 struct mgmt_cp_set_local_name *cp = cmd->param;
3816 u8 status = mgmt_status(err);
3818 bt_dev_dbg(hdev, "err %d", err);
/* Guard against a stale callback racing with a newer SET_LOCAL_NAME. */
3820 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3824 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3827 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3830 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3831 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3834 mgmt_pending_remove(cmd);
/* hci_sync work for SET_LOCAL_NAME: push the new name and EIR to a
 * BR/EDR controller, and refresh scan response data when legacy LE
 * advertising is enabled.
 */
3837 static int set_name_sync(struct hci_dev *hdev, void *data)
3839 if (lmp_bredr_capable(hdev)) {
3840 hci_update_name_sync(hdev);
3841 hci_update_eir_sync(hdev);
3844 /* The name is stored in the scan response data and so
3845 * no need to update the advertising data here.
3847 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3848 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_LOCAL_NAME handler: update the device's long and short
 * names. No-op reply when both are unchanged; when powered off the
 * values are cached and a LOCAL_NAME_CHANGED event is broadcast; when
 * powered, set_name_sync() is queued to push the change to hardware.
 */
3853 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3856 struct mgmt_cp_set_local_name *cp = data;
3857 struct mgmt_pending_cmd *cmd;
3860 bt_dev_dbg(hdev, "sock %p", sk);
3864 /* If the old values are the same as the new ones just return a
3865 * direct command complete event.
3867 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3868 !memcmp(hdev->short_name, cp->short_name,
3869 sizeof(hdev->short_name))) {
3870 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name never reaches the controller, so store it unconditionally. */
3875 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3877 if (!hdev_is_powered(hdev)) {
3878 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3880 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3885 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3886 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3887 ext_info_changed(hdev, sk);
3892 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3896 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3900 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3901 MGMT_STATUS_FAILED);
3904 mgmt_pending_remove(cmd);
/* Queued successfully: commit the long name to the cached copy. */
3909 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3912 hci_dev_unlock(hdev);
/* hci_sync hook: expire advertising instances that embed the appearance. */
3916 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3918 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
/* MGMT_OP_SET_APPEARANCE handler: store the GAP appearance value
 * (LE-only feature). When it actually changes while LE advertising is
 * active, queue appearance_changed_sync() and notify extended-info
 * listeners.
 */
3921 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3924 struct mgmt_cp_set_appearance *cp = data;
3928 bt_dev_dbg(hdev, "sock %p", sk);
3930 if (!lmp_le_capable(hdev))
3931 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3932 MGMT_STATUS_NOT_SUPPORTED);
3934 appearance = le16_to_cpu(cp->appearance);
3938 if (hdev->appearance != appearance) {
3939 hdev->appearance = appearance;
3941 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3942 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3945 ext_info_changed(hdev, sk);
3948 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3951 hci_dev_unlock(hdev);
/* MGMT_OP_GET_PHY_CONFIGURATION handler: report the supported, selected
 * and configurable PHY bitmasks for this controller.
 */
3956 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3957 void *data, u16 len)
3959 struct mgmt_rp_get_phy_configuration rp;
3961 bt_dev_dbg(hdev, "sock %p", sk);
3965 memset(&rp, 0, sizeof(rp));
3967 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3968 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3969 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3971 hci_dev_unlock(hdev);
3973 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* Broadcast MGMT_EV_PHY_CONFIGURATION_CHANGED with the currently
 * selected PHYs to all mgmt sockets except @skip.
 */
3977 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3979 struct mgmt_ev_phy_configuration_changed ev;
3981 memset(&ev, 0, sizeof(ev));
3983 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3985 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* Completion for set_default_phy_sync(): derive the final status from
 * err, the skb error, or the command-complete status byte; reply to the
 * requester; on success broadcast the PHY change; free the skb and the
 * pending command.
 */
3989 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3991 struct mgmt_pending_cmd *cmd = data;
3992 struct sk_buff *skb = cmd->skb;
3993 u8 status = mgmt_status(err);
/* Ignore if this command was already superseded or cancelled. */
3995 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4000 status = MGMT_STATUS_FAILED;
4001 else if (IS_ERR(skb))
4002 status = mgmt_status(PTR_ERR(skb));
/* First byte of the HCI response carries the controller status. */
4004 status = mgmt_status(skb->data[0]);
4007 bt_dev_dbg(hdev, "status %d", status);
4010 mgmt_cmd_status(cmd->sk, hdev->id,
4011 MGMT_OP_SET_PHY_CONFIGURATION, status);
4013 mgmt_cmd_complete(cmd->sk, hdev->id,
4014 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4017 mgmt_phy_configuration_changed(hdev, cmd->sk);
4020 if (skb && !IS_ERR(skb))
4023 mgmt_pending_remove(cmd);
/* hci_sync work: translate the mgmt-selected LE PHY bitmask into an
 * HCI LE Set Default PHY command and send it synchronously, stashing
 * the response skb on the pending command for the completion handler.
 */
4026 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4028 struct mgmt_pending_cmd *cmd = data;
4029 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4030 struct hci_cp_le_set_default_phy cp_phy;
4031 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4033 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction. */
4035 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4036 cp_phy.all_phys |= 0x01;
4038 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4039 cp_phy.all_phys |= 0x02;
4041 if (selected_phys & MGMT_PHY_LE_1M_TX)
4042 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4044 if (selected_phys & MGMT_PHY_LE_2M_TX)
4045 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4047 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4048 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4050 if (selected_phys & MGMT_PHY_LE_1M_RX)
4051 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4053 if (selected_phys & MGMT_PHY_LE_2M_RX)
4054 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4056 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4057 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4059 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4060 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
/* MGMT_OP_SET_PHY_CONFIGURATION handler. Validates the selected PHY
 * bitmask (must be within supported PHYs and keep all non-configurable
 * ones selected). BR/EDR multi-slot / EDR selections are applied
 * immediately by rewriting hdev->pkt_type; LE PHY changes go through
 * set_default_phy_sync(). Note the EDR pkt_type bits are inverted:
 * a set HCI_2DHx/3DHx bit DISABLES that packet type.
 */
4065 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4066 void *data, u16 len)
4068 struct mgmt_cp_set_phy_configuration *cp = data;
4069 struct mgmt_pending_cmd *cmd;
4070 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4071 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4072 bool changed = false;
4075 bt_dev_dbg(hdev, "sock %p", sk);
4077 configurable_phys = get_configurable_phys(hdev);
4078 supported_phys = get_supported_phys(hdev);
4079 selected_phys = __le32_to_cpu(cp->selected_phys);
4081 if (selected_phys & ~supported_phys)
4082 return mgmt_cmd_status(sk, hdev->id,
4083 MGMT_OP_SET_PHY_CONFIGURATION,
4084 MGMT_STATUS_INVALID_PARAMS);
/* PHYs that cannot be configured must always remain selected. */
4086 unconfigure_phys = supported_phys & ~configurable_phys;
4088 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4089 return mgmt_cmd_status(sk, hdev->id,
4090 MGMT_OP_SET_PHY_CONFIGURATION,
4091 MGMT_STATUS_INVALID_PARAMS);
4093 if (selected_phys == get_selected_phys(hdev))
4094 return mgmt_cmd_complete(sk, hdev->id,
4095 MGMT_OP_SET_PHY_CONFIGURATION,
4100 if (!hdev_is_powered(hdev)) {
4101 err = mgmt_cmd_status(sk, hdev->id,
4102 MGMT_OP_SET_PHY_CONFIGURATION,
4103 MGMT_STATUS_REJECTED);
4107 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4108 err = mgmt_cmd_status(sk, hdev->id,
4109 MGMT_OP_SET_PHY_CONFIGURATION,
/* Basic-rate multi-slot bits: set bit = enabled. */
4114 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4115 pkt_type |= (HCI_DH3 | HCI_DM3);
4117 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4119 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4120 pkt_type |= (HCI_DH5 | HCI_DM5);
4122 pkt_type &= ~(HCI_DH5 | HCI_DM5);
/* EDR bits are inverted in pkt_type: set bit = packet type disabled. */
4124 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4125 pkt_type &= ~HCI_2DH1;
4127 pkt_type |= HCI_2DH1;
4129 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4130 pkt_type &= ~HCI_2DH3;
4132 pkt_type |= HCI_2DH3;
4134 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4135 pkt_type &= ~HCI_2DH5;
4137 pkt_type |= HCI_2DH5;
4139 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4140 pkt_type &= ~HCI_3DH1;
4142 pkt_type |= HCI_3DH1;
4144 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4145 pkt_type &= ~HCI_3DH3;
4147 pkt_type |= HCI_3DH3;
4149 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4150 pkt_type &= ~HCI_3DH5;
4152 pkt_type |= HCI_3DH5;
4154 if (pkt_type != hdev->pkt_type) {
4155 hdev->pkt_type = pkt_type;
/* If only BR/EDR PHYs changed, no HCI LE command is needed. */
4159 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4160 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4162 mgmt_phy_configuration_changed(hdev, sk);
4164 err = mgmt_cmd_complete(sk, hdev->id,
4165 MGMT_OP_SET_PHY_CONFIGURATION,
4171 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4176 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4177 set_default_phy_complete);
4180 err = mgmt_cmd_status(sk, hdev->id,
4181 MGMT_OP_SET_PHY_CONFIGURATION,
4182 MGMT_STATUS_FAILED);
4185 mgmt_pending_remove(cmd);
4189 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS handler: replaces hdev->blocked_keys with the
 * list supplied by userspace.  Validates key_count against the maximum
 * that can fit in a u16-sized payload, then against the exact payload
 * length, before clearing the old list and adding each entry to the
 * RCU-protected blocked_keys list.
 */
4194 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4197 int err = MGMT_STATUS_SUCCESS;
4198 struct mgmt_cp_set_blocked_keys *keys = data;
/* Largest key_count whose payload still fits in a u16 length field. */
4199 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4200 sizeof(struct mgmt_blocked_key_info));
4201 u16 key_count, expected_len;
4204 bt_dev_dbg(hdev, "sock %p", sk);
4206 key_count = __le16_to_cpu(keys->key_count);
4207 if (key_count > max_key_count) {
4208 bt_dev_err(hdev, "too big key_count value %u", key_count);
4209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4210 MGMT_STATUS_INVALID_PARAMS);
/* struct_size() computes header + key_count trailing elements with
 * overflow checking; reject any payload that is not exactly that size.
 */
4213 expected_len = struct_size(keys, keys, key_count);
4214 if (expected_len != len) {
4215 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4218 MGMT_STATUS_INVALID_PARAMS);
/* Drop the previously configured blocked keys before installing the
 * new set.
 */
4223 hci_blocked_keys_clear(hdev);
4225 for (i = 0; i < key_count; ++i) {
4226 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
/* Allocation failure: report NO_RESOURCES (error path partly
 * outside this excerpt).
 */
4229 err = MGMT_STATUS_NO_RESOURCES;
4233 b->type = keys->keys[i].type;
4234 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4235 list_add_rcu(&b->list, &hdev->blocked_keys);
4237 hci_dev_unlock(hdev);
4239 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH handler: toggles the
 * HCI_WIDEBAND_SPEECH_ENABLED device flag.  Only permitted when the
 * controller quirk advertises wideband speech support, and the flag
 * cannot be flipped while the controller is powered on.
 */
4243 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4244 void *data, u16 len)
4246 struct mgmt_mode *cp = data;
4248 bool changed = false;
4250 bt_dev_dbg(hdev, "sock %p", sk);
4252 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4253 return mgmt_cmd_status(sk, hdev->id,
4254 MGMT_OP_SET_WIDEBAND_SPEECH,
4255 MGMT_STATUS_NOT_SUPPORTED);
/* Only boolean on/off is a valid parameter. */
4257 if (cp->val != 0x00 && cp->val != 0x01)
4258 return mgmt_cmd_status(sk, hdev->id,
4259 MGMT_OP_SET_WIDEBAND_SPEECH,
4260 MGMT_STATUS_INVALID_PARAMS);
/* Changing the setting while powered is rejected; it would take effect
 * only on the next power cycle anyway.
 */
4264 if (hdev_is_powered(hdev) &&
4265 !!cp->val != hci_dev_test_flag(hdev,
4266 HCI_WIDEBAND_SPEECH_ENABLED)) {
4267 err = mgmt_cmd_status(sk, hdev->id,
4268 MGMT_OP_SET_WIDEBAND_SPEECH,
4269 MGMT_STATUS_REJECTED);
/* test_and_set/clear return the previous state, so "changed" is true
 * only when the flag actually flipped.
 */
4274 changed = !hci_dev_test_and_set_flag(hdev,
4275 HCI_WIDEBAND_SPEECH_ENABLED);
4277 changed = hci_dev_test_and_clear_flag(hdev,
4278 HCI_WIDEBAND_SPEECH_ENABLED);
4280 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
/* Broadcast New Settings only when something changed. */
4285 err = new_settings(hdev, sk);
4288 hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP handler: builds an EIR-style TLV list of
 * controller capabilities (security flags, max encryption key sizes and,
 * when available, the LE TX power range) and returns it to userspace.
 */
4292 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4293 void *data, u16 data_len)
4296 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4299 u8 tx_power_range[2];
4301 bt_dev_dbg(hdev, "sock %p", sk);
4303 memset(&buf, 0, sizeof(buf));
4307 /* When the Read Simple Pairing Options command is supported, then
4308 * the remote public key validation is supported.
4310 * Alternatively, when Microsoft extensions are available, they can
4311 * indicate support for public key validation as well.
/* hdev->commands[41] bit 3 = Read Simple Pairing Options supported. */
4313 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4314 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4316 flags |= 0x02; /* Remote public key validation (LE) */
4318 /* When the Read Encryption Key Size command is supported, then the
4319 * encryption key size is enforced.
4321 if (hdev->commands[20] & 0x10)
4322 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4324 flags |= 0x08; /* Encryption key size enforcement (LE) */
4326 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4329 /* When the Read Simple Pairing Options command is supported, then
4330 * also max encryption key size information is provided.
4332 if (hdev->commands[41] & 0x08)
4333 cap_len = eir_append_le16(rp->cap, cap_len,
4334 MGMT_CAP_MAX_ENC_KEY_SIZE,
4335 hdev->max_enc_key_size)
4337 cap_len = eir_append_le16(rp->cap, cap_len,
4338 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4339 SMP_MAX_ENC_KEY_SIZE);
4341 /* Append the min/max LE tx power parameters if we were able to fetch
4342 * it from the controller
/* hdev->commands[38] bit 7 = LE Read Transmit Power supported. */
4344 if (hdev->commands[38] & 0x80) {
4345 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4346 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4347 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4351 rp->cap_len = cpu_to_le16(cap_len);
4353 hci_dev_unlock(hdev);
4355 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4356 rp, sizeof(*rp) + cap_len);
/* 128-bit UUIDs identifying each experimental mgmt feature.  The bytes
 * are stored in reverse (Bluetooth little-endian) order relative to the
 * canonical string form shown in each comment.
 */
4359 #ifdef CONFIG_BT_FEATURE_DEBUG
4360 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4361 static const u8 debug_uuid[16] = {
4362 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4363 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4367 /* 330859bc-7506-492d-9370-9a6f0614037f */
4368 static const u8 quality_report_uuid[16] = {
4369 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4370 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4373 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4374 static const u8 offload_codecs_uuid[16] = {
4375 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4376 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4379 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4380 static const u8 le_simultaneous_roles_uuid[16] = {
4381 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4382 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4385 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4386 static const u8 rpa_resolution_uuid[16] = {
4387 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4388 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4391 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4392 static const u8 iso_socket_uuid[16] = {
4393 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4394 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4397 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4398 static const u8 mgmt_mesh_uuid[16] = {
4399 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4400 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
/* MGMT_OP_READ_EXP_FEATURES_INFO handler: reports the experimental
 * features available for this controller (or globally, when hdev is
 * NULL) together with their current enabled state, then subscribes the
 * socket to future exp-feature-changed events.
 */
4403 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4404 void *data, u16 data_len)
4406 struct mgmt_rp_read_exp_features_info *rp;
4412 bt_dev_dbg(hdev, "sock %p", sk);
4414 /* Enough space for 7 features */
4415 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4416 rp = kzalloc(len, GFP_KERNEL);
4420 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Debug feature: flag bit 0 reflects the current bt_dbg state. */
4422 flags = bt_dbg_get() ? BIT(0) : 0;
4424 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4425 rp->features[idx].flags = cpu_to_le32(flags);
/* LE simultaneous central/peripheral roles. */
4430 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4431 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4436 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4437 rp->features[idx].flags = cpu_to_le32(flags);
/* LL privacy / RPA resolution; BIT(1) marks that toggling it also
 * changes the supported settings.
 */
4441 if (hdev && ll_privacy_capable(hdev)) {
4442 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4443 flags = BIT(0) | BIT(1);
4447 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4448 rp->features[idx].flags = cpu_to_le32(flags);
/* Quality report, via AOSP extension or a driver hook. */
4452 if (hdev && (aosp_has_quality_report(hdev) ||
4453 hdev->set_quality_report)) {
4454 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4459 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4460 rp->features[idx].flags = cpu_to_le32(flags);
/* Codec offload, available when the driver exposes a data path. */
4464 if (hdev && hdev->get_data_path_id) {
4465 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4470 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4471 rp->features[idx].flags = cpu_to_le32(flags);
/* ISO sockets, gated on CONFIG_BT_LE. */
4475 if (IS_ENABLED(CONFIG_BT_LE)) {
4476 flags = iso_enabled() ? BIT(0) : 0;
4477 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4478 rp->features[idx].flags = cpu_to_le32(flags);
/* Experimental mesh support, LE-capable controllers only. */
4482 if (hdev && lmp_le_capable(hdev)) {
4483 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4488 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4489 rp->features[idx].flags = cpu_to_le32(flags);
4493 rp->feature_count = cpu_to_le16(idx);
4495 /* After reading the experimental features information, enable
4496 * the events to update client on any future change.
4498 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4500 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4501 MGMT_OP_READ_EXP_FEATURES_INFO,
4502 0, rp, sizeof(*rp) + (20 * idx));
/* Emit an Experimental Feature Changed event for the LL privacy / RPA
 * resolution feature and mirror the new state into hdev->conn_flags
 * (DEVICE_PRIVACY becomes a supported connection flag only while the
 * feature is enabled).  BIT(1) in the event flags signals that supported
 * settings changed as well.
 */
4508 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4511 struct mgmt_ev_exp_feature_changed ev;
4513 memset(&ev, 0, sizeof(ev));
4514 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4515 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4517 // Do we need to be atomic with the conn_flags?
4518 if (enabled && privacy_mode_capable(hdev))
4519 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4521 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
/* Only sockets that enabled HCI_MGMT_EXP_FEATURE_EVENTS receive this;
 * "skip" excludes the socket that triggered the change.
 */
4523 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4525 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Generic helper: broadcast an Experimental Feature Changed event for
 * the given feature UUID with bit 0 reflecting the new enabled state.
 * Delivered only to sockets subscribed via HCI_MGMT_EXP_FEATURE_EVENTS,
 * skipping the originating socket.
 */
4529 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4530 bool enabled, struct sock *skip)
4532 struct mgmt_ev_exp_feature_changed ev;
4534 memset(&ev, 0, sizeof(ev));
4535 memcpy(ev.uuid, uuid, 16);
4536 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4538 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4540 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Table-entry initializer pairing a feature UUID with its set handler
 * (used to build the exp_features[] dispatch table below).
 */
4543 #define EXP_FEAT(_uuid, _set_func) \
4546 .set_func = _set_func, \
4549 /* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID disables the experimental features it
 * covers: kernel debug output (when built in) and LL privacy (only
 * while the controller is powered off).  The reply always carries a
 * zero UUID and zero flags.
 */
4550 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4551 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4553 struct mgmt_rp_set_exp_feature rp;
4555 memset(rp.uuid, 0, 16);
4556 rp.flags = cpu_to_le32(0);
4558 #ifdef CONFIG_BT_FEATURE_DEBUG
4560 bool changed = bt_dbg_get();
/* Notify other subscribed sockets that debug was turned off. */
4565 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4569 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4572 changed = hci_dev_test_and_clear_flag(hdev,
4573 HCI_ENABLE_LL_PRIVACY);
4575 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4579 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4581 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4582 MGMT_OP_SET_EXP_FEATURE, 0,
4586 #ifdef CONFIG_BT_FEATURE_DEBUG
/* Set handler for the debug feature UUID: toggles the global bt_dbg
 * state.  Must be sent on the non-controller index; the single
 * parameter octet is a boolean on/off.
 */
4587 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4588 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4590 struct mgmt_rp_set_exp_feature rp;
4595 /* Command requires to use the non-controller index */
4597 return mgmt_cmd_status(sk, hdev->id,
4598 MGMT_OP_SET_EXP_FEATURE,
4599 MGMT_STATUS_INVALID_INDEX);
4601 /* Parameters are limited to a single octet */
4602 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4603 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4604 MGMT_OP_SET_EXP_FEATURE,
4605 MGMT_STATUS_INVALID_PARAMS);
4607 /* Only boolean on/off is supported */
4608 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4609 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4610 MGMT_OP_SET_EXP_FEATURE,
4611 MGMT_STATUS_INVALID_PARAMS);
4613 val = !!cp->param[0];
/* "changed" is true only when the request flips the current state. */
4614 changed = val ? !bt_dbg_get() : bt_dbg_get();
4617 memcpy(rp.uuid, debug_uuid, 16);
4618 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4620 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4622 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4623 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast the change to other subscribers on success. */
4627 exp_feature_changed(hdev, debug_uuid, val, sk);
/* Set handler for the experimental mesh feature UUID: toggles
 * HCI_MESH_EXPERIMENTAL on the given controller; disabling it also
 * clears the active HCI_MESH flag.  Requires a controller index and a
 * single boolean parameter octet.
 */
4633 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4634 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4636 struct mgmt_rp_set_exp_feature rp;
4640 /* Command requires to use the controller index */
4642 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4643 MGMT_OP_SET_EXP_FEATURE,
4644 MGMT_STATUS_INVALID_INDEX);
4646 /* Parameters are limited to a single octet */
4647 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4648 return mgmt_cmd_status(sk, hdev->id,
4649 MGMT_OP_SET_EXP_FEATURE,
4650 MGMT_STATUS_INVALID_PARAMS);
4652 /* Only boolean on/off is supported */
4653 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4654 return mgmt_cmd_status(sk, hdev->id,
4655 MGMT_OP_SET_EXP_FEATURE,
4656 MGMT_STATUS_INVALID_PARAMS);
4658 val = !!cp->param[0];
4661 changed = !hci_dev_test_and_set_flag(hdev,
4662 HCI_MESH_EXPERIMENTAL);
/* On disable, also stop any active mesh operation. */
4664 hci_dev_clear_flag(hdev, HCI_MESH);
4665 changed = hci_dev_test_and_clear_flag(hdev,
4666 HCI_MESH_EXPERIMENTAL);
4669 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4670 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4672 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4674 err = mgmt_cmd_complete(sk, hdev->id,
4675 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast the change to other subscribers on success. */
4679 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
/* Set handler for the LL privacy / RPA resolution feature UUID.
 * Toggles HCI_ENABLE_LL_PRIVACY; only allowed while the controller is
 * powered off.  Enabling it also clears HCI_ADVERTISING, and either
 * direction changes the supported settings (flag BIT(1) in the reply).
 */
4684 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4685 struct mgmt_cp_set_exp_feature *cp,
4688 struct mgmt_rp_set_exp_feature rp;
4693 /* Command requires to use the controller index */
4695 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4696 MGMT_OP_SET_EXP_FEATURE,
4697 MGMT_STATUS_INVALID_INDEX);
4699 /* Changes can only be made when controller is powered down */
4700 if (hdev_is_powered(hdev))
4701 return mgmt_cmd_status(sk, hdev->id,
4702 MGMT_OP_SET_EXP_FEATURE,
4703 MGMT_STATUS_REJECTED);
4705 /* Parameters are limited to a single octet */
4706 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4707 return mgmt_cmd_status(sk, hdev->id,
4708 MGMT_OP_SET_EXP_FEATURE,
4709 MGMT_STATUS_INVALID_PARAMS);
4711 /* Only boolean on/off is supported */
4712 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4713 return mgmt_cmd_status(sk, hdev->id,
4714 MGMT_OP_SET_EXP_FEATURE,
4715 MGMT_STATUS_INVALID_PARAMS);
4717 val = !!cp->param[0];
4720 changed = !hci_dev_test_and_set_flag(hdev,
4721 HCI_ENABLE_LL_PRIVACY);
/* Legacy advertising is incompatible with LL privacy. */
4722 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4724 /* Enable LL privacy + supported settings changed */
4725 flags = BIT(0) | BIT(1);
4727 changed = hci_dev_test_and_clear_flag(hdev,
4728 HCI_ENABLE_LL_PRIVACY);
4730 /* Disable LL privacy + supported settings changed */
4734 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4735 rp.flags = cpu_to_le32(flags);
4737 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4739 err = mgmt_cmd_complete(sk, hdev->id,
4740 MGMT_OP_SET_EXP_FEATURE, 0,
/* Dedicated notifier also updates hdev->conn_flags for DEVICE_PRIVACY. */
4744 exp_ll_privacy_feature_changed(val, hdev, sk);
/* Set handler for the quality report feature UUID: toggles
 * HCI_QUALITY_REPORT through either the driver's set_quality_report
 * hook or the AOSP vendor extension.  Runs under hci_req_sync_lock
 * because it may issue HCI traffic.
 */
4749 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4750 struct mgmt_cp_set_exp_feature *cp,
4753 struct mgmt_rp_set_exp_feature rp;
4757 /* Command requires to use a valid controller index */
4759 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4760 MGMT_OP_SET_EXP_FEATURE,
4761 MGMT_STATUS_INVALID_INDEX);
4763 /* Parameters are limited to a single octet */
4764 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4765 return mgmt_cmd_status(sk, hdev->id,
4766 MGMT_OP_SET_EXP_FEATURE,
4767 MGMT_STATUS_INVALID_PARAMS);
4769 /* Only boolean on/off is supported */
4770 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4771 return mgmt_cmd_status(sk, hdev->id,
4772 MGMT_OP_SET_EXP_FEATURE,
4773 MGMT_STATUS_INVALID_PARAMS);
4775 hci_req_sync_lock(hdev);
4777 val = !!cp->param[0];
4778 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4780 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4781 err = mgmt_cmd_status(sk, hdev->id,
4782 MGMT_OP_SET_EXP_FEATURE,
4783 MGMT_STATUS_NOT_SUPPORTED);
4784 goto unlock_quality_report;
/* Driver hook takes precedence over the AOSP extension. */
4788 if (hdev->set_quality_report)
4789 err = hdev->set_quality_report(hdev, val);
4791 err = aosp_set_quality_report(hdev, val);
4794 err = mgmt_cmd_status(sk, hdev->id,
4795 MGMT_OP_SET_EXP_FEATURE,
4796 MGMT_STATUS_FAILED);
4797 goto unlock_quality_report;
/* Mirror the successful change into the device flag. */
4801 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4803 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4806 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4808 memcpy(rp.uuid, quality_report_uuid, 16);
4809 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4810 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4812 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4816 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4818 unlock_quality_report:
4819 hci_req_sync_unlock(hdev);
/* Set handler for the codec offload feature UUID: toggles
 * HCI_OFFLOAD_CODECS_ENABLED.  Supported only when the driver provides
 * a get_data_path_id hook.
 */
4823 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4824 struct mgmt_cp_set_exp_feature *cp,
4829 struct mgmt_rp_set_exp_feature rp;
4831 /* Command requires to use a valid controller index */
4833 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4834 MGMT_OP_SET_EXP_FEATURE,
4835 MGMT_STATUS_INVALID_INDEX);
4837 /* Parameters are limited to a single octet */
4838 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4839 return mgmt_cmd_status(sk, hdev->id,
4840 MGMT_OP_SET_EXP_FEATURE,
4841 MGMT_STATUS_INVALID_PARAMS);
4843 /* Only boolean on/off is supported */
4844 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4845 return mgmt_cmd_status(sk, hdev->id,
4846 MGMT_OP_SET_EXP_FEATURE,
4847 MGMT_STATUS_INVALID_PARAMS);
4849 val = !!cp->param[0];
4850 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4852 if (!hdev->get_data_path_id) {
4853 return mgmt_cmd_status(sk, hdev->id,
4854 MGMT_OP_SET_EXP_FEATURE,
4855 MGMT_STATUS_NOT_SUPPORTED);
4860 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4862 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4865 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4868 memcpy(rp.uuid, offload_codecs_uuid, 16);
4869 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4870 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4871 err = mgmt_cmd_complete(sk, hdev->id,
4872 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast the change to other subscribers on success. */
4876 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* Set handler for the LE simultaneous roles feature UUID: toggles
 * HCI_LE_SIMULTANEOUS_ROLES.  Supported only when the controller's LE
 * states allow concurrent central and peripheral roles.
 */
4881 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4882 struct mgmt_cp_set_exp_feature *cp,
4887 struct mgmt_rp_set_exp_feature rp;
4889 /* Command requires to use a valid controller index */
4891 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4892 MGMT_OP_SET_EXP_FEATURE,
4893 MGMT_STATUS_INVALID_INDEX);
4895 /* Parameters are limited to a single octet */
4896 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4897 return mgmt_cmd_status(sk, hdev->id,
4898 MGMT_OP_SET_EXP_FEATURE,
4899 MGMT_STATUS_INVALID_PARAMS);
4901 /* Only boolean on/off is supported */
4902 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4903 return mgmt_cmd_status(sk, hdev->id,
4904 MGMT_OP_SET_EXP_FEATURE,
4905 MGMT_STATUS_INVALID_PARAMS);
4907 val = !!cp->param[0];
4908 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4910 if (!hci_dev_le_state_simultaneous(hdev)) {
4911 return mgmt_cmd_status(sk, hdev->id,
4912 MGMT_OP_SET_EXP_FEATURE,
4913 MGMT_STATUS_NOT_SUPPORTED);
4918 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4920 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4923 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4926 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4927 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4928 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4929 err = mgmt_cmd_complete(sk, hdev->id,
4930 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast the change to other subscribers on success. */
4934 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* Set handler for the ISO socket feature UUID: toggles global ISO
 * socket support.  Must be sent on the non-controller index; single
 * boolean parameter octet.
 */
4940 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4941 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4943 struct mgmt_rp_set_exp_feature rp;
4944 bool val, changed = false;
4947 /* Command requires to use the non-controller index */
4949 return mgmt_cmd_status(sk, hdev->id,
4950 MGMT_OP_SET_EXP_FEATURE,
4951 MGMT_STATUS_INVALID_INDEX);
4953 /* Parameters are limited to a single octet */
4954 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4955 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4956 MGMT_OP_SET_EXP_FEATURE,
4957 MGMT_STATUS_INVALID_PARAMS);
4959 /* Only boolean on/off is supported */
4960 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4961 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4962 MGMT_OP_SET_EXP_FEATURE,
4963 MGMT_STATUS_INVALID_PARAMS);
4965 val = cp->param[0] ? true : false;
4974 memcpy(rp.uuid, iso_socket_uuid, 16);
4975 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4977 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4979 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4980 MGMT_OP_SET_EXP_FEATURE, 0,
/* Broadcast the change to other subscribers on success. */
4984 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
/* Dispatch table mapping each experimental feature UUID to its set
 * handler; set_exp_feature() walks it until the NULL sentinel.
 */
4990 static const struct mgmt_exp_feature {
4992 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4993 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4994 } exp_features[] = {
4995 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4996 #ifdef CONFIG_BT_FEATURE_DEBUG
4997 EXP_FEAT(debug_uuid, set_debug_func),
4999 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5000 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5001 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5002 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5003 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5005 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5008 /* end with a null feature */
5009 EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE handler: looks the request UUID up in the
 * exp_features[] table and delegates to the matching set handler.
 * Unknown UUIDs are rejected with NOT_SUPPORTED.
 */
5012 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5013 void *data, u16 data_len)
5015 struct mgmt_cp_set_exp_feature *cp = data;
5018 bt_dev_dbg(hdev, "sock %p", sk);
5020 for (i = 0; exp_features[i].uuid; i++) {
5021 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5022 return exp_features[i].set_func(sk, hdev, cp, data_len);
5025 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5026 MGMT_OP_SET_EXP_FEATURE,
5027 MGMT_STATUS_NOT_SUPPORTED);
/* Return the connection flags supported for one set of conn params.
 * Starts from the controller-wide hdev->conn_flags and masks out
 * REMOTE_WAKEUP for RPA-using devices when LL privacy is off (such
 * devices cannot be placed in the acceptlist).
 */
5030 static u32 get_params_flags(struct hci_dev *hdev,
5031 struct hci_conn_params *params)
5033 u32 flags = hdev->conn_flags;
5035 /* Devices using RPAs can only be programmed in the acceptlist if
5036 * LL Privacy has been enable otherwise they cannot mark
5037 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5039 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5040 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
5041 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
/* MGMT_OP_GET_DEVICE_FLAGS handler: reports supported and current
 * connection flags for one device.  BR/EDR devices are looked up in the
 * accept list; LE devices in the connection parameters.  status stays
 * INVALID_PARAMS unless a matching entry is found.
 */
5046 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5049 struct mgmt_cp_get_device_flags *cp = data;
5050 struct mgmt_rp_get_device_flags rp;
5051 struct bdaddr_list_with_flags *br_params;
5052 struct hci_conn_params *params;
5053 u32 supported_flags;
5054 u32 current_flags = 0;
5055 u8 status = MGMT_STATUS_INVALID_PARAMS;
5057 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5058 &cp->addr.bdaddr, cp->addr.type);
5062 supported_flags = hdev->conn_flags;
5064 memset(&rp, 0, sizeof(rp));
5066 if (cp->addr.type == BDADDR_BREDR) {
5067 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5073 current_flags = br_params->flags;
5075 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5076 le_addr_type(cp->addr.type));
/* LE: per-device supported flags may differ from the global set. */
5080 supported_flags = get_params_flags(hdev, params);
5081 current_flags = params->flags;
5084 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5085 rp.addr.type = cp->addr.type;
5086 rp.supported_flags = cpu_to_le32(supported_flags);
5087 rp.current_flags = cpu_to_le32(current_flags);
5089 status = MGMT_STATUS_SUCCESS;
5092 hci_dev_unlock(hdev);
5094 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Emit a Device Flags Changed event carrying the device address plus
 * its supported and current connection flags; "sk" (the originator) is
 * excluded from delivery.
 */
5098 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5099 bdaddr_t *bdaddr, u8 bdaddr_type,
5100 u32 supported_flags, u32 current_flags)
5102 struct mgmt_ev_device_flags_changed ev;
5104 bacpy(&ev.addr.bdaddr, bdaddr);
5105 ev.addr.type = bdaddr_type;
5106 ev.supported_flags = cpu_to_le32(supported_flags);
5107 ev.current_flags = cpu_to_le32(current_flags);
5109 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS handler: updates the stored connection flags
 * for one device (accept-list entry for BR/EDR, conn params for LE)
 * after checking the requested flags against what is supported.  On
 * success a Device Flags Changed event is broadcast to other sockets.
 */
5112 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5115 struct mgmt_cp_set_device_flags *cp = data;
5116 struct bdaddr_list_with_flags *br_params;
5117 struct hci_conn_params *params;
5118 u8 status = MGMT_STATUS_INVALID_PARAMS;
5119 u32 supported_flags;
5120 u32 current_flags = __le32_to_cpu(cp->current_flags);
5122 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5123 &cp->addr.bdaddr, cp->addr.type, current_flags);
5125 // We should take hci_dev_lock() early, I think.. conn_flags can change
5126 supported_flags = hdev->conn_flags;
/* Reject any requested bit outside the supported set. */
5128 if ((supported_flags | current_flags) != supported_flags) {
5129 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5130 current_flags, supported_flags);
5136 if (cp->addr.type == BDADDR_BREDR) {
5137 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5142 br_params->flags = current_flags;
5143 status = MGMT_STATUS_SUCCESS;
5145 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5146 &cp->addr.bdaddr, cp->addr.type);
5152 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5153 le_addr_type(cp->addr.type));
5155 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5156 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
/* Re-check against the per-device supported set for LE. */
5160 supported_flags = get_params_flags(hdev, params);
5162 if ((supported_flags | current_flags) != supported_flags) {
5163 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5164 current_flags, supported_flags);
5168 params->flags = current_flags;
5169 status = MGMT_STATUS_SUCCESS;
5171 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5174 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5175 hci_update_passive_scan(hdev);
5178 hci_dev_unlock(hdev);
5181 if (status == MGMT_STATUS_SUCCESS)
5182 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5183 supported_flags, current_flags);
5185 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5186 &cp->addr, sizeof(cp->addr));
/* Emit an Advertisement Monitor Added event for the given monitor
 * handle, skipping the socket that registered it.
 */
5189 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5192 struct mgmt_ev_adv_monitor_added ev;
5194 ev.monitor_handle = cpu_to_le16(handle);
5196 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Emit an Advertisement Monitor Removed event for the given handle.
 * When a Remove Adv Monitor command is pending for this handle, the
 * issuing socket is skipped so it is not notified of its own removal.
 */
5199 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5201 struct mgmt_ev_adv_monitor_removed ev;
5202 struct mgmt_pending_cmd *cmd;
5203 struct sock *sk_skip = NULL;
5204 struct mgmt_cp_remove_adv_monitor *cp;
5206 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5210 if (cp->monitor_handle)
5214 ev.monitor_handle = cpu_to_le16(handle);
5216 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: reports supported/enabled
 * monitor features, the handle/pattern limits and the list of currently
 * registered monitor handles gathered from the adv_monitors IDR.
 */
5219 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5220 void *data, u16 len)
5222 struct adv_monitor *monitor = NULL;
5223 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5226 __u32 supported = 0;
5228 __u16 num_handles = 0;
5229 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5231 BT_DBG("request for %s", hdev->name);
/* OR-pattern monitors are only available via the MSFT extension. */
5235 if (msft_monitor_supported(hdev))
5236 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5238 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5239 handles[num_handles++] = monitor->handle;
5241 hci_dev_unlock(hdev);
/* Reply is variable-length: header + one u16 per registered handle. */
5243 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5244 rp = kmalloc(rp_size, GFP_KERNEL);
5248 /* All supported features are currently enabled */
5249 enabled = supported;
5251 rp->supported_features = cpu_to_le32(supported);
5252 rp->enabled_features = cpu_to_le32(enabled);
5253 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5254 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5255 rp->num_handles = cpu_to_le16(num_handles);
5257 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5259 err = mgmt_cmd_complete(sk, hdev->id,
5260 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5261 MGMT_STATUS_SUCCESS, rp, rp_size);
/* hci_cmd_sync completion for Add Adv Patterns Monitor: on success the
 * monitor is announced, counted, marked registered and passive scan is
 * refreshed; the pending command is then completed with the monitor
 * handle and removed.
 */
5268 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5269 void *data, int status)
5271 struct mgmt_rp_add_adv_patterns_monitor rp;
5272 struct mgmt_pending_cmd *cmd = data;
5273 struct adv_monitor *monitor = cmd->user_data;
5277 rp.monitor_handle = cpu_to_le16(monitor->handle);
5280 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5281 hdev->adv_monitors_cnt++;
5282 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5283 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5284 hci_update_passive_scan(hdev);
5287 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5288 mgmt_status(status), &rp, sizeof(rp));
5289 mgmt_pending_remove(cmd);
5291 hci_dev_unlock(hdev);
5292 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5293 rp.monitor_handle, status);
/* hci_cmd_sync work callback: registers the monitor stashed in the
 * pending command's user_data with the controller.
 */
5296 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5298 struct mgmt_pending_cmd *cmd = data;
5299 struct adv_monitor *monitor = cmd->user_data;
5301 return hci_add_adv_monitor(hdev, monitor);
/* Common tail for both Add Adv Patterns Monitor variants: rejects the
 * request when a conflicting command is pending, queues the monitor
 * registration on the cmd_sync machinery, and on any failure frees the
 * monitor and returns the mapped status to the caller's socket.
 */
5304 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5305 struct adv_monitor *m, u8 status,
5306 void *data, u16 len, u16 op)
5308 struct mgmt_pending_cmd *cmd;
/* Serialize against LE toggling and other monitor add/remove ops. */
5316 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5317 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5318 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5319 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5320 status = MGMT_STATUS_BUSY;
5324 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5326 status = MGMT_STATUS_NO_RESOURCES;
5331 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5332 mgmt_add_adv_patterns_monitor_complete);
5335 status = MGMT_STATUS_NO_RESOURCES;
5337 status = MGMT_STATUS_FAILED;
5342 hci_dev_unlock(hdev);
/* Error path: the monitor is not handed off, so free it here. */
5347 hci_free_adv_monitor(hdev, m);
5348 hci_dev_unlock(hdev);
5349 return mgmt_cmd_status(sk, hdev->id, op, status);
/* Copy userspace RSSI thresholds into the monitor, or install permissive
 * defaults when no thresholds were supplied (rssi == NULL).
 */
5352 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5353 struct mgmt_adv_rssi_thresholds *rssi)
5356 m->rssi.low_threshold = rssi->low_threshold;
5357 m->rssi.low_threshold_timeout =
5358 __le16_to_cpu(rssi->low_threshold_timeout);
5359 m->rssi.high_threshold = rssi->high_threshold;
5360 m->rssi.high_threshold_timeout =
5361 __le16_to_cpu(rssi->high_threshold_timeout);
5362 m->rssi.sampling_period = rssi->sampling_period;
5364 /* Default values. These numbers are the least constricting
5365 * parameters for MSFT API to work, so it behaves as if there
5366 * are no rssi parameter to consider. May need to be changed
5367 * if other API are to be supported.
5369 m->rssi.low_threshold = -127;
5370 m->rssi.low_threshold_timeout = 60;
5371 m->rssi.high_threshold = -127;
5372 m->rssi.high_threshold_timeout = 0;
5373 m->rssi.sampling_period = 0;
/* Validate and copy the userspace pattern array into the monitor's
 * pattern list.  Each pattern's offset/length must stay within the
 * maximum AD payload.  Returns an MGMT status code.
 */
5377 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5378 struct mgmt_adv_pattern *patterns)
5380 u8 offset = 0, length = 0;
5381 struct adv_pattern *p = NULL;
5384 for (i = 0; i < pattern_count; i++) {
5385 offset = patterns[i].offset;
5386 length = patterns[i].length;
/* Reject patterns that would read past the AD data buffer. */
5387 if (offset >= HCI_MAX_AD_LENGTH ||
5388 length > HCI_MAX_AD_LENGTH ||
5389 (offset + length) > HCI_MAX_AD_LENGTH)
5390 return MGMT_STATUS_INVALID_PARAMS;
5392 p = kmalloc(sizeof(*p), GFP_KERNEL);
5394 return MGMT_STATUS_NO_RESOURCES;
5396 p->ad_type = patterns[i].ad_type;
5397 p->offset = patterns[i].offset;
5398 p->length = patterns[i].length;
5399 memcpy(p->value, patterns[i].value, p->length);
5401 INIT_LIST_HEAD(&p->list);
5402 list_add(&p->list, &m->patterns);
5405 return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR handler (no RSSI thresholds):
 * validates the variable-length pattern payload, allocates a monitor
 * with default RSSI parameters, and hands it to the common add path.
 */
5408 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5409 void *data, u16 len)
5411 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5412 struct adv_monitor *m = NULL;
5413 u8 status = MGMT_STATUS_SUCCESS;
5414 size_t expected_size = sizeof(*cp);
5416 BT_DBG("request for %s", hdev->name);
/* At least one pattern must follow the fixed header. */
5418 if (len <= sizeof(*cp)) {
5419 status = MGMT_STATUS_INVALID_PARAMS;
5423 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5424 if (len != expected_size) {
5425 status = MGMT_STATUS_INVALID_PARAMS;
5429 m = kzalloc(sizeof(*m), GFP_KERNEL);
5431 status = MGMT_STATUS_NO_RESOURCES;
5435 INIT_LIST_HEAD(&m->patterns);
/* NULL rssi selects the permissive defaults. */
5437 parse_adv_monitor_rssi(m, NULL);
5438 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5441 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5442 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI handler: same as the non-RSSI
 * variant but copies the caller-supplied RSSI thresholds into the
 * monitor before delegating to the common add path.
 */
5445 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5446 void *data, u16 len)
5448 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5449 struct adv_monitor *m = NULL;
5450 u8 status = MGMT_STATUS_SUCCESS;
5451 size_t expected_size = sizeof(*cp);
5453 BT_DBG("request for %s", hdev->name);
/* At least one pattern must follow the fixed header. */
5455 if (len <= sizeof(*cp)) {
5456 status = MGMT_STATUS_INVALID_PARAMS;
5460 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5461 if (len != expected_size) {
5462 status = MGMT_STATUS_INVALID_PARAMS;
5466 m = kzalloc(sizeof(*m), GFP_KERNEL);
5468 status = MGMT_STATUS_NO_RESOURCES;
5472 INIT_LIST_HEAD(&m->patterns);
5474 parse_adv_monitor_rssi(m, &cp->rssi);
5475 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5478 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5479 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for Remove Advertising Monitor: echo the handle
 * from the original request back to user space, refresh passive
 * scanning (monitor set changed) and drop the pending command.
 * 'status' is an errno-style result, mapped via mgmt_status().
 */
5482 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5483 void *data, int status)
5485 struct mgmt_rp_remove_adv_monitor rp;
5486 struct mgmt_pending_cmd *cmd = data;
5487 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
/* Handle is copied verbatim (still little-endian wire format). */
5491 rp.monitor_handle = cp->monitor_handle;
5494 hci_update_passive_scan(hdev);
5496 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5497 mgmt_status(status), &rp, sizeof(rp));
5498 mgmt_pending_remove(cmd);
/* Unlock pairs with an hci_dev_lock() on a line elided from this view. */
5500 hci_dev_unlock(hdev);
5501 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5502 rp.monitor_handle, status);
/* hci_sync worker for Remove Advertising Monitor.  A zero handle
 * presumably selects "remove all monitors" (the branch condition line
 * is elided from this view); any other handle removes that single
 * monitor.
 */
5505 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5507 struct mgmt_pending_cmd *cmd = data;
5508 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5509 u16 handle = __le16_to_cpu(cp->monitor_handle);
5512 return hci_remove_all_adv_monitor(hdev);
5514 return hci_remove_single_adv_monitor(hdev, handle);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: refuse while a conflicting
 * SET_LE or monitor add/remove operation is still pending, otherwise
 * queue the removal on the hci_sync machinery with
 * mgmt_remove_adv_monitor_complete() as the completion callback.
 */
5517 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5518 void *data, u16 len)
5520 struct mgmt_pending_cmd *cmd;
/* Only one monitor-mutating operation may be in flight at a time. */
5525 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5526 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5527 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5528 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5529 status = MGMT_STATUS_BUSY;
5533 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5535 status = MGMT_STATUS_NO_RESOURCES;
5539 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5540 mgmt_remove_adv_monitor_complete);
/* Queueing failed: drop the pending entry and pick a status below. */
5543 mgmt_pending_remove(cmd);
5546 status = MGMT_STATUS_NO_RESOURCES;
5548 status = MGMT_STATUS_FAILED;
5553 hci_dev_unlock(hdev);
/* Error path: reply with the status chosen above. */
5558 hci_dev_unlock(hdev);
5559 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion callback for Read Local OOB Data: derive a mgmt status
 * from 'err' and the HCI response in cmd->skb, then reply with the
 * local OOB hash/randomizer values.  For controllers without BR/EDR
 * Secure Connections only the P-192 values exist, so the reply is
 * truncated to exclude the P-256 fields.
 */
5563 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5565 struct mgmt_rp_read_local_oob_data mgmt_rp;
5566 size_t rp_size = sizeof(mgmt_rp);
5567 struct mgmt_pending_cmd *cmd = data;
5568 struct sk_buff *skb = cmd->skb;
5569 u8 status = mgmt_status(err);
/* NOTE(review): guarding condition elided; this appears to be the
 * !skb branch (command produced no response at all).
 */
5573 status = MGMT_STATUS_FAILED;
5574 else if (IS_ERR(skb))
5575 status = mgmt_status(PTR_ERR(skb));
/* First byte of the response is the HCI status code. */
5577 status = mgmt_status(skb->data[0]);
5580 bt_dev_dbg(hdev, "status %d", status);
5583 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5587 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5589 if (!bredr_sc_enabled(hdev)) {
/* Legacy reply: P-192 hash/randomizer only. */
5590 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5592 if (skb->len < sizeof(*rp)) {
5593 mgmt_cmd_status(cmd->sk, hdev->id,
5594 MGMT_OP_READ_LOCAL_OOB_DATA,
5595 MGMT_STATUS_FAILED);
5599 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5600 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
/* Shrink the reply so user space does not see bogus P-256 data. */
5602 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
/* Extended reply: both P-192 and P-256 values. */
5604 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5606 if (skb->len < sizeof(*rp)) {
5607 mgmt_cmd_status(cmd->sk, hdev->id,
5608 MGMT_OP_READ_LOCAL_OOB_DATA,
5609 MGMT_STATUS_FAILED);
5613 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5614 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5616 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5617 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5620 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5621 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
/* Only free a real skb, never an ERR_PTR sentinel. */
5624 if (skb && !IS_ERR(skb))
5627 mgmt_pending_free(cmd);
/* hci_sync worker for Read Local OOB Data: issue the extended
 * (P-192 + P-256) variant when BR/EDR Secure Connections is enabled,
 * the legacy variant otherwise.  The response skb is stashed in
 * cmd->skb for the completion callback.
 */
5630 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5632 struct mgmt_pending_cmd *cmd = data;
5634 if (bredr_sc_enabled(hdev))
5635 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5637 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5639 if (IS_ERR(cmd->skb))
5640 return PTR_ERR(cmd->skb);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; queues read_local_oob_data_sync() with
 * read_local_oob_data_complete() as the completion callback.
 */
5645 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5646 void *data, u16 data_len)
5648 struct mgmt_pending_cmd *cmd;
5651 bt_dev_dbg(hdev, "sock %p", sk);
5655 if (!hdev_is_powered(hdev)) {
5656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5657 MGMT_STATUS_NOT_POWERED);
/* OOB data only exists with Secure Simple Pairing support. */
5661 if (!lmp_ssp_capable(hdev)) {
5662 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5663 MGMT_STATUS_NOT_SUPPORTED);
5667 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5671 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5672 read_local_oob_data_complete);
5675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5676 MGMT_STATUS_FAILED);
/* Queueing failed: the pending entry is ours to free. */
5679 mgmt_pending_free(cmd);
5683 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Two payload sizes are valid:
 * the legacy form with only P-192 hash/randomizer (BR/EDR only), and
 * the extended form carrying both P-192 and P-256 values.  Either way
 * the data ends up in the remote OOB store via
 * hci_add_remote_oob_data().
 */
5687 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5688 void *data, u16 len)
5690 struct mgmt_addr_info *addr = data;
5693 bt_dev_dbg(hdev, "sock %p", sk);
5695 if (!bdaddr_type_is_valid(addr->type))
5696 return mgmt_cmd_complete(sk, hdev->id,
5697 MGMT_OP_ADD_REMOTE_OOB_DATA,
5698 MGMT_STATUS_INVALID_PARAMS,
5699 addr, sizeof(*addr));
/* Legacy payload: P-192 values only, valid for BR/EDR addresses. */
5703 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5704 struct mgmt_cp_add_remote_oob_data *cp = data;
5707 if (cp->addr.type != BDADDR_BREDR) {
5708 err = mgmt_cmd_complete(sk, hdev->id,
5709 MGMT_OP_ADD_REMOTE_OOB_DATA,
5710 MGMT_STATUS_INVALID_PARAMS,
5711 &cp->addr, sizeof(cp->addr));
5715 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5716 cp->addr.type, cp->hash,
5717 cp->rand, NULL, NULL);
5719 status = MGMT_STATUS_FAILED;
5721 status = MGMT_STATUS_SUCCESS;
5723 err = mgmt_cmd_complete(sk, hdev->id,
5724 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5725 &cp->addr, sizeof(cp->addr));
5726 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5727 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5728 u8 *rand192, *hash192, *rand256, *hash256;
5731 if (bdaddr_type_is_le(cp->addr.type)) {
5732 /* Enforce zero-valued 192-bit parameters as
5733 * long as legacy SMP OOB isn't implemented.
5735 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5736 memcmp(cp->hash192, ZERO_KEY, 16)) {
5737 err = mgmt_cmd_complete(sk, hdev->id,
5738 MGMT_OP_ADD_REMOTE_OOB_DATA,
5739 MGMT_STATUS_INVALID_PARAMS,
5740 addr, sizeof(*addr));
/* NOTE(review): the assignments for the LE / disabled cases (setting
 * the rand/hash pointers to NULL) sit on lines elided from this view.
 */
5747 /* In case one of the P-192 values is set to zero,
5748 * then just disable OOB data for P-192.
5750 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5751 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5755 rand192 = cp->rand192;
5756 hash192 = cp->hash192;
5760 /* In case one of the P-256 values is set to zero, then just
5761 * disable OOB data for P-256.
5763 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5764 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5768 rand256 = cp->rand256;
5769 hash256 = cp->hash256;
5772 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5773 cp->addr.type, hash192, rand192,
5776 status = MGMT_STATUS_FAILED;
5778 status = MGMT_STATUS_SUCCESS;
5780 err = mgmt_cmd_complete(sk, hdev->id,
5781 MGMT_OP_ADD_REMOTE_OOB_DATA,
5782 status, &cp->addr, sizeof(cp->addr));
/* Anything else is a malformed request. */
5784 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5786 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5787 MGMT_STATUS_INVALID_PARAMS);
5791 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BR/EDR addresses only.
 * BDADDR_ANY clears the whole remote OOB store; otherwise the single
 * entry for the given address is removed.
 */
5795 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5796 void *data, u16 len)
5798 struct mgmt_cp_remove_remote_oob_data *cp = data;
5802 bt_dev_dbg(hdev, "sock %p", sk);
5804 if (cp->addr.type != BDADDR_BREDR)
5805 return mgmt_cmd_complete(sk, hdev->id,
5806 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5807 MGMT_STATUS_INVALID_PARAMS,
5808 &cp->addr, sizeof(cp->addr));
/* Wildcard address: wipe every stored entry. */
5812 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5813 hci_remote_oob_data_clear(hdev);
5814 status = MGMT_STATUS_SUCCESS;
5818 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
/* A failed lookup is reported as invalid parameters. */
5820 status = MGMT_STATUS_INVALID_PARAMS;
5822 status = MGMT_STATUS_SUCCESS;
5825 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5826 status, &cp->addr, sizeof(cp->addr));
5828 hci_dev_unlock(hdev);
5832 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5834 struct mgmt_pending_cmd *cmd;
5836 bt_dev_dbg(hdev, "status %u", status);
5840 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5842 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5845 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5848 cmd->cmd_complete(cmd, mgmt_status(status));
5849 mgmt_pending_remove(cmd);
5852 hci_dev_unlock(hdev);
/* Check whether 'type' is a discovery type this controller supports.
 * On a supported-but-unavailable type *mgmt_status receives the reason
 * (e.g. LE or BR/EDR not enabled); unknown types yield
 * MGMT_STATUS_INVALID_PARAMS.  Returns true only when discovery of
 * that type can actually be started.
 */
5855 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5856 uint8_t *mgmt_status)
5859 case DISCOV_TYPE_LE:
5860 *mgmt_status = mgmt_le_support(hdev);
/* Interleaved discovery needs LE (and, on an elided line, BR/EDR). */
5864 case DISCOV_TYPE_INTERLEAVED:
5865 *mgmt_status = mgmt_le_support(hdev);
5869 case DISCOV_TYPE_BREDR:
5870 *mgmt_status = mgmt_bredr_support(hdev);
5875 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* hci_sync completion for the three start-discovery variants: bail out
 * if the pending command was already completed/cancelled elsewhere,
 * otherwise reply and move the discovery state machine to FINDING on
 * success or STOPPED on error.
 */
5882 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5884 struct mgmt_pending_cmd *cmd = data;
/* Guard against a stale callback racing with command removal. */
5886 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5887 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5888 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5891 bt_dev_dbg(hdev, "err %d", err);
5893 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5895 mgmt_pending_remove(cmd);
5897 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* hci_sync worker: thin wrapper so hci_start_discovery_sync() matches
 * the hci_cmd_sync_queue() callback signature ('data' is unused).
 */
5901 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5903 return hci_start_discovery_sync(hdev);
/* Shared implementation of MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY ('op' selects which).  Validates
 * power/busy/type/pause preconditions, resets the discovery filter and
 * queues start_discovery_sync(); on success the state machine enters
 * DISCOVERY_STARTING.
 */
5906 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5907 u16 op, void *data, u16 len)
5909 struct mgmt_cp_start_discovery *cp = data;
5910 struct mgmt_pending_cmd *cmd;
5914 bt_dev_dbg(hdev, "sock %p", sk);
5918 if (!hdev_is_powered(hdev)) {
5919 err = mgmt_cmd_complete(sk, hdev->id, op,
5920 MGMT_STATUS_NOT_POWERED,
5921 &cp->type, sizeof(cp->type));
/* Only one discovery may run; periodic inquiry also conflicts. */
5925 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5926 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5927 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5928 &cp->type, sizeof(cp->type));
5932 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5933 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5934 &cp->type, sizeof(cp->type));
5938 /* Can't start discovery when it is paused */
5939 if (hdev->discovery_paused) {
5940 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5941 &cp->type, sizeof(cp->type));
5945 /* Clear the discovery filter first to free any previously
5946 * allocated memory for the UUID list.
5948 hci_discovery_filter_clear(hdev);
5950 hdev->discovery.type = cp->type;
5951 hdev->discovery.report_invalid_rssi = false;
/* Limited discovery only reports devices in limited-discoverable mode. */
5952 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5953 hdev->discovery.limited = true;
5955 hdev->discovery.limited = false;
5957 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5963 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5964 start_discovery_complete);
5966 mgmt_pending_remove(cmd);
5970 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5973 hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY: delegate to the shared implementation. */
5977 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5978 void *data, u16 len)
5980 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY: delegate to the shared
 * implementation, which sets hdev->discovery.limited for this opcode.
 */
5984 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5985 void *data, u16 len)
5987 return start_discovery_internal(sk, hdev,
5988 MGMT_OP_START_LIMITED_DISCOVERY,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but
 * with result filtering by RSSI and an optional list of 128-bit UUIDs
 * appended to the request (uuid_count entries of 16 bytes each).
 */
5992 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5993 void *data, u16 len)
5995 struct mgmt_cp_start_service_discovery *cp = data;
5996 struct mgmt_pending_cmd *cmd;
/* Upper bound keeping sizeof(*cp) + uuid_count * 16 within u16 'len'. */
5997 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5998 u16 uuid_count, expected_len;
6002 bt_dev_dbg(hdev, "sock %p", sk);
6006 if (!hdev_is_powered(hdev)) {
6007 err = mgmt_cmd_complete(sk, hdev->id,
6008 MGMT_OP_START_SERVICE_DISCOVERY,
6009 MGMT_STATUS_NOT_POWERED,
6010 &cp->type, sizeof(cp->type));
6014 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6015 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6016 err = mgmt_cmd_complete(sk, hdev->id,
6017 MGMT_OP_START_SERVICE_DISCOVERY,
6018 MGMT_STATUS_BUSY, &cp->type,
6023 if (hdev->discovery_paused) {
6024 err = mgmt_cmd_complete(sk, hdev->id,
6025 MGMT_OP_START_SERVICE_DISCOVERY,
6026 MGMT_STATUS_BUSY, &cp->type,
6031 uuid_count = __le16_to_cpu(cp->uuid_count);
6032 if (uuid_count > max_uuid_count) {
6033 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6035 err = mgmt_cmd_complete(sk, hdev->id,
6036 MGMT_OP_START_SERVICE_DISCOVERY,
6037 MGMT_STATUS_INVALID_PARAMS, &cp->type,
/* Request length must exactly cover the declared UUID list. */
6042 expected_len = sizeof(*cp) + uuid_count * 16;
6043 if (expected_len != len) {
6044 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6046 err = mgmt_cmd_complete(sk, hdev->id,
6047 MGMT_OP_START_SERVICE_DISCOVERY,
6048 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6053 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6054 err = mgmt_cmd_complete(sk, hdev->id,
6055 MGMT_OP_START_SERVICE_DISCOVERY,
6056 status, &cp->type, sizeof(cp->type));
6060 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6067 /* Clear the discovery filter first to free any previously
6068 * allocated memory for the UUID list.
6070 hci_discovery_filter_clear(hdev);
6072 hdev->discovery.result_filtering = true;
6073 hdev->discovery.type = cp->type;
6074 hdev->discovery.rssi = cp->rssi;
6075 hdev->discovery.uuid_count = uuid_count;
6077 if (uuid_count > 0) {
/* Copy the UUID list; freed later by hci_discovery_filter_clear(). */
6078 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6080 if (!hdev->discovery.uuids) {
6081 err = mgmt_cmd_complete(sk, hdev->id,
6082 MGMT_OP_START_SERVICE_DISCOVERY,
6084 &cp->type, sizeof(cp->type));
6085 mgmt_pending_remove(cmd);
6090 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6091 start_discovery_complete);
6093 mgmt_pending_remove(cmd);
6097 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6100 hci_dev_unlock(hdev);
/* Notify user space that a stop-discovery request finished: complete
 * and drop the pending MGMT_OP_STOP_DISCOVERY command, if any.
 */
6104 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6106 struct mgmt_pending_cmd *cmd;
6108 bt_dev_dbg(hdev, "status %u", status);
6112 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6114 cmd->cmd_complete(cmd, mgmt_status(status));
6115 mgmt_pending_remove(cmd);
6118 hci_dev_unlock(hdev);
/* hci_sync completion for Stop Discovery: skip stale callbacks, reply
 * to the caller and mark discovery stopped.
 */
6121 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6123 struct mgmt_pending_cmd *cmd = data;
/* Guard against a stale callback racing with command removal. */
6125 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6128 bt_dev_dbg(hdev, "err %d", err);
6130 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6132 mgmt_pending_remove(cmd);
6135 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* hci_sync worker: thin wrapper so hci_stop_discovery_sync() matches
 * the hci_cmd_sync_queue() callback signature ('data' is unused).
 */
6138 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6140 return hci_stop_discovery_sync(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: only valid while discovery is active
 * and only for the same type it was started with; queues
 * stop_discovery_sync() and enters DISCOVERY_STOPPING on success.
 */
6143 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6146 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6147 struct mgmt_pending_cmd *cmd;
6150 bt_dev_dbg(hdev, "sock %p", sk);
6154 if (!hci_discovery_active(hdev)) {
6155 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6156 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6157 sizeof(mgmt_cp->type));
/* The stop request must name the type that is actually running. */
6161 if (hdev->discovery.type != mgmt_cp->type) {
6162 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6163 MGMT_STATUS_INVALID_PARAMS,
6164 &mgmt_cp->type, sizeof(mgmt_cp->type));
6168 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6174 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6175 stop_discovery_complete);
6177 mgmt_pending_remove(cmd);
6181 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6184 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: during discovery, user space tells us
 * whether the name of a found device is already known.  Unknown names
 * are queued for resolution via hci_inquiry_cache_update_resolve().
 */
6188 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6191 struct mgmt_cp_confirm_name *cp = data;
6192 struct inquiry_entry *e;
6195 bt_dev_dbg(hdev, "sock %p", sk);
6199 if (!hci_discovery_active(hdev)) {
6200 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6201 MGMT_STATUS_FAILED, &cp->addr,
/* The address must be in the cache with an unresolved name. */
6206 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6208 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6209 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6214 if (cp->name_known) {
6215 e->name_state = NAME_KNOWN;
6218 e->name_state = NAME_NEEDED;
6219 hci_inquiry_cache_update_resolve(hdev, e);
6222 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6223 &cp->addr, sizeof(cp->addr));
6226 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to hdev->reject_list
 * and broadcast MGMT_EV_DEVICE_BLOCKED to other mgmt sockets.
 */
6230 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6233 struct mgmt_cp_block_device *cp = data;
6237 bt_dev_dbg(hdev, "sock %p", sk);
6239 if (!bdaddr_type_is_valid(cp->addr.type))
6240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6241 MGMT_STATUS_INVALID_PARAMS,
6242 &cp->addr, sizeof(cp->addr));
6246 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
/* List-add failure (e.g. duplicate) is reported as FAILED. */
6249 status = MGMT_STATUS_FAILED;
/* Event is sent to everyone except the requesting socket ('sk'). */
6253 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6255 status = MGMT_STATUS_SUCCESS;
6258 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6259 &cp->addr, sizeof(cp->addr));
6261 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from
 * hdev->reject_list and broadcast MGMT_EV_DEVICE_UNBLOCKED.
 */
6266 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6269 struct mgmt_cp_unblock_device *cp = data;
6273 bt_dev_dbg(hdev, "sock %p", sk);
6275 if (!bdaddr_type_is_valid(cp->addr.type))
6276 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6277 MGMT_STATUS_INVALID_PARAMS,
6278 &cp->addr, sizeof(cp->addr));
6282 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
/* Address was not blocked in the first place. */
6285 status = MGMT_STATUS_INVALID_PARAMS;
/* Event is sent to everyone except the requesting socket ('sk'). */
6289 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6291 status = MGMT_STATUS_SUCCESS;
6294 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6295 &cp->addr, sizeof(cp->addr));
6297 hci_dev_unlock(hdev);
/* hci_sync worker: regenerate the EIR data so it picks up the Device
 * ID record written by set_device_id() ('data' is unused).
 */
6302 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6304 return hci_update_eir_sync(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the DI record (source, vendor,
 * product, version) and queue an EIR refresh.  Source 0x0000 disables
 * the record; 0x0001/0x0002 select SIG / USB-IF vendor-ID namespaces.
 */
6307 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6310 struct mgmt_cp_set_device_id *cp = data;
6314 bt_dev_dbg(hdev, "sock %p", sk);
6316 source = __le16_to_cpu(cp->source);
6318 if (source > 0x0002)
6319 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6320 MGMT_STATUS_INVALID_PARAMS);
6324 hdev->devid_source = source;
6325 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6326 hdev->devid_product = __le16_to_cpu(cp->product);
6327 hdev->devid_version = __le16_to_cpu(cp->version);
6329 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Fire-and-forget EIR update; no completion callback needed. */
6332 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6334 hci_dev_unlock(hdev);
/* Log the outcome of re-enabling an advertising instance after the
 * Set Advertising setting changed; errors are reported but not
 * propagated.
 */
6339 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6342 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6344 bt_dev_dbg(hdev, "status %d", err);
/* hci_sync completion for Set Advertising: on error, fail every
 * pending SET_ADVERTISING command; on success, sync HCI_ADVERTISING
 * with the controller state, answer the pending commands and emit New
 * Settings.  If the setting was just turned off and advertising
 * instances exist, re-schedule instance advertising.
 */
6347 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6349 struct cmd_lookup match = { NULL, hdev };
6351 struct adv_info *adv_instance;
6352 u8 status = mgmt_status(err);
6355 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6356 cmd_status_rsp, &status);
/* Mirror the controller's actual LE-advertising state into the flag. */
6360 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6361 hci_dev_set_flag(hdev, HCI_ADVERTISING)
6363 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6365 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6368 new_settings(hdev, match.sk);
6373 /* If "Set Advertising" was just disabled and instance advertising was
6374 * set up earlier, then re-enable multi-instance advertising.
6376 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6377 list_empty(&hdev->adv_instances))
6380 instance = hdev->cur_adv_instance;
/* No current instance: fall back to the first configured one. */
6382 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6383 struct adv_info, list);
6387 instance = adv_instance->instance;
6390 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6392 enable_advertising_instance(hdev, err);
/* hci_sync worker for Set Advertising: apply the connectable flag
 * (val 0x02 = connectable advertising), cancel any instance timeout
 * and enable or disable advertising on instance 0.
 */
6395 static int set_adv_sync(struct hci_dev *hdev, void *data)
6397 struct mgmt_pending_cmd *cmd = data;
6398 struct mgmt_mode *cp = cmd->param;
6401 if (cp->val == 0x02)
6402 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6404 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6406 cancel_adv_timeout(hdev);
6409 /* Switch to instance "0" for the Set Advertising setting.
6410 * We cannot use update_[adv|scan_rsp]_data() here as the
6411 * HCI_ADVERTISING flag is not yet set.
6413 hdev->cur_adv_instance = 0x00;
6415 if (ext_adv_capable(hdev)) {
6416 hci_start_ext_adv_sync(hdev, 0x00);
/* Legacy advertising: refresh adv/scan-rsp data, then enable. */
6418 hci_update_adv_data_sync(hdev, 0x00);
6419 hci_update_scan_rsp_data_sync(hdev, 0x00);
6420 hci_enable_advertising_sync(hdev);
/* Disable path (val == 0x00) - the guarding else is elided here. */
6423 hci_disable_advertising_sync(hdev);
/* MGMT_OP_SET_ADVERTISING handler.  val: 0x00 off, 0x01 on,
 * 0x02 on + connectable.  When no HCI traffic is needed (powered off,
 * no change, mesh mode, active LE connection or active scan) the flags
 * are toggled directly and a reply is sent; otherwise the change is
 * queued through set_adv_sync().
 */
6429 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6432 struct mgmt_mode *cp = data;
6433 struct mgmt_pending_cmd *cmd;
6437 bt_dev_dbg(hdev, "sock %p", sk);
6439 status = mgmt_le_support(hdev);
6441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6444 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446 MGMT_STATUS_INVALID_PARAMS);
/* Advertising is paused (e.g. during suspend): refuse changes. */
6448 if (hdev->advertising_paused)
6449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6456 /* The following conditions are ones which mean that we should
6457 * not do any HCI communication but directly send a mgmt
6458 * response to user space (after toggling the flag if
6461 if (!hdev_is_powered(hdev) ||
6462 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6463 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6464 hci_dev_test_flag(hdev, HCI_MESH) ||
6465 hci_conn_num(hdev, LE_LINK) > 0 ||
6466 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6467 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
/* Flag-only path: track the requested state without HCI traffic. */
6471 hdev->cur_adv_instance = 0x00;
6472 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6473 if (cp->val == 0x02)
6474 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6476 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6478 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6479 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6482 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6487 err = new_settings(hdev, sk);
/* Serialize against other advertising / LE state changes. */
6492 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6493 pending_find(MGMT_OP_SET_LE, hdev)) {
6494 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6503 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6504 set_advertising_complete);
6507 mgmt_pending_remove(cmd);
6510 hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: store the LE static random
 * address.  Only allowed while powered off.  BDADDR_ANY disables the
 * static address; any other value must be a valid static random
 * address (top two bits set, and not BDADDR_NONE).
 */
6514 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6515 void *data, u16 len)
6517 struct mgmt_cp_set_static_address *cp = data;
6520 bt_dev_dbg(hdev, "sock %p", sk);
6522 if (!lmp_le_capable(hdev))
6523 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6524 MGMT_STATUS_NOT_SUPPORTED);
6526 if (hdev_is_powered(hdev))
6527 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6528 MGMT_STATUS_REJECTED);
6530 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6531 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6532 return mgmt_cmd_status(sk, hdev->id,
6533 MGMT_OP_SET_STATIC_ADDRESS,
6534 MGMT_STATUS_INVALID_PARAMS);
6536 /* Two most significant bits shall be set */
6537 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6538 return mgmt_cmd_status(sk, hdev->id,
6539 MGMT_OP_SET_STATIC_ADDRESS,
6540 MGMT_STATUS_INVALID_PARAMS);
6545 bacpy(&hdev->static_addr, &cp->bdaddr);
6547 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6551 err = new_settings(hdev, sk);
6554 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: set the LE scan interval/window.
 * Both must lie in the HCI-defined range 0x0004-0x4000 and the window
 * may not exceed the interval.  A running background scan is restarted
 * so the new parameters take effect.
 */
6558 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6559 void *data, u16 len)
6561 struct mgmt_cp_set_scan_params *cp = data;
6562 __u16 interval, window;
6565 bt_dev_dbg(hdev, "sock %p", sk);
6567 if (!lmp_le_capable(hdev))
6568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6569 MGMT_STATUS_NOT_SUPPORTED);
6571 interval = __le16_to_cpu(cp->interval);
6573 if (interval < 0x0004 || interval > 0x4000)
6574 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6575 MGMT_STATUS_INVALID_PARAMS);
6577 window = __le16_to_cpu(cp->window);
6579 if (window < 0x0004 || window > 0x4000)
6580 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6581 MGMT_STATUS_INVALID_PARAMS);
6583 if (window > interval)
6584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6585 MGMT_STATUS_INVALID_PARAMS);
6589 hdev->le_scan_interval = interval;
6590 hdev->le_scan_window = window;
6592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6595 /* If background scan is running, restart it so new parameters are
6598 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6599 hdev->discovery.state == DISCOVERY_STOPPED)
6600 hci_update_passive_scan(hdev);
6602 hci_dev_unlock(hdev);
/* hci_sync completion for Set Fast Connectable: report failure, or
 * update HCI_FAST_CONNECTABLE to match the requested value, reply and
 * emit New Settings.
 */
6607 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6609 struct mgmt_pending_cmd *cmd = data;
6611 bt_dev_dbg(hdev, "err %d", err);
6614 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Success path: the flag now tracks the value from the request. */
6617 struct mgmt_mode *cp = cmd->param;
6620 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6622 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6624 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6625 new_settings(hdev, cmd->sk);
6628 mgmt_pending_free(cmd);
/* hci_sync worker for Set Fast Connectable: forward the requested
 * on/off value to the page-scan parameter update helper.
 */
6631 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6633 struct mgmt_pending_cmd *cmd = data;
6634 struct mgmt_mode *cp = cmd->param;
6636 return hci_write_fast_connectable_sync(hdev, cp->val);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and
 * controller version >= 1.2.  While powered off the flag is simply
 * toggled; otherwise write_fast_connectable_sync() is queued to update
 * the page-scan parameters.
 */
6639 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6640 void *data, u16 len)
6642 struct mgmt_mode *cp = data;
6643 struct mgmt_pending_cmd *cmd;
6646 bt_dev_dbg(hdev, "sock %p", sk);
6648 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6649 hdev->hci_ver < BLUETOOTH_VER_1_2)
6650 return mgmt_cmd_status(sk, hdev->id,
6651 MGMT_OP_SET_FAST_CONNECTABLE,
6652 MGMT_STATUS_NOT_SUPPORTED);
6654 if (cp->val != 0x00 && cp->val != 0x01)
6655 return mgmt_cmd_status(sk, hdev->id,
6656 MGMT_OP_SET_FAST_CONNECTABLE,
6657 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: just confirm. */
6661 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6662 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
/* Powered off: record the change, no HCI traffic needed. */
6666 if (!hdev_is_powered(hdev)) {
6667 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6668 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6669 new_settings(hdev, sk);
6673 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6678 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6679 fast_connectable_complete);
6682 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6683 MGMT_STATUS_FAILED);
6686 mgmt_pending_free(cmd);
6690 hci_dev_unlock(hdev);
/* hci_sync completion for Set BR/EDR: on failure roll back the
 * HCI_BREDR_ENABLED flag that set_bredr() optimistically set; on
 * success reply and emit New Settings.
 */
6695 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6697 struct mgmt_pending_cmd *cmd = data;
6699 bt_dev_dbg(hdev, "err %d", err);
6702 u8 mgmt_err = mgmt_status(err);
6704 /* We need to restore the flag if related HCI commands
6707 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6709 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6711 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6712 new_settings(hdev, cmd->sk);
6715 mgmt_pending_free(cmd);
/* hci_sync worker for enabling BR/EDR: reset fast-connectable, update
 * page/inquiry scan and refresh the advertising data flags (the
 * BR/EDR-not-supported bit changes).
 */
6718 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6722 status = hci_write_fast_connectable_sync(hdev, false);
6725 status = hci_update_scan_sync(hdev);
6727 /* Since only the advertising data flags will change, there
6728 * is no need to update the scan response data.
6731 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_BREDR handler: toggle BR/EDR support on a dual-mode
 * controller.  Disabling while powered is rejected, as is re-enabling
 * when a static address or Secure Connections is in use (see the long
 * comment below).  The actual HCI work happens in set_bredr_sync().
 */
6736 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6738 struct mgmt_mode *cp = data;
6739 struct mgmt_pending_cmd *cmd;
6742 bt_dev_dbg(hdev, "sock %p", sk);
6744 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6746 MGMT_STATUS_NOT_SUPPORTED);
6748 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6750 MGMT_STATUS_REJECTED);
6752 if (cp->val != 0x00 && cp->val != 0x01)
6753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6754 MGMT_STATUS_INVALID_PARAMS);
/* Already in the requested state: just confirm. */
6758 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6759 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6763 if (!hdev_is_powered(hdev)) {
/* Disabling while off also clears every BR/EDR-only setting. */
6765 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6766 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6767 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6768 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6769 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6772 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6774 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6778 err = new_settings(hdev, sk);
6782 /* Reject disabling when powered on */
6784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6785 MGMT_STATUS_REJECTED);
6788 /* When configuring a dual-mode controller to operate
6789 * with LE only and using a static address, then switching
6790 * BR/EDR back on is not allowed.
6792 * Dual-mode controllers shall operate with the public
6793 * address as its identity address for BR/EDR and LE. So
6794 * reject the attempt to create an invalid configuration.
6796 * The same restrictions applies when secure connections
6797 * has been enabled. For BR/EDR this is a controller feature
6798 * while for LE it is a host stack feature. This means that
6799 * switching BR/EDR back on when secure connections has been
6800 * enabled is not a supported transaction.
6802 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6803 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6804 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6806 MGMT_STATUS_REJECTED);
6811 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6815 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6816 set_bredr_complete);
6819 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6820 MGMT_STATUS_FAILED);
6822 mgmt_pending_free(cmd);
6827 /* We need to flip the bit already here so that
6828 * hci_req_update_adv_data generates the correct flags.
6830 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6833 hci_dev_unlock(hdev);
/* hci_sync completion for Set Secure Connections: on failure report
 * the error; on success set HCI_SC_ENABLED / HCI_SC_ONLY according to
 * the requested value (0x00 off, 0x01 SC, 0x02 SC-only - the switch
 * header line is elided from this view), reply and emit New Settings.
 */
6837 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6839 struct mgmt_pending_cmd *cmd = data;
6840 struct mgmt_mode *cp;
6842 bt_dev_dbg(hdev, "err %d", err);
6845 u8 mgmt_err = mgmt_status(err);
6847 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
/* val 0x00: both SC flags off. */
6855 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6856 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x01: SC enabled, SC-only off. */
6859 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6860 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* val 0x02: SC enabled in SC-only mode. */
6863 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6864 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6868 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6869 new_settings(hdev, cmd->sk);
6872 mgmt_pending_free(cmd);
/* hci_sync worker for Set Secure Connections: force the SC-support
 * write by pre-setting HCI_SC_ENABLED, then push the requested value
 * to the controller.
 */
6875 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6877 struct mgmt_pending_cmd *cmd = data;
6878 struct mgmt_mode *cp = cmd->param;
6881 /* Force write of val */
6882 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6884 return hci_write_sc_support_sync(hdev, val);
/* MGMT_OP_SET_SECURE_CONN handler.  val: 0x00 off, 0x01 on,
 * 0x02 SC-only.  Needs either controller SC support or LE enabled;
 * when no controller interaction is possible (powered off, LE-only or
 * no SC support) the flags are toggled directly, otherwise
 * set_secure_conn_sync() is queued.
 */
6887 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6888 void *data, u16 len)
6890 struct mgmt_mode *cp = data;
6891 struct mgmt_pending_cmd *cmd;
6895 bt_dev_dbg(hdev, "sock %p", sk);
6897 if (!lmp_sc_capable(hdev) &&
6898 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6899 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6900 MGMT_STATUS_NOT_SUPPORTED);
/* SC on BR/EDR requires SSP to be enabled first. */
6902 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6903 lmp_sc_capable(hdev) &&
6904 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6906 MGMT_STATUS_REJECTED);
6908 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6910 MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: no controller write is possible or needed. */
6914 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6915 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6919 changed = !hci_dev_test_and_set_flag(hdev,
6921 if (cp->val == 0x02)
6922 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6924 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6926 changed = hci_dev_test_and_clear_flag(hdev,
6928 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6931 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6936 err = new_settings(hdev, sk);
/* Already in the requested state: just confirm. */
6943 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6944 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6945 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6949 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6953 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6954 set_secure_conn_complete);
6957 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6958 MGMT_STATUS_FAILED);
6960 mgmt_pending_free(cmd);
6964 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 * cp->val: 0x00 off, 0x01 keep debug keys, 0x02 keep + actively use
 * them.  Toggles HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS and, when the
 * use-mode changed on a powered adapter with SSP enabled, pushes the
 * new mode with HCI_OP_WRITE_SSP_DEBUG_MODE.  Replies with a settings
 * response and broadcasts New Settings when the state changed.
 */
6968 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6969 void *data, u16 len)
6971 struct mgmt_mode *cp = data;
6972 bool changed, use_changed;
6975 bt_dev_dbg(hdev, "sock %p", sk);
6977 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6978 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6979 MGMT_STATUS_INVALID_PARAMS);
6984 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6986 changed = hci_dev_test_and_clear_flag(hdev,
6987 HCI_KEEP_DEBUG_KEYS);
6989 if (cp->val == 0x02)
6990 use_changed = !hci_dev_test_and_set_flag(hdev,
6991 HCI_USE_DEBUG_KEYS);
6993 use_changed = hci_dev_test_and_clear_flag(hdev,
6994 HCI_USE_DEBUG_KEYS);
6996 if (hdev_is_powered(hdev) && use_changed &&
6997 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6998 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6999 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7000 sizeof(mode), &mode);
7003 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7008 err = new_settings(hdev, sk);
7011 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY.
 * cp->privacy: 0x00 off, 0x01 on, 0x02 limited privacy.  Enabling
 * stores the caller-supplied IRK, marks the RPA expired (forcing
 * regeneration) and sets HCI_LIMITED_PRIVACY for mode 0x02; disabling
 * wipes the IRK and clears the related flags.  HCI_RPA_RESOLVING is
 * always set because a userspace that sends this command is expected
 * to handle IRKs.  Tizen deviation: the upstream "reject while
 * powered" check is commented out (see the in-body comment).
 */
7015 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7018 struct mgmt_cp_set_privacy *cp = cp_data;
7022 bt_dev_dbg(hdev, "sock %p", sk);
7024 if (!lmp_le_capable(hdev))
7025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7026 MGMT_STATUS_NOT_SUPPORTED);
7028 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7030 MGMT_STATUS_INVALID_PARAMS);
7033 /* commenting out since set privacy command is always rejected
7034 * if this condition is enabled.
7036 if (hdev_is_powered(hdev))
7037 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7038 MGMT_STATUS_REJECTED);
7043 /* If user space supports this command it is also expected to
7044 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7046 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7049 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7050 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7051 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7052 hci_adv_instances_set_rpa_expired(hdev, true);
7053 if (cp->privacy == 0x02)
7054 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7056 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7058 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7059 memset(hdev->irk, 0, sizeof(hdev->irk));
7060 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7061 hci_adv_instances_set_rpa_expired(hdev, false);
7062 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7065 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7070 err = new_settings(hdev, sk);
7073 hci_dev_unlock(hdev);
/* Validate an IRK entry's address type.  LE public addresses are
 * accepted as-is; LE random addresses must be static random, i.e. the
 * two most significant bits of the address must both be set.
 */
7077 static bool irk_is_valid(struct mgmt_irk_info *irk)
7079 switch (irk->addr.type) {
7080 case BDADDR_LE_PUBLIC:
7083 case BDADDR_LE_RANDOM:
7084 /* Two most significant bits shall be set */
7085 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS.
 * Rejects on non-LE controllers, bounds-checks irk_count against the
 * maximum that fits in a u16-sized payload, and verifies the payload
 * length with struct_size() to prevent out-of-bounds reads of the
 * flexible irks[] array.  After validating every entry, clears the
 * existing SMP IRK list and re-adds each key, skipping ones on the
 * blocked-key list, then sets HCI_RPA_RESOLVING.
 */
7093 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7096 struct mgmt_cp_load_irks *cp = cp_data;
7097 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7098 sizeof(struct mgmt_irk_info));
7099 u16 irk_count, expected_len;
7102 bt_dev_dbg(hdev, "sock %p", sk);
7104 if (!lmp_le_capable(hdev))
7105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7106 MGMT_STATUS_NOT_SUPPORTED);
7108 irk_count = __le16_to_cpu(cp->irk_count);
7109 if (irk_count > max_irk_count) {
7110 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7112 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7113 MGMT_STATUS_INVALID_PARAMS);
7116 expected_len = struct_size(cp, irks, irk_count);
7117 if (expected_len != len) {
7118 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7120 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7121 MGMT_STATUS_INVALID_PARAMS);
7124 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7126 for (i = 0; i < irk_count; i++) {
7127 struct mgmt_irk_info *key = &cp->irks[i];
7129 if (!irk_is_valid(key))
7130 return mgmt_cmd_status(sk, hdev->id,
7132 MGMT_STATUS_INVALID_PARAMS);
7137 hci_smp_irks_clear(hdev);
7139 for (i = 0; i < irk_count; i++) {
7140 struct mgmt_irk_info *irk = &cp->irks[i];
7142 if (hci_is_blocked_key(hdev,
7143 HCI_BLOCKED_KEY_TYPE_IRK,
7145 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7150 hci_add_irk(hdev, &irk->addr.bdaddr,
7151 le_addr_type(irk->addr.type), irk->val,
7155 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7157 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7159 hci_dev_unlock(hdev);
/* Tizen handler for MGMT_OP_SET_ADVERTISING_PARAMS.
 * Rejected on non-LE controllers and while HCI_ADVERTISING is active.
 * Interval bounds (0x0020..0x4000, min <= max) follow the LE Set
 * Advertising Parameters ranges from the Core spec.  On success the
 * values are cached in hdev for the next advertising enable.
 */
7165 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7166 void *data, u16 len)
7168 struct mgmt_cp_set_advertising_params *cp = data;
7173 BT_DBG("%s", hdev->name);
7175 if (!lmp_le_capable(hdev))
7176 return mgmt_cmd_status(sk, hdev->id,
7177 MGMT_OP_SET_ADVERTISING_PARAMS,
7178 MGMT_STATUS_NOT_SUPPORTED);
7180 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7181 return mgmt_cmd_status(sk, hdev->id,
7182 MGMT_OP_SET_ADVERTISING_PARAMS,
7185 min_interval = __le16_to_cpu(cp->interval_min);
7186 max_interval = __le16_to_cpu(cp->interval_max);
7188 if (min_interval > max_interval ||
7189 min_interval < 0x0020 || max_interval > 0x4000)
7190 return mgmt_cmd_status(sk, hdev->id,
7191 MGMT_OP_SET_ADVERTISING_PARAMS,
7192 MGMT_STATUS_INVALID_PARAMS);
7196 hdev->le_adv_min_interval = min_interval;
7197 hdev->le_adv_max_interval = max_interval;
7198 hdev->adv_filter_policy = cp->filter_policy;
7199 hdev->adv_type = cp->type;
7201 err = mgmt_cmd_complete(sk, hdev->id,
7202 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7204 hci_dev_unlock(hdev);
/* hci_req completion callback for MGMT_OP_SET_ADVERTISING_DATA:
 * translates a controller error into an mgmt status, or replies with
 * success, then removes the pending command under hdev lock.
 */
7209 static void set_advertising_data_complete(struct hci_dev *hdev,
7210 u8 status, u16 opcode)
7212 struct mgmt_cp_set_advertising_data *cp;
7213 struct mgmt_pending_cmd *cmd;
7215 BT_DBG("status 0x%02x", status);
7219 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7226 mgmt_cmd_status(cmd->sk, hdev->id,
7227 MGMT_OP_SET_ADVERTISING_DATA,
7228 mgmt_status(status));
7230 mgmt_cmd_complete(cmd->sk, hdev->id,
7231 MGMT_OP_SET_ADVERTISING_DATA, 0,
7234 mgmt_pending_remove(cmd);
7237 hci_dev_unlock(hdev);
/* Tizen handler for MGMT_OP_SET_ADVERTISING_DATA.
 * Rejects non-LE controllers, duplicate pending requests, and payloads
 * longer than HCI_MAX_AD_LENGTH (the whole mgmt payload is treated as
 * raw AD bytes).  Queues HCI_OP_LE_SET_ADV_DATA through an hci_request
 * and keeps a pending command for set_advertising_data_complete().
 */
7240 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7241 void *data, u16 len)
7243 struct mgmt_pending_cmd *cmd;
7244 struct hci_request req;
7245 struct mgmt_cp_set_advertising_data *cp = data;
7246 struct hci_cp_le_set_adv_data adv;
7249 BT_DBG("%s", hdev->name);
7251 if (!lmp_le_capable(hdev)) {
7252 return mgmt_cmd_status(sk, hdev->id,
7253 MGMT_OP_SET_ADVERTISING_DATA,
7254 MGMT_STATUS_NOT_SUPPORTED);
7259 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7260 err = mgmt_cmd_status(sk, hdev->id,
7261 MGMT_OP_SET_ADVERTISING_DATA,
7266 if (len > HCI_MAX_AD_LENGTH) {
7267 err = mgmt_cmd_status(sk, hdev->id,
7268 MGMT_OP_SET_ADVERTISING_DATA,
7269 MGMT_STATUS_INVALID_PARAMS);
7273 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7280 hci_req_init(&req, hdev);
7282 memset(&adv, 0, sizeof(adv));
7283 memcpy(adv.data, cp->data, len);
7286 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7288 err = hci_req_run(&req, set_advertising_data_complete);
7290 mgmt_pending_remove(cmd);
7293 hci_dev_unlock(hdev);
7298 /* Adv White List feature */
/* hci_req completion callback for MGMT_OP_ADD_DEV_WHITE_LIST: reports
 * the mapped controller status, or echoes the original request params
 * back on success, then drops the pending command under hdev lock.
 */
7299 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7301 struct mgmt_cp_add_dev_white_list *cp;
7302 struct mgmt_pending_cmd *cmd;
7304 BT_DBG("status 0x%02x", status);
7308 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7315 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7316 mgmt_status(status));
7318 mgmt_cmd_complete(cmd->sk, hdev->id,
7319 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7321 mgmt_pending_remove(cmd);
7324 hci_dev_unlock(hdev);
/* Tizen handler for MGMT_OP_ADD_DEV_WHITE_LIST: adds a device to the
 * LE filter accept ("white") list.  Requires an LE-capable, powered
 * adapter and no duplicate pending request; the mgmt parameters are
 * forwarded verbatim as HCI_OP_LE_ADD_TO_WHITE_LIST.
 */
7327 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7328 void *data, u16 len)
7330 struct mgmt_pending_cmd *cmd;
7331 struct mgmt_cp_add_dev_white_list *cp = data;
7332 struct hci_request req;
7335 BT_DBG("%s", hdev->name);
7337 if (!lmp_le_capable(hdev))
7338 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7339 MGMT_STATUS_NOT_SUPPORTED);
7341 if (!hdev_is_powered(hdev))
7342 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7343 MGMT_STATUS_REJECTED);
7347 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7348 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7353 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7359 hci_req_init(&req, hdev);
7361 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
7363 err = hci_req_run(&req, add_white_list_complete);
7365 mgmt_pending_remove(cmd);
7370 hci_dev_unlock(hdev);
/* hci_req completion callback for MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST:
 * mirrors add_white_list_complete() — status on failure, echo of the
 * request on success, pending command removed under hdev lock.
 */
7375 static void remove_from_white_list_complete(struct hci_dev *hdev,
7376 u8 status, u16 opcode)
7378 struct mgmt_cp_remove_dev_from_white_list *cp;
7379 struct mgmt_pending_cmd *cmd;
7381 BT_DBG("status 0x%02x", status);
7385 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7392 mgmt_cmd_status(cmd->sk, hdev->id,
7393 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7394 mgmt_status(status));
7396 mgmt_cmd_complete(cmd->sk, hdev->id,
7397 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7400 mgmt_pending_remove(cmd);
7403 hci_dev_unlock(hdev);
/* Tizen handler for MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST: removes one
 * device from the LE filter accept ("white") list via
 * HCI_OP_LE_DEL_FROM_WHITE_LIST.  Same preconditions as
 * add_white_list(): LE capable, powered, no duplicate pending request.
 */
7406 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7407 void *data, u16 len)
7409 struct mgmt_pending_cmd *cmd;
7410 struct mgmt_cp_remove_dev_from_white_list *cp = data;
7411 struct hci_request req;
7414 BT_DBG("%s", hdev->name);
7416 if (!lmp_le_capable(hdev))
7417 return mgmt_cmd_status(sk, hdev->id,
7418 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7419 MGMT_STATUS_NOT_SUPPORTED);
7421 if (!hdev_is_powered(hdev))
7422 return mgmt_cmd_status(sk, hdev->id,
7423 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7424 MGMT_STATUS_REJECTED);
7428 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7429 err = mgmt_cmd_status(sk, hdev->id,
7430 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7435 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7442 hci_req_init(&req, hdev);
7444 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
7446 err = hci_req_run(&req, remove_from_white_list_complete);
7448 mgmt_pending_remove(cmd);
7453 hci_dev_unlock(hdev);
/* hci_req completion callback for MGMT_OP_CLEAR_DEV_WHITE_LIST:
 * reports the mapped status or success, then removes the pending
 * command under hdev lock.
 */
7458 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7461 struct mgmt_pending_cmd *cmd;
7463 BT_DBG("status 0x%02x", status);
7467 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7472 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7473 mgmt_status(status));
7475 mgmt_cmd_complete(cmd->sk, hdev->id,
7476 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7479 mgmt_pending_remove(cmd);
7482 hci_dev_unlock(hdev);
/* Tizen handler for MGMT_OP_CLEAR_DEV_WHITE_LIST: empties the LE
 * filter accept ("white") list with HCI_OP_LE_CLEAR_WHITE_LIST (no
 * parameters).  Same preconditions as the other white-list handlers.
 */
7485 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7486 void *data, u16 len)
7488 struct mgmt_pending_cmd *cmd;
7489 struct hci_request req;
7492 BT_DBG("%s", hdev->name);
7494 if (!lmp_le_capable(hdev))
7495 return mgmt_cmd_status(sk, hdev->id,
7496 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7497 MGMT_STATUS_NOT_SUPPORTED);
7499 if (!hdev_is_powered(hdev))
7500 return mgmt_cmd_status(sk, hdev->id,
7501 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7502 MGMT_STATUS_REJECTED);
7506 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7507 err = mgmt_cmd_status(sk, hdev->id,
7508 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7513 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7520 hci_req_init(&req, hdev);
7522 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
7524 err = hci_req_run(&req, clear_white_list_complete);
7526 mgmt_pending_remove(cmd);
7531 hci_dev_unlock(hdev);
/* hci_req completion callback for MGMT_OP_SET_SCAN_RSP_DATA: status on
 * failure, completion on success, pending command removed under lock.
 */
7536 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7539 struct mgmt_cp_set_scan_rsp_data *cp;
7540 struct mgmt_pending_cmd *cmd;
7542 BT_DBG("status 0x%02x", status);
7546 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7553 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7554 mgmt_status(status));
7556 mgmt_cmd_complete(cmd->sk, hdev->id,
7557 MGMT_OP_SET_SCAN_RSP_DATA, 0,
7560 mgmt_pending_remove(cmd);
7563 hci_dev_unlock(hdev);
/* Tizen handler for MGMT_OP_SET_SCAN_RSP_DATA: mirrors
 * set_advertising_data() but programs the scan-response payload with
 * HCI_OP_LE_SET_SCAN_RSP_DATA.  Payload length is capped at
 * HCI_MAX_AD_LENGTH and copied into a zeroed command buffer.
 */
7566 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7569 struct mgmt_pending_cmd *cmd;
7570 struct hci_request req;
7571 struct mgmt_cp_set_scan_rsp_data *cp = data;
7572 struct hci_cp_le_set_scan_rsp_data rsp;
7575 BT_DBG("%s", hdev->name);
7577 if (!lmp_le_capable(hdev))
7578 return mgmt_cmd_status(sk, hdev->id,
7579 MGMT_OP_SET_SCAN_RSP_DATA,
7580 MGMT_STATUS_NOT_SUPPORTED);
7584 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7590 if (len > HCI_MAX_AD_LENGTH) {
7591 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7592 MGMT_STATUS_INVALID_PARAMS);
7596 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7602 hci_req_init(&req, hdev);
7604 memset(&rsp, 0, sizeof(rsp));
7605 memcpy(rsp.data, cp->data, len);
7608 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7610 err = hci_req_run(&req, set_scan_rsp_data_complete);
7612 mgmt_pending_remove(cmd);
7615 hci_dev_unlock(hdev);
/* hci_req completion callback for the RSSI-threshold write issued from
 * mgmt_set_rssi_threshold(); replies on the MGMT_OP_SET_RSSI_ENABLE
 * pending command and removes it under hdev lock.
 */
7620 static void set_rssi_threshold_complete(struct hci_dev *hdev,
7621 u8 status, u16 opcode)
7623 struct mgmt_pending_cmd *cmd;
7625 BT_DBG("status 0x%02x", status);
7629 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7634 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7635 mgmt_status(status));
7637 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7640 mgmt_pending_remove(cmd);
7643 hci_dev_unlock(hdev);
/* hci_req completion callback for the RSSI-disable request; replies on
 * the MGMT_OP_SET_RSSI_DISABLE pending command and removes it.
 */
7646 static void set_rssi_disable_complete(struct hci_dev *hdev,
7647 u8 status, u16 opcode)
7649 struct mgmt_pending_cmd *cmd;
7651 BT_DBG("status 0x%02x", status);
7655 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7660 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7661 mgmt_status(status));
7663 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7666 mgmt_pending_remove(cmd);
7669 hci_dev_unlock(hdev);
/* Tizen RSSI monitoring: program per-link RSSI alert thresholds.
 * Expects an MGMT_OP_SET_RSSI_ENABLE command already pending (it is
 * called from set_enable_rssi()/mgmt_enable_rssi_cc()).  Resolves the
 * LE (link_type 0x01) or ACL connection for cp->bdaddr and sends the
 * vendor HCI_OP_ENABLE_RSSI command with ext opcode 0x0B carrying the
 * low/in-range/high thresholds and alert mask 0x07 (all alerts).
 * Replies with status 1 (and drops the pending cmd) when no such
 * connection exists.
 */
7672 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
7673 void *data, u16 len)
7676 struct hci_cp_set_rssi_threshold th = { 0, };
7677 struct mgmt_cp_set_enable_rssi *cp = data;
7678 struct hci_conn *conn;
7679 struct mgmt_pending_cmd *cmd;
7680 struct hci_request req;
7685 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7687 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7688 MGMT_STATUS_FAILED);
7692 if (!lmp_le_capable(hdev)) {
7693 mgmt_pending_remove(cmd);
7694 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7695 MGMT_STATUS_NOT_SUPPORTED);
7699 if (!hdev_is_powered(hdev)) {
7700 BT_DBG("%s", hdev->name);
7701 mgmt_pending_remove(cmd);
7702 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7703 MGMT_STATUS_NOT_POWERED);
7707 if (cp->link_type == 0x01)
7708 dest_type = LE_LINK;
7710 dest_type = ACL_LINK;
7712 /* Get LE/ACL link handle info */
7713 conn = hci_conn_hash_lookup_ba(hdev,
7714 dest_type, &cp->bdaddr);
7717 err = mgmt_cmd_complete(sk, hdev->id,
7718 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
7719 mgmt_pending_remove(cmd);
7723 hci_req_init(&req, hdev);
7725 th.hci_le_ext_opcode = 0x0B;
7727 th.conn_handle = conn->handle;
7728 th.alert_mask = 0x07;
7729 th.low_th = cp->low_th;
7730 th.in_range_th = cp->in_range_th;
7731 th.high_th = cp->high_th;
7733 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7734 err = hci_req_run(&req, set_rssi_threshold_complete);
7737 mgmt_pending_remove(cmd);
7738 BT_ERR("Error in requesting hci_req_run");
7743 hci_dev_unlock(hdev);
/* Notify userspace that RSSI monitoring was enabled: sends the
 * MGMT_OP_SET_RSSI_ENABLE command-complete and an MGMT_EV_RSSI_ENABLED
 * event, then (presumably on the success path — the branch is elided
 * here) resets per-link RSSI state for this link type and marks the
 * target link as monitored.  Finally drops the pending command.
 */
7747 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
7748 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7750 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
7751 struct mgmt_cp_set_enable_rssi *cp = data;
7752 struct mgmt_pending_cmd *cmd;
7757 mgmt_rp.status = rp->status;
7758 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7759 mgmt_rp.bt_address = cp->bdaddr;
7760 mgmt_rp.link_type = cp->link_type;
7762 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7763 MGMT_STATUS_SUCCESS, &mgmt_rp,
7764 sizeof(struct mgmt_cc_rsp_enable_rssi));
7766 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
7767 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7769 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
7770 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7771 &mgmt_rp.bt_address, true);
7775 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7777 mgmt_pending_remove(cmd);
7779 hci_dev_unlock(hdev);
/* Notify userspace that RSSI monitoring was disabled: command-complete
 * for MGMT_OP_SET_RSSI_DISABLE plus an MGMT_EV_RSSI_DISABLED event,
 * then clears the per-link monitored state and drops the pending cmd.
 * NOTE(review): both the reply and the event pass
 * sizeof(struct mgmt_cc_rsp_enable_rssi) although mgmt_rp is a
 * struct mgmt_cc_rp_disable_rssi — confirm the two structs share a
 * layout, otherwise this over-/under-reports the payload size.
 */
7782 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
7783 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7785 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
7786 struct mgmt_cp_disable_rssi *cp = data;
7787 struct mgmt_pending_cmd *cmd;
7792 mgmt_rp.status = rp->status;
7793 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7794 mgmt_rp.bt_address = cp->bdaddr;
7795 mgmt_rp.link_type = cp->link_type;
7797 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7798 MGMT_STATUS_SUCCESS, &mgmt_rp,
7799 sizeof(struct mgmt_cc_rsp_enable_rssi));
7801 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
7802 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7804 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7805 &mgmt_rp.bt_address, false);
7809 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7811 mgmt_pending_remove(cmd);
7813 hci_dev_unlock(hdev);
/* Turn RSSI monitoring off at the controller: sends the vendor
 * HCI_OP_ENABLE_RSSI command with ext opcode 0x01 and
 * le_enable_cs_Features = 0x00 (disable).  Expects an
 * MGMT_OP_SET_RSSI_DISABLE command already pending; fails with
 * MGMT_STATUS_FAILED when it is absent, NOT_SUPPORTED on non-LE
 * controllers and NOT_POWERED when the adapter is down.
 */
7816 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
7817 void *data, u16 len)
7819 struct mgmt_pending_cmd *cmd;
7820 struct hci_request req;
7821 struct hci_cp_set_enable_rssi cp_en = { 0, };
7824 BT_DBG("Set Disable RSSI.");
7826 cp_en.hci_le_ext_opcode = 0x01;
7827 cp_en.le_enable_cs_Features = 0x00;
7828 cp_en.data[0] = 0x00;
7829 cp_en.data[1] = 0x00;
7830 cp_en.data[2] = 0x00;
7834 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7836 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7837 MGMT_STATUS_FAILED);
7841 if (!lmp_le_capable(hdev)) {
7842 mgmt_pending_remove(cmd);
7843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7844 MGMT_STATUS_NOT_SUPPORTED);
7848 if (!hdev_is_powered(hdev)) {
7849 BT_DBG("%s", hdev->name);
7850 mgmt_pending_remove(cmd);
7851 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7852 MGMT_STATUS_NOT_POWERED);
7856 hci_req_init(&req, hdev);
7858 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7859 sizeof(struct hci_cp_set_enable_rssi),
7860 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7861 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7863 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7864 err = hci_req_run(&req, set_rssi_disable_complete);
7867 mgmt_pending_remove(cmd);
7868 BT_ERR("Error in requesting hci_req_run");
7873 hci_dev_unlock(hdev);
/* Command-complete dispatcher for the vendor RSSI enable/disable HCI
 * command.  Looks up which mgmt operation is pending (enable vs
 * disable) and dispatches on rp->le_ext_opcode: for an enable, the
 * initial enable step chains into mgmt_set_rssi_threshold() while the
 * threshold step reports success via mgmt_rssi_enable_success(); for a
 * disable, either success is reported directly, or — when other links
 * are still being monitored (count > 1) — only this link's threshold
 * is unset, otherwise mgmt_set_disable_rssi() fully disables the
 * feature.  (Case labels are elided in this excerpt.)
 */
7877 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
7879 struct hci_cc_rsp_enable_rssi *rp = response;
7880 struct mgmt_pending_cmd *cmd_enable = NULL;
7881 struct mgmt_pending_cmd *cmd_disable = NULL;
7882 struct mgmt_cp_set_enable_rssi *cp_en;
7883 struct mgmt_cp_disable_rssi *cp_dis;
7886 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7887 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7888 hci_dev_unlock(hdev);
7891 BT_DBG("Enable Request");
7894 BT_DBG("Disable Request");
7897 cp_en = cmd_enable->param;
7902 switch (rp->le_ext_opcode) {
7904 BT_DBG("RSSI enabled.. Setting Threshold...");
7905 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
7906 cp_en, sizeof(*cp_en));
7910 BT_DBG("Sending RSSI enable success");
7911 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
7912 cp_en, rp, rp->status);
7916 } else if (cmd_disable) {
7917 cp_dis = cmd_disable->param;
7922 switch (rp->le_ext_opcode) {
7924 BT_DBG("Sending RSSI disable success");
7925 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7926 cp_dis, rp, rp->status);
7931 * Only unset RSSI Threshold values for the Link if
7932 * RSSI is monitored for other BREDR or LE Links
7934 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
7935 BT_DBG("Unset Threshold. Other links being monitored");
7936 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7937 cp_dis, rp, rp->status);
7939 BT_DBG("Unset Threshold. Disabling...");
7940 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
7941 cp_dis, sizeof(*cp_dis));
/* hci_req completion callback for the initial RSSI-enable request;
 * replies on the MGMT_OP_SET_RSSI_ENABLE pending command and removes
 * it under hdev lock.
 */
7948 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7951 struct mgmt_pending_cmd *cmd;
7953 BT_DBG("status 0x%02x", status);
7957 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7962 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7963 mgmt_status(status));
7965 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7968 mgmt_pending_remove(cmd);
7971 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_RSSI_ENABLE.
 * Builds the vendor enable command (ext opcode 0x01, cs_Features 0x04
 * = enable RSSI monitoring).  After the LE/powered/duplicate-pending
 * checks it registers a pending command; if RSSI is already active on
 * some link (hci_conn_hash_lookup_rssi_count() > 0) it skips the
 * enable and programs the thresholds directly via
 * mgmt_set_rssi_threshold(), otherwise it queues HCI_OP_ENABLE_RSSI
 * with set_rssi_enable_complete() as callback.
 */
7974 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7975 void *data, u16 len)
7977 struct mgmt_pending_cmd *cmd;
7978 struct hci_request req;
7979 struct mgmt_cp_set_enable_rssi *cp = data;
7980 struct hci_cp_set_enable_rssi cp_en = { 0, };
7983 BT_DBG("Set Enable RSSI.");
7985 cp_en.hci_le_ext_opcode = 0x01;
7986 cp_en.le_enable_cs_Features = 0x04;
7987 cp_en.data[0] = 0x00;
7988 cp_en.data[1] = 0x00;
7989 cp_en.data[2] = 0x00;
7993 if (!lmp_le_capable(hdev)) {
7994 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7995 MGMT_STATUS_NOT_SUPPORTED);
7999 if (!hdev_is_powered(hdev)) {
8000 BT_DBG("%s", hdev->name);
8001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8002 MGMT_STATUS_NOT_POWERED);
8006 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
8007 BT_DBG("%s", hdev->name);
8008 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8013 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
8016 BT_DBG("%s", hdev->name);
8021 /* If RSSI is already enabled directly set Threshold values */
8022 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
8023 hci_dev_unlock(hdev);
8024 BT_DBG("RSSI Enabled. Directly set Threshold");
8025 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
8029 hci_req_init(&req, hdev);
8031 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
8032 sizeof(struct hci_cp_set_enable_rssi),
8033 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
8034 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
8036 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
8037 err = hci_req_run(&req, set_rssi_enable_complete);
8040 mgmt_pending_remove(cmd);
8041 BT_ERR("Error in requesting hci_req_run");
8046 hci_dev_unlock(hdev);
/* hci_req completion callback for MGMT_OP_GET_RAW_RSSI: completes the
 * pending command with the 1-byte HCI status as payload (the actual
 * RSSI value is delivered separately via mgmt_raw_rssi_response()).
 */
8051 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8053 struct mgmt_pending_cmd *cmd;
8055 BT_DBG("status 0x%02x", status);
8059 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
8063 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8064 MGMT_STATUS_SUCCESS, &status, 1);
8066 mgmt_pending_remove(cmd);
8069 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_RAW_RSSI: reads the raw RSSI of one link.
 * Resolves the LE (link_type 0x01) or ACL connection for
 * cp->bt_address, fails with NOT_CONNECTED when absent, then — after
 * the powered and duplicate-pending checks — sends the vendor
 * HCI_OP_GET_RAW_RSSI with the connection handle.
 */
8072 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
8075 struct mgmt_pending_cmd *cmd;
8076 struct hci_request req;
8077 struct mgmt_cp_get_raw_rssi *cp = data;
8078 struct hci_cp_get_raw_rssi hci_cp;
8080 struct hci_conn *conn;
8084 BT_DBG("Get Raw RSSI.");
8088 if (!lmp_le_capable(hdev)) {
8089 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8090 MGMT_STATUS_NOT_SUPPORTED);
8094 if (cp->link_type == 0x01)
8095 dest_type = LE_LINK;
8097 dest_type = ACL_LINK;
8099 /* Get LE/BREDR link handle info */
8100 conn = hci_conn_hash_lookup_ba(hdev,
8101 dest_type, &cp->bt_address);
8103 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8104 MGMT_STATUS_NOT_CONNECTED);
8107 hci_cp.conn_handle = conn->handle;
8109 if (!hdev_is_powered(hdev)) {
8110 BT_DBG("%s", hdev->name);
8111 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8112 MGMT_STATUS_NOT_POWERED);
8116 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
8117 BT_DBG("%s", hdev->name);
8118 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8123 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
8125 BT_DBG("%s", hdev->name);
8130 hci_req_init(&req, hdev);
8132 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
8133 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
8134 err = hci_req_run(&req, get_raw_rssi_complete);
8137 mgmt_pending_remove(cmd);
8138 BT_ERR("Error in requesting hci_req_run");
8142 hci_dev_unlock(hdev);
/* Deliver a raw-RSSI reading to userspace as MGMT_EV_RAW_RSSI.
 * Maps the connection handle from the controller response back to a
 * bdaddr/link-type pair (link_type 0x01 = LE, 0x00 = BR/EDR).
 */
8147 void mgmt_raw_rssi_response(struct hci_dev *hdev,
8148 struct hci_cc_rp_get_raw_rssi *rp, int success)
8150 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
8151 struct hci_conn *conn;
8153 mgmt_rp.status = rp->status;
8154 mgmt_rp.rssi_dbm = rp->rssi_dbm;
8156 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
8160 bacpy(&mgmt_rp.bt_address, &conn->dst);
8161 if (conn->type == LE_LINK)
8162 mgmt_rp.link_type = 0x01;
8164 mgmt_rp.link_type = 0x00;
8166 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
8167 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* hci_req completion callback for set_disable_threshold(): completes
 * the MGMT_OP_SET_RSSI_DISABLE pending command with the 1-byte HCI
 * status and removes it under hdev lock.
 */
8170 static void set_disable_threshold_complete(struct hci_dev *hdev,
8171 u8 status, u16 opcode)
8173 struct mgmt_pending_cmd *cmd;
8175 BT_DBG("status 0x%02x", status);
8179 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
8183 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8184 MGMT_STATUS_SUCCESS, &status, 1);
8186 mgmt_pending_remove(cmd);
8189 hci_dev_unlock(hdev);
8192 /* Removes RSSI threshold monitoring for a single link. */
/* Handler for MGMT_OP_SET_RSSI_DISABLE: clears the alert thresholds
 * for the LE (link_type 0x01) or ACL connection matching cp->bdaddr by
 * sending vendor HCI_OP_ENABLE_RSSI with ext opcode 0x0B and alert
 * mask 0x00.  Replies with status 1 when no such connection exists;
 * otherwise the usual powered / duplicate-pending checks apply before
 * the request is queued with set_disable_threshold_complete().
 */
8193 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
8194 void *data, u16 len)
8197 struct hci_cp_set_rssi_threshold th = { 0, };
8198 struct mgmt_cp_disable_rssi *cp = data;
8199 struct hci_conn *conn;
8200 struct mgmt_pending_cmd *cmd;
8201 struct hci_request req;
8204 BT_DBG("Set Disable RSSI.");
8208 if (!lmp_le_capable(hdev)) {
8209 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8210 MGMT_STATUS_NOT_SUPPORTED);
8214 /* Get LE/ACL link handle info*/
8215 if (cp->link_type == 0x01)
8216 dest_type = LE_LINK;
8218 dest_type = ACL_LINK;
8220 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
8222 err = mgmt_cmd_complete(sk, hdev->id,
8223 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
8227 th.hci_le_ext_opcode = 0x0B;
8229 th.conn_handle = conn->handle;
8230 th.alert_mask = 0x00;
8232 th.in_range_th = 0x00;
8235 if (!hdev_is_powered(hdev)) {
8236 BT_DBG("%s", hdev->name);
8237 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8242 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
8243 BT_DBG("%s", hdev->name);
8244 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8249 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
8252 BT_DBG("%s", hdev->name);
8257 hci_req_init(&req, hdev);
8259 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
8260 err = hci_req_run(&req, set_disable_threshold_complete);
8262 mgmt_pending_remove(cmd);
8263 BT_ERR("Error in requesting hci_req_run");
8268 hci_dev_unlock(hdev);
/* Forward a vendor RSSI-alert HCI event to userspace as
 * MGMT_EV_RSSI_ALERT.  Resolves the connection handle to a bdaddr and
 * link type (0x01 = LE, 0x00 = BR/EDR); logs and bails out when the
 * handle no longer maps to a connection.
 */
8273 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
8274 s8 alert_type, s8 rssi_dbm)
8276 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
8277 struct hci_conn *conn;
8279 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
8280 conn_handle, alert_type, rssi_dbm);
8282 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
8285 BT_ERR("RSSI alert Error: Device not found for handle");
8288 bacpy(&mgmt_ev.bdaddr, &conn->dst);
8290 if (conn->type == LE_LINK)
8291 mgmt_ev.link_type = 0x01;
8293 mgmt_ev.link_type = 0x00;
8295 mgmt_ev.alert_type = alert_type;
8296 mgmt_ev.rssi_dbm = rssi_dbm;
8298 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
8299 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Report a failed LE discovery start: resets the LE discovery state to
 * DISCOVERY_STOPPED and, if an MGMT_OP_START_LE_DISCOVERY command is
 * pending, completes it with the mapped status and discovery type.
 */
8303 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
8305 struct mgmt_pending_cmd *cmd;
8309 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8311 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8315 type = hdev->le_discovery.type;
8317 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8318 mgmt_status(status), &type, sizeof(type));
8319 mgmt_pending_remove(cmd);
/* hci_req completion callback for start_le_discovery(): on error it
 * reports the failure, otherwise it moves the LE discovery state to
 * DISCOVERY_FINDING and schedules the delayed le_scan_disable work
 * (timeout computed on lines elided from this excerpt).
 */
8324 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
8327 unsigned long timeout = 0;
8329 BT_DBG("status %d", status);
8333 mgmt_start_le_discovery_failed(hdev, status);
8334 hci_dev_unlock(hdev);
8339 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
8340 hci_dev_unlock(hdev);
8342 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
8343 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
8348 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* Handler for MGMT_OP_START_LE_DISCOVERY (Tizen: LE-only discovery,
 * separate from the standard Start Discovery).  Validates powered
 * state, DISCOVERY_STOPPED, and cp->type == DISCOV_TYPE_LE, then
 * builds one hci_request that: stops any running background scan,
 * picks an own-address type (RPA when HCI_PRIVACY, otherwise NRPA),
 * sets scan parameters from hdev defaults and enables active scanning
 * without duplicate filtering.  On success the state moves to
 * DISCOVERY_STARTING until start_le_discovery_complete() runs.
 */
8351 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
8352 void *data, u16 len)
8354 struct mgmt_cp_start_le_discovery *cp = data;
8355 struct mgmt_pending_cmd *cmd;
8356 struct hci_cp_le_set_scan_param param_cp;
8357 struct hci_cp_le_set_scan_enable enable_cp;
8358 struct hci_request req;
8359 u8 status, own_addr_type;
8362 BT_DBG("%s", hdev->name);
8364 if (!hdev_is_powered(hdev)) {
8365 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8366 MGMT_STATUS_NOT_POWERED);
8370 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
8371 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8376 if (cp->type != DISCOV_TYPE_LE) {
8377 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8378 MGMT_STATUS_INVALID_PARAMS);
8382 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
8388 hdev->le_discovery.type = cp->type;
8390 hci_req_init(&req, hdev);
8392 status = mgmt_le_support(hdev);
8394 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8396 mgmt_pending_remove(cmd);
8400 /* If controller is scanning, it means the background scanning
8401 * is running. Thus, we should temporarily stop it in order to
8402 * set the discovery scanning parameters.
8404 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
8405 hci_req_add_le_scan_disable(&req, false);
/* NOTE(review): '¶m_cp' below is a mojibake of '&param_cp'
 * (an HTML-entity mangling of "&param"); restore before compiling.
 */
8407 memset(¶m_cp, 0, sizeof(param_cp));
8409 /* All active scans will be done with either a resolvable
8410 * private address (when privacy feature has been enabled)
8411 * or unresolvable private address.
8413 err = hci_update_random_address_sync(hdev, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
8415 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8416 MGMT_STATUS_FAILED);
8417 mgmt_pending_remove(cmd);
8421 param_cp.type = hdev->le_scan_type;
8422 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
8423 param_cp.window = cpu_to_le16(hdev->le_scan_window);
8424 param_cp.own_address_type = own_addr_type;
8425 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
8428 memset(&enable_cp, 0, sizeof(enable_cp));
8429 enable_cp.enable = LE_SCAN_ENABLE;
8430 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
8432 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
8435 err = hci_req_run(&req, start_le_discovery_complete);
8437 mgmt_pending_remove(cmd);
8439 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
/* Report a failed Stop LE Discovery to the socket that requested it.
 * Completes the pending MGMT_OP_STOP_LE_DISCOVERY command with the mapped
 * status and the current LE discovery type as payload, then drops the
 * command from the pending list.
 */
8445 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
8447 struct mgmt_pending_cmd *cmd;
8450 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8454 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8455 mgmt_status(status), &hdev->le_discovery.type,
8456 sizeof(hdev->le_discovery.type));
8457 mgmt_pending_remove(cmd);
/* HCI request completion callback for Stop LE Discovery.
 * On error, notify userspace via mgmt_stop_le_discovery_failed();
 * otherwise move the LE discovery state machine to DISCOVERY_STOPPED.
 * NOTE(review): the matching hci_dev_lock() is in an elided line here —
 * confirm against the full source.
 */
8462 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
8465 BT_DBG("status %d", status);
8470 mgmt_stop_le_discovery_failed(hdev, status);
8474 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8477 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_LE_DISCOVERY handler.
 * Rejects the request if no LE discovery is active or if the requested
 * type does not match the running one; otherwise queues an HCI request
 * that disables LE scanning and moves the LE discovery state machine to
 * DISCOVERY_STOPPING.  Replies always echo the requested discovery type.
 */
8480 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
8481 void *data, u16 len)
8483 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
8484 struct mgmt_pending_cmd *cmd;
8485 struct hci_request req;
8488 BT_DBG("%s", hdev->name);
8492 if (!hci_le_discovery_active(hdev)) {
8493 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8494 MGMT_STATUS_REJECTED, &mgmt_cp->type,
8495 sizeof(mgmt_cp->type));
8499 if (hdev->le_discovery.type != mgmt_cp->type) {
8500 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8501 MGMT_STATUS_INVALID_PARAMS,
8502 &mgmt_cp->type, sizeof(mgmt_cp->type));
8506 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
8512 hci_req_init(&req, hdev);
8514 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
8515 BT_DBG("unknown le discovery state %u",
8516 hdev->le_discovery.state);
8518 mgmt_pending_remove(cmd);
8519 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8520 MGMT_STATUS_FAILED, &mgmt_cp->type,
8521 sizeof(mgmt_cp->type));
/* Stop the pending scan-timeout work before disabling the scan so the
 * timeout does not race with this explicit stop.
 */
8525 cancel_delayed_work(&hdev->le_scan_disable);
8526 hci_req_add_le_scan_disable(&req, false);
8528 err = hci_req_run(&req, stop_le_discovery_complete);
8530 mgmt_pending_remove(cmd);
8532 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
8535 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DISCOVERING for the Tizen-specific separate LE discovery
 * path.  If a Start or Stop LE Discovery command is still pending, first
 * complete it (success, echoing the discovery type) before broadcasting
 * the discovering-state event.
 */
8539 /* Separate LE discovery */
8540 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
8542 struct mgmt_ev_discovering ev;
8543 struct mgmt_pending_cmd *cmd;
8545 BT_DBG("%s le discovering %u", hdev->name, discovering);
8548 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8550 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8553 u8 type = hdev->le_discovery.type;
8555 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
8557 mgmt_pending_remove(cmd);
8560 memset(&ev, 0, sizeof(ev));
8561 ev.type = hdev->le_discovery.type;
8562 ev.discovering = discovering;
8564 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Cancel an in-progress LE connection attempt by sending
 * HCI LE Create Connection Cancel.  A send failure is only logged;
 * the handler does not report it back as a command status here.
 */
8567 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
8568 void *data, u16 len)
8572 BT_DBG("%s", hdev->name);
8576 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
8578 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
8580 hci_dev_unlock(hdev);
/* Validate LE connection-parameter-update values.
 * Checks the ranges used by the LE Connection Update procedure:
 * interval min/max in 6..3200 with min <= max, supervision timeout in
 * 10..3200, timeout strictly greater than the max interval (in the
 * units used here), and latency <= 499 and below the derived maximum.
 * NOTE(review): the final parameter (to_multiplier) is on an elided
 * continuation line of the signature — confirm against the full source.
 */
8585 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
8590 if (min > max || min < 6 || max > 3200)
8593 if (to_multiplier < 10 || to_multiplier > 3200)
8596 if (max >= to_multiplier * 8)
8599 max_latency = (to_multiplier * 8 / max) - 1;
8601 if (latency > 499 || latency > max_latency)
/* MGMT_OP_LE_CONN_UPDATE handler.
 * Validates the requested connection parameters, looks up the LE
 * connection for cp->bdaddr and, if connected, issues the LE connection
 * update.  Replies NOT_POWERED / INVALID_PARAMS / NOT_CONNECTED on the
 * respective failure paths.
 */
8607 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
8610 struct mgmt_cp_le_conn_update *cp = data;
8612 struct hci_conn *conn;
8613 u16 min, max, latency, supervision_timeout;
8616 if (!hdev_is_powered(hdev))
8617 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8618 MGMT_STATUS_NOT_POWERED);
8620 min = __le16_to_cpu(cp->conn_interval_min);
8621 max = __le16_to_cpu(cp->conn_interval_max);
8622 latency = __le16_to_cpu(cp->conn_latency);
8623 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
8625 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
8626 min, max, latency, supervision_timeout);
8628 err = check_le_conn_update_param(min, max, latency,
8629 supervision_timeout);
8632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8633 MGMT_STATUS_INVALID_PARAMS);
8637 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
8639 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8640 MGMT_STATUS_NOT_CONNECTED);
8641 hci_dev_unlock(hdev);
/* Drop hdev->lock before issuing the update. */
8645 hci_dev_unlock(hdev);
8647 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
8649 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
/* HCI request completion callback for Set Manufacturer Data.
 * Resolves the pending MGMT_OP_SET_MANUFACTURER_DATA command: a status
 * reply on failure, a command-complete on success, then removes the
 * pending entry.
 */
8653 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
8656 struct mgmt_cp_set_manufacturer_data *cp;
8657 struct mgmt_pending_cmd *cmd;
8659 BT_DBG("status 0x%02x", status);
8663 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
8670 mgmt_cmd_status(cmd->sk, hdev->id,
8671 MGMT_OP_SET_MANUFACTURER_DATA,
8672 mgmt_status(status));
8674 mgmt_cmd_complete(cmd->sk, hdev->id,
8675 MGMT_OP_SET_MANUFACTURER_DATA, 0,
8678 mgmt_pending_remove(cmd);
8681 hci_dev_unlock(hdev);
/* MGMT_OP_SET_MANUFACTURER_DATA handler.
 * cp->data is an EIR-style blob: data[0] is the length byte, data[1] is
 * the EIR type — only 0xFF (Manufacturer Specific Data) is accepted —
 * and data[2..] is the payload.  The payload is copied into
 * hdev->manufacturer_data and the EIR is refreshed; on failure the
 * previous data (saved in old_data/old_len) is restored.
 */
8684 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
8685 void *data, u16 len)
8687 struct mgmt_pending_cmd *cmd;
8688 struct hci_request req;
8689 struct mgmt_cp_set_manufacturer_data *cp = data;
8690 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
8694 BT_DBG("%s", hdev->name);
8696 if (!lmp_bredr_capable(hdev))
8697 return mgmt_cmd_status(sk, hdev->id,
8698 MGMT_OP_SET_MANUFACTURER_DATA,
8699 MGMT_STATUS_NOT_SUPPORTED);
/* Length byte must be non-zero and the payload (length - 1, excluding
 * the type byte) must fit in hdev->manufacturer_data.
 */
8701 if (cp->data[0] == 0 ||
8702 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
8703 return mgmt_cmd_status(sk, hdev->id,
8704 MGMT_OP_SET_MANUFACTURER_DATA,
8705 MGMT_STATUS_INVALID_PARAMS);
8707 if (cp->data[1] != 0xFF)
8708 return mgmt_cmd_status(sk, hdev->id,
8709 MGMT_OP_SET_MANUFACTURER_DATA,
8710 MGMT_STATUS_NOT_SUPPORTED);
8714 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
8715 err = mgmt_cmd_status(sk, hdev->id,
8716 MGMT_OP_SET_MANUFACTURER_DATA,
8721 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
8728 hci_req_init(&req, hdev);
8730 /* if new data is same as previous data then return command
8733 if (hdev->manufacturer_len == cp->data[0] - 1 &&
8734 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
8735 mgmt_pending_remove(cmd);
8736 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
8737 0, cp, sizeof(*cp));
/* Snapshot the current data so it can be restored on failure. */
8742 old_len = hdev->manufacturer_len;
8744 memcpy(old_data, hdev->manufacturer_data, old_len);
8746 hdev->manufacturer_len = cp->data[0] - 1;
8747 if (hdev->manufacturer_len > 0)
8748 memcpy(hdev->manufacturer_data, cp->data + 2,
8749 hdev->manufacturer_len);
8751 hci_update_eir_sync(hdev);
8753 err = hci_req_run(&req, set_manufacturer_data_complete);
8755 mgmt_pending_remove(cmd);
8760 hci_dev_unlock(hdev);
/* Failure path: roll back to the previously stored manufacturer data. */
8765 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
8766 hdev->manufacturer_len = old_len;
8767 if (hdev->manufacturer_len > 0)
8768 memcpy(hdev->manufacturer_data, old_data,
8769 hdev->manufacturer_len);
8770 hci_dev_unlock(hdev);
/* MGMT_OP_LE_SET_SCAN_PARAMS handler.
 * Stores the requested LE scan type, interval and window (both must be
 * in 0x0004..0x4000 and window <= interval), and restarts the passive
 * background scan if it is currently running with no foreground
 * discovery, so the new parameters take effect immediately.
 */
8774 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
8775 void *data, u16 len)
8777 struct mgmt_cp_le_set_scan_params *cp = data;
8778 __u16 interval, window;
8781 BT_DBG("%s", hdev->name);
8783 if (!lmp_le_capable(hdev))
8784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8785 MGMT_STATUS_NOT_SUPPORTED);
8787 interval = __le16_to_cpu(cp->interval);
8789 if (interval < 0x0004 || interval > 0x4000)
8790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8791 MGMT_STATUS_INVALID_PARAMS);
8793 window = __le16_to_cpu(cp->window);
8795 if (window < 0x0004 || window > 0x4000)
8796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8797 MGMT_STATUS_INVALID_PARAMS);
8799 if (window > interval)
8800 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8801 MGMT_STATUS_INVALID_PARAMS);
8805 hdev->le_scan_type = cp->type;
8806 hdev->le_scan_interval = interval;
8807 hdev->le_scan_window = window;
8809 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
8812 /* If background scan is running, restart it so new parameters are
8815 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
8816 hdev->discovery.state == DISCOVERY_STOPPED) {
8817 struct hci_request req;
8819 hci_req_init(&req, hdev);
8821 hci_req_add_le_scan_disable(&req, false);
8822 hci_req_add_le_passive_scan(&req);
8824 hci_req_run(&req, NULL);
8827 hci_dev_unlock(hdev);
/* MGMT_OP_SET_VOICE_SETTING handler (Tizen SCO audio path selection).
 * Stores the requested voice setting and SCO role on the ACL connection
 * for cp->bdaddr, then selects the SCO codec path via the sco_connect_*
 * helpers.  NOTE(review): 0x0063 appears to select the wideband path
 * (the *_wbc helpers) and other values narrowband — confirm against the
 * sco.c helpers, whose bodies are not visible here.
 */
8832 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
8833 void *data, u16 len)
8835 struct mgmt_cp_set_voice_setting *cp = data;
8836 struct hci_conn *conn;
8837 struct hci_conn *sco_conn;
8841 BT_DBG("%s", hdev->name);
8843 if (!lmp_bredr_capable(hdev)) {
8844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
8845 MGMT_STATUS_NOT_SUPPORTED);
8850 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
8852 err = mgmt_cmd_complete(sk, hdev->id,
8853 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
8857 conn->voice_setting = cp->voice_setting;
8858 conn->sco_role = cp->sco_role;
/* Refuse to retune the codec path while a SCO link to a different
 * peer exists.
 */
8860 sco_conn = hci_conn_hash_lookup_sco(hdev);
8861 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
8862 BT_ERR("There is other SCO connection.");
8866 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
8867 if (conn->voice_setting == 0x0063)
8868 sco_connect_set_wbc(hdev);
8870 sco_connect_set_nbc(hdev);
8872 if (conn->voice_setting == 0x0063)
8873 sco_connect_set_gw_wbc(hdev);
8875 sco_connect_set_gw_nbc(hdev);
8879 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
8883 hci_dev_unlock(hdev);
/* MGMT_OP_GET_ADV_TX_POWER handler: replies with the cached advertising
 * TX power (hdev->adv_tx_power).  The reply buffer is heap-allocated;
 * presumably it is kfree'd on an elided line after the reply is sent —
 * confirm against the full source.
 */
8887 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
8888 void *data, u16 len)
8890 struct mgmt_rp_get_adv_tx_power *rp;
8894 BT_DBG("%s", hdev->name);
8898 rp_len = sizeof(*rp);
8899 rp = kmalloc(rp_len, GFP_KERNEL);
8905 rp->adv_tx_power = hdev->adv_tx_power;
8907 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
8913 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_HARDWARE_ERROR with the controller's error code to
 * all management sockets.
 */
8918 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
8920 struct mgmt_ev_hardware_error ev;
8922 ev.error_code = err_code;
8923 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_TX_TIMEOUT_ERROR (no payload) to all management
 * sockets.
 */
8926 void mgmt_tx_timeout_error(struct hci_dev *hdev)
8928 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
/* Broadcast the vendor-specific MGMT_EV_MULTI_ADV_STATE_CHANGED event
 * carrying the advertising instance, the reason for the state change and
 * the related connection handle.
 */
8931 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
8932 u8 state_change_reason, u16 connection_handle)
8934 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
8936 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
8937 adv_instance, state_change_reason, connection_handle);
8939 mgmt_ev.adv_instance = adv_instance;
8940 mgmt_ev.state_change_reason = state_change_reason;
8941 mgmt_ev.connection_handle = connection_handle;
8943 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
8944 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
/* MGMT_OP_ENABLE_6LOWPAN handler: toggles the Bluetooth 6LoWPAN support
 * globally via bt_6lowpan_enable()/bt_6lowpan_disable().  Requires a
 * powered, LE-capable controller.
 */
8948 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8949 void *data, u16 len)
8952 struct mgmt_cp_enable_6lowpan *cp = data;
8954 BT_DBG("%s", hdev->name);
8958 if (!hdev_is_powered(hdev)) {
8959 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8960 MGMT_STATUS_NOT_POWERED);
8964 if (!lmp_le_capable(hdev)) {
8965 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8966 MGMT_STATUS_NOT_SUPPORTED);
8970 if (cp->enable_6lowpan)
8971 bt_6lowpan_enable();
8973 bt_6lowpan_disable();
8975 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8976 MGMT_STATUS_SUCCESS, NULL, 0);
8978 hci_dev_unlock(hdev);
/* MGMT_OP_CONNECT_6LOWPAN handler: validates the LE address type and
 * initiates a 6LoWPAN connection to the peer via _bt_6lowpan_connect().
 * Note that hdev->lock is dropped before calling into the 6LoWPAN layer.
 */
8982 static int connect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8983 void *data, u16 len)
8985 struct mgmt_cp_connect_6lowpan *cp = data;
8986 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8989 BT_DBG("%s", hdev->name);
8993 if (!lmp_le_capable(hdev)) {
8994 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8995 MGMT_STATUS_NOT_SUPPORTED);
8999 if (!hdev_is_powered(hdev)) {
9000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9001 MGMT_STATUS_REJECTED);
9005 if (bdaddr_type_is_le(cp->addr.type)) {
9006 if (cp->addr.type == BDADDR_LE_PUBLIC)
9007 addr_type = ADDR_LE_DEV_PUBLIC;
9009 addr_type = ADDR_LE_DEV_RANDOM;
9011 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9012 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9016 hci_dev_unlock(hdev);
9018 /* 6lowpan Connect */
9019 err = _bt_6lowpan_connect(&cp->addr.bdaddr, cp->addr.type);
9024 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9025 MGMT_STATUS_REJECTED, NULL, 0);
9030 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
9033 hci_dev_unlock(hdev);
9037 static int disconnect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
9038 void *data, u16 len)
9040 struct mgmt_cp_disconnect_6lowpan *cp = data;
9041 struct hci_conn *conn = NULL;
9042 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
9045 BT_DBG("%s", hdev->name);
9049 if (!lmp_le_capable(hdev)) {
9050 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
9051 MGMT_STATUS_NOT_SUPPORTED);
9055 if (!hdev_is_powered(hdev)) {
9056 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
9057 MGMT_STATUS_REJECTED);
9061 if (bdaddr_type_is_le(cp->addr.type)) {
9062 if (cp->addr.type == BDADDR_LE_PUBLIC)
9063 addr_type = ADDR_LE_DEV_PUBLIC;
9065 addr_type = ADDR_LE_DEV_RANDOM;
9067 err = mgmt_cmd_complete(sk, hdev->id,
9068 MGMT_OP_DISCONNECT_6LOWPAN,
9069 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9073 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9075 err = mgmt_cmd_complete(sk, hdev->id,
9076 MGMT_OP_DISCONNECT_6LOWPAN,
9077 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
9081 if (conn->dst_type != addr_type) {
9082 err = mgmt_cmd_complete(sk, hdev->id,
9083 MGMT_OP_DISCONNECT_6LOWPAN,
9084 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9088 if (conn->state != BT_CONNECTED) {
9089 err = mgmt_cmd_complete(sk, hdev->id,
9090 MGMT_OP_DISCONNECT_6LOWPAN,
9091 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
9095 /* 6lowpan Disconnect */
9096 err = _bt_6lowpan_disconnect(conn->l2cap_data, cp->addr.type);
9098 err = mgmt_cmd_complete(sk, hdev->id,
9099 MGMT_OP_DISCONNECT_6LOWPAN,
9100 MGMT_STATUS_REJECTED, NULL, 0);
9104 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
9108 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_6LOWPAN_CONN_STATE_CHANGED with the peer address,
 * connection state and the 16-byte network interface name.
 * NOTE(review): the declaration of `buf` is on an elided line — it must
 * be at least sizeof(*ev) bytes; confirm against the full source.
 */
9112 void mgmt_6lowpan_conn_changed(struct hci_dev *hdev, char if_name[16],
9113 bdaddr_t *bdaddr, u8 addr_type, bool connected)
9116 struct mgmt_ev_6lowpan_conn_state_changed *ev = (void *)buf;
9119 memset(buf, 0, sizeof(buf));
9120 bacpy(&ev->addr.bdaddr, bdaddr);
9121 ev->addr.type = addr_type;
9122 ev->connected = connected;
9123 memcpy(ev->ifname, (__u8 *)if_name, 16);
9125 ev_size = sizeof(*ev);
9127 mgmt_event(MGMT_EV_6LOWPAN_CONN_STATE_CHANGED, hdev, ev, ev_size, NULL);
/* Completion hook for HCI LE Read Maximum Data Length.
 * Resolves the pending MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH command:
 * status reply on failure, otherwise a reply carrying the controller's
 * max TX/RX octets and times cached in hdev.
 */
9130 void mgmt_le_read_maximum_data_length_complete(struct hci_dev *hdev, u8 status)
9132 struct mgmt_pending_cmd *cmd;
9133 struct mgmt_rp_le_read_maximum_data_length rp;
9135 BT_DBG("%s status %u", hdev->name, status);
9137 cmd = pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev);
9142 mgmt_cmd_status(cmd->sk, hdev->id,
9143 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9144 mgmt_status(status));
9146 memset(&rp, 0, sizeof(rp));
9148 rp.max_tx_octets = cpu_to_le16(hdev->le_max_tx_len);
9149 rp.max_tx_time = cpu_to_le16(hdev->le_max_tx_time);
9150 rp.max_rx_octets = cpu_to_le16(hdev->le_max_rx_len);
9151 rp.max_rx_time = cpu_to_le16(hdev->le_max_rx_time);
9153 mgmt_cmd_complete(cmd->sk, hdev->id,
9154 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, 0,
9157 mgmt_pending_remove(cmd);
/* MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH handler: queues the HCI LE Read
 * Max Data Length command; the reply is sent from
 * mgmt_le_read_maximum_data_length_complete() when the controller
 * answers.  Only one instance may be pending at a time.
 */
9160 static int read_maximum_le_data_length(struct sock *sk,
9161 struct hci_dev *hdev, void *data, u16 len)
9163 struct mgmt_pending_cmd *cmd;
9166 BT_DBG("read_maximum_le_data_length %s", hdev->name);
9170 if (!hdev_is_powered(hdev)) {
9171 err = mgmt_cmd_status(sk, hdev->id,
9172 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9173 MGMT_STATUS_NOT_POWERED);
9177 if (!lmp_le_capable(hdev)) {
9178 err = mgmt_cmd_status(sk, hdev->id,
9179 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9180 MGMT_STATUS_NOT_SUPPORTED);
9184 if (pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev)) {
9185 err = mgmt_cmd_status(sk, hdev->id,
9186 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9191 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9198 err = hci_send_cmd(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
9200 mgmt_pending_remove(cmd);
9203 hci_dev_unlock(hdev);
/* Completion hook for HCI LE Write Suggested Default Data Length.
 * Resolves the pending MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH
 * command with status or success and removes it from the pending list.
 */
9207 void mgmt_le_write_host_suggested_data_length_complete(struct hci_dev *hdev,
9210 struct mgmt_pending_cmd *cmd;
9212 BT_DBG("status 0x%02x", status);
9216 cmd = pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev);
9218 BT_ERR("cmd not found in the pending list");
9223 mgmt_cmd_status(cmd->sk, hdev->id,
9224 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9225 mgmt_status(status));
9227 mgmt_cmd_complete(cmd->sk, hdev->id,
9228 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9231 mgmt_pending_remove(cmd);
9234 hci_dev_unlock(hdev);
/* MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH handler: forwards the
 * host-suggested default TX octets/time to the controller via HCI LE
 * Write Suggested Default Data Length.  The reply is sent from the
 * matching completion hook.  Only one instance may be pending at a time.
 */
9237 static int write_host_suggested_le_data_length(struct sock *sk,
9238 struct hci_dev *hdev, void *data, u16 len)
9240 struct mgmt_pending_cmd *cmd;
9241 struct mgmt_cp_le_write_host_suggested_data_length *cp = data;
9242 struct hci_cp_le_write_def_data_len hci_data;
9245 BT_DBG("Write host suggested data length request for %s", hdev->name);
9249 if (!hdev_is_powered(hdev)) {
9250 err = mgmt_cmd_status(sk, hdev->id,
9251 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9252 MGMT_STATUS_NOT_POWERED);
9256 if (!lmp_le_capable(hdev)) {
9257 err = mgmt_cmd_status(sk, hdev->id,
9258 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9259 MGMT_STATUS_NOT_SUPPORTED);
9263 if (pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
9264 err = mgmt_cmd_status(sk, hdev->id,
9265 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9270 cmd = mgmt_pending_add(sk, MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
/* Both fields are already little-endian in the mgmt request, so they
 * are copied through without conversion.
 */
9277 hci_data.tx_len = cp->def_tx_octets;
9278 hci_data.tx_time = cp->def_tx_time;
9280 err = hci_send_cmd(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
9281 sizeof(hci_data), &hci_data);
9283 mgmt_pending_remove(cmd);
9286 hci_dev_unlock(hdev);
/* Completion hook for HCI LE Read Suggested Default Data Length.
 * Resolves the pending MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH
 * command with the default TX octets/time cached in hdev.
 */
9291 void mgmt_le_read_host_suggested_data_length_complete(struct hci_dev *hdev,
9294 struct mgmt_pending_cmd *cmd;
9295 struct mgmt_rp_le_read_host_suggested_data_length rp;
9297 BT_DBG("%s status %u", hdev->name, status);
9299 cmd = pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev);
9301 BT_ERR("cmd not found in the pending list");
9306 mgmt_cmd_status(cmd->sk, hdev->id,
9307 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9308 mgmt_status(status));
9310 memset(&rp, 0, sizeof(rp));
9312 rp.def_tx_octets = cpu_to_le16(hdev->le_def_tx_len);
9313 rp.def_tx_time = cpu_to_le16(hdev->le_def_tx_time);
9315 mgmt_cmd_complete(cmd->sk, hdev->id,
9316 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, 0,
9319 mgmt_pending_remove(cmd);
/* MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH handler: queues the HCI LE
 * Read Suggested Default Data Length command; the reply is sent from the
 * matching completion hook.  Only one instance may be pending at a time.
 */
9322 static int read_host_suggested_data_length(struct sock *sk,
9323 struct hci_dev *hdev, void *data, u16 len)
9325 struct mgmt_pending_cmd *cmd;
9328 BT_DBG("read_host_suggested_data_length %s", hdev->name);
9332 if (!hdev_is_powered(hdev)) {
9333 err = mgmt_cmd_status(sk, hdev->id,
9334 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9335 MGMT_STATUS_NOT_POWERED);
9339 if (!lmp_le_capable(hdev)) {
9340 err = mgmt_cmd_status(sk, hdev->id,
9341 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9342 MGMT_STATUS_NOT_SUPPORTED);
9346 if (pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
9347 err = mgmt_cmd_status(sk, hdev->id,
9348 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9353 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9360 err = hci_send_cmd(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
9362 mgmt_pending_remove(cmd);
9365 hci_dev_unlock(hdev);
9369 #endif /* TIZEN_BT */
/* Sanity-check an LTK supplied by userspace: the initiator flag must be
 * 0 or 1, and a random address must be a static address (two most
 * significant bits set, per the Core spec).
 */
9371 static bool ltk_is_valid(struct mgmt_ltk_info *key)
9373 if (key->initiator != 0x00 && key->initiator != 0x01)
9376 switch (key->addr.type) {
9377 case BDADDR_LE_PUBLIC:
9380 case BDADDR_LE_RANDOM:
9381 /* Two most significant bits shall be set */
9382 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* MGMT_OP_LOAD_LONG_TERM_KEYS handler.
 * Validates the key count against the command length and each key via
 * ltk_is_valid(), then replaces the whole SMP LTK store: clears the
 * existing keys and adds each supplied key (skipping blocked ones),
 * translating the mgmt key type to the SMP type/authenticated pair.
 */
9390 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
9391 void *cp_data, u16 len)
9393 struct mgmt_cp_load_long_term_keys *cp = cp_data;
9394 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
9395 sizeof(struct mgmt_ltk_info));
9396 u16 key_count, expected_len;
9399 bt_dev_dbg(hdev, "sock %p", sk);
9401 if (!lmp_le_capable(hdev))
9402 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9403 MGMT_STATUS_NOT_SUPPORTED);
9405 key_count = __le16_to_cpu(cp->key_count);
9406 if (key_count > max_key_count) {
9407 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
9409 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9410 MGMT_STATUS_INVALID_PARAMS);
9413 expected_len = struct_size(cp, keys, key_count);
9414 if (expected_len != len) {
9415 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
9417 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9418 MGMT_STATUS_INVALID_PARAMS);
9421 bt_dev_dbg(hdev, "key_count %u", key_count);
/* Validate everything before touching the key store so a bad entry
 * cannot leave it half-replaced.
 */
9423 for (i = 0; i < key_count; i++) {
9424 struct mgmt_ltk_info *key = &cp->keys[i];
9426 if (!ltk_is_valid(key))
9427 return mgmt_cmd_status(sk, hdev->id,
9428 MGMT_OP_LOAD_LONG_TERM_KEYS,
9429 MGMT_STATUS_INVALID_PARAMS);
9434 hci_smp_ltks_clear(hdev);
9436 for (i = 0; i < key_count; i++) {
9437 struct mgmt_ltk_info *key = &cp->keys[i];
9438 u8 type, authenticated;
9440 if (hci_is_blocked_key(hdev,
9441 HCI_BLOCKED_KEY_TYPE_LTK,
9443 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
9448 switch (key->type) {
9449 case MGMT_LTK_UNAUTHENTICATED:
9450 authenticated = 0x00;
9451 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9453 case MGMT_LTK_AUTHENTICATED:
9454 authenticated = 0x01;
9455 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9457 case MGMT_LTK_P256_UNAUTH:
9458 authenticated = 0x00;
9459 type = SMP_LTK_P256;
9461 case MGMT_LTK_P256_AUTH:
9462 authenticated = 0x01;
9463 type = SMP_LTK_P256;
9465 case MGMT_LTK_P256_DEBUG:
9466 authenticated = 0x00;
9467 type = SMP_LTK_P256_DEBUG;
9473 hci_add_ltk(hdev, &key->addr.bdaddr,
9474 le_addr_type(key->addr.type), type, authenticated,
9475 key->val, key->enc_size, key->ediv, key->rand);
9478 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
9481 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for Get Connection Information.
 * On success fills the reply from the refreshed values cached on the
 * hci_conn; otherwise replies with the INVALID sentinels.  Frees the
 * pending command (it was created with mgmt_pending_new).
 */
9486 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
9488 struct mgmt_pending_cmd *cmd = data;
9489 struct hci_conn *conn = cmd->user_data;
9490 struct mgmt_cp_get_conn_info *cp = cmd->param;
9491 struct mgmt_rp_get_conn_info rp;
9494 bt_dev_dbg(hdev, "err %d", err);
9496 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
9498 status = mgmt_status(err);
9499 if (status == MGMT_STATUS_SUCCESS) {
9500 rp.rssi = conn->rssi;
9501 rp.tx_power = conn->tx_power;
9502 rp.max_tx_power = conn->max_tx_power;
9504 rp.rssi = HCI_RSSI_INVALID;
9505 rp.tx_power = HCI_TX_POWER_INVALID;
9506 rp.max_tx_power = HCI_TX_POWER_INVALID;
9509 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
9512 mgmt_pending_free(cmd);
/* hci_cmd_sync worker for Get Connection Information.
 * Re-validates that the connection still exists and is connected, then
 * refreshes RSSI (always), TX power (for BR/EDR always, for LE only
 * while still unknown) and max TX power (once per connection).
 */
9515 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
9517 struct mgmt_pending_cmd *cmd = data;
9518 struct mgmt_cp_get_conn_info *cp = cmd->param;
9519 struct hci_conn *conn;
9523 /* Make sure we are still connected */
9524 if (cp->addr.type == BDADDR_BREDR)
9525 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9528 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9530 if (!conn || conn->state != BT_CONNECTED)
9531 return MGMT_STATUS_NOT_CONNECTED;
9533 cmd->user_data = conn;
9534 handle = cpu_to_le16(conn->handle);
9536 /* Refresh RSSI each time */
9537 err = hci_read_rssi_sync(hdev, handle);
9539 /* For LE links TX power does not change thus we don't need to
9540 * query for it once value is known.
9542 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
9543 conn->tx_power == HCI_TX_POWER_INVALID))
9544 err = hci_read_tx_power_sync(hdev, handle, 0x00);
9546 /* Max TX power needs to be read only once per connection */
9547 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
9548 err = hci_read_tx_power_sync(hdev, handle, 0x01);
/* MGMT_OP_GET_CONN_INFO handler.
 * Replies with RSSI / TX power / max TX power for a connection.  Values
 * cached on the hci_conn are reused while fresh; once the randomized
 * conn-info age (between hdev->conn_info_min_age and max_age) expires,
 * a sync command is queued to re-query the controller and the reply is
 * sent from get_conn_info_complete().
 */
9553 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
9556 struct mgmt_cp_get_conn_info *cp = data;
9557 struct mgmt_rp_get_conn_info rp;
9558 struct hci_conn *conn;
9559 unsigned long conn_info_age;
9562 bt_dev_dbg(hdev, "sock %p", sk);
9564 memset(&rp, 0, sizeof(rp));
9565 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9566 rp.addr.type = cp->addr.type;
9568 if (!bdaddr_type_is_valid(cp->addr.type))
9569 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9570 MGMT_STATUS_INVALID_PARAMS,
9575 if (!hdev_is_powered(hdev)) {
9576 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9577 MGMT_STATUS_NOT_POWERED, &rp,
9582 if (cp->addr.type == BDADDR_BREDR)
9583 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9586 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9588 if (!conn || conn->state != BT_CONNECTED) {
9589 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9590 MGMT_STATUS_NOT_CONNECTED, &rp,
9595 /* To avoid client trying to guess when to poll again for information we
9596 * calculate conn info age as random value between min/max set in hdev.
9598 conn_info_age = hdev->conn_info_min_age +
9599 prandom_u32_max(hdev->conn_info_max_age -
9600 hdev->conn_info_min_age);
9602 /* Query controller to refresh cached values if they are too old or were
9605 if (time_after(jiffies, conn->conn_info_timestamp +
9606 msecs_to_jiffies(conn_info_age)) ||
9607 !conn->conn_info_timestamp) {
9608 struct mgmt_pending_cmd *cmd;
9610 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
9615 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
9616 cmd, get_conn_info_complete);
9620 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9621 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9624 mgmt_pending_free(cmd);
9629 conn->conn_info_timestamp = jiffies;
9631 /* Cache is valid, just reply with values cached in hci_conn */
9632 rp.rssi = conn->rssi;
9633 rp.tx_power = conn->tx_power;
9634 rp.max_tx_power = conn->max_tx_power;
9636 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9637 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9641 hci_dev_unlock(hdev);
/* hci_cmd_sync completion for Get Clock Information.
 * Builds the reply with the local clock (from hdev) and, when the
 * connection is still known, the piconet clock and accuracy, then frees
 * the pending command.
 */
9645 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
9647 struct mgmt_pending_cmd *cmd = data;
9648 struct mgmt_cp_get_clock_info *cp = cmd->param;
9649 struct mgmt_rp_get_clock_info rp;
9650 struct hci_conn *conn = cmd->user_data;
9651 u8 status = mgmt_status(err);
9653 bt_dev_dbg(hdev, "err %d", err);
9655 memset(&rp, 0, sizeof(rp));
9656 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9657 rp.addr.type = cp->addr.type;
9662 rp.local_clock = cpu_to_le32(hdev->clock);
9665 rp.piconet_clock = cpu_to_le32(conn->clock);
9666 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
9670 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
9673 mgmt_pending_free(cmd);
/* hci_cmd_sync worker for Get Clock Information.
 * First reads the local clock (zeroed hci_cp: handle 0, which 0x00),
 * then re-validates the ACL connection and reads its piconet clock
 * (which 0x01).
 */
9676 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
9678 struct mgmt_pending_cmd *cmd = data;
9679 struct mgmt_cp_get_clock_info *cp = cmd->param;
9680 struct hci_cp_read_clock hci_cp;
9681 struct hci_conn *conn;
9683 memset(&hci_cp, 0, sizeof(hci_cp));
9684 hci_read_clock_sync(hdev, &hci_cp);
9686 /* Make sure connection still exists */
9687 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
9688 if (!conn || conn->state != BT_CONNECTED)
9689 return MGMT_STATUS_NOT_CONNECTED;
9691 cmd->user_data = conn;
9692 hci_cp.handle = cpu_to_le16(conn->handle);
9693 hci_cp.which = 0x01; /* Piconet clock */
9695 return hci_read_clock_sync(hdev, &hci_cp);
/* MGMT_OP_GET_CLOCK_INFO handler.
 * BR/EDR only.  If a peer address is given, the matching ACL connection
 * must be up; the actual clock reads happen in get_clock_info_sync()
 * and the reply is sent from get_clock_info_complete().
 */
9698 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
9701 struct mgmt_cp_get_clock_info *cp = data;
9702 struct mgmt_rp_get_clock_info rp;
9703 struct mgmt_pending_cmd *cmd;
9704 struct hci_conn *conn;
9707 bt_dev_dbg(hdev, "sock %p", sk);
9709 memset(&rp, 0, sizeof(rp));
9710 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9711 rp.addr.type = cp->addr.type;
9713 if (cp->addr.type != BDADDR_BREDR)
9714 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9715 MGMT_STATUS_INVALID_PARAMS,
9720 if (!hdev_is_powered(hdev)) {
9721 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9722 MGMT_STATUS_NOT_POWERED, &rp,
/* BDADDR_ANY means "local clock only"; otherwise the peer must be
 * connected.
 */
9727 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9728 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9730 if (!conn || conn->state != BT_CONNECTED) {
9731 err = mgmt_cmd_complete(sk, hdev->id,
9732 MGMT_OP_GET_CLOCK_INFO,
9733 MGMT_STATUS_NOT_CONNECTED,
9741 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
9745 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
9746 get_clock_info_complete);
9749 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9750 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9753 mgmt_pending_free(cmd);
9758 hci_dev_unlock(hdev);
/* Return whether an LE connection to addr/type exists and is in
 * BT_CONNECTED state.
 */
9762 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
9764 struct hci_conn *conn;
9766 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
9770 if (conn->dst_type != type)
9773 if (conn->state != BT_CONNECTED)
9779 /* This function requires the caller holds hdev->lock */
9780 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
9781 u8 addr_type, u8 auto_connect)
9783 struct hci_conn_params *params;
9785 params = hci_conn_params_add(hdev, addr, addr_type);
9789 if (params->auto_connect == auto_connect)
9792 list_del_init(¶ms->action);
9794 switch (auto_connect) {
9795 case HCI_AUTO_CONN_DISABLED:
9796 case HCI_AUTO_CONN_LINK_LOSS:
9797 /* If auto connect is being disabled when we're trying to
9798 * connect to device, keep connecting.
9800 if (params->explicit_connect)
9801 list_add(¶ms->action, &hdev->pend_le_conns);
9803 case HCI_AUTO_CONN_REPORT:
9804 if (params->explicit_connect)
9805 list_add(¶ms->action, &hdev->pend_le_conns);
9807 list_add(¶ms->action, &hdev->pend_le_reports);
9809 case HCI_AUTO_CONN_DIRECT:
9810 case HCI_AUTO_CONN_ALWAYS:
9811 if (!is_connected(hdev, addr, addr_type))
9812 list_add(¶ms->action, &hdev->pend_le_conns);
9816 params->auto_connect = auto_connect;
9818 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
9819 addr, addr_type, auto_connect);
/* Emit MGMT_EV_DEVICE_ADDED to other management sockets (sk is the
 * originator and is skipped by mgmt_event).
 */
9824 static void device_added(struct sock *sk, struct hci_dev *hdev,
9825 bdaddr_t *bdaddr, u8 type, u8 action)
9827 struct mgmt_ev_device_added ev;
9829 bacpy(&ev.addr.bdaddr, bdaddr);
9830 ev.addr.type = type;
9833 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync worker queued after Add Device: refresh the passive scan
 * so the new entry takes effect.
 */
9836 static int add_device_sync(struct hci_dev *hdev, void *data)
9838 return hci_update_passive_scan_sync(hdev);
/* MGMT_OP_ADD_DEVICE handler.
 * For BR/EDR: only action 0x01 (allow incoming connection) is supported;
 * the peer is added to the accept list and page scanning is updated.
 * For LE: maps the action to an auto-connect policy (0x02 = always,
 * 0x01 = direct, else report-only), requires an identity address,
 * stores/updates the conn params and queues a passive-scan refresh.
 * Emits Device Added and Device Flags Changed events on success.
 */
9841 static int add_device(struct sock *sk, struct hci_dev *hdev,
9842 void *data, u16 len)
9844 struct mgmt_cp_add_device *cp = data;
9845 u8 auto_conn, addr_type;
9846 struct hci_conn_params *params;
9848 u32 current_flags = 0;
9849 u32 supported_flags;
9851 bt_dev_dbg(hdev, "sock %p", sk);
9853 if (!bdaddr_type_is_valid(cp->addr.type) ||
9854 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
9855 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9856 MGMT_STATUS_INVALID_PARAMS,
9857 &cp->addr, sizeof(cp->addr));
9859 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
9860 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9861 MGMT_STATUS_INVALID_PARAMS,
9862 &cp->addr, sizeof(cp->addr));
9866 if (cp->addr.type == BDADDR_BREDR) {
9867 /* Only incoming connections action is supported for now */
9868 if (cp->action != 0x01) {
9869 err = mgmt_cmd_complete(sk, hdev->id,
9871 MGMT_STATUS_INVALID_PARAMS,
9872 &cp->addr, sizeof(cp->addr));
9876 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
9882 hci_update_scan(hdev);
9887 addr_type = le_addr_type(cp->addr.type);
9889 if (cp->action == 0x02)
9890 auto_conn = HCI_AUTO_CONN_ALWAYS;
9891 else if (cp->action == 0x01)
9892 auto_conn = HCI_AUTO_CONN_DIRECT;
9894 auto_conn = HCI_AUTO_CONN_REPORT;
9896 /* Kernel internally uses conn_params with resolvable private
9897 * address, but Add Device allows only identity addresses.
9898 * Make sure it is enforced before calling
9899 * hci_conn_params_lookup.
9901 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9902 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9903 MGMT_STATUS_INVALID_PARAMS,
9904 &cp->addr, sizeof(cp->addr));
9908 /* If the connection parameters don't exist for this device,
9909 * they will be created and configured with defaults.
9911 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
9913 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9914 MGMT_STATUS_FAILED, &cp->addr,
9918 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9921 current_flags = params->flags;
9924 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
9929 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
9930 supported_flags = hdev->conn_flags;
9931 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
9932 supported_flags, current_flags);
9934 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9935 MGMT_STATUS_SUCCESS, &cp->addr,
9939 hci_dev_unlock(hdev);
9943 static void device_removed(struct sock *sk, struct hci_dev *hdev,
9944 bdaddr_t *bdaddr, u8 type)
9946 struct mgmt_ev_device_removed ev;
9948 bacpy(&ev.addr.bdaddr, bdaddr);
9949 ev.addr.type = type;
9951 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* hci_cmd_sync worker for Remove Device: re-evaluate passive scanning
 * after the accept-list / conn-param entry went away.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	int status = hci_update_passive_scan_sync(hdev);

	return status;
}
/* MGMT_OP_REMOVE_DEVICE handler: remove one device (non-zero address) or,
 * for BDADDR_ANY, wipe the whole accept list and every removable LE
 * connection-parameter entry.
 *
 * NOTE(review): extraction has elided lines -- hci_dev_lock(), goto
 * targets, `continue` statements and closing braces are missing, and
 * "¶ms" below is mojibake for "&params". Compare against upstream
 * net/bluetooth/mgmt.c before editing.
 */
9959 static int remove_device(struct sock *sk, struct hci_dev *hdev,
9960 void *data, u16 len)
9962 struct mgmt_cp_remove_device *cp = data;
9965 bt_dev_dbg(hdev, "sock %p", sk);
/* Non-zero address: remove exactly one device. */
9969 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9970 struct hci_conn_params *params;
9973 if (!bdaddr_type_is_valid(cp->addr.type)) {
9974 err = mgmt_cmd_complete(sk, hdev->id,
9975 MGMT_OP_REMOVE_DEVICE,
9976 MGMT_STATUS_INVALID_PARAMS,
9977 &cp->addr, sizeof(cp->addr));
9981 if (cp->addr.type == BDADDR_BREDR) {
9982 err = hci_bdaddr_list_del(&hdev->accept_list,
9986 err = mgmt_cmd_complete(sk, hdev->id,
9987 MGMT_OP_REMOVE_DEVICE,
9988 MGMT_STATUS_INVALID_PARAMS,
9994 hci_update_scan(hdev);
9996 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path: only identity addresses may be passed in. */
10001 addr_type = le_addr_type(cp->addr.type);
10003 /* Kernel internally uses conn_params with resolvable private
10004 * address, but Remove Device allows only identity addresses.
10005 * Make sure it is enforced before calling
10006 * hci_conn_params_lookup.
10008 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
10009 err = mgmt_cmd_complete(sk, hdev->id,
10010 MGMT_OP_REMOVE_DEVICE,
10011 MGMT_STATUS_INVALID_PARAMS,
10012 &cp->addr, sizeof(cp->addr));
10016 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
10019 err = mgmt_cmd_complete(sk, hdev->id,
10020 MGMT_OP_REMOVE_DEVICE,
10021 MGMT_STATUS_INVALID_PARAMS,
10022 &cp->addr, sizeof(cp->addr));
/* Entries the kernel itself created (disabled/explicit) cannot be
 * removed via this command.
 */
10026 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
10027 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
10028 err = mgmt_cmd_complete(sk, hdev->id,
10029 MGMT_OP_REMOVE_DEVICE,
10030 MGMT_STATUS_INVALID_PARAMS,
10031 &cp->addr, sizeof(cp->addr));
10035 list_del(¶ms->action);
10036 list_del(¶ms->list);
10039 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: clear everything (addr.type must be 0 here). */
10041 struct hci_conn_params *p, *tmp;
10042 struct bdaddr_list *b, *btmp;
10044 if (cp->addr.type) {
10045 err = mgmt_cmd_complete(sk, hdev->id,
10046 MGMT_OP_REMOVE_DEVICE,
10047 MGMT_STATUS_INVALID_PARAMS,
10048 &cp->addr, sizeof(cp->addr));
10052 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
10053 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
10054 list_del(&b->list);
10058 hci_update_scan(hdev);
10060 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
10061 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
10063 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Pending explicit connects are downgraded, not deleted. */
10064 if (p->explicit_connect) {
10065 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
10068 list_del(&p->action);
10069 list_del(&p->list);
10073 bt_dev_dbg(hdev, "All LE connection parameters were removed");
10076 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
10079 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
10080 MGMT_STATUS_SUCCESS, &cp->addr,
10083 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler: replace the stored LE connection
 * parameters with the list supplied by userspace. Invalid entries are
 * logged and skipped rather than failing the whole command.
 *
 * NOTE(review): extraction has elided lines (loop closing braces,
 * `continue` statements, the final response length argument); "¶m" is
 * mojibake for "&param". Compare against upstream mgmt.c before editing.
 */
10087 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
10090 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound on entries so expected_len below cannot overflow u16. */
10091 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
10092 sizeof(struct mgmt_conn_param));
10093 u16 param_count, expected_len;
10096 if (!lmp_le_capable(hdev))
10097 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10098 MGMT_STATUS_NOT_SUPPORTED);
10100 param_count = __le16_to_cpu(cp->param_count);
10101 if (param_count > max_param_count) {
10102 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
10104 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10105 MGMT_STATUS_INVALID_PARAMS);
/* The payload must be exactly header + param_count entries. */
10108 expected_len = struct_size(cp, params, param_count);
10109 if (expected_len != len) {
10110 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
10111 expected_len, len);
10112 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10113 MGMT_STATUS_INVALID_PARAMS);
10116 bt_dev_dbg(hdev, "param_count %u", param_count);
10118 hci_dev_lock(hdev);
/* Drop kernel-created disabled entries before loading the new set. */
10120 hci_conn_params_clear_disabled(hdev);
10122 for (i = 0; i < param_count; i++) {
10123 struct mgmt_conn_param *param = &cp->params[i];
10124 struct hci_conn_params *hci_param;
10125 u16 min, max, latency, timeout;
10128 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
10131 if (param->addr.type == BDADDR_LE_PUBLIC) {
10132 addr_type = ADDR_LE_DEV_PUBLIC;
10133 } else if (param->addr.type == BDADDR_LE_RANDOM) {
10134 addr_type = ADDR_LE_DEV_RANDOM;
10136 bt_dev_err(hdev, "ignoring invalid connection parameters");
10140 min = le16_to_cpu(param->min_interval);
10141 max = le16_to_cpu(param->max_interval);
10142 latency = le16_to_cpu(param->latency);
10143 timeout = le16_to_cpu(param->timeout);
10145 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
10146 min, max, latency, timeout);
/* Range-check against the Core Spec limits; skip bad entries. */
10148 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
10149 bt_dev_err(hdev, "ignoring invalid connection parameters");
10153 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
10156 bt_dev_err(hdev, "failed to add connection parameters");
10160 hci_param->conn_min_interval = min;
10161 hci_param->conn_max_interval = max;
10162 hci_param->conn_latency = latency;
10163 hci_param->supervision_timeout = timeout;
10166 hci_dev_unlock(hdev);
10168 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler: toggle the HCI_EXT_CONFIGURED
 * flag on a controller that requires external configuration, and move
 * the index between the configured and unconfigured lists if its
 * configuration state changed as a result.
 *
 * NOTE(review): extraction has elided lines (hci_dev_lock-protected `if`
 * heads, goto/unlock paths, closing braces). Compare against upstream
 * mgmt.c before editing.
 */
10172 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
10173 void *data, u16 len)
10175 struct mgmt_cp_set_external_config *cp = data;
10179 bt_dev_dbg(hdev, "sock %p", sk);
/* Not allowed while the controller is powered. */
10181 if (hdev_is_powered(hdev))
10182 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10183 MGMT_STATUS_REJECTED);
10185 if (cp->config != 0x00 && cp->config != 0x01)
10186 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10187 MGMT_STATUS_INVALID_PARAMS);
/* Only meaningful on controllers flagged as externally configurable. */
10189 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
10190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10191 MGMT_STATUS_NOT_SUPPORTED);
10193 hci_dev_lock(hdev);
10196 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
10198 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
10200 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
/* Below runs only when the flag actually changed. */
10207 err = new_options(hdev, sk);
10209 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
10210 mgmt_index_removed(hdev);
10212 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
10213 hci_dev_set_flag(hdev, HCI_CONFIG);
10214 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
10216 queue_work(hdev->req_workqueue, &hdev->power_on);
10218 set_bit(HCI_RAW, &hdev->flags);
10219 mgmt_index_added(hdev);
10224 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler: store a public address for a
 * controller whose driver provides a set_bdaddr callback; if that makes
 * the controller fully configured, re-register it as a regular index
 * and power it on.
 *
 * NOTE(review): extraction has elided lines (goto/unlock paths, the
 * `if (changed)` head before new_options, closing braces). Compare
 * against upstream mgmt.c before editing.
 */
10228 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
10229 void *data, u16 len)
10231 struct mgmt_cp_set_public_address *cp = data;
10235 bt_dev_dbg(hdev, "sock %p", sk);
10237 if (hdev_is_powered(hdev))
10238 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10239 MGMT_STATUS_REJECTED);
/* The all-zero address is not a valid public address. */
10241 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
10242 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10243 MGMT_STATUS_INVALID_PARAMS);
/* Requires driver support for programming the address. */
10245 if (!hdev->set_bdaddr)
10246 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10247 MGMT_STATUS_NOT_SUPPORTED);
10249 hci_dev_lock(hdev);
10251 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
10252 bacpy(&hdev->public_addr, &cp->bdaddr);
10254 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
10261 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
10262 err = new_options(hdev, sk);
/* Setting the address may complete configuration: swap the index over
 * from the unconfigured to the configured list and power on.
 */
10264 if (is_configured(hdev)) {
10265 mgmt_index_removed(hdev);
10267 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
10269 hci_dev_set_flag(hdev, HCI_CONFIG);
10270 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
10272 queue_work(hdev->req_workqueue, &hdev->power_on);
10276 hci_dev_unlock(hdev);
/* Emit MGMT Device Name Update for a remote BR/EDR device whose name was
 * resolved (Tizen extension event).
 *
 * NOTE(review): extraction elided the local `buf` declaration, the
 * memset and the name-length argument; compare against the Tizen
 * mgmt.c before editing.
 */
10281 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
10285 struct mgmt_ev_device_name_update *ev = (void *)buf;
10291 bacpy(&ev->addr.bdaddr, bdaddr);
10292 ev->addr.type = BDADDR_BREDR;
/* Name is carried as an EIR Complete Local Name field. */
10294 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
10297 ev->eir_len = cpu_to_le16(eir_len);
10299 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
10300 sizeof(*ev) + eir_len, NULL);
10303 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10304 u8 link_type, u8 addr_type, u8 status)
10306 struct mgmt_ev_conn_update_failed ev;
10308 bacpy(&ev.addr.bdaddr, bdaddr);
10309 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10310 ev.status = status;
10312 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
10313 &ev, sizeof(ev), NULL);
10316 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
10317 u8 link_type, u8 addr_type, u16 conn_interval,
10318 u16 conn_latency, u16 supervision_timeout)
10320 struct mgmt_ev_conn_updated ev;
10322 bacpy(&ev.addr.bdaddr, bdaddr);
10323 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10324 ev.conn_interval = cpu_to_le16(conn_interval);
10325 ev.conn_latency = cpu_to_le16(conn_latency);
10326 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
10328 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
10329 &ev, sizeof(ev), NULL);
/* le device found event - Pass adv type */
/* Emit MGMT LE Device Found (Tizen extension) carrying EIR + scan
 * response data plus the raw advertising type.
 *
 * NOTE(review): extraction elided the `buf` declaration, `return`
 * statements and the `if (eir_len > 0)` guard; compare against the
 * Tizen mgmt.c before editing.
 */
10333 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10334 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
10335 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
10338 struct mgmt_ev_le_device_found *ev = (void *)buf;
/* Only report while some form of discovery is active. */
10341 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
10344 /* Make sure that the buffer is big enough. The 5 extra bytes
10345 * are for the potential CoD field.
10347 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
10350 memset(buf, 0, sizeof(buf));
10352 bacpy(&ev->addr.bdaddr, bdaddr);
10353 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10355 ev->flags = cpu_to_le32(flags);
10356 ev->adv_type = adv_type;
10359 memcpy(ev->eir, eir, eir_len);
/* Append Class of Device only if the EIR does not already carry one. */
10361 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
10362 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10365 if (scan_rsp_len > 0)
10366 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
10368 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10369 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
10371 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Completion handler for the OOB data HCI request queued by
 * read_local_ssp_oob_req(): translate the controller's reply (legacy
 * P-192 or secure-connections P-192+P-256 variants) into the
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA response and broadcast Local OOB Data
 * Updated to subscribed sockets.
 *
 * NOTE(review): extraction has elided lines (goto targets such as
 * `done`/`send_rsp`, NULL initialisations of h192/r192/h256/r256, and
 * several branch heads). Compare against upstream mgmt.c before editing.
 */
10375 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
10378 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
10379 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
10380 u8 *h192, *r192, *h256, *r256;
10381 struct mgmt_pending_cmd *cmd = data;
10382 struct sk_buff *skb = cmd->skb;
10383 u8 status = mgmt_status(err);
/* Bail if the pending command was already cancelled/replaced. */
10386 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
10391 status = MGMT_STATUS_FAILED;
10392 else if (IS_ERR(skb))
10393 status = mgmt_status(PTR_ERR(skb));
10395 status = mgmt_status(skb->data[0]);
10398 bt_dev_dbg(hdev, "status %u", status);
10400 mgmt_cp = cmd->param;
10403 status = mgmt_status(status);
/* Legacy controller: reply carries P-192 hash/rand only. */
10410 } else if (!bredr_sc_enabled(hdev)) {
10411 struct hci_rp_read_local_oob_data *rp;
10413 if (skb->len != sizeof(*rp)) {
10414 status = MGMT_STATUS_FAILED;
10417 status = MGMT_STATUS_SUCCESS;
10418 rp = (void *)skb->data;
/* 5 bytes CoD TLV + two 18-byte (type+len+16) OOB TLVs. */
10420 eir_len = 5 + 18 + 18;
/* Secure Connections controller: extended reply with P-256 too. */
10427 struct hci_rp_read_local_oob_ext_data *rp;
10429 if (skb->len != sizeof(*rp)) {
10430 status = MGMT_STATUS_FAILED;
10433 status = MGMT_STATUS_SUCCESS;
10434 rp = (void *)skb->data;
/* SC-only mode: expose only the P-256 values. */
10436 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
10437 eir_len = 5 + 18 + 18;
10441 eir_len = 5 + 18 + 18 + 18 + 18;
10442 h192 = rp->hash192;
10443 r192 = rp->rand192;
10446 h256 = rp->hash256;
10447 r256 = rp->rand256;
10451 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
/* Build the response EIR: CoD first, then whichever OOB values apply. */
10458 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
10459 hdev->dev_class, 3);
10461 if (h192 && r192) {
10462 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10463 EIR_SSP_HASH_C192, h192, 16);
10464 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10465 EIR_SSP_RAND_R192, r192, 16);
10468 if (h256 && r256) {
10469 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10470 EIR_SSP_HASH_C256, h256, 16);
10471 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10472 EIR_SSP_RAND_R256, r256, 16);
10476 mgmt_rp->type = mgmt_cp->type;
10477 mgmt_rp->eir_len = cpu_to_le16(eir_len);
10479 err = mgmt_cmd_complete(cmd->sk, hdev->id,
10480 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
10481 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
10482 if (err < 0 || status)
/* Also broadcast to other sockets subscribed to OOB data events. */
10485 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
10487 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10488 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
10489 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
10491 if (skb && !IS_ERR(skb))
10495 mgmt_pending_remove(cmd);
/* Queue the HCI Read Local OOB Data request for the extended-data MGMT
 * command; completion is delivered via read_local_oob_ext_data_complete.
 *
 * NOTE(review): extraction elided the !cmd (-ENOMEM) check, the kmalloc
 * arguments and the final return; compare against upstream mgmt.c.
 */
10498 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
10499 struct mgmt_cp_read_local_oob_ext_data *cp)
10501 struct mgmt_pending_cmd *cmd;
10504 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
10509 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
10510 read_local_oob_ext_data_complete);
/* On queueing failure the pending entry must be dropped again. */
10513 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler: return local out-of-band
 * pairing data for either BR/EDR (delegated to the controller via
 * read_local_ssp_oob_req) or LE (address, role, SC confirm/random and
 * flags assembled locally).
 *
 * NOTE(review): extraction has elided lines (goto targets `complete`/
 * `done`, !rp allocation check, rp->eir memset, several branch heads
 * and the final kfree/return). Compare against upstream mgmt.c.
 */
10520 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
10521 void *data, u16 data_len)
10523 struct mgmt_cp_read_local_oob_ext_data *cp = data;
10524 struct mgmt_rp_read_local_oob_ext_data *rp;
10527 u8 status, flags, role, addr[7], hash[16], rand[16];
10530 bt_dev_dbg(hdev, "sock %p", sk);
/* cp->type is a bitmask of address types; only the two combinations
 * below are valid requests.
 */
10532 if (hdev_is_powered(hdev)) {
10533 switch (cp->type) {
10534 case BIT(BDADDR_BREDR):
10535 status = mgmt_bredr_support(hdev);
10541 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10542 status = mgmt_le_support(hdev);
/* Worst-case LE EIR: addr(9) + role(3) + hash(18) + rand(18) + flags(3). */
10546 eir_len = 9 + 3 + 18 + 18 + 3;
10549 status = MGMT_STATUS_INVALID_PARAMS;
10554 status = MGMT_STATUS_NOT_POWERED;
10558 rp_len = sizeof(*rp) + eir_len;
10559 rp = kmalloc(rp_len, GFP_ATOMIC);
10563 if (!status && !lmp_ssp_capable(hdev)) {
10564 status = MGMT_STATUS_NOT_SUPPORTED;
10571 hci_dev_lock(hdev);
10574 switch (cp->type) {
10575 case BIT(BDADDR_BREDR):
/* BR/EDR with SSP enabled: fetch OOB data from the controller
 * asynchronously; the response is sent from the completion handler.
 */
10576 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10577 err = read_local_ssp_oob_req(hdev, sk, cp);
10578 hci_dev_unlock(hdev);
10582 status = MGMT_STATUS_FAILED;
10585 eir_len = eir_append_data(rp->eir, eir_len,
10587 hdev->dev_class, 3);
10590 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10591 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
10592 smp_generate_oob(hdev, hash, rand) < 0) {
10593 hci_dev_unlock(hdev);
10594 status = MGMT_STATUS_FAILED;
10598 /* This should return the active RPA, but since the RPA
10599 * is only programmed on demand, it is really hard to fill
10600 * this in at the moment. For now disallow retrieving
10601 * local out-of-band data when privacy is in use.
10603 * Returning the identity address will not help here since
10604 * pairing happens before the identity resolving key is
10605 * known and thus the connection establishment happens
10606 * based on the RPA and not the identity address.
10608 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
10609 hci_dev_unlock(hdev);
10610 status = MGMT_STATUS_REJECTED;
/* Choose static vs public address the same way advertising does. */
10614 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
10615 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
10616 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
10617 bacmp(&hdev->static_addr, BDADDR_ANY))) {
10618 memcpy(addr, &hdev->static_addr, 6);
10621 memcpy(addr, &hdev->bdaddr, 6);
10625 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
10626 addr, sizeof(addr));
10628 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
10633 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
10634 &role, sizeof(role));
10636 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
10637 eir_len = eir_append_data(rp->eir, eir_len,
10639 hash, sizeof(hash));
10641 eir_len = eir_append_data(rp->eir, eir_len,
10643 rand, sizeof(rand));
10646 flags = mgmt_get_adv_discov_flags(hdev);
10648 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
10649 flags |= LE_AD_NO_BREDR;
10651 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
10652 &flags, sizeof(flags));
10656 hci_dev_unlock(hdev);
10658 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
10660 status = MGMT_STATUS_SUCCESS;
10663 rp->type = cp->type;
10664 rp->eir_len = cpu_to_le16(eir_len);
10666 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
10667 status, rp, sizeof(*rp) + eir_len);
10668 if (err < 0 || status)
10671 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10672 rp, sizeof(*rp) + eir_len,
10673 HCI_MGMT_OOB_DATA_EVENTS, sk);
10681 static u32 get_supported_adv_flags(struct hci_dev *hdev)
10685 flags |= MGMT_ADV_FLAG_CONNECTABLE;
10686 flags |= MGMT_ADV_FLAG_DISCOV;
10687 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
10688 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
10689 flags |= MGMT_ADV_FLAG_APPEARANCE;
10690 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
10691 flags |= MGMT_ADV_PARAM_DURATION;
10692 flags |= MGMT_ADV_PARAM_TIMEOUT;
10693 flags |= MGMT_ADV_PARAM_INTERVALS;
10694 flags |= MGMT_ADV_PARAM_TX_POWER;
10695 flags |= MGMT_ADV_PARAM_SCAN_RSP;
10697 /* In extended adv TX_POWER returned from Set Adv Param
10698 * will be always valid.
10700 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
10701 flags |= MGMT_ADV_FLAG_TX_POWER;
10703 if (ext_adv_capable(hdev)) {
10704 flags |= MGMT_ADV_FLAG_SEC_1M;
10705 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
10706 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
10708 if (hdev->le_features[1] & HCI_LE_PHY_2M)
10709 flags |= MGMT_ADV_FLAG_SEC_2M;
10711 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
10712 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler: report supported advertising flags,
 * data-size limits, the maximum/current number of instances and the list
 * of externally visible instance numbers.
 *
 * NOTE(review): extraction elided the -ENOMEM return, loop braces and
 * the final kfree/return; compare against upstream mgmt.c.
 */
10718 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
10719 void *data, u16 data_len)
10721 struct mgmt_rp_read_adv_features *rp;
10724 struct adv_info *adv_instance;
10725 u32 supported_flags;
10728 bt_dev_dbg(hdev, "sock %p", sk);
10730 if (!lmp_le_capable(hdev))
10731 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10732 MGMT_STATUS_REJECTED);
10734 hci_dev_lock(hdev);
/* One trailing byte per instance for the instance-number array. */
10736 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
10737 rp = kmalloc(rp_len, GFP_ATOMIC);
10739 hci_dev_unlock(hdev);
10743 supported_flags = get_supported_adv_flags(hdev);
10745 rp->supported_flags = cpu_to_le32(supported_flags);
10746 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
10747 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
10748 rp->max_instances = hdev->le_num_of_adv_sets;
10749 rp->num_instances = hdev->adv_instance_cnt;
10751 instance = rp->instance;
10752 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
10753 /* Only instances 1-le_num_of_adv_sets are externally visible */
10754 if (adv_instance->instance <= hdev->adv_instance_cnt) {
10755 *instance = adv_instance->instance;
/* Internal-only instances shrink the advertised count. */
10758 rp->num_instances--;
10763 hci_dev_unlock(hdev);
10765 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10766 MGMT_STATUS_SUCCESS, rp, rp_len);
10773 static u8 calculate_name_len(struct hci_dev *hdev)
10775 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
10777 return eir_append_local_name(hdev, buf, 0);
/* Maximum user-supplied TLV bytes for adv data (is_adv_data) or scan
 * response, after subtracting space for fields the kernel injects
 * (Flags, TX power, local name, appearance) per @adv_flags.
 *
 * NOTE(review): extraction elided the is_adv_data branch structure,
 * the numeric `max_len -= ...` amounts and the return; compare
 * against upstream mgmt.c before editing.
 */
10780 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
10783 u8 max_len = HCI_MAX_AD_LENGTH;
/* Kernel-managed Flags field consumes part of the adv data budget. */
10786 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
10787 MGMT_ADV_FLAG_LIMITED_DISCOV |
10788 MGMT_ADV_FLAG_MANAGED_FLAGS))
10791 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
/* Scan-response side: reserve room for name and appearance TLVs. */
10794 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
10795 max_len -= calculate_name_len(hdev);
10797 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
10804 static bool flags_managed(u32 adv_flags)
10806 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
10807 MGMT_ADV_FLAG_LIMITED_DISCOV |
10808 MGMT_ADV_FLAG_MANAGED_FLAGS);
10811 static bool tx_power_managed(u32 adv_flags)
10813 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
10816 static bool name_managed(u32 adv_flags)
10818 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
10821 static bool appearance_managed(u32 adv_flags)
10823 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising / scan-response TLV data: total
 * length within budget, well-formed length/type framing, and no fields
 * the kernel manages itself (Flags, TX power, names, Appearance).
 *
 * NOTE(review): extraction elided the `len > max_len` check, the
 * cur_len assignment, the `return false` bodies and the final
 * `return true`; compare against upstream mgmt.c before editing.
 */
10826 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
10827 u8 len, bool is_adv_data)
10832 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
10837 /* Make sure that the data is correctly formatted. */
10838 for (i = 0; i < len; i += (cur_len + 1)) {
/* Reject fields the kernel generates on the user's behalf. */
10844 if (data[i + 1] == EIR_FLAGS &&
10845 (!is_adv_data || flags_managed(adv_flags)))
10848 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
10851 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
10854 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
10857 if (data[i + 1] == EIR_APPEARANCE &&
10858 appearance_managed(adv_flags))
10861 /* If the current field length would exceed the total data
10862 * length, then it's invalid.
10864 if (i + cur_len >= len)
10871 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
10873 u32 supported_flags, phy_flags;
10875 /* The current implementation only supports a subset of the specified
10876 * flags. Also need to check mutual exclusiveness of sec flags.
10878 supported_flags = get_supported_adv_flags(hdev);
10879 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
10880 if (adv_flags & ~supported_flags ||
10881 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
10887 static bool adv_busy(struct hci_dev *hdev)
10889 return pending_find(MGMT_OP_SET_LE, hdev);
/* Shared failure cleanup for Add Advertising / Add Ext Adv Data: when
 * @err indicates failure, find the still-pending instance, tear it down
 * and broadcast Advertising Removed.
 *
 * NOTE(review): extraction elided the `if (!err)`/pending checks and
 * the loop/break structure; compare against upstream mgmt.c.
 */
10892 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
10895 struct adv_info *adv, *n;
10897 bt_dev_dbg(hdev, "err %d", err);
10899 hci_dev_lock(hdev);
10901 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
/* On success the instance merely transitions out of "pending". */
10908 adv->pending = false;
10912 instance = adv->instance;
10914 if (hdev->cur_adv_instance == instance)
10915 cancel_adv_timeout(hdev);
10917 hci_remove_adv_instance(hdev, instance);
10918 mgmt_advertising_removed(sk, hdev, instance);
10921 hci_dev_unlock(hdev);
10924 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
10926 struct mgmt_pending_cmd *cmd = data;
10927 struct mgmt_cp_add_advertising *cp = cmd->param;
10928 struct mgmt_rp_add_advertising rp;
10930 memset(&rp, 0, sizeof(rp));
10932 rp.instance = cp->instance;
10935 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10938 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10939 mgmt_status(err), &rp, sizeof(rp));
10941 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10943 mgmt_pending_free(cmd);
10946 static int add_advertising_sync(struct hci_dev *hdev, void *data)
10948 struct mgmt_pending_cmd *cmd = data;
10949 struct mgmt_cp_add_advertising *cp = cmd->param;
10951 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* MGMT_OP_ADD_ADVERTISING handler: register (or replace) advertising
 * instance cp->instance with the supplied flags, adv data and scan
 * response, then schedule it if the controller state allows.
 *
 * NOTE(review): extraction has elided lines (hci_dev_lock-protected
 * goto/unlock paths, IS_ERR(adv) check, `if (next_instance)` head,
 * closing braces and the final return). Compare against upstream
 * mgmt.c before editing.
 */
10954 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10955 void *data, u16 data_len)
10957 struct mgmt_cp_add_advertising *cp = data;
10958 struct mgmt_rp_add_advertising rp;
10961 u16 timeout, duration;
10962 unsigned int prev_instance_cnt;
10963 u8 schedule_instance = 0;
10964 struct adv_info *adv, *next_instance;
10966 struct mgmt_pending_cmd *cmd;
10968 bt_dev_dbg(hdev, "sock %p", sk);
10970 status = mgmt_le_support(hdev);
10972 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Instance numbers are 1-based and bounded by the controller's sets. */
10975 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10976 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10977 MGMT_STATUS_INVALID_PARAMS);
/* Payload must be exactly header + adv data + scan rsp data. */
10979 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10980 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10981 MGMT_STATUS_INVALID_PARAMS);
10983 flags = __le32_to_cpu(cp->flags);
10984 timeout = __le16_to_cpu(cp->timeout);
10985 duration = __le16_to_cpu(cp->duration);
10987 if (!requested_adv_flags_are_valid(hdev, flags))
10988 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10989 MGMT_STATUS_INVALID_PARAMS);
10991 hci_dev_lock(hdev);
/* A timeout only makes sense on a powered controller. */
10993 if (timeout && !hdev_is_powered(hdev)) {
10994 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10995 MGMT_STATUS_REJECTED);
10999 if (adv_busy(hdev)) {
11000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11005 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
11006 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
11007 cp->scan_rsp_len, false)) {
11008 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11009 MGMT_STATUS_INVALID_PARAMS);
11013 prev_instance_cnt = hdev->adv_instance_cnt;
11015 adv = hci_add_adv_instance(hdev, cp->instance, flags,
11016 cp->adv_data_len, cp->data,
11018 cp->data + cp->adv_data_len,
11020 HCI_ADV_TX_POWER_NO_PREFERENCE,
11021 hdev->le_adv_min_interval,
11022 hdev->le_adv_max_interval, 0);
11024 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11025 MGMT_STATUS_FAILED);
11029 /* Only trigger an advertising added event if a new instance was
11032 if (hdev->adv_instance_cnt > prev_instance_cnt)
11033 mgmt_advertising_added(sk, hdev, cp->instance);
11035 if (hdev->cur_adv_instance == cp->instance) {
11036 /* If the currently advertised instance is being changed then
11037 * cancel the current advertising and schedule the next
11038 * instance. If there is only one instance then the overridden
11039 * advertising data will be visible right away.
11041 cancel_adv_timeout(hdev);
11043 next_instance = hci_get_next_instance(hdev, cp->instance);
11045 schedule_instance = next_instance->instance;
11046 } else if (!hdev->adv_instance_timeout) {
11047 /* Immediately advertise the new instance if no other
11048 * instance is currently being advertised.
11050 schedule_instance = cp->instance;
11053 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
11054 * there is no instance to be advertised then we have no HCI
11055 * communication to make. Simply return.
11057 if (!hdev_is_powered(hdev) ||
11058 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
11059 !schedule_instance) {
11060 rp.instance = cp->instance;
11061 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11062 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11066 /* We're good to go, update advertising data, parameters, and start
11069 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
/* The queued work schedules whatever instance was decided above. */
11076 cp->instance = schedule_instance;
11078 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
11079 add_advertising_complete);
11081 mgmt_pending_free(cmd);
11084 hci_dev_unlock(hdev);
/* Completion callback for the queued Add Ext Adv Params work: report the
 * selected TX power and remaining data budgets on success, or tear the
 * instance down and signal its removal on failure.
 *
 * NOTE(review): extraction has elided lines (the !adv goto, the
 * `if (err)` branch heads and closing braces). Compare against
 * upstream mgmt.c before editing.
 */
11089 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
11092 struct mgmt_pending_cmd *cmd = data;
11093 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
11094 struct mgmt_rp_add_ext_adv_params rp;
11095 struct adv_info *adv;
11098 BT_DBG("%s", hdev->name);
11100 hci_dev_lock(hdev);
11102 adv = hci_find_adv_instance(hdev, cp->instance);
11106 rp.instance = cp->instance;
11107 rp.tx_power = adv->tx_power;
11109 /* While we're at it, inform userspace of the available space for this
11110 * advertisement, given the flags that will be used.
11112 flags = __le32_to_cpu(cp->flags);
11113 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11114 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11117 /* If this advertisement was previously advertising and we
11118 * failed to update it, we signal that it has been removed and
11119 * delete its structure
11122 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
11124 hci_remove_adv_instance(hdev, cp->instance);
11126 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11129 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11130 mgmt_status(err), &rp, sizeof(rp));
11135 mgmt_pending_free(cmd);
11137 hci_dev_unlock(hdev);
11140 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
11142 struct mgmt_pending_cmd *cmd = data;
11143 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
11145 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
/* MGMT_OP_ADD_EXT_ADV_PARAMS handler: create an advertising instance
 * with parameters only (no data yet); the companion Add Ext Adv Data
 * command supplies the payload. On ext-adv controllers the parameters
 * are programmed asynchronously; otherwise defaults are reported
 * immediately.
 *
 * NOTE(review): extraction has elided lines (hci_dev_lock-protected
 * goto/unlock paths, IS_ERR(adv)/!cmd checks, tx_power source field,
 * closing braces and the final return). Compare against upstream
 * mgmt.c before editing.
 */
11148 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
11149 void *data, u16 data_len)
11151 struct mgmt_cp_add_ext_adv_params *cp = data;
11152 struct mgmt_rp_add_ext_adv_params rp;
11153 struct mgmt_pending_cmd *cmd = NULL;
11154 struct adv_info *adv;
11155 u32 flags, min_interval, max_interval;
11156 u16 timeout, duration;
11161 BT_DBG("%s", hdev->name);
11163 status = mgmt_le_support(hdev);
11165 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11168 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11169 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11170 MGMT_STATUS_INVALID_PARAMS);
11172 /* The purpose of breaking add_advertising into two separate MGMT calls
11173 * for params and data is to allow more parameters to be added to this
11174 * structure in the future. For this reason, we verify that we have the
11175 * bare minimum structure we know of when the interface was defined. Any
11176 * extra parameters we don't know about will be ignored in this request.
11178 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
11179 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11180 MGMT_STATUS_INVALID_PARAMS);
11182 flags = __le32_to_cpu(cp->flags);
11184 if (!requested_adv_flags_are_valid(hdev, flags))
11185 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11186 MGMT_STATUS_INVALID_PARAMS);
11188 hci_dev_lock(hdev);
11190 /* In new interface, we require that we are powered to register */
11191 if (!hdev_is_powered(hdev)) {
11192 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11193 MGMT_STATUS_REJECTED);
11197 if (adv_busy(hdev)) {
11198 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11203 /* Parse defined parameters from request, use defaults otherwise */
11204 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
11205 __le16_to_cpu(cp->timeout) : 0;
11207 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
11208 __le16_to_cpu(cp->duration) :
11209 hdev->def_multi_adv_rotation_duration;
11211 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
11212 __le32_to_cpu(cp->min_interval) :
11213 hdev->le_adv_min_interval;
11215 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
11216 __le32_to_cpu(cp->max_interval) :
11217 hdev->le_adv_max_interval;
11219 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
11221 HCI_ADV_TX_POWER_NO_PREFERENCE;
11223 /* Create advertising instance with no advertising or response data */
11224 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
11225 timeout, duration, tx_power, min_interval,
11229 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11230 MGMT_STATUS_FAILED);
11234 /* Submit request for advertising params if ext adv available */
11235 if (ext_adv_capable(hdev)) {
11236 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
/* Allocation failure: undo the instance created above. */
11240 hci_remove_adv_instance(hdev, cp->instance);
11244 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
11245 add_ext_adv_params_complete);
11247 mgmt_pending_free(cmd);
/* Legacy advertising: nothing to program now; answer immediately. */
11249 rp.instance = cp->instance;
11250 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
11251 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11252 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11253 err = mgmt_cmd_complete(sk, hdev->id,
11254 MGMT_OP_ADD_EXT_ADV_PARAMS,
11255 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11259 hci_dev_unlock(hdev);
11264 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
11266 struct mgmt_pending_cmd *cmd = data;
11267 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
11268 struct mgmt_rp_add_advertising rp;
11270 add_adv_complete(hdev, cmd->sk, cp->instance, err);
11272 memset(&rp, 0, sizeof(rp));
11274 rp.instance = cp->instance;
11277 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11280 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11281 mgmt_status(err), &rp, sizeof(rp));
11283 mgmt_pending_free(cmd);
11286 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
11288 struct mgmt_pending_cmd *cmd = data;
11289 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
11292 if (ext_adv_capable(hdev)) {
11293 err = hci_update_adv_data_sync(hdev, cp->instance);
11297 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
11301 return hci_enable_ext_advertising_sync(hdev, cp->instance);
11304 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
11307 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
11310 struct mgmt_cp_add_ext_adv_data *cp = data;
11311 struct mgmt_rp_add_ext_adv_data rp;
11312 u8 schedule_instance = 0;
11313 struct adv_info *next_instance;
11314 struct adv_info *adv_instance;
11316 struct mgmt_pending_cmd *cmd;
11318 BT_DBG("%s", hdev->name);
11320 hci_dev_lock(hdev);
11322 adv_instance = hci_find_adv_instance(hdev, cp->instance);
11324 if (!adv_instance) {
11325 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11326 MGMT_STATUS_INVALID_PARAMS);
11330 /* In new interface, we require that we are powered to register */
11331 if (!hdev_is_powered(hdev)) {
11332 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11333 MGMT_STATUS_REJECTED);
11334 goto clear_new_instance;
11337 if (adv_busy(hdev)) {
11338 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11340 goto clear_new_instance;
11343 /* Validate new data */
11344 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
11345 cp->adv_data_len, true) ||
11346 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
11347 cp->adv_data_len, cp->scan_rsp_len, false)) {
11348 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11349 MGMT_STATUS_INVALID_PARAMS);
11350 goto clear_new_instance;
11353 /* Set the data in the advertising instance */
11354 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
11355 cp->data, cp->scan_rsp_len,
11356 cp->data + cp->adv_data_len);
11358 /* If using software rotation, determine next instance to use */
11359 if (hdev->cur_adv_instance == cp->instance) {
11360 /* If the currently advertised instance is being changed
11361 * then cancel the current advertising and schedule the
11362 * next instance. If there is only one instance then the
11363 * overridden advertising data will be visible right
11366 cancel_adv_timeout(hdev);
11368 next_instance = hci_get_next_instance(hdev, cp->instance);
11370 schedule_instance = next_instance->instance;
11371 } else if (!hdev->adv_instance_timeout) {
11372 /* Immediately advertise the new instance if no other
11373 * instance is currently being advertised.
11375 schedule_instance = cp->instance;
11378 /* If the HCI_ADVERTISING flag is set or there is no instance to
11379 * be advertised then we have no HCI communication to make.
11382 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
11383 if (adv_instance->pending) {
11384 mgmt_advertising_added(sk, hdev, cp->instance);
11385 adv_instance->pending = false;
11387 rp.instance = cp->instance;
11388 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11389 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11393 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
11397 goto clear_new_instance;
11400 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
11401 add_ext_adv_data_complete);
11403 mgmt_pending_free(cmd);
11404 goto clear_new_instance;
11407 /* We were successful in updating data, so trigger advertising_added
11408 * event if this is an instance that wasn't previously advertising. If
11409 * a failure occurs in the requests we initiated, we will remove the
11410 * instance again in add_advertising_complete
11412 if (adv_instance->pending)
11413 mgmt_advertising_added(sk, hdev, cp->instance);
11417 clear_new_instance:
11418 hci_remove_adv_instance(hdev, cp->instance);
11421 hci_dev_unlock(hdev);
11426 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
11429 struct mgmt_pending_cmd *cmd = data;
11430 struct mgmt_cp_remove_advertising *cp = cmd->param;
11431 struct mgmt_rp_remove_advertising rp;
11433 bt_dev_dbg(hdev, "err %d", err);
11435 memset(&rp, 0, sizeof(rp));
11436 rp.instance = cp->instance;
11439 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11442 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11443 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11445 mgmt_pending_free(cmd);
11448 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
11450 struct mgmt_pending_cmd *cmd = data;
11451 struct mgmt_cp_remove_advertising *cp = cmd->param;
11454 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
11458 if (list_empty(&hdev->adv_instances))
11459 err = hci_disable_advertising_sync(hdev);
11464 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
11465 void *data, u16 data_len)
11467 struct mgmt_cp_remove_advertising *cp = data;
11468 struct mgmt_pending_cmd *cmd;
11471 bt_dev_dbg(hdev, "sock %p", sk);
11473 hci_dev_lock(hdev);
11475 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
11476 err = mgmt_cmd_status(sk, hdev->id,
11477 MGMT_OP_REMOVE_ADVERTISING,
11478 MGMT_STATUS_INVALID_PARAMS);
11482 if (pending_find(MGMT_OP_SET_LE, hdev)) {
11483 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11488 if (list_empty(&hdev->adv_instances)) {
11489 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11490 MGMT_STATUS_INVALID_PARAMS);
11494 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
11501 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
11502 remove_advertising_complete);
11504 mgmt_pending_free(cmd);
11507 hci_dev_unlock(hdev);
11512 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
11513 void *data, u16 data_len)
11515 struct mgmt_cp_get_adv_size_info *cp = data;
11516 struct mgmt_rp_get_adv_size_info rp;
11517 u32 flags, supported_flags;
11519 bt_dev_dbg(hdev, "sock %p", sk);
11521 if (!lmp_le_capable(hdev))
11522 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11523 MGMT_STATUS_REJECTED);
11525 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11526 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11527 MGMT_STATUS_INVALID_PARAMS);
11529 flags = __le32_to_cpu(cp->flags);
11531 /* The current implementation only supports a subset of the specified
11534 supported_flags = get_supported_adv_flags(hdev);
11535 if (flags & ~supported_flags)
11536 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11537 MGMT_STATUS_INVALID_PARAMS);
11539 rp.instance = cp->instance;
11540 rp.flags = cp->flags;
11541 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11542 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11544 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11545 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11548 static const struct hci_mgmt_handler mgmt_handlers[] = {
11549 { NULL }, /* 0x0000 (no command) */
11550 { read_version, MGMT_READ_VERSION_SIZE,
11552 HCI_MGMT_UNTRUSTED },
11553 { read_commands, MGMT_READ_COMMANDS_SIZE,
11555 HCI_MGMT_UNTRUSTED },
11556 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
11558 HCI_MGMT_UNTRUSTED },
11559 { read_controller_info, MGMT_READ_INFO_SIZE,
11560 HCI_MGMT_UNTRUSTED },
11561 { set_powered, MGMT_SETTING_SIZE },
11562 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
11563 { set_connectable, MGMT_SETTING_SIZE },
11564 { set_fast_connectable, MGMT_SETTING_SIZE },
11565 { set_bondable, MGMT_SETTING_SIZE },
11566 { set_link_security, MGMT_SETTING_SIZE },
11567 { set_ssp, MGMT_SETTING_SIZE },
11568 { set_hs, MGMT_SETTING_SIZE },
11569 { set_le, MGMT_SETTING_SIZE },
11570 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
11571 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
11572 { add_uuid, MGMT_ADD_UUID_SIZE },
11573 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
11574 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
11575 HCI_MGMT_VAR_LEN },
11576 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
11577 HCI_MGMT_VAR_LEN },
11578 { disconnect, MGMT_DISCONNECT_SIZE },
11579 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
11580 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
11581 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
11582 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
11583 { pair_device, MGMT_PAIR_DEVICE_SIZE },
11584 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
11585 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
11586 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
11587 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
11588 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
11589 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
11590 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
11591 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
11592 HCI_MGMT_VAR_LEN },
11593 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
11594 { start_discovery, MGMT_START_DISCOVERY_SIZE },
11595 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
11596 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
11597 { block_device, MGMT_BLOCK_DEVICE_SIZE },
11598 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
11599 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
11600 { set_advertising, MGMT_SETTING_SIZE },
11601 { set_bredr, MGMT_SETTING_SIZE },
11602 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
11603 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
11604 { set_secure_conn, MGMT_SETTING_SIZE },
11605 { set_debug_keys, MGMT_SETTING_SIZE },
11606 { set_privacy, MGMT_SET_PRIVACY_SIZE },
11607 { load_irks, MGMT_LOAD_IRKS_SIZE,
11608 HCI_MGMT_VAR_LEN },
11609 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
11610 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
11611 { add_device, MGMT_ADD_DEVICE_SIZE },
11612 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
11613 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
11614 HCI_MGMT_VAR_LEN },
11615 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
11617 HCI_MGMT_UNTRUSTED },
11618 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
11619 HCI_MGMT_UNCONFIGURED |
11620 HCI_MGMT_UNTRUSTED },
11621 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
11622 HCI_MGMT_UNCONFIGURED },
11623 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
11624 HCI_MGMT_UNCONFIGURED },
11625 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
11626 HCI_MGMT_VAR_LEN },
11627 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
11628 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
11630 HCI_MGMT_UNTRUSTED },
11631 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
11632 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
11633 HCI_MGMT_VAR_LEN },
11634 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
11635 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
11636 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
11637 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
11638 HCI_MGMT_UNTRUSTED },
11639 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
11640 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
11641 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
11642 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
11643 HCI_MGMT_VAR_LEN },
11644 { set_wideband_speech, MGMT_SETTING_SIZE },
11645 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
11646 HCI_MGMT_UNTRUSTED },
11647 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
11648 HCI_MGMT_UNTRUSTED |
11649 HCI_MGMT_HDEV_OPTIONAL },
11650 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
11652 HCI_MGMT_HDEV_OPTIONAL },
11653 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
11654 HCI_MGMT_UNTRUSTED },
11655 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
11656 HCI_MGMT_VAR_LEN },
11657 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
11658 HCI_MGMT_UNTRUSTED },
11659 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
11660 HCI_MGMT_VAR_LEN },
11661 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
11662 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
11663 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
11664 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
11665 HCI_MGMT_VAR_LEN },
11666 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
11667 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
11668 HCI_MGMT_VAR_LEN },
11669 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
11670 HCI_MGMT_VAR_LEN },
11671 { add_adv_patterns_monitor_rssi,
11672 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
11673 HCI_MGMT_VAR_LEN },
11674 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
11675 HCI_MGMT_VAR_LEN },
11676 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
11677 { mesh_send, MGMT_MESH_SEND_SIZE,
11678 HCI_MGMT_VAR_LEN },
11679 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
11683 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
11684 { NULL }, /* 0x0000 (no command) */
11685 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
11686 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
11687 HCI_MGMT_VAR_LEN },
11688 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
11689 HCI_MGMT_VAR_LEN },
11690 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
11691 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
11692 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
11693 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
11694 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
11695 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
11696 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
11697 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
11698 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
11699 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
11700 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
11701 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
11702 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
11703 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
11704 { enable_bt_6lowpan, MGMT_ENABLE_BT_6LOWPAN_SIZE },
11705 { connect_bt_6lowpan, MGMT_CONNECT_6LOWPAN_SIZE },
11706 { disconnect_bt_6lowpan, MGMT_DISCONNECT_6LOWPAN_SIZE },
11707 { read_maximum_le_data_length,
11708 MGMT_LE_READ_MAXIMUM_DATA_LENGTH_SIZE },
11709 { write_host_suggested_le_data_length,
11710 MGMT_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH_SIZE },
11711 { read_host_suggested_data_length,
11712 MGMT_LE_READ_HOST_SUGGESTED_DATA_LENGTH_SIZE },
11716 void mgmt_index_added(struct hci_dev *hdev)
11718 struct mgmt_ev_ext_index ev;
11720 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11723 switch (hdev->dev_type) {
11725 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11726 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
11727 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11730 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
11731 HCI_MGMT_INDEX_EVENTS);
11742 ev.bus = hdev->bus;
11744 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
11745 HCI_MGMT_EXT_INDEX_EVENTS);
11748 void mgmt_index_removed(struct hci_dev *hdev)
11750 struct mgmt_ev_ext_index ev;
11751 u8 status = MGMT_STATUS_INVALID_INDEX;
11753 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11756 switch (hdev->dev_type) {
11758 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11760 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11761 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
11762 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11765 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
11766 HCI_MGMT_INDEX_EVENTS);
11777 ev.bus = hdev->bus;
11779 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
11780 HCI_MGMT_EXT_INDEX_EVENTS);
11782 /* Cancel any remaining timed work */
11783 if (!hci_dev_test_flag(hdev, HCI_MGMT))
11785 cancel_delayed_work_sync(&hdev->discov_off);
11786 cancel_delayed_work_sync(&hdev->service_cache);
11787 cancel_delayed_work_sync(&hdev->rpa_expired);
11790 void mgmt_power_on(struct hci_dev *hdev, int err)
11792 struct cmd_lookup match = { NULL, hdev };
11794 bt_dev_dbg(hdev, "err %d", err);
11796 hci_dev_lock(hdev);
11799 restart_le_actions(hdev);
11800 hci_update_passive_scan(hdev);
11803 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11805 new_settings(hdev, match.sk);
11808 sock_put(match.sk);
11810 hci_dev_unlock(hdev);
11813 void __mgmt_power_off(struct hci_dev *hdev)
11815 struct cmd_lookup match = { NULL, hdev };
11816 u8 status, zero_cod[] = { 0, 0, 0 };
11818 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11820 /* If the power off is because of hdev unregistration let
11821 * use the appropriate INVALID_INDEX status. Otherwise use
11822 * NOT_POWERED. We cover both scenarios here since later in
11823 * mgmt_index_removed() any hci_conn callbacks will have already
11824 * been triggered, potentially causing misleading DISCONNECTED
11825 * status responses.
11827 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
11828 status = MGMT_STATUS_INVALID_INDEX;
11830 status = MGMT_STATUS_NOT_POWERED;
11832 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11834 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
11835 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
11836 zero_cod, sizeof(zero_cod),
11837 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11838 ext_info_changed(hdev, NULL);
11841 new_settings(hdev, match.sk);
11844 sock_put(match.sk);
11847 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
11849 struct mgmt_pending_cmd *cmd;
11852 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11856 if (err == -ERFKILL)
11857 status = MGMT_STATUS_RFKILLED;
11859 status = MGMT_STATUS_FAILED;
11861 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11863 mgmt_pending_remove(cmd);
11866 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11869 struct mgmt_ev_new_link_key ev;
11871 memset(&ev, 0, sizeof(ev));
11873 ev.store_hint = persistent;
11874 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11875 ev.key.addr.type = BDADDR_BREDR;
11876 ev.key.type = key->type;
11877 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11878 ev.key.pin_len = key->pin_len;
11880 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
11883 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11885 switch (ltk->type) {
11887 case SMP_LTK_RESPONDER:
11888 if (ltk->authenticated)
11889 return MGMT_LTK_AUTHENTICATED;
11890 return MGMT_LTK_UNAUTHENTICATED;
11892 if (ltk->authenticated)
11893 return MGMT_LTK_P256_AUTH;
11894 return MGMT_LTK_P256_UNAUTH;
11895 case SMP_LTK_P256_DEBUG:
11896 return MGMT_LTK_P256_DEBUG;
11899 return MGMT_LTK_UNAUTHENTICATED;
11902 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11904 struct mgmt_ev_new_long_term_key ev;
11906 memset(&ev, 0, sizeof(ev));
11908 /* Devices using resolvable or non-resolvable random addresses
11909 * without providing an identity resolving key don't require
11910 * to store long term keys. Their addresses will change the
11911 * next time around.
11913 * Only when a remote device provides an identity address
11914 * make sure the long term key is stored. If the remote
11915 * identity is known, the long term keys are internally
11916 * mapped to the identity address. So allow static random
11917 * and public addresses here.
11919 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11920 (key->bdaddr.b[5] & 0xc0) != 0xc0)
11921 ev.store_hint = 0x00;
11923 ev.store_hint = persistent;
11925 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11926 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
11927 ev.key.type = mgmt_ltk_type(key);
11928 ev.key.enc_size = key->enc_size;
11929 ev.key.ediv = key->ediv;
11930 ev.key.rand = key->rand;
11932 if (key->type == SMP_LTK)
11933 ev.key.initiator = 1;
11935 /* Make sure we copy only the significant bytes based on the
11936 * encryption key size, and set the rest of the value to zeroes.
11938 memcpy(ev.key.val, key->val, key->enc_size);
11939 memset(ev.key.val + key->enc_size, 0,
11940 sizeof(ev.key.val) - key->enc_size);
11942 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
11945 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11947 struct mgmt_ev_new_irk ev;
11949 memset(&ev, 0, sizeof(ev));
11951 ev.store_hint = persistent;
11953 bacpy(&ev.rpa, &irk->rpa);
11954 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11955 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
11956 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11958 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
11961 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11964 struct mgmt_ev_new_csrk ev;
11966 memset(&ev, 0, sizeof(ev));
11968 /* Devices using resolvable or non-resolvable random addresses
11969 * without providing an identity resolving key don't require
11970 * to store signature resolving keys. Their addresses will change
11971 * the next time around.
11973 * Only when a remote device provides an identity address
11974 * make sure the signature resolving key is stored. So allow
11975 * static random and public addresses here.
11977 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11978 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11979 ev.store_hint = 0x00;
11981 ev.store_hint = persistent;
11983 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11984 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
11985 ev.key.type = csrk->type;
11986 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11988 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
11991 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11992 u8 bdaddr_type, u8 store_hint, u16 min_interval,
11993 u16 max_interval, u16 latency, u16 timeout)
11995 struct mgmt_ev_new_conn_param ev;
11997 if (!hci_is_identity_address(bdaddr, bdaddr_type))
12000 memset(&ev, 0, sizeof(ev));
12001 bacpy(&ev.addr.bdaddr, bdaddr);
12002 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
12003 ev.store_hint = store_hint;
12004 ev.min_interval = cpu_to_le16(min_interval);
12005 ev.max_interval = cpu_to_le16(max_interval);
12006 ev.latency = cpu_to_le16(latency);
12007 ev.timeout = cpu_to_le16(timeout);
12009 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
12012 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
12013 u8 *name, u8 name_len)
12015 struct sk_buff *skb;
12016 struct mgmt_ev_device_connected *ev;
12020 /* allocate buff for LE or BR/EDR adv */
12021 if (conn->le_adv_data_len > 0)
12022 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
12023 sizeof(*ev) + conn->le_adv_data_len);
12025 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
12026 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
12027 eir_precalc_len(sizeof(conn->dev_class)));
12029 ev = skb_put(skb, sizeof(*ev));
12030 bacpy(&ev->addr.bdaddr, &conn->dst);
12031 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
12034 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
12036 ev->flags = __cpu_to_le32(flags);
12038 /* We must ensure that the EIR Data fields are ordered and
12039 * unique. Keep it simple for now and avoid the problem by not
12040 * adding any BR/EDR data to the LE adv.
12042 if (conn->le_adv_data_len > 0) {
12043 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
12044 eir_len = conn->le_adv_data_len;
12047 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12049 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
12050 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
12051 conn->dev_class, sizeof(conn->dev_class));
12054 ev->eir_len = cpu_to_le16(eir_len);
12056 mgmt_event_skb(skb, NULL);
12059 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
12061 struct sock **sk = data;
12063 cmd->cmd_complete(cmd, 0);
12068 mgmt_pending_remove(cmd);
12071 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
12073 struct hci_dev *hdev = data;
12074 struct mgmt_cp_unpair_device *cp = cmd->param;
12076 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
12078 cmd->cmd_complete(cmd, 0);
12079 mgmt_pending_remove(cmd);
12082 bool mgmt_powering_down(struct hci_dev *hdev)
12084 struct mgmt_pending_cmd *cmd;
12085 struct mgmt_mode *cp;
12087 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
12098 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
12099 u8 link_type, u8 addr_type, u8 reason,
12100 bool mgmt_connected)
12102 struct mgmt_ev_device_disconnected ev;
12103 struct sock *sk = NULL;
12105 /* The connection is still in hci_conn_hash so test for 1
12106 * instead of 0 to know if this is the last one.
12108 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
12109 cancel_delayed_work(&hdev->power_off);
12110 queue_work(hdev->req_workqueue, &hdev->power_off.work);
12113 if (!mgmt_connected)
12116 if (link_type != ACL_LINK && link_type != LE_LINK)
12119 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
12121 bacpy(&ev.addr.bdaddr, bdaddr);
12122 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12123 ev.reason = reason;
12125 /* Report disconnects due to suspend */
12126 if (hdev->suspended)
12127 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
12129 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
12134 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
12138 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
12139 u8 link_type, u8 addr_type, u8 status)
12141 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
12142 struct mgmt_cp_disconnect *cp;
12143 struct mgmt_pending_cmd *cmd;
12145 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
12148 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
12154 if (bacmp(bdaddr, &cp->addr.bdaddr))
12157 if (cp->addr.type != bdaddr_type)
12160 cmd->cmd_complete(cmd, mgmt_status(status));
12161 mgmt_pending_remove(cmd);
12164 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12165 u8 addr_type, u8 status)
12167 struct mgmt_ev_connect_failed ev;
12169 /* The connection is still in hci_conn_hash so test for 1
12170 * instead of 0 to know if this is the last one.
12172 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
12173 cancel_delayed_work(&hdev->power_off);
12174 queue_work(hdev->req_workqueue, &hdev->power_off.work);
12177 bacpy(&ev.addr.bdaddr, bdaddr);
12178 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12179 ev.status = mgmt_status(status);
12181 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
12184 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
12186 struct mgmt_ev_pin_code_request ev;
12188 bacpy(&ev.addr.bdaddr, bdaddr);
12189 ev.addr.type = BDADDR_BREDR;
12190 ev.secure = secure;
12192 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
12195 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12198 struct mgmt_pending_cmd *cmd;
12200 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
12204 cmd->cmd_complete(cmd, mgmt_status(status));
12205 mgmt_pending_remove(cmd);
12208 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12211 struct mgmt_pending_cmd *cmd;
12213 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
12217 cmd->cmd_complete(cmd, mgmt_status(status));
12218 mgmt_pending_remove(cmd);
12221 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
12222 u8 link_type, u8 addr_type, u32 value,
12225 struct mgmt_ev_user_confirm_request ev;
12227 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12229 bacpy(&ev.addr.bdaddr, bdaddr);
12230 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12231 ev.confirm_hint = confirm_hint;
12232 ev.value = cpu_to_le32(value);
12234 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
12238 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
12239 u8 link_type, u8 addr_type)
12241 struct mgmt_ev_user_passkey_request ev;
12243 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12245 bacpy(&ev.addr.bdaddr, bdaddr);
12246 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12248 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
12252 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12253 u8 link_type, u8 addr_type, u8 status,
12256 struct mgmt_pending_cmd *cmd;
12258 cmd = pending_find(opcode, hdev);
12262 cmd->cmd_complete(cmd, mgmt_status(status));
12263 mgmt_pending_remove(cmd);
12268 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12269 u8 link_type, u8 addr_type, u8 status)
12271 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12272 status, MGMT_OP_USER_CONFIRM_REPLY);
12275 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12276 u8 link_type, u8 addr_type, u8 status)
12278 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12280 MGMT_OP_USER_CONFIRM_NEG_REPLY);
12283 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12284 u8 link_type, u8 addr_type, u8 status)
12286 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12287 status, MGMT_OP_USER_PASSKEY_REPLY);
12290 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12291 u8 link_type, u8 addr_type, u8 status)
12293 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12295 MGMT_OP_USER_PASSKEY_NEG_REPLY);
12298 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
12299 u8 link_type, u8 addr_type, u32 passkey,
12302 struct mgmt_ev_passkey_notify ev;
12304 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12306 bacpy(&ev.addr.bdaddr, bdaddr);
12307 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12308 ev.passkey = __cpu_to_le32(passkey);
12309 ev.entered = entered;
12311 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
12314 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
12316 struct mgmt_ev_auth_failed ev;
12317 struct mgmt_pending_cmd *cmd;
12318 u8 status = mgmt_status(hci_status);
12320 bacpy(&ev.addr.bdaddr, &conn->dst);
12321 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
12322 ev.status = status;
12324 cmd = find_pairing(conn);
12326 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
12327 cmd ? cmd->sk : NULL);
12330 cmd->cmd_complete(cmd, status);
12331 mgmt_pending_remove(cmd);
/* Completion handler for an HCI authentication-enable change.
 * On failure (non-zero status): answer all pending SET_LINK_SECURITY
 * commands with the translated error and stop.  On success: sync the
 * HCI_LINK_SECURITY dev flag with the controller's HCI_AUTH flag, answer
 * pending commands with the new settings, and emit New Settings if the
 * flag actually changed.  The cmd_lookup 'match' collects the first
 * requester socket so it can be excluded from the broadcast and released
 * via sock_put() afterwards.
 * NOTE(review): the 'if (status)', 'changed' declaration, and the
 * 'if (changed)' guard around new_settings() sit on lines elided from this
 * capture — confirm control flow against the full source.
 */
12335 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
12337 struct cmd_lookup match = { NULL, hdev };
12341 u8 mgmt_err = mgmt_status(status);
12342 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
12343 cmd_status_rsp, &mgmt_err);
12347 if (test_bit(HCI_AUTH, &hdev->flags))
12348 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
12350 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
12352 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
12356 new_settings(hdev, match.sk);
12359 sock_put(match.sk);
/* mgmt_pending_foreach() callback: capture the socket of the first pending
 * command into the cmd_lookup accumulator, taking a reference with
 * sock_hold().  Subsequent calls are no-ops once match->sk is set; the
 * caller is responsible for the balancing sock_put().
 */
12362 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
12364 struct cmd_lookup *match = data;
12366 if (match->sk == NULL) {
12367 match->sk = cmd->sk;
12368 sock_hold(match->sk);
/* Completion handler for a Class of Device update.
 * Collects the socket of any pending SET_DEV_CLASS / ADD_UUID / REMOVE_UUID
 * command (they all may change the class), broadcasts the 3-byte class via
 * MGMT_EV_CLASS_OF_DEV_CHANGED to sockets subscribed to DEV_CLASS events,
 * signals extended-info change, and drops the held socket reference.
 * NOTE(review): a success guard ('if (!status)') around the event emission
 * and an 'if (match.sk)' before sock_put() appear to be on lines elided
 * from this capture — confirm.
 */
12372 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
12375 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
12377 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
12378 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
12379 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
12382 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
12383 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
12384 ext_info_changed(hdev, NULL);
12388 sock_put(match.sk);
/* Completion handler for a local-name change.
 * Builds a MGMT_EV_LOCAL_NAME_CHANGED payload from the new name and the
 * stored short name, updates hdev->dev_name, and broadcasts the event to
 * subscribed sockets, skipping the socket that issued the SET_LOCAL_NAME
 * command (it gets its own command response).  No event is sent while a
 * SET_POWERED command is pending (power-on initializes the name).
 * NOTE(review): the guards around the hdev->dev_name copy (no pending cmd
 * and/or status-dependent) are on lines elided from this capture — confirm.
 */
12391 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
12393 struct mgmt_cp_set_local_name ev;
12394 struct mgmt_pending_cmd *cmd;
12399 memset(&ev, 0, sizeof(ev));
12400 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
12401 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
12403 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
12405 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
12407 /* If this is a HCI command related to powering on the
12408 * HCI dev don't send any mgmt signals.
12410 if (pending_find(MGMT_OP_SET_POWERED, hdev))
12414 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
12415 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
12416 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Return true if the 16-byte 'uuid' appears in the 'uuids' array of
 * 'uuid_count' 128-bit entries (byte-wise comparison via memcmp).
 */
12419 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
12423 for (i = 0; i < uuid_count; i++) {
12424 if (!memcmp(uuid, uuids[i], 16))
/* Walk EIR/advertising data and return true if any advertised service UUID
 * matches an entry in 'uuids'.  16- and 32-bit UUIDs are expanded into full
 * 128-bit form by overlaying their bytes onto the Bluetooth base UUID
 * before comparison (EIR stores them little-endian, hence the reversed
 * byte placement at offsets 12..15); 128-bit UUIDs are compared directly.
 * Truncated fields (field_len past the remaining buffer) and a zero field
 * length terminate parsing defensively.
 */
12431 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
12435 while (parsed < eir_len) {
12436 u8 field_len = eir[0];
12440 if (field_len == 0)
12443 if (eir_len - parsed < field_len + 1)
12447 case EIR_UUID16_ALL:
12448 case EIR_UUID16_SOME:
/* 2-byte UUIDs: expand onto base UUID, LE byte order */
12449 for (i = 0; i + 3 <= field_len; i += 2) {
12450 memcpy(uuid, bluetooth_base_uuid, 16);
12451 uuid[13] = eir[i + 3];
12452 uuid[12] = eir[i + 2];
12453 if (has_uuid(uuid, uuid_count, uuids))
12457 case EIR_UUID32_ALL:
12458 case EIR_UUID32_SOME:
/* 4-byte UUIDs: expand onto base UUID, LE byte order */
12459 for (i = 0; i + 5 <= field_len; i += 4) {
12460 memcpy(uuid, bluetooth_base_uuid, 16);
12461 uuid[15] = eir[i + 5];
12462 uuid[14] = eir[i + 4];
12463 uuid[13] = eir[i + 3];
12464 uuid[12] = eir[i + 2];
12465 if (has_uuid(uuid, uuid_count, uuids))
12469 case EIR_UUID128_ALL:
12470 case EIR_UUID128_SOME:
/* Full 128-bit UUIDs: compare as-is */
12471 for (i = 0; i + 17 <= field_len; i += 16) {
12472 memcpy(uuid, eir + i + 2, 16);
12473 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field (length byte + payload) */
12479 parsed += field_len + 1;
12480 eir += field_len + 1;
/* Schedule a restart of the ongoing LE scan after DISCOV_LE_RESTART_DELAY,
 * used with strict duplicate filtering to refresh RSSI values.  Does
 * nothing if the controller is not scanning, or if the scan would already
 * have run past its configured duration by the time the restart fires
 * (scan_start + scan_duration vs. jiffies + delay).
 */
12486 static void restart_le_scan(struct hci_dev *hdev)
12488 /* If controller is not scanning we are done. */
12489 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
12492 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
12493 hdev->discovery.scan_start +
12494 hdev->discovery.scan_duration))
12497 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
12498 DISCOV_LE_RESTART_DELAY);
/* Apply the active service-discovery filter to a found device.
 * Returns whether the result passes: (1) the RSSI threshold — except that
 * with HCI_QUIRK_STRICT_DUPLICATE_FILTER a low-RSSI result is let through
 * so the scan can be restarted for fresh RSSI values; (2) the UUID filter —
 * the device must advertise at least one matching UUID in either EIR or
 * scan-response data; (3) with the strict-duplicate-filter quirk, the RSSI
 * threshold is re-checked after scheduling the scan restart.
 */
12501 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
12502 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
12504 /* If a RSSI threshold has been specified, and
12505 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
12506 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
12507 * is set, let it through for further processing, as we might need to
12508 * restart the scan.
12510 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
12511 * the results are also dropped.
12513 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12514 (rssi == HCI_RSSI_INVALID ||
12515 (rssi < hdev->discovery.rssi &&
12516 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
12519 if (hdev->discovery.uuid_count != 0) {
12520 /* If a list of UUIDs is provided in filter, results with no
12521 * matching UUID should be dropped.
12523 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
12524 hdev->discovery.uuids) &&
12525 !eir_has_uuids(scan_rsp, scan_rsp_len,
12526 hdev->discovery.uuid_count,
12527 hdev->discovery.uuids))
12531 /* If duplicate filtering does not report RSSI changes, then restart
12532 * scanning to ensure updated result with updated RSSI values.
12534 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
12535 restart_le_scan(hdev);
12537 /* Validate RSSI value against the RSSI threshold once more. */
12538 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12539 rssi < hdev->discovery.rssi)
/* Emit MGMT_EV_ADV_MONITOR_DEVICE_LOST when a device previously matched by
 * an Advertisement Monitor (identified by 'handle', sent little-endian)
 * stops being tracked.  Broadcast to all mgmt sockets.
 */
12546 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
12547 bdaddr_t *bdaddr, u8 addr_type)
12549 struct mgmt_ev_adv_monitor_device_lost ev;
12551 ev.monitor_handle = cpu_to_le16(handle);
12552 bacpy(&ev.addr.bdaddr, bdaddr);
12553 ev.addr.type = addr_type;
12555 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
/* Re-emit a DEVICE_FOUND skb as ADV_MONITOR_DEVICE_FOUND for the monitor
 * identified by 'handle' (parameter declaration elided in this capture —
 * presumably a u16; confirm).  The new event layout is the monitor handle
 * (little-endian) followed by the original DEVICE_FOUND payload, so a new
 * skb is allocated with room for the size difference between the two event
 * structs plus the original payload.
 * NOTE(review): the allocation-failure check after mgmt_alloc_skb() is on
 * a line elided from this capture — confirm before relying on advmon_skb.
 */
12559 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
12560 struct sk_buff *skb,
12561 struct sock *skip_sk,
12564 struct sk_buff *advmon_skb;
12565 size_t advmon_skb_len;
12566 __le16 *monitor_handle;
12571 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
12572 sizeof(struct mgmt_ev_device_found)) + skb->len;
12573 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
12578 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
12579 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
12580 * store monitor_handle of the matched monitor.
12582 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
12583 *monitor_handle = cpu_to_le16(handle);
12584 skb_put_data(advmon_skb, skb->data, skb->len);
12586 mgmt_event_skb(advmon_skb, skip_sk);
/* Route a found-device report either to the normal DEVICE_FOUND path, the
 * ADV_MONITOR_DEVICE_FOUND path, or both, depending on why the report was
 * received (active discovery / passive scan vs. an Advertisement Monitor
 * match).  Ownership of 'skb' is consumed on every path: it is either sent
 * via mgmt_event_skb() or (presumably, on monitor-only paths) freed — the
 * kfree_skb() lines are elided from this capture; confirm.
 * Tracks per-device one-shot notification via dev->notified and keeps
 * hdev->advmon_pend_notify up to date for devices still awaiting a notify.
 */
12589 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
12590 bdaddr_t *bdaddr, bool report_device,
12591 struct sk_buff *skb,
12592 struct sock *skip_sk)
12594 struct monitored_device *dev, *tmp;
12595 bool matched = false;
12596 bool notified = false;
12598 /* We have received the Advertisement Report because:
12599 * 1. the kernel has initiated active discovery
12600 * 2. if not, we have pend_le_reports > 0 in which case we are doing
12602 * 3. if none of the above is true, we have one or more active
12603 * Advertisement Monitor
12605 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
12606 * and report ONLY one advertisement per device for the matched Monitor
12607 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12609 * For case 3, since we are not active scanning and all advertisements
12610 * received are due to a matched Advertisement Monitor, report all
12611 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12613 if (report_device && !hdev->advmon_pend_notify) {
12614 mgmt_event_skb(skb, skip_sk);
12618 hdev->advmon_pend_notify = false;
12620 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
12621 if (!bacmp(&dev->bdaddr, bdaddr)) {
/* First sighting for this monitored device: notify once */
12624 if (!dev->notified) {
12625 mgmt_send_adv_monitor_device_found(hdev, skb,
12629 dev->notified = true;
/* Any still-unnotified device keeps the pending flag set */
12633 if (!dev->notified)
12634 hdev->advmon_pend_notify = true;
12637 if (!report_device &&
12638 ((matched && !notified) || !msft_monitor_supported(hdev))) {
12639 /* Handle 0 indicates that we are not active scanning and this
12640 * is a subsequent advertisement report for an already matched
12641 * Advertisement Monitor or the controller offloading support
12642 * is not available.
12644 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
12648 mgmt_event_skb(skb, skip_sk);
/* Emit MGMT_EV_MESH_DEVICE_FOUND for an LE advertisement relevant to mesh.
 * The report is only emitted if the advertising data (or scan response)
 * contains at least one AD type listed in hdev->mesh_ad_types; an empty
 * filter list (first entry zero) suppresses all reports.  The event
 * payload is the address, RSSI/flags/instant metadata ('instant' parameter
 * declaration is elided in this capture — presumably a u64; confirm),
 * followed by the concatenated EIR and scan-response data.
 * NOTE(review): the 'found = true; goto/break' bookkeeping between the
 * scan loops and the "nothing matched" early return are on lines elided
 * from this capture — confirm control flow against the full source.
 */
12653 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
12654 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
12655 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12658 struct sk_buff *skb;
12659 struct mgmt_ev_mesh_device_found *ev;
12662 if (!hdev->mesh_ad_types[0])
12665 /* Scan for requested AD types */
12667 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
12668 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12669 if (!hdev->mesh_ad_types[j])
12672 if (hdev->mesh_ad_types[j] == eir[i + 1])
12678 if (scan_rsp_len > 0) {
12679 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
12680 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12681 if (!hdev->mesh_ad_types[j])
12684 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
12693 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
12694 sizeof(*ev) + eir_len + scan_rsp_len);
12698 ev = skb_put(skb, sizeof(*ev));
12700 bacpy(&ev->addr.bdaddr, bdaddr);
12701 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
12703 ev->flags = cpu_to_le32(flags);
12704 ev->instant = cpu_to_le64(instant);
12707 /* Copy EIR or advertising data into event */
12708 skb_put_data(skb, eir, eir_len);
12710 if (scan_rsp_len > 0)
12711 /* Append scan response data to event */
12712 skb_put_data(skb, scan_rsp, scan_rsp_len);
12714 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12716 mgmt_event_skb(skb, NULL);
/* Main found-device entry point: build and route MGMT_EV_DEVICE_FOUND.
 * Steps visible here:
 *  - mirror LE reports to the mesh path when HCI_MESH is set;
 *  - suppress events for non-kernel-initiated discovery, except LE passive
 *    scanning (pend_le_reports) and advertisement monitoring;
 *  - apply the service-discovery filter (is_filter_match) and the limited-
 *    discovery check (CoD limited bit for BR/EDR, LE_AD_LIMITED flag for LE);
 *  - allocate an skb with 5 spare bytes for a synthesized Class of Device
 *    EIR field, normalize invalid BR/EDR RSSI to 0 for backward
 *    compatibility, append EIR, optional CoD, and scan-response data;
 *  - hand the skb to mgmt_adv_monitor_device_found() for final routing.
 * NOTE(review): several guard lines (allocation-failure check, filter
 * early-returns, eir_cod buffer declaration) are elided from this capture —
 * confirm against the full source.
 */
12719 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12720 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
12721 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12724 struct sk_buff *skb;
12725 struct mgmt_ev_device_found *ev;
12726 bool report_device = hci_discovery_active(hdev);
12728 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
12729 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
12730 eir, eir_len, scan_rsp, scan_rsp_len,
12733 /* Don't send events for a non-kernel initiated discovery. With
12734 * LE one exception is if we have pend_le_reports > 0 in which
12735 * case we're doing passive scanning and want these events.
12737 if (!hci_discovery_active(hdev)) {
12738 if (link_type == ACL_LINK)
12740 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
12741 report_device = true;
12742 else if (!hci_is_adv_monitoring(hdev))
12746 if (hdev->discovery.result_filtering) {
12747 /* We are using service discovery */
12748 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
12753 if (hdev->discovery.limited) {
12754 /* Check for limited discoverable bit */
12756 if (!(dev_class[1] & 0x20))
12759 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
12760 if (!flags || !(flags[0] & LE_AD_LIMITED))
12765 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
12766 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12767 sizeof(*ev) + eir_len + scan_rsp_len + 5);
12771 ev = skb_put(skb, sizeof(*ev));
12773 /* In case of device discovery with BR/EDR devices (pre 1.2), the
12774 * RSSI value was reported as 0 when not available. This behavior
12775 * is kept when using device discovery. This is required for full
12776 * backwards compatibility with the API.
12778 * However when using service discovery, the value 127 will be
12779 * returned when the RSSI is not available.
12781 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
12782 link_type == ACL_LINK)
12785 bacpy(&ev->addr.bdaddr, bdaddr);
12786 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12788 ev->flags = cpu_to_le32(flags);
12791 /* Copy EIR or advertising data into event */
12792 skb_put_data(skb, eir, eir_len);
12794 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
/* Synthesize a CoD EIR field when the device did not include one */
12797 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
12799 skb_put_data(skb, eir_cod, sizeof(eir_cod));
12802 if (scan_rsp_len > 0)
12803 /* Append scan response data to event */
12804 skb_put_data(skb, scan_rsp, scan_rsp_len);
12806 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12808 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
/* Emit MGMT_EV_DEVICE_FOUND carrying a resolved remote name.
 * The event holds the address plus an EIR_NAME_COMPLETE field when a name
 * was obtained; when 'name' is NULL the MGMT_DEV_FOUND_NAME_REQUEST_FAILED
 * flag is set instead.  'eir_len' and 'flags' local declarations, the
 * allocation-failure check, and the 'if (name) ... else' guards are on
 * lines elided from this capture — confirm against the full source.
 */
12811 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12812 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
12814 struct sk_buff *skb;
12815 struct mgmt_ev_device_found *ev;
12819 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12820 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
12822 ev = skb_put(skb, sizeof(*ev));
12823 bacpy(&ev->addr.bdaddr, bdaddr);
12824 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12828 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12830 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
12832 ev->eir_len = cpu_to_le16(eir_len);
12833 ev->flags = cpu_to_le32(flags);
12835 mgmt_event_skb(skb, NULL);
/* Broadcast MGMT_EV_DISCOVERING with the current discovery type and the
 * new discovering state (start/stop) to all mgmt sockets.
 */
12838 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
12840 struct mgmt_ev_discovering ev;
12842 bt_dev_dbg(hdev, "discovering %u", discovering);
12844 memset(&ev, 0, sizeof(ev));
12845 ev.type = hdev->discovery.type;
12846 ev.discovering = discovering;
12848 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_CONTROLLER_SUSPEND with the given suspend state to all
 * mgmt sockets.
 */
12851 void mgmt_suspending(struct hci_dev *hdev, u8 state)
12853 struct mgmt_ev_controller_suspend ev;
12855 ev.suspend_state = state;
12856 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* Broadcast MGMT_EV_CONTROLLER_RESUME with the wake reason and, when the
 * wake was caused by a remote device, its address.
 * NOTE(review): the branch selecting between copying 'bdaddr' and zeroing
 * ev.addr (presumably keyed on the wake reason) is on lines elided from
 * this capture — confirm which reasons carry an address.
 */
12859 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
12862 struct mgmt_ev_controller_resume ev;
12864 ev.wake_reason = reason;
12866 bacpy(&ev.addr.bdaddr, bdaddr);
12867 ev.addr.type = addr_type;
12869 memset(&ev.addr, 0, sizeof(ev.addr));
12872 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Management channel descriptor registered with the HCI socket layer:
 * binds the mgmt command handler tables (including the Tizen vendor
 * extension table) and the per-hdev init hook to HCI_CHANNEL_CONTROL.
 */
12875 static struct hci_mgmt_chan chan = {
12876 .channel = HCI_CHANNEL_CONTROL,
12877 .handler_count = ARRAY_SIZE(mgmt_handlers),
12878 .handlers = mgmt_handlers,
12880 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
12881 .tizen_handlers = tizen_mgmt_handlers,
12883 .hdev_init = mgmt_init_hdev,
/* Register the management channel with the HCI core; returns 0 on success
 * or a negative errno from hci_mgmt_chan_register().
 */
12886 int mgmt_init(void)
12888 return hci_mgmt_chan_register(&chan);
/* Unregister the management channel on module teardown. */
12891 void mgmt_exit(void)
12893 hci_mgmt_chan_unregister(&chan);
/* Clean up per-socket state when a mgmt socket closes: for every
 * registered controller, complete (as failed) all mesh transmissions that
 * were queued by this socket.  Iterates under hci_dev_list_lock (read);
 * the inner loop draining mgmt_mesh_next() per hdev is partially elided in
 * this capture — presumably a while/until-NULL loop; confirm.
 */
12896 void mgmt_cleanup(struct sock *sk)
12898 struct mgmt_mesh_tx *mesh_tx;
12899 struct hci_dev *hdev;
12901 read_lock(&hci_dev_list_lock);
12903 list_for_each_entry(hdev, &hci_dev_list, list) {
12905 mesh_tx = mgmt_mesh_next(hdev, sk);
12908 mesh_send_complete(hdev, mesh_tx, true);
12912 read_unlock(&hci_dev_list_lock);