2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
48 #define MGMT_VERSION 1
49 #define MGMT_REVISION 22
51 static const u16 mgmt_commands[] = {
52 MGMT_OP_READ_INDEX_LIST,
55 MGMT_OP_SET_DISCOVERABLE,
56 MGMT_OP_SET_CONNECTABLE,
57 MGMT_OP_SET_FAST_CONNECTABLE,
59 MGMT_OP_SET_LINK_SECURITY,
63 MGMT_OP_SET_DEV_CLASS,
64 MGMT_OP_SET_LOCAL_NAME,
67 MGMT_OP_LOAD_LINK_KEYS,
68 MGMT_OP_LOAD_LONG_TERM_KEYS,
70 MGMT_OP_GET_CONNECTIONS,
71 MGMT_OP_PIN_CODE_REPLY,
72 MGMT_OP_PIN_CODE_NEG_REPLY,
73 MGMT_OP_SET_IO_CAPABILITY,
75 MGMT_OP_CANCEL_PAIR_DEVICE,
76 MGMT_OP_UNPAIR_DEVICE,
77 MGMT_OP_USER_CONFIRM_REPLY,
78 MGMT_OP_USER_CONFIRM_NEG_REPLY,
79 MGMT_OP_USER_PASSKEY_REPLY,
80 MGMT_OP_USER_PASSKEY_NEG_REPLY,
81 MGMT_OP_READ_LOCAL_OOB_DATA,
82 MGMT_OP_ADD_REMOTE_OOB_DATA,
83 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
84 MGMT_OP_START_DISCOVERY,
85 MGMT_OP_STOP_DISCOVERY,
88 MGMT_OP_UNBLOCK_DEVICE,
89 MGMT_OP_SET_DEVICE_ID,
90 MGMT_OP_SET_ADVERTISING,
92 MGMT_OP_SET_STATIC_ADDRESS,
93 MGMT_OP_SET_SCAN_PARAMS,
94 MGMT_OP_SET_SECURE_CONN,
95 MGMT_OP_SET_DEBUG_KEYS,
98 MGMT_OP_GET_CONN_INFO,
99 MGMT_OP_GET_CLOCK_INFO,
101 MGMT_OP_REMOVE_DEVICE,
102 MGMT_OP_LOAD_CONN_PARAM,
103 MGMT_OP_READ_UNCONF_INDEX_LIST,
104 MGMT_OP_READ_CONFIG_INFO,
105 MGMT_OP_SET_EXTERNAL_CONFIG,
106 MGMT_OP_SET_PUBLIC_ADDRESS,
107 MGMT_OP_START_SERVICE_DISCOVERY,
108 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
109 MGMT_OP_READ_EXT_INDEX_LIST,
110 MGMT_OP_READ_ADV_FEATURES,
111 MGMT_OP_ADD_ADVERTISING,
112 MGMT_OP_REMOVE_ADVERTISING,
113 MGMT_OP_GET_ADV_SIZE_INFO,
114 MGMT_OP_START_LIMITED_DISCOVERY,
115 MGMT_OP_READ_EXT_INFO,
116 MGMT_OP_SET_APPEARANCE,
117 MGMT_OP_GET_PHY_CONFIGURATION,
118 MGMT_OP_SET_PHY_CONFIGURATION,
119 MGMT_OP_SET_BLOCKED_KEYS,
120 MGMT_OP_SET_WIDEBAND_SPEECH,
121 MGMT_OP_READ_CONTROLLER_CAP,
122 MGMT_OP_READ_EXP_FEATURES_INFO,
123 MGMT_OP_SET_EXP_FEATURE,
124 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
125 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
126 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
127 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
128 MGMT_OP_GET_DEVICE_FLAGS,
129 MGMT_OP_SET_DEVICE_FLAGS,
130 MGMT_OP_READ_ADV_MONITOR_FEATURES,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
132 MGMT_OP_REMOVE_ADV_MONITOR,
133 MGMT_OP_ADD_EXT_ADV_PARAMS,
134 MGMT_OP_ADD_EXT_ADV_DATA,
135 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 MGMT_OP_SET_MESH_RECEIVER,
137 MGMT_OP_MESH_READ_FEATURES,
139 MGMT_OP_MESH_SEND_CANCEL,
142 static const u16 mgmt_events[] = {
143 MGMT_EV_CONTROLLER_ERROR,
145 MGMT_EV_INDEX_REMOVED,
146 MGMT_EV_NEW_SETTINGS,
147 MGMT_EV_CLASS_OF_DEV_CHANGED,
148 MGMT_EV_LOCAL_NAME_CHANGED,
149 MGMT_EV_NEW_LINK_KEY,
150 MGMT_EV_NEW_LONG_TERM_KEY,
151 MGMT_EV_DEVICE_CONNECTED,
152 MGMT_EV_DEVICE_DISCONNECTED,
153 MGMT_EV_CONNECT_FAILED,
154 MGMT_EV_PIN_CODE_REQUEST,
155 MGMT_EV_USER_CONFIRM_REQUEST,
156 MGMT_EV_USER_PASSKEY_REQUEST,
158 MGMT_EV_DEVICE_FOUND,
160 MGMT_EV_DEVICE_BLOCKED,
161 MGMT_EV_DEVICE_UNBLOCKED,
162 MGMT_EV_DEVICE_UNPAIRED,
163 MGMT_EV_PASSKEY_NOTIFY,
166 MGMT_EV_DEVICE_ADDED,
167 MGMT_EV_DEVICE_REMOVED,
168 MGMT_EV_NEW_CONN_PARAM,
169 MGMT_EV_UNCONF_INDEX_ADDED,
170 MGMT_EV_UNCONF_INDEX_REMOVED,
171 MGMT_EV_NEW_CONFIG_OPTIONS,
172 MGMT_EV_EXT_INDEX_ADDED,
173 MGMT_EV_EXT_INDEX_REMOVED,
174 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
175 MGMT_EV_ADVERTISING_ADDED,
176 MGMT_EV_ADVERTISING_REMOVED,
177 MGMT_EV_EXT_INFO_CHANGED,
178 MGMT_EV_PHY_CONFIGURATION_CHANGED,
179 MGMT_EV_EXP_FEATURE_CHANGED,
180 MGMT_EV_DEVICE_FLAGS_CHANGED,
181 MGMT_EV_ADV_MONITOR_ADDED,
182 MGMT_EV_ADV_MONITOR_REMOVED,
183 MGMT_EV_CONTROLLER_SUSPEND,
184 MGMT_EV_CONTROLLER_RESUME,
185 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
186 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
189 static const u16 mgmt_untrusted_commands[] = {
190 MGMT_OP_READ_INDEX_LIST,
192 MGMT_OP_READ_UNCONF_INDEX_LIST,
193 MGMT_OP_READ_CONFIG_INFO,
194 MGMT_OP_READ_EXT_INDEX_LIST,
195 MGMT_OP_READ_EXT_INFO,
196 MGMT_OP_READ_CONTROLLER_CAP,
197 MGMT_OP_READ_EXP_FEATURES_INFO,
198 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
199 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
202 static const u16 mgmt_untrusted_events[] = {
204 MGMT_EV_INDEX_REMOVED,
205 MGMT_EV_NEW_SETTINGS,
206 MGMT_EV_CLASS_OF_DEV_CHANGED,
207 MGMT_EV_LOCAL_NAME_CHANGED,
208 MGMT_EV_UNCONF_INDEX_ADDED,
209 MGMT_EV_UNCONF_INDEX_REMOVED,
210 MGMT_EV_NEW_CONFIG_OPTIONS,
211 MGMT_EV_EXT_INDEX_ADDED,
212 MGMT_EV_EXT_INDEX_REMOVED,
213 MGMT_EV_EXT_INFO_CHANGED,
214 MGMT_EV_EXP_FEATURE_CHANGED,
217 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
219 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
220 "\x00\x00\x00\x00\x00\x00\x00\x00"
222 /* HCI to MGMT error code conversion table */
223 static const u8 mgmt_status_table[] = {
225 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
226 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
227 MGMT_STATUS_FAILED, /* Hardware Failure */
228 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
229 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
230 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
231 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
232 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
233 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
234 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
235 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
236 MGMT_STATUS_BUSY, /* Command Disallowed */
237 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
238 MGMT_STATUS_REJECTED, /* Rejected Security */
239 MGMT_STATUS_REJECTED, /* Rejected Personal */
240 MGMT_STATUS_TIMEOUT, /* Host Timeout */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
242 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
243 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
244 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
245 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
246 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
247 MGMT_STATUS_BUSY, /* Repeated Attempts */
248 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
249 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
250 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
251 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
252 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
253 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
254 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
255 MGMT_STATUS_FAILED, /* Unspecified Error */
256 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
257 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
258 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
259 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
260 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
261 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
262 MGMT_STATUS_FAILED, /* Unit Link Key Used */
263 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
264 MGMT_STATUS_TIMEOUT, /* Instant Passed */
265 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
266 MGMT_STATUS_FAILED, /* Transaction Collision */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
269 MGMT_STATUS_REJECTED, /* QoS Rejected */
270 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
271 MGMT_STATUS_REJECTED, /* Insufficient Security */
272 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
273 MGMT_STATUS_FAILED, /* Reserved for future use */
274 MGMT_STATUS_BUSY, /* Role Switch Pending */
275 MGMT_STATUS_FAILED, /* Reserved for future use */
276 MGMT_STATUS_FAILED, /* Slot Violation */
277 MGMT_STATUS_FAILED, /* Role Switch Failed */
278 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
279 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
280 MGMT_STATUS_BUSY, /* Host Busy Pairing */
281 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
282 MGMT_STATUS_BUSY, /* Controller Busy */
283 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
284 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
285 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
286 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
287 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
290 static u8 mgmt_errno_status(int err)
294 return MGMT_STATUS_SUCCESS;
296 return MGMT_STATUS_REJECTED;
298 return MGMT_STATUS_INVALID_PARAMS;
300 return MGMT_STATUS_NOT_SUPPORTED;
302 return MGMT_STATUS_BUSY;
304 return MGMT_STATUS_AUTH_FAILED;
306 return MGMT_STATUS_NO_RESOURCES;
308 return MGMT_STATUS_ALREADY_CONNECTED;
310 return MGMT_STATUS_DISCONNECTED;
313 return MGMT_STATUS_FAILED;
316 static u8 mgmt_status(int err)
319 return mgmt_errno_status(err);
321 if (err < ARRAY_SIZE(mgmt_status_table))
322 return mgmt_status_table[err];
324 return MGMT_STATUS_FAILED;
327 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
330 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
335 u16 len, int flag, struct sock *skip_sk)
337 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
342 struct sock *skip_sk)
344 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
345 HCI_SOCK_TRUSTED, skip_sk);
348 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
350 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
354 static u8 le_addr_type(u8 mgmt_addr_type)
356 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
357 return ADDR_LE_DEV_PUBLIC;
359 return ADDR_LE_DEV_RANDOM;
362 void mgmt_fill_version_info(void *ver)
364 struct mgmt_rp_read_version *rp = ver;
366 rp->version = MGMT_VERSION;
367 rp->revision = cpu_to_le16(MGMT_REVISION);
370 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
373 struct mgmt_rp_read_version rp;
375 bt_dev_dbg(hdev, "sock %p", sk);
377 mgmt_fill_version_info(&rp);
379 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
383 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
386 struct mgmt_rp_read_commands *rp;
387 u16 num_commands, num_events;
391 bt_dev_dbg(hdev, "sock %p", sk);
393 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
394 num_commands = ARRAY_SIZE(mgmt_commands);
395 num_events = ARRAY_SIZE(mgmt_events);
397 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
398 num_events = ARRAY_SIZE(mgmt_untrusted_events);
401 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
403 rp = kmalloc(rp_size, GFP_KERNEL);
407 rp->num_commands = cpu_to_le16(num_commands);
408 rp->num_events = cpu_to_le16(num_events);
410 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
411 __le16 *opcode = rp->opcodes;
413 for (i = 0; i < num_commands; i++, opcode++)
414 put_unaligned_le16(mgmt_commands[i], opcode);
416 for (i = 0; i < num_events; i++, opcode++)
417 put_unaligned_le16(mgmt_events[i], opcode);
419 __le16 *opcode = rp->opcodes;
421 for (i = 0; i < num_commands; i++, opcode++)
422 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
424 for (i = 0; i < num_events; i++, opcode++)
425 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
428 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
435 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
438 struct mgmt_rp_read_index_list *rp;
444 bt_dev_dbg(hdev, "sock %p", sk);
446 read_lock(&hci_dev_list_lock);
449 list_for_each_entry(d, &hci_dev_list, list) {
450 if (d->dev_type == HCI_PRIMARY &&
451 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
455 rp_len = sizeof(*rp) + (2 * count);
456 rp = kmalloc(rp_len, GFP_ATOMIC);
458 read_unlock(&hci_dev_list_lock);
463 list_for_each_entry(d, &hci_dev_list, list) {
464 if (hci_dev_test_flag(d, HCI_SETUP) ||
465 hci_dev_test_flag(d, HCI_CONFIG) ||
466 hci_dev_test_flag(d, HCI_USER_CHANNEL))
469 /* Devices marked as raw-only are neither configured
470 * nor unconfigured controllers.
472 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
475 if (d->dev_type == HCI_PRIMARY &&
476 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
477 rp->index[count++] = cpu_to_le16(d->id);
478 bt_dev_dbg(hdev, "Added hci%u", d->id);
482 rp->num_controllers = cpu_to_le16(count);
483 rp_len = sizeof(*rp) + (2 * count);
485 read_unlock(&hci_dev_list_lock);
487 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
495 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
496 void *data, u16 data_len)
498 struct mgmt_rp_read_unconf_index_list *rp;
504 bt_dev_dbg(hdev, "sock %p", sk);
506 read_lock(&hci_dev_list_lock);
509 list_for_each_entry(d, &hci_dev_list, list) {
510 if (d->dev_type == HCI_PRIMARY &&
511 hci_dev_test_flag(d, HCI_UNCONFIGURED))
515 rp_len = sizeof(*rp) + (2 * count);
516 rp = kmalloc(rp_len, GFP_ATOMIC);
518 read_unlock(&hci_dev_list_lock);
523 list_for_each_entry(d, &hci_dev_list, list) {
524 if (hci_dev_test_flag(d, HCI_SETUP) ||
525 hci_dev_test_flag(d, HCI_CONFIG) ||
526 hci_dev_test_flag(d, HCI_USER_CHANNEL))
529 /* Devices marked as raw-only are neither configured
530 * nor unconfigured controllers.
532 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
535 if (d->dev_type == HCI_PRIMARY &&
536 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
537 rp->index[count++] = cpu_to_le16(d->id);
538 bt_dev_dbg(hdev, "Added hci%u", d->id);
542 rp->num_controllers = cpu_to_le16(count);
543 rp_len = sizeof(*rp) + (2 * count);
545 read_unlock(&hci_dev_list_lock);
547 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
548 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
555 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
556 void *data, u16 data_len)
558 struct mgmt_rp_read_ext_index_list *rp;
563 bt_dev_dbg(hdev, "sock %p", sk);
565 read_lock(&hci_dev_list_lock);
568 list_for_each_entry(d, &hci_dev_list, list) {
569 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
573 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
575 read_unlock(&hci_dev_list_lock);
580 list_for_each_entry(d, &hci_dev_list, list) {
581 if (hci_dev_test_flag(d, HCI_SETUP) ||
582 hci_dev_test_flag(d, HCI_CONFIG) ||
583 hci_dev_test_flag(d, HCI_USER_CHANNEL))
586 /* Devices marked as raw-only are neither configured
587 * nor unconfigured controllers.
589 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
592 if (d->dev_type == HCI_PRIMARY) {
593 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
594 rp->entry[count].type = 0x01;
596 rp->entry[count].type = 0x00;
597 } else if (d->dev_type == HCI_AMP) {
598 rp->entry[count].type = 0x02;
603 rp->entry[count].bus = d->bus;
604 rp->entry[count++].index = cpu_to_le16(d->id);
605 bt_dev_dbg(hdev, "Added hci%u", d->id);
608 rp->num_controllers = cpu_to_le16(count);
610 read_unlock(&hci_dev_list_lock);
612 /* If this command is called at least once, then all the
613 * default index and unconfigured index events are disabled
614 * and from now on only extended index events are used.
616 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
617 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
618 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
620 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
621 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
622 struct_size(rp, entry, count));
629 static bool is_configured(struct hci_dev *hdev)
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
643 static __le32 get_missing_options(struct hci_dev *hdev)
647 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
648 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
649 options |= MGMT_OPTION_EXTERNAL_CONFIG;
651 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
652 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
653 !bacmp(&hdev->public_addr, BDADDR_ANY))
654 options |= MGMT_OPTION_PUBLIC_ADDRESS;
656 return cpu_to_le32(options);
659 static int new_options(struct hci_dev *hdev, struct sock *skip)
661 __le32 options = get_missing_options(hdev);
663 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
664 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
667 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
669 __le32 options = get_missing_options(hdev);
671 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
675 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
676 void *data, u16 data_len)
678 struct mgmt_rp_read_config_info rp;
681 bt_dev_dbg(hdev, "sock %p", sk);
685 memset(&rp, 0, sizeof(rp));
686 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
688 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
689 options |= MGMT_OPTION_EXTERNAL_CONFIG;
691 if (hdev->set_bdaddr)
692 options |= MGMT_OPTION_PUBLIC_ADDRESS;
694 rp.supported_options = cpu_to_le32(options);
695 rp.missing_options = get_missing_options(hdev);
697 hci_dev_unlock(hdev);
699 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
703 static u32 get_supported_phys(struct hci_dev *hdev)
705 u32 supported_phys = 0;
707 if (lmp_bredr_capable(hdev)) {
708 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
710 if (hdev->features[0][0] & LMP_3SLOT)
711 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
713 if (hdev->features[0][0] & LMP_5SLOT)
714 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
716 if (lmp_edr_2m_capable(hdev)) {
717 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
719 if (lmp_edr_3slot_capable(hdev))
720 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
722 if (lmp_edr_5slot_capable(hdev))
723 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
725 if (lmp_edr_3m_capable(hdev)) {
726 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev))
729 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
731 if (lmp_edr_5slot_capable(hdev))
732 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
737 if (lmp_le_capable(hdev)) {
738 supported_phys |= MGMT_PHY_LE_1M_TX;
739 supported_phys |= MGMT_PHY_LE_1M_RX;
741 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
742 supported_phys |= MGMT_PHY_LE_2M_TX;
743 supported_phys |= MGMT_PHY_LE_2M_RX;
746 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
747 supported_phys |= MGMT_PHY_LE_CODED_TX;
748 supported_phys |= MGMT_PHY_LE_CODED_RX;
752 return supported_phys;
755 static u32 get_selected_phys(struct hci_dev *hdev)
757 u32 selected_phys = 0;
759 if (lmp_bredr_capable(hdev)) {
760 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
762 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
763 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
765 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
766 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
768 if (lmp_edr_2m_capable(hdev)) {
769 if (!(hdev->pkt_type & HCI_2DH1))
770 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
772 if (lmp_edr_3slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH3))
774 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
776 if (lmp_edr_5slot_capable(hdev) &&
777 !(hdev->pkt_type & HCI_2DH5))
778 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
780 if (lmp_edr_3m_capable(hdev)) {
781 if (!(hdev->pkt_type & HCI_3DH1))
782 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
784 if (lmp_edr_3slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH3))
786 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
788 if (lmp_edr_5slot_capable(hdev) &&
789 !(hdev->pkt_type & HCI_3DH5))
790 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
795 if (lmp_le_capable(hdev)) {
796 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
797 selected_phys |= MGMT_PHY_LE_1M_TX;
799 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
800 selected_phys |= MGMT_PHY_LE_1M_RX;
802 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
803 selected_phys |= MGMT_PHY_LE_2M_TX;
805 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
806 selected_phys |= MGMT_PHY_LE_2M_RX;
808 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
809 selected_phys |= MGMT_PHY_LE_CODED_TX;
811 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
812 selected_phys |= MGMT_PHY_LE_CODED_RX;
815 return selected_phys;
818 static u32 get_configurable_phys(struct hci_dev *hdev)
820 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
821 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
824 static u32 get_supported_settings(struct hci_dev *hdev)
828 settings |= MGMT_SETTING_POWERED;
829 settings |= MGMT_SETTING_BONDABLE;
830 settings |= MGMT_SETTING_DEBUG_KEYS;
831 settings |= MGMT_SETTING_CONNECTABLE;
832 settings |= MGMT_SETTING_DISCOVERABLE;
834 if (lmp_bredr_capable(hdev)) {
835 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
836 settings |= MGMT_SETTING_FAST_CONNECTABLE;
837 settings |= MGMT_SETTING_BREDR;
838 settings |= MGMT_SETTING_LINK_SECURITY;
840 if (lmp_ssp_capable(hdev)) {
841 settings |= MGMT_SETTING_SSP;
842 if (IS_ENABLED(CONFIG_BT_HS))
843 settings |= MGMT_SETTING_HS;
846 if (lmp_sc_capable(hdev))
847 settings |= MGMT_SETTING_SECURE_CONN;
849 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
851 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
854 if (lmp_le_capable(hdev)) {
855 settings |= MGMT_SETTING_LE;
856 settings |= MGMT_SETTING_SECURE_CONN;
857 settings |= MGMT_SETTING_PRIVACY;
858 settings |= MGMT_SETTING_STATIC_ADDRESS;
859 settings |= MGMT_SETTING_ADVERTISING;
862 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
864 settings |= MGMT_SETTING_CONFIGURATION;
866 if (cis_central_capable(hdev))
867 settings |= MGMT_SETTING_CIS_CENTRAL;
869 if (cis_peripheral_capable(hdev))
870 settings |= MGMT_SETTING_CIS_PERIPHERAL;
872 settings |= MGMT_SETTING_PHY_CONFIGURATION;
877 static u32 get_current_settings(struct hci_dev *hdev)
881 if (hdev_is_powered(hdev))
882 settings |= MGMT_SETTING_POWERED;
884 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
885 settings |= MGMT_SETTING_CONNECTABLE;
887 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
888 settings |= MGMT_SETTING_FAST_CONNECTABLE;
890 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
891 settings |= MGMT_SETTING_DISCOVERABLE;
893 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
894 settings |= MGMT_SETTING_BONDABLE;
896 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
897 settings |= MGMT_SETTING_BREDR;
899 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
900 settings |= MGMT_SETTING_LE;
902 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
903 settings |= MGMT_SETTING_LINK_SECURITY;
905 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
906 settings |= MGMT_SETTING_SSP;
908 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
909 settings |= MGMT_SETTING_HS;
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
912 settings |= MGMT_SETTING_ADVERTISING;
914 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
915 settings |= MGMT_SETTING_SECURE_CONN;
917 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
918 settings |= MGMT_SETTING_DEBUG_KEYS;
920 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
921 settings |= MGMT_SETTING_PRIVACY;
923 /* The current setting for static address has two purposes. The
924 * first is to indicate if the static address will be used and
925 * the second is to indicate if it is actually set.
927 * This means if the static address is not configured, this flag
928 * will never be set. If the address is configured, then if the
929 * address is actually used decides if the flag is set or not.
931 * For single mode LE only controllers and dual-mode controllers
932 * with BR/EDR disabled, the existence of the static address will
935 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
936 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
937 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
938 if (bacmp(&hdev->static_addr, BDADDR_ANY))
939 settings |= MGMT_SETTING_STATIC_ADDRESS;
942 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
943 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
945 if (cis_central_capable(hdev))
946 settings |= MGMT_SETTING_CIS_CENTRAL;
948 if (cis_peripheral_capable(hdev))
949 settings |= MGMT_SETTING_CIS_PERIPHERAL;
954 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
956 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
959 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
961 struct mgmt_pending_cmd *cmd;
963 /* If there's a pending mgmt command the flags will not yet have
964 * their final values, so check for this first.
966 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
968 struct mgmt_mode *cp = cmd->param;
970 return LE_AD_GENERAL;
971 else if (cp->val == 0x02)
972 return LE_AD_LIMITED;
974 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
975 return LE_AD_LIMITED;
976 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
977 return LE_AD_GENERAL;
983 bool mgmt_get_connectable(struct hci_dev *hdev)
985 struct mgmt_pending_cmd *cmd;
987 /* If there's a pending mgmt command the flag will not yet have
988 * it's final value, so check for this first.
990 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
992 struct mgmt_mode *cp = cmd->param;
997 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
1000 static int service_cache_sync(struct hci_dev *hdev, void *data)
1002 hci_update_eir_sync(hdev);
1003 hci_update_class_sync(hdev);
1008 static void service_cache_off(struct work_struct *work)
1010 struct hci_dev *hdev = container_of(work, struct hci_dev,
1011 service_cache.work);
1013 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1016 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1019 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1021 /* The generation of a new RPA and programming it into the
1022 * controller happens in the hci_req_enable_advertising()
1025 if (ext_adv_capable(hdev))
1026 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1028 return hci_enable_advertising_sync(hdev);
1031 static void rpa_expired(struct work_struct *work)
1033 struct hci_dev *hdev = container_of(work, struct hci_dev,
1036 bt_dev_dbg(hdev, "");
1038 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1040 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1043 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1046 static void discov_off(struct work_struct *work)
1048 struct hci_dev *hdev = container_of(work, struct hci_dev,
1051 bt_dev_dbg(hdev, "");
1055 /* When discoverable timeout triggers, then just make sure
1056 * the limited discoverable flag is cleared. Even in the case
1057 * of a timeout triggered from general discoverable, it is
1058 * safe to unconditionally clear the flag.
1060 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1061 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1062 hdev->discov_timeout = 0;
1064 hci_update_discoverable(hdev);
1066 mgmt_new_settings(hdev);
1068 hci_dev_unlock(hdev);
1071 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1073 static void mesh_send_complete(struct hci_dev *hdev,
1074 struct mgmt_mesh_tx *mesh_tx, bool silent)
1076 u8 handle = mesh_tx->handle;
1079 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1080 sizeof(handle), NULL);
1082 mgmt_mesh_remove(mesh_tx);
1085 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1087 struct mgmt_mesh_tx *mesh_tx;
1089 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1090 hci_disable_advertising_sync(hdev);
1091 mesh_tx = mgmt_mesh_next(hdev, NULL);
1094 mesh_send_complete(hdev, mesh_tx, false);
1099 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1100 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1101 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1103 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1108 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1109 mesh_send_start_complete);
1112 mesh_send_complete(hdev, mesh_tx, false);
1114 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1117 static void mesh_send_done(struct work_struct *work)
1119 struct hci_dev *hdev = container_of(work, struct hci_dev,
1120 mesh_send_done.work);
1122 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1125 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1128 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1130 if (hci_dev_test_flag(hdev, HCI_MGMT))
1133 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1135 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1136 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1137 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1138 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1140 /* Non-mgmt controlled devices get this bit set
1141 * implicitly so that pairing works for them, however
1142 * for mgmt we require user-space to explicitly enable
1145 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1147 hci_dev_set_flag(hdev, HCI_MGMT);
1150 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1151 void *data, u16 data_len)
1153 struct mgmt_rp_read_info rp;
1155 bt_dev_dbg(hdev, "sock %p", sk);
1159 memset(&rp, 0, sizeof(rp));
1161 bacpy(&rp.bdaddr, &hdev->bdaddr);
1163 rp.version = hdev->hci_ver;
1164 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1166 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1167 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1169 memcpy(rp.dev_class, hdev->dev_class, 3);
1171 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1172 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1174 hci_dev_unlock(hdev);
1176 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1180 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1185 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1186 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1187 hdev->dev_class, 3);
1189 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1190 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1193 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1194 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1195 hdev->dev_name, name_len);
1197 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1198 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1199 hdev->short_name, name_len);
1204 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1205 void *data, u16 data_len)
1208 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1211 bt_dev_dbg(hdev, "sock %p", sk);
1213 memset(&buf, 0, sizeof(buf));
1217 bacpy(&rp->bdaddr, &hdev->bdaddr);
1219 rp->version = hdev->hci_ver;
1220 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1222 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1223 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1226 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1227 rp->eir_len = cpu_to_le16(eir_len);
1229 hci_dev_unlock(hdev);
1231 /* If this command is called at least once, then the events
1232 * for class of device and local name changes are disabled
1233 * and only the new extended controller information event
1236 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1237 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1238 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1241 sizeof(*rp) + eir_len);
1244 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1247 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1250 memset(buf, 0, sizeof(buf));
1252 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1253 ev->eir_len = cpu_to_le16(eir_len);
1255 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1256 sizeof(*ev) + eir_len,
1257 HCI_MGMT_EXT_INFO_EVENTS, skip);
1260 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1262 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1264 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1268 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1270 struct mgmt_ev_advertising_added ev;
1272 ev.instance = instance;
1274 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1277 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1280 struct mgmt_ev_advertising_removed ev;
1282 ev.instance = instance;
1284 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1287 static void cancel_adv_timeout(struct hci_dev *hdev)
1289 if (hdev->adv_instance_timeout) {
1290 hdev->adv_instance_timeout = 0;
1291 cancel_delayed_work(&hdev->adv_instance_expire);
1295 /* This function requires the caller holds hdev->lock */
1296 static void restart_le_actions(struct hci_dev *hdev)
1298 struct hci_conn_params *p;
1300 list_for_each_entry(p, &hdev->le_conn_params, list) {
1301 /* Needed for AUTO_OFF case where might not "really"
1302 * have been powered off.
1304 list_del_init(&p->action);
1306 switch (p->auto_connect) {
1307 case HCI_AUTO_CONN_DIRECT:
1308 case HCI_AUTO_CONN_ALWAYS:
1309 list_add(&p->action, &hdev->pend_le_conns);
1311 case HCI_AUTO_CONN_REPORT:
1312 list_add(&p->action, &hdev->pend_le_reports);
1320 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1322 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1324 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1325 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1328 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1330 struct mgmt_pending_cmd *cmd = data;
1331 struct mgmt_mode *cp;
1333 /* Make sure cmd still outstanding. */
1334 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1339 bt_dev_dbg(hdev, "err %d", err);
1344 restart_le_actions(hdev);
1345 hci_update_passive_scan(hdev);
1346 hci_dev_unlock(hdev);
1349 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1351 /* Only call new_setting for power on as power off is deferred
1352 * to hdev->power_off work which does call hci_dev_do_close.
1355 new_settings(hdev, cmd->sk);
1357 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1361 mgmt_pending_remove(cmd);
1364 static int set_powered_sync(struct hci_dev *hdev, void *data)
1366 struct mgmt_pending_cmd *cmd = data;
1367 struct mgmt_mode *cp = cmd->param;
1369 BT_DBG("%s", hdev->name);
1371 return hci_set_powered_sync(hdev, cp->val);
1374 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1377 struct mgmt_mode *cp = data;
1378 struct mgmt_pending_cmd *cmd;
1381 bt_dev_dbg(hdev, "sock %p", sk);
1383 if (cp->val != 0x00 && cp->val != 0x01)
1384 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1385 MGMT_STATUS_INVALID_PARAMS);
1389 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1390 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1395 if (!!cp->val == hdev_is_powered(hdev)) {
1396 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1400 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1406 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1407 mgmt_set_powered_complete);
1410 mgmt_pending_remove(cmd);
1413 hci_dev_unlock(hdev);
1417 int mgmt_new_settings(struct hci_dev *hdev)
1419 return new_settings(hdev, NULL);
1424 struct hci_dev *hdev;
1428 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1430 struct cmd_lookup *match = data;
1432 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1434 list_del(&cmd->list);
1436 if (match->sk == NULL) {
1437 match->sk = cmd->sk;
1438 sock_hold(match->sk);
1441 mgmt_pending_free(cmd);
1444 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1448 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1449 mgmt_pending_remove(cmd);
1452 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1454 if (cmd->cmd_complete) {
1457 cmd->cmd_complete(cmd, *status);
1458 mgmt_pending_remove(cmd);
1463 cmd_status_rsp(cmd, data);
1466 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1468 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1469 cmd->param, cmd->param_len);
1472 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1474 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1475 cmd->param, sizeof(struct mgmt_addr_info));
1478 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1480 if (!lmp_bredr_capable(hdev))
1481 return MGMT_STATUS_NOT_SUPPORTED;
1482 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1483 return MGMT_STATUS_REJECTED;
1485 return MGMT_STATUS_SUCCESS;
1488 static u8 mgmt_le_support(struct hci_dev *hdev)
1490 if (!lmp_le_capable(hdev))
1491 return MGMT_STATUS_NOT_SUPPORTED;
1492 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1493 return MGMT_STATUS_REJECTED;
1495 return MGMT_STATUS_SUCCESS;
1498 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1501 struct mgmt_pending_cmd *cmd = data;
1503 bt_dev_dbg(hdev, "err %d", err);
1505 /* Make sure cmd still outstanding. */
1506 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1512 u8 mgmt_err = mgmt_status(err);
1513 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1514 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1518 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1519 hdev->discov_timeout > 0) {
1520 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1521 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1524 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1525 new_settings(hdev, cmd->sk);
1528 mgmt_pending_remove(cmd);
1529 hci_dev_unlock(hdev);
1532 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1534 BT_DBG("%s", hdev->name);
1536 return hci_update_discoverable_sync(hdev);
1539 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1542 struct mgmt_cp_set_discoverable *cp = data;
1543 struct mgmt_pending_cmd *cmd;
1547 bt_dev_dbg(hdev, "sock %p", sk);
1549 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1550 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1552 MGMT_STATUS_REJECTED);
1554 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1556 MGMT_STATUS_INVALID_PARAMS);
1558 timeout = __le16_to_cpu(cp->timeout);
1560 /* Disabling discoverable requires that no timeout is set,
1561 * and enabling limited discoverable requires a timeout.
1563 if ((cp->val == 0x00 && timeout > 0) ||
1564 (cp->val == 0x02 && timeout == 0))
1565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_INVALID_PARAMS);
1570 if (!hdev_is_powered(hdev) && timeout > 0) {
1571 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_NOT_POWERED);
1576 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1577 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1578 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1583 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1584 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1585 MGMT_STATUS_REJECTED);
1589 if (hdev->advertising_paused) {
1590 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 if (!hdev_is_powered(hdev)) {
1596 bool changed = false;
1598 /* Setting limited discoverable when powered off is
1599 * not a valid operation since it requires a timeout
1600 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1602 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1603 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1607 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1612 err = new_settings(hdev, sk);
1617 /* If the current mode is the same, then just update the timeout
1618 * value with the new value. And if only the timeout gets updated,
1619 * then no need for any HCI transactions.
1621 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1622 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1623 HCI_LIMITED_DISCOVERABLE)) {
1624 cancel_delayed_work(&hdev->discov_off);
1625 hdev->discov_timeout = timeout;
1627 if (cp->val && hdev->discov_timeout > 0) {
1628 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1629 queue_delayed_work(hdev->req_workqueue,
1630 &hdev->discov_off, to);
1633 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1637 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1643 /* Cancel any potential discoverable timeout that might be
1644 * still active and store new timeout value. The arming of
1645 * the timeout happens in the complete handler.
1647 cancel_delayed_work(&hdev->discov_off);
1648 hdev->discov_timeout = timeout;
1651 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1653 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1655 /* Limited discoverable mode */
1656 if (cp->val == 0x02)
1657 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1659 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1661 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1662 mgmt_set_discoverable_complete);
1665 mgmt_pending_remove(cmd);
1668 hci_dev_unlock(hdev);
1672 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1675 struct mgmt_pending_cmd *cmd = data;
1677 bt_dev_dbg(hdev, "err %d", err);
1679 /* Make sure cmd still outstanding. */
1680 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1686 u8 mgmt_err = mgmt_status(err);
1687 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1691 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1692 new_settings(hdev, cmd->sk);
1696 mgmt_pending_remove(cmd);
1698 hci_dev_unlock(hdev);
1701 static int set_connectable_update_settings(struct hci_dev *hdev,
1702 struct sock *sk, u8 val)
1704 bool changed = false;
1707 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1711 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1713 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1714 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1717 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1722 hci_update_scan(hdev);
1723 hci_update_passive_scan(hdev);
1724 return new_settings(hdev, sk);
1730 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1732 BT_DBG("%s", hdev->name);
1734 return hci_update_connectable_sync(hdev);
1737 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1740 struct mgmt_mode *cp = data;
1741 struct mgmt_pending_cmd *cmd;
1744 bt_dev_dbg(hdev, "sock %p", sk);
1746 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1747 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1749 MGMT_STATUS_REJECTED);
1751 if (cp->val != 0x00 && cp->val != 0x01)
1752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1753 MGMT_STATUS_INVALID_PARAMS);
1757 if (!hdev_is_powered(hdev)) {
1758 err = set_connectable_update_settings(hdev, sk, cp->val);
1762 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1763 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1764 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1769 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1776 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1778 if (hdev->discov_timeout > 0)
1779 cancel_delayed_work(&hdev->discov_off);
1781 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1782 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1783 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1786 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1787 mgmt_set_connectable_complete);
1790 mgmt_pending_remove(cmd);
1793 hci_dev_unlock(hdev);
1797 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1800 struct mgmt_mode *cp = data;
1804 bt_dev_dbg(hdev, "sock %p", sk);
1806 if (cp->val != 0x00 && cp->val != 0x01)
1807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1808 MGMT_STATUS_INVALID_PARAMS);
1813 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1815 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1817 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1822 /* In limited privacy mode the change of bondable mode
1823 * may affect the local advertising address.
1825 hci_update_discoverable(hdev);
1827 err = new_settings(hdev, sk);
1831 hci_dev_unlock(hdev);
1835 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1838 struct mgmt_mode *cp = data;
1839 struct mgmt_pending_cmd *cmd;
1843 bt_dev_dbg(hdev, "sock %p", sk);
1845 status = mgmt_bredr_support(hdev);
1847 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1850 if (cp->val != 0x00 && cp->val != 0x01)
1851 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1852 MGMT_STATUS_INVALID_PARAMS);
1856 if (!hdev_is_powered(hdev)) {
1857 bool changed = false;
1859 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1860 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1864 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1869 err = new_settings(hdev, sk);
1874 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1875 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1882 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1883 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1887 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1893 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1895 mgmt_pending_remove(cmd);
1900 hci_dev_unlock(hdev);
1904 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1906 struct cmd_lookup match = { NULL, hdev };
1907 struct mgmt_pending_cmd *cmd = data;
1908 struct mgmt_mode *cp = cmd->param;
1909 u8 enable = cp->val;
1912 /* Make sure cmd still outstanding. */
1913 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1917 u8 mgmt_err = mgmt_status(err);
1919 if (enable && hci_dev_test_and_clear_flag(hdev,
1921 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1922 new_settings(hdev, NULL);
1925 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1931 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1933 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1936 changed = hci_dev_test_and_clear_flag(hdev,
1939 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1942 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1945 new_settings(hdev, match.sk);
1950 hci_update_eir_sync(hdev);
1953 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1955 struct mgmt_pending_cmd *cmd = data;
1956 struct mgmt_mode *cp = cmd->param;
1957 bool changed = false;
1961 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1963 err = hci_write_ssp_mode_sync(hdev, cp->val);
1965 if (!err && changed)
1966 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
1971 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1973 struct mgmt_mode *cp = data;
1974 struct mgmt_pending_cmd *cmd;
1978 bt_dev_dbg(hdev, "sock %p", sk);
1980 status = mgmt_bredr_support(hdev);
1982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1984 if (!lmp_ssp_capable(hdev))
1985 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1986 MGMT_STATUS_NOT_SUPPORTED);
1988 if (cp->val != 0x00 && cp->val != 0x01)
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1990 MGMT_STATUS_INVALID_PARAMS);
1994 if (!hdev_is_powered(hdev)) {
1998 changed = !hci_dev_test_and_set_flag(hdev,
2001 changed = hci_dev_test_and_clear_flag(hdev,
2004 changed = hci_dev_test_and_clear_flag(hdev,
2007 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2010 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2015 err = new_settings(hdev, sk);
2020 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2021 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2026 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2027 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2031 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2035 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2039 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2040 MGMT_STATUS_FAILED);
2043 mgmt_pending_remove(cmd);
2047 hci_dev_unlock(hdev);
2051 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2053 struct mgmt_mode *cp = data;
2058 bt_dev_dbg(hdev, "sock %p", sk);
2060 if (!IS_ENABLED(CONFIG_BT_HS))
2061 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2062 MGMT_STATUS_NOT_SUPPORTED);
2064 status = mgmt_bredr_support(hdev);
2066 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2068 if (!lmp_ssp_capable(hdev))
2069 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2070 MGMT_STATUS_NOT_SUPPORTED);
2072 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2073 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2074 MGMT_STATUS_REJECTED);
2076 if (cp->val != 0x00 && cp->val != 0x01)
2077 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2078 MGMT_STATUS_INVALID_PARAMS);
2082 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2083 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2089 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2091 if (hdev_is_powered(hdev)) {
2092 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2093 MGMT_STATUS_REJECTED);
2097 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2100 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2105 err = new_settings(hdev, sk);
2108 hci_dev_unlock(hdev);
2112 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2114 struct cmd_lookup match = { NULL, hdev };
2115 u8 status = mgmt_status(err);
2117 bt_dev_dbg(hdev, "err %d", err);
2120 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2125 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2127 new_settings(hdev, match.sk);
2133 static int set_le_sync(struct hci_dev *hdev, void *data)
2135 struct mgmt_pending_cmd *cmd = data;
2136 struct mgmt_mode *cp = cmd->param;
2141 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2143 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2144 hci_disable_advertising_sync(hdev);
2146 if (ext_adv_capable(hdev))
2147 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
2149 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2152 err = hci_write_le_host_supported_sync(hdev, val, 0);
2154 /* Make sure the controller has a good default for
2155 * advertising data. Restrict the update to when LE
2156 * has actually been enabled. During power on, the
2157 * update in powered_update_hci will take care of it.
2159 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2160 if (ext_adv_capable(hdev)) {
2163 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2165 hci_update_scan_rsp_data_sync(hdev, 0x00);
2167 hci_update_adv_data_sync(hdev, 0x00);
2168 hci_update_scan_rsp_data_sync(hdev, 0x00);
2171 hci_update_passive_scan(hdev);
2177 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2179 struct mgmt_pending_cmd *cmd = data;
2180 u8 status = mgmt_status(err);
2181 struct sock *sk = cmd->sk;
2184 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2185 cmd_status_rsp, &status);
2189 mgmt_pending_remove(cmd);
2190 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
2193 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2195 struct mgmt_pending_cmd *cmd = data;
2196 struct mgmt_cp_set_mesh *cp = cmd->param;
2197 size_t len = cmd->param_len;
2199 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2202 hci_dev_set_flag(hdev, HCI_MESH);
2204 hci_dev_clear_flag(hdev, HCI_MESH);
2208 /* If filters don't fit, forward all adv pkts */
2209 if (len <= sizeof(hdev->mesh_ad_types))
2210 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2212 hci_update_passive_scan_sync(hdev);
2216 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2218 struct mgmt_cp_set_mesh *cp = data;
2219 struct mgmt_pending_cmd *cmd;
2222 bt_dev_dbg(hdev, "sock %p", sk);
2224 if (!lmp_le_capable(hdev) ||
2225 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2226 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2227 MGMT_STATUS_NOT_SUPPORTED);
2229 if (cp->enable != 0x00 && cp->enable != 0x01)
2230 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2231 MGMT_STATUS_INVALID_PARAMS);
2235 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2239 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2243 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2244 MGMT_STATUS_FAILED);
2247 mgmt_pending_remove(cmd);
2250 hci_dev_unlock(hdev);
2254 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2256 struct mgmt_mesh_tx *mesh_tx = data;
2257 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2258 unsigned long mesh_send_interval;
2259 u8 mgmt_err = mgmt_status(err);
2261 /* Report any errors here, but don't report completion */
2264 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2265 /* Send Complete Error Code for handle */
2266 mesh_send_complete(hdev, mesh_tx, false);
2270 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2271 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2272 mesh_send_interval);
2275 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2277 struct mgmt_mesh_tx *mesh_tx = data;
2278 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2279 struct adv_info *adv, *next_instance;
2280 u8 instance = hdev->le_num_of_adv_sets + 1;
2281 u16 timeout, duration;
2284 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2285 return MGMT_STATUS_BUSY;
2288 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2289 adv = hci_add_adv_instance(hdev, instance, 0,
2290 send->adv_data_len, send->adv_data,
2293 HCI_ADV_TX_POWER_NO_PREFERENCE,
2294 hdev->le_adv_min_interval,
2295 hdev->le_adv_max_interval,
2299 mesh_tx->instance = instance;
2303 if (hdev->cur_adv_instance == instance) {
2304 /* If the currently advertised instance is being changed then
2305 * cancel the current advertising and schedule the next
2306 * instance. If there is only one instance then the overridden
2307 * advertising data will be visible right away.
2309 cancel_adv_timeout(hdev);
2311 next_instance = hci_get_next_instance(hdev, instance);
2313 instance = next_instance->instance;
2316 } else if (hdev->adv_instance_timeout) {
2317 /* Immediately advertise the new instance if no other, or
2318 * let it go naturally from queue if ADV is already happening
2324 return hci_schedule_adv_instance_sync(hdev, instance, true);
2329 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2331 struct mgmt_rp_mesh_read_features *rp = data;
2333 if (rp->used_handles >= rp->max_handles)
2336 rp->handles[rp->used_handles++] = mesh_tx->handle;
2339 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2340 void *data, u16 len)
2342 struct mgmt_rp_mesh_read_features rp;
2344 if (!lmp_le_capable(hdev) ||
2345 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2346 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2347 MGMT_STATUS_NOT_SUPPORTED);
2349 memset(&rp, 0, sizeof(rp));
2350 rp.index = cpu_to_le16(hdev->id);
2351 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2352 rp.max_handles = MESH_HANDLES_MAX;
2357 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2359 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2360 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2362 hci_dev_unlock(hdev);
2366 static int send_cancel(struct hci_dev *hdev, void *data)
2368 struct mgmt_pending_cmd *cmd = data;
2369 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2370 struct mgmt_mesh_tx *mesh_tx;
2372 if (!cancel->handle) {
2374 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2377 mesh_send_complete(hdev, mesh_tx, false);
2380 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
2382 if (mesh_tx && mesh_tx->sk == cmd->sk)
2383 mesh_send_complete(hdev, mesh_tx, false);
2386 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2388 mgmt_pending_free(cmd);
2393 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2394 void *data, u16 len)
2396 struct mgmt_pending_cmd *cmd;
2399 if (!lmp_le_capable(hdev) ||
2400 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2401 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2402 MGMT_STATUS_NOT_SUPPORTED);
2404 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2405 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2406 MGMT_STATUS_REJECTED);
2409 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2413 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2416 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2417 MGMT_STATUS_FAILED);
2420 mgmt_pending_free(cmd);
2423 hci_dev_unlock(hdev);
2427 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2429 struct mgmt_mesh_tx *mesh_tx;
2430 struct mgmt_cp_mesh_send *send = data;
2431 struct mgmt_rp_mesh_read_features rp;
2435 if (!lmp_le_capable(hdev) ||
2436 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2437 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2438 MGMT_STATUS_NOT_SUPPORTED);
2439 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2440 len <= MGMT_MESH_SEND_SIZE ||
2441 len > (MGMT_MESH_SEND_SIZE + 31))
2442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2443 MGMT_STATUS_REJECTED);
2447 memset(&rp, 0, sizeof(rp));
2448 rp.max_handles = MESH_HANDLES_MAX;
2450 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2452 if (rp.max_handles <= rp.used_handles) {
2453 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2458 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2459 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2464 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2465 mesh_send_start_complete);
2468 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2469 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2470 MGMT_STATUS_FAILED);
2474 mgmt_mesh_remove(mesh_tx);
2477 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2479 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2480 &mesh_tx->handle, 1);
2484 hci_dev_unlock(hdev);
2488 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2490 struct mgmt_mode *cp = data;
2491 struct mgmt_pending_cmd *cmd;
2495 bt_dev_dbg(hdev, "sock %p", sk);
2497 if (!lmp_le_capable(hdev))
2498 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2499 MGMT_STATUS_NOT_SUPPORTED);
2501 if (cp->val != 0x00 && cp->val != 0x01)
2502 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2503 MGMT_STATUS_INVALID_PARAMS);
2505 /* Bluetooth single mode LE only controllers or dual-mode
2506 * controllers configured as LE only devices, do not allow
2507 * switching LE off. These have either LE enabled explicitly
2508 * or BR/EDR has been previously switched off.
2510 * When trying to enable an already enabled LE, then gracefully
2511 * send a positive response. Trying to disable it however will
2512 * result into rejection.
2514 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2515 if (cp->val == 0x01)
2516 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2518 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2519 MGMT_STATUS_REJECTED);
2525 enabled = lmp_host_le_capable(hdev);
2527 if (!hdev_is_powered(hdev) || val == enabled) {
2528 bool changed = false;
2530 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2531 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
2535 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2536 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2540 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2545 err = new_settings(hdev, sk);
2550 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2551 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2552 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2557 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2561 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2565 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2566 MGMT_STATUS_FAILED);
2569 mgmt_pending_remove(cmd);
2573 hci_dev_unlock(hdev);
2577 /* This is a helper function to test for pending mgmt commands that can
2578 * cause CoD or EIR HCI commands. We can only allow one such pending
2579 * mgmt command at a time since otherwise we cannot easily track what
2580 * the current values are, will be, and based on that calculate if a new
2581 * HCI command needs to be sent and if yes with what value.
2583 static bool pending_eir_or_class(struct hci_dev *hdev)
2585 struct mgmt_pending_cmd *cmd;
2587 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2588 switch (cmd->opcode) {
2589 case MGMT_OP_ADD_UUID:
2590 case MGMT_OP_REMOVE_UUID:
2591 case MGMT_OP_SET_DEV_CLASS:
2592 case MGMT_OP_SET_POWERED:
2600 static const u8 bluetooth_base_uuid[] = {
2601 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2602 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2605 static u8 get_uuid_size(const u8 *uuid)
2609 if (memcmp(uuid, bluetooth_base_uuid, 12))
2612 val = get_unaligned_le32(&uuid[12]);
2619 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2621 struct mgmt_pending_cmd *cmd = data;
2623 bt_dev_dbg(hdev, "err %d", err);
2625 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2626 mgmt_status(err), hdev->dev_class, 3);
2628 mgmt_pending_free(cmd);
2631 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2635 err = hci_update_class_sync(hdev);
2639 return hci_update_eir_sync(hdev);
2642 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2644 struct mgmt_cp_add_uuid *cp = data;
2645 struct mgmt_pending_cmd *cmd;
2646 struct bt_uuid *uuid;
2649 bt_dev_dbg(hdev, "sock %p", sk);
2653 if (pending_eir_or_class(hdev)) {
2654 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2659 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2665 memcpy(uuid->uuid, cp->uuid, 16);
2666 uuid->svc_hint = cp->svc_hint;
2667 uuid->size = get_uuid_size(cp->uuid);
2669 list_add_tail(&uuid->list, &hdev->uuids);
2671 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2677 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2679 mgmt_pending_free(cmd);
2684 hci_dev_unlock(hdev);
2688 static bool enable_service_cache(struct hci_dev *hdev)
2690 if (!hdev_is_powered(hdev))
2693 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2694 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2702 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2706 err = hci_update_class_sync(hdev);
2710 return hci_update_eir_sync(hdev);
2713 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2716 struct mgmt_cp_remove_uuid *cp = data;
2717 struct mgmt_pending_cmd *cmd;
2718 struct bt_uuid *match, *tmp;
2719 static const u8 bt_uuid_any[] = {
2720 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2724 bt_dev_dbg(hdev, "sock %p", sk);
2728 if (pending_eir_or_class(hdev)) {
2729 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2734 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2735 hci_uuids_clear(hdev);
2737 if (enable_service_cache(hdev)) {
2738 err = mgmt_cmd_complete(sk, hdev->id,
2739 MGMT_OP_REMOVE_UUID,
2740 0, hdev->dev_class, 3);
2749 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2750 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2753 list_del(&match->list);
2759 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2760 MGMT_STATUS_INVALID_PARAMS);
2765 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2771 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2772 mgmt_class_complete);
2774 mgmt_pending_free(cmd);
2777 hci_dev_unlock(hdev);
2781 static int set_class_sync(struct hci_dev *hdev, void *data)
2785 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2786 cancel_delayed_work_sync(&hdev->service_cache);
2787 err = hci_update_eir_sync(hdev);
2793 return hci_update_class_sync(hdev);
2796 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2799 struct mgmt_cp_set_dev_class *cp = data;
2800 struct mgmt_pending_cmd *cmd;
2803 bt_dev_dbg(hdev, "sock %p", sk);
2805 if (!lmp_bredr_capable(hdev))
2806 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2807 MGMT_STATUS_NOT_SUPPORTED);
2811 if (pending_eir_or_class(hdev)) {
2812 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2817 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2818 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2819 MGMT_STATUS_INVALID_PARAMS);
2823 hdev->major_class = cp->major;
2824 hdev->minor_class = cp->minor;
2826 if (!hdev_is_powered(hdev)) {
2827 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2828 hdev->dev_class, 3);
2832 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2838 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2839 mgmt_class_complete);
2841 mgmt_pending_free(cmd);
2844 hci_dev_unlock(hdev);
2848 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2851 struct mgmt_cp_load_link_keys *cp = data;
2852 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2853 sizeof(struct mgmt_link_key_info));
2854 u16 key_count, expected_len;
2858 bt_dev_dbg(hdev, "sock %p", sk);
2860 if (!lmp_bredr_capable(hdev))
2861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2862 MGMT_STATUS_NOT_SUPPORTED);
2864 key_count = __le16_to_cpu(cp->key_count);
2865 if (key_count > max_key_count) {
2866 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2868 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2869 MGMT_STATUS_INVALID_PARAMS);
2872 expected_len = struct_size(cp, keys, key_count);
2873 if (expected_len != len) {
2874 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2876 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2877 MGMT_STATUS_INVALID_PARAMS);
2880 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2881 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2882 MGMT_STATUS_INVALID_PARAMS);
2884 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
2887 for (i = 0; i < key_count; i++) {
2888 struct mgmt_link_key_info *key = &cp->keys[i];
2890 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2891 return mgmt_cmd_status(sk, hdev->id,
2892 MGMT_OP_LOAD_LINK_KEYS,
2893 MGMT_STATUS_INVALID_PARAMS);
2898 hci_link_keys_clear(hdev);
2901 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2903 changed = hci_dev_test_and_clear_flag(hdev,
2904 HCI_KEEP_DEBUG_KEYS);
2907 new_settings(hdev, NULL);
2909 for (i = 0; i < key_count; i++) {
2910 struct mgmt_link_key_info *key = &cp->keys[i];
2912 if (hci_is_blocked_key(hdev,
2913 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2915 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2920 /* Always ignore debug keys and require a new pairing if
2921 * the user wants to use them.
2923 if (key->type == HCI_LK_DEBUG_COMBINATION)
2926 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2927 key->type, key->pin_len, NULL);
2930 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2932 hci_dev_unlock(hdev);
2937 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2938 u8 addr_type, struct sock *skip_sk)
2940 struct mgmt_ev_device_unpaired ev;
2942 bacpy(&ev.addr.bdaddr, bdaddr);
2943 ev.addr.type = addr_type;
2945 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2949 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2951 struct mgmt_pending_cmd *cmd = data;
2952 struct mgmt_cp_unpair_device *cp = cmd->param;
2955 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2957 cmd->cmd_complete(cmd, err);
2958 mgmt_pending_free(cmd);
2961 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2963 struct mgmt_pending_cmd *cmd = data;
2964 struct mgmt_cp_unpair_device *cp = cmd->param;
2965 struct hci_conn *conn;
2967 if (cp->addr.type == BDADDR_BREDR)
2968 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2971 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2972 le_addr_type(cp->addr.type));
2977 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
2980 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2983 struct mgmt_cp_unpair_device *cp = data;
2984 struct mgmt_rp_unpair_device rp;
2985 struct hci_conn_params *params;
2986 struct mgmt_pending_cmd *cmd;
2987 struct hci_conn *conn;
2991 memset(&rp, 0, sizeof(rp));
2992 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2993 rp.addr.type = cp->addr.type;
2995 if (!bdaddr_type_is_valid(cp->addr.type))
2996 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2997 MGMT_STATUS_INVALID_PARAMS,
3000 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3001 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3002 MGMT_STATUS_INVALID_PARAMS,
3007 if (!hdev_is_powered(hdev)) {
3008 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3009 MGMT_STATUS_NOT_POWERED, &rp,
3014 if (cp->addr.type == BDADDR_BREDR) {
3015 /* If disconnection is requested, then look up the
3016 * connection. If the remote device is connected, it
3017 * will be later used to terminate the link.
3019 * Setting it to NULL explicitly will cause no
3020 * termination of the link.
3023 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3028 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3030 err = mgmt_cmd_complete(sk, hdev->id,
3031 MGMT_OP_UNPAIR_DEVICE,
3032 MGMT_STATUS_NOT_PAIRED, &rp,
3040 /* LE address type */
3041 addr_type = le_addr_type(cp->addr.type);
3043 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3044 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3046 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3047 MGMT_STATUS_NOT_PAIRED, &rp,
3052 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
3054 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3059 /* Defer clearing up the connection parameters until closing to
3060 * give a chance of keeping them if a repairing happens.
3062 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3064 /* Disable auto-connection parameters if present */
3065 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3067 if (params->explicit_connect)
3068 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3070 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3073 /* If disconnection is not requested, then clear the connection
3074 * variable so that the link is not terminated.
3076 if (!cp->disconnect)
3080 /* If the connection variable is set, then termination of the
3081 * link is requested.
3084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3086 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3090 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3097 cmd->cmd_complete = addr_cmd_complete;
3099 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3100 unpair_device_complete);
3102 mgmt_pending_free(cmd);
3105 hci_dev_unlock(hdev);
3109 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3112 struct mgmt_cp_disconnect *cp = data;
3113 struct mgmt_rp_disconnect rp;
3114 struct mgmt_pending_cmd *cmd;
3115 struct hci_conn *conn;
3118 bt_dev_dbg(hdev, "sock %p", sk);
3120 memset(&rp, 0, sizeof(rp));
3121 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3122 rp.addr.type = cp->addr.type;
3124 if (!bdaddr_type_is_valid(cp->addr.type))
3125 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3126 MGMT_STATUS_INVALID_PARAMS,
3131 if (!test_bit(HCI_UP, &hdev->flags)) {
3132 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3133 MGMT_STATUS_NOT_POWERED, &rp,
3138 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3139 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3140 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3144 if (cp->addr.type == BDADDR_BREDR)
3145 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3148 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3149 le_addr_type(cp->addr.type));
3151 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3152 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3153 MGMT_STATUS_NOT_CONNECTED, &rp,
3158 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3164 cmd->cmd_complete = generic_cmd_complete;
3166 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3168 mgmt_pending_remove(cmd);
3171 hci_dev_unlock(hdev);
3175 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3177 switch (link_type) {
3179 switch (addr_type) {
3180 case ADDR_LE_DEV_PUBLIC:
3181 return BDADDR_LE_PUBLIC;
3184 /* Fallback to LE Random address type */
3185 return BDADDR_LE_RANDOM;
3189 /* Fallback to BR/EDR type */
3190 return BDADDR_BREDR;
3194 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3197 struct mgmt_rp_get_connections *rp;
3202 bt_dev_dbg(hdev, "sock %p", sk);
3206 if (!hdev_is_powered(hdev)) {
3207 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3208 MGMT_STATUS_NOT_POWERED);
3213 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3214 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3218 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
3225 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3226 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3228 bacpy(&rp->addr[i].bdaddr, &c->dst);
3229 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3230 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3235 rp->conn_count = cpu_to_le16(i);
3237 /* Recalculate length in case of filtered SCO connections, etc */
3238 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3239 struct_size(rp, addr, i));
3244 hci_dev_unlock(hdev);
3248 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3249 struct mgmt_cp_pin_code_neg_reply *cp)
3251 struct mgmt_pending_cmd *cmd;
3254 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3259 cmd->cmd_complete = addr_cmd_complete;
3261 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3262 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
3264 mgmt_pending_remove(cmd);
3269 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3272 struct hci_conn *conn;
3273 struct mgmt_cp_pin_code_reply *cp = data;
3274 struct hci_cp_pin_code_reply reply;
3275 struct mgmt_pending_cmd *cmd;
3278 bt_dev_dbg(hdev, "sock %p", sk);
3282 if (!hdev_is_powered(hdev)) {
3283 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3284 MGMT_STATUS_NOT_POWERED);
3288 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3290 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3291 MGMT_STATUS_NOT_CONNECTED);
3295 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3296 struct mgmt_cp_pin_code_neg_reply ncp;
3298 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3300 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3302 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3304 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3305 MGMT_STATUS_INVALID_PARAMS);
3310 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3316 cmd->cmd_complete = addr_cmd_complete;
3318 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3319 reply.pin_len = cp->pin_len;
3320 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3322 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3324 mgmt_pending_remove(cmd);
3327 hci_dev_unlock(hdev);
3331 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3334 struct mgmt_cp_set_io_capability *cp = data;
3336 bt_dev_dbg(hdev, "sock %p", sk);
3338 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3339 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3340 MGMT_STATUS_INVALID_PARAMS);
3344 hdev->io_capability = cp->io_capability;
3346 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3348 hci_dev_unlock(hdev);
3350 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
3354 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3356 struct hci_dev *hdev = conn->hdev;
3357 struct mgmt_pending_cmd *cmd;
3359 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3360 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3363 if (cmd->user_data != conn)
3372 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3374 struct mgmt_rp_pair_device rp;
3375 struct hci_conn *conn = cmd->user_data;
3378 bacpy(&rp.addr.bdaddr, &conn->dst);
3379 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3381 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3382 status, &rp, sizeof(rp));
3384 /* So we don't get further callbacks for this connection */
3385 conn->connect_cfm_cb = NULL;
3386 conn->security_cfm_cb = NULL;
3387 conn->disconn_cfm_cb = NULL;
3389 hci_conn_drop(conn);
3391 /* The device is paired so there is no need to remove
3392 * its connection parameters anymore.
3394 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3401 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3403 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3404 struct mgmt_pending_cmd *cmd;
3406 cmd = find_pairing(conn);
3408 cmd->cmd_complete(cmd, status);
3409 mgmt_pending_remove(cmd);
3413 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3415 struct mgmt_pending_cmd *cmd;
3417 BT_DBG("status %u", status);
3419 cmd = find_pairing(conn);
3421 BT_DBG("Unable to find a pending command");
3425 cmd->cmd_complete(cmd, mgmt_status(status));
3426 mgmt_pending_remove(cmd);
3429 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3431 struct mgmt_pending_cmd *cmd;
3433 BT_DBG("status %u", status);
3438 cmd = find_pairing(conn);
3440 BT_DBG("Unable to find a pending command");
3444 cmd->cmd_complete(cmd, mgmt_status(status));
3445 mgmt_pending_remove(cmd);
3448 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3451 struct mgmt_cp_pair_device *cp = data;
3452 struct mgmt_rp_pair_device rp;
3453 struct mgmt_pending_cmd *cmd;
3454 u8 sec_level, auth_type;
3455 struct hci_conn *conn;
3458 bt_dev_dbg(hdev, "sock %p", sk);
3460 memset(&rp, 0, sizeof(rp));
3461 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3462 rp.addr.type = cp->addr.type;
3464 if (!bdaddr_type_is_valid(cp->addr.type))
3465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3466 MGMT_STATUS_INVALID_PARAMS,
3469 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3470 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3471 MGMT_STATUS_INVALID_PARAMS,
3476 if (!hdev_is_powered(hdev)) {
3477 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3478 MGMT_STATUS_NOT_POWERED, &rp,
3483 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3484 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3485 MGMT_STATUS_ALREADY_PAIRED, &rp,
3490 sec_level = BT_SECURITY_MEDIUM;
3491 auth_type = HCI_AT_DEDICATED_BONDING;
3493 if (cp->addr.type == BDADDR_BREDR) {
3494 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3495 auth_type, CONN_REASON_PAIR_DEVICE);
3497 u8 addr_type = le_addr_type(cp->addr.type);
3498 struct hci_conn_params *p;
3500 /* When pairing a new device, it is expected to remember
3501 * this device for future connections. Adding the connection
3502 * parameter information ahead of time allows tracking
3503 * of the peripheral preferred values and will speed up any
3504 * further connection establishment.
3506 * If connection parameters already exist, then they
3507 * will be kept and this function does nothing.
3509 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3511 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3512 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3514 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3515 sec_level, HCI_LE_CONN_TIMEOUT,
3516 CONN_REASON_PAIR_DEVICE);
3522 if (PTR_ERR(conn) == -EBUSY)
3523 status = MGMT_STATUS_BUSY;
3524 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3525 status = MGMT_STATUS_NOT_SUPPORTED;
3526 else if (PTR_ERR(conn) == -ECONNREFUSED)
3527 status = MGMT_STATUS_REJECTED;
3529 status = MGMT_STATUS_CONNECT_FAILED;
3531 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3532 status, &rp, sizeof(rp));
3536 if (conn->connect_cfm_cb) {
3537 hci_conn_drop(conn);
3538 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3539 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3543 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3546 hci_conn_drop(conn);
3550 cmd->cmd_complete = pairing_complete;
3552 /* For LE, just connecting isn't a proof that the pairing finished */
3553 if (cp->addr.type == BDADDR_BREDR) {
3554 conn->connect_cfm_cb = pairing_complete_cb;
3555 conn->security_cfm_cb = pairing_complete_cb;
3556 conn->disconn_cfm_cb = pairing_complete_cb;
3558 conn->connect_cfm_cb = le_pairing_complete_cb;
3559 conn->security_cfm_cb = le_pairing_complete_cb;
3560 conn->disconn_cfm_cb = le_pairing_complete_cb;
3563 conn->io_capability = cp->io_cap;
3564 cmd->user_data = hci_conn_get(conn);
3566 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3567 hci_conn_security(conn, sec_level, auth_type, true)) {
3568 cmd->cmd_complete(cmd, 0);
3569 mgmt_pending_remove(cmd);
3575 hci_dev_unlock(hdev);
3579 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3581 struct hci_conn *conn;
3582 u16 handle = PTR_ERR(data);
3584 conn = hci_conn_hash_lookup_handle(hdev, handle);
3588 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
3591 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3594 struct mgmt_addr_info *addr = data;
3595 struct mgmt_pending_cmd *cmd;
3596 struct hci_conn *conn;
3599 bt_dev_dbg(hdev, "sock %p", sk);
3603 if (!hdev_is_powered(hdev)) {
3604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3605 MGMT_STATUS_NOT_POWERED);
3609 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3611 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3612 MGMT_STATUS_INVALID_PARAMS);
3616 conn = cmd->user_data;
3618 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3619 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3620 MGMT_STATUS_INVALID_PARAMS);
3624 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3625 mgmt_pending_remove(cmd);
3627 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3628 addr, sizeof(*addr));
3630 /* Since user doesn't want to proceed with the connection, abort any
3631 * ongoing pairing and then terminate the link if it was created
3632 * because of the pair device action.
3634 if (addr->type == BDADDR_BREDR)
3635 hci_remove_link_key(hdev, &addr->bdaddr);
3637 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3638 le_addr_type(addr->type));
3640 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
3641 hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3645 hci_dev_unlock(hdev);
3649 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3650 struct mgmt_addr_info *addr, u16 mgmt_op,
3651 u16 hci_op, __le32 passkey)
3653 struct mgmt_pending_cmd *cmd;
3654 struct hci_conn *conn;
3659 if (!hdev_is_powered(hdev)) {
3660 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3661 MGMT_STATUS_NOT_POWERED, addr,
3666 if (addr->type == BDADDR_BREDR)
3667 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3669 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3670 le_addr_type(addr->type));
3673 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3674 MGMT_STATUS_NOT_CONNECTED, addr,
3679 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3680 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3682 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3683 MGMT_STATUS_SUCCESS, addr,
3686 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3687 MGMT_STATUS_FAILED, addr,
3693 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3699 cmd->cmd_complete = addr_cmd_complete;
3701 /* Continue with pairing via HCI */
3702 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3703 struct hci_cp_user_passkey_reply cp;
3705 bacpy(&cp.bdaddr, &addr->bdaddr);
3706 cp.passkey = passkey;
3707 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3709 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3713 mgmt_pending_remove(cmd);
3716 hci_dev_unlock(hdev);
3720 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3721 void *data, u16 len)
3723 struct mgmt_cp_pin_code_neg_reply *cp = data;
3725 bt_dev_dbg(hdev, "sock %p", sk);
3727 return user_pairing_resp(sk, hdev, &cp->addr,
3728 MGMT_OP_PIN_CODE_NEG_REPLY,
3729 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3732 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3735 struct mgmt_cp_user_confirm_reply *cp = data;
3737 bt_dev_dbg(hdev, "sock %p", sk);
3739 if (len != sizeof(*cp))
3740 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3741 MGMT_STATUS_INVALID_PARAMS);
3743 return user_pairing_resp(sk, hdev, &cp->addr,
3744 MGMT_OP_USER_CONFIRM_REPLY,
3745 HCI_OP_USER_CONFIRM_REPLY, 0);
3748 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3749 void *data, u16 len)
3751 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3753 bt_dev_dbg(hdev, "sock %p", sk);
3755 return user_pairing_resp(sk, hdev, &cp->addr,
3756 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3757 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3760 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3763 struct mgmt_cp_user_passkey_reply *cp = data;
3765 bt_dev_dbg(hdev, "sock %p", sk);
3767 return user_pairing_resp(sk, hdev, &cp->addr,
3768 MGMT_OP_USER_PASSKEY_REPLY,
3769 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3772 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3773 void *data, u16 len)
3775 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3777 bt_dev_dbg(hdev, "sock %p", sk);
3779 return user_pairing_resp(sk, hdev, &cp->addr,
3780 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3781 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3784 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3786 struct adv_info *adv_instance;
3788 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3792 /* stop if current instance doesn't need to be changed */
3793 if (!(adv_instance->flags & flags))
3796 cancel_adv_timeout(hdev);
3798 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3802 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3807 static int name_changed_sync(struct hci_dev *hdev, void *data)
3809 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3812 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3814 struct mgmt_pending_cmd *cmd = data;
3815 struct mgmt_cp_set_local_name *cp = cmd->param;
3816 u8 status = mgmt_status(err);
3818 bt_dev_dbg(hdev, "err %d", err);
3820 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3824 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3827 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3830 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3831 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3834 mgmt_pending_remove(cmd);
3837 static int set_name_sync(struct hci_dev *hdev, void *data)
3839 if (lmp_bredr_capable(hdev)) {
3840 hci_update_name_sync(hdev);
3841 hci_update_eir_sync(hdev);
3844 /* The name is stored in the scan response data and so
3845 * no need to update the advertising data here.
3847 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3848 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3853 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3856 struct mgmt_cp_set_local_name *cp = data;
3857 struct mgmt_pending_cmd *cmd;
3860 bt_dev_dbg(hdev, "sock %p", sk);
3864 /* If the old values are the same as the new ones just return a
3865 * direct command complete event.
3867 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3868 !memcmp(hdev->short_name, cp->short_name,
3869 sizeof(hdev->short_name))) {
3870 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3875 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3877 if (!hdev_is_powered(hdev)) {
3878 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3880 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3885 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3886 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3887 ext_info_changed(hdev, sk);
3892 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3896 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3900 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3901 MGMT_STATUS_FAILED);
3904 mgmt_pending_remove(cmd);
3909 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3912 hci_dev_unlock(hdev);
3916 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3918 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3921 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3924 struct mgmt_cp_set_appearance *cp = data;
3928 bt_dev_dbg(hdev, "sock %p", sk);
3930 if (!lmp_le_capable(hdev))
3931 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3932 MGMT_STATUS_NOT_SUPPORTED);
3934 appearance = le16_to_cpu(cp->appearance);
3938 if (hdev->appearance != appearance) {
3939 hdev->appearance = appearance;
3941 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3942 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3945 ext_info_changed(hdev, sk);
3948 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3951 hci_dev_unlock(hdev);
3956 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3957 void *data, u16 len)
3959 struct mgmt_rp_get_phy_configuration rp;
3961 bt_dev_dbg(hdev, "sock %p", sk);
3965 memset(&rp, 0, sizeof(rp));
3967 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3968 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3969 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3971 hci_dev_unlock(hdev);
3973 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3977 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3979 struct mgmt_ev_phy_configuration_changed ev;
3981 memset(&ev, 0, sizeof(ev));
3983 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3985 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3989 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3991 struct mgmt_pending_cmd *cmd = data;
3992 struct sk_buff *skb = cmd->skb;
3993 u8 status = mgmt_status(err);
3995 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
4000 status = MGMT_STATUS_FAILED;
4001 else if (IS_ERR(skb))
4002 status = mgmt_status(PTR_ERR(skb));
4004 status = mgmt_status(skb->data[0]);
4007 bt_dev_dbg(hdev, "status %d", status);
4010 mgmt_cmd_status(cmd->sk, hdev->id,
4011 MGMT_OP_SET_PHY_CONFIGURATION, status);
4013 mgmt_cmd_complete(cmd->sk, hdev->id,
4014 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4017 mgmt_phy_configuration_changed(hdev, cmd->sk);
4020 if (skb && !IS_ERR(skb))
4023 mgmt_pending_remove(cmd);
4026 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4028 struct mgmt_pending_cmd *cmd = data;
4029 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4030 struct hci_cp_le_set_default_phy cp_phy;
4031 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4033 memset(&cp_phy, 0, sizeof(cp_phy));
4035 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4036 cp_phy.all_phys |= 0x01;
4038 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4039 cp_phy.all_phys |= 0x02;
4041 if (selected_phys & MGMT_PHY_LE_1M_TX)
4042 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4044 if (selected_phys & MGMT_PHY_LE_2M_TX)
4045 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4047 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4048 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4050 if (selected_phys & MGMT_PHY_LE_1M_RX)
4051 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4053 if (selected_phys & MGMT_PHY_LE_2M_RX)
4054 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4056 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4057 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
4059 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4060 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
4065 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4066 void *data, u16 len)
4068 struct mgmt_cp_set_phy_configuration *cp = data;
4069 struct mgmt_pending_cmd *cmd;
4070 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
4071 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4072 bool changed = false;
4075 bt_dev_dbg(hdev, "sock %p", sk);
4077 configurable_phys = get_configurable_phys(hdev);
4078 supported_phys = get_supported_phys(hdev);
4079 selected_phys = __le32_to_cpu(cp->selected_phys);
4081 if (selected_phys & ~supported_phys)
4082 return mgmt_cmd_status(sk, hdev->id,
4083 MGMT_OP_SET_PHY_CONFIGURATION,
4084 MGMT_STATUS_INVALID_PARAMS);
4086 unconfigure_phys = supported_phys & ~configurable_phys;
4088 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4089 return mgmt_cmd_status(sk, hdev->id,
4090 MGMT_OP_SET_PHY_CONFIGURATION,
4091 MGMT_STATUS_INVALID_PARAMS);
4093 if (selected_phys == get_selected_phys(hdev))
4094 return mgmt_cmd_complete(sk, hdev->id,
4095 MGMT_OP_SET_PHY_CONFIGURATION,
4100 if (!hdev_is_powered(hdev)) {
4101 err = mgmt_cmd_status(sk, hdev->id,
4102 MGMT_OP_SET_PHY_CONFIGURATION,
4103 MGMT_STATUS_REJECTED);
4107 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4108 err = mgmt_cmd_status(sk, hdev->id,
4109 MGMT_OP_SET_PHY_CONFIGURATION,
4114 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4115 pkt_type |= (HCI_DH3 | HCI_DM3);
4117 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4119 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4120 pkt_type |= (HCI_DH5 | HCI_DM5);
4122 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4124 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4125 pkt_type &= ~HCI_2DH1;
4127 pkt_type |= HCI_2DH1;
4129 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4130 pkt_type &= ~HCI_2DH3;
4132 pkt_type |= HCI_2DH3;
4134 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4135 pkt_type &= ~HCI_2DH5;
4137 pkt_type |= HCI_2DH5;
4139 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4140 pkt_type &= ~HCI_3DH1;
4142 pkt_type |= HCI_3DH1;
4144 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4145 pkt_type &= ~HCI_3DH3;
4147 pkt_type |= HCI_3DH3;
4149 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4150 pkt_type &= ~HCI_3DH5;
4152 pkt_type |= HCI_3DH5;
4154 if (pkt_type != hdev->pkt_type) {
4155 hdev->pkt_type = pkt_type;
4159 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4160 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4162 mgmt_phy_configuration_changed(hdev, sk);
4164 err = mgmt_cmd_complete(sk, hdev->id,
4165 MGMT_OP_SET_PHY_CONFIGURATION,
4171 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4176 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4177 set_default_phy_complete);
4180 err = mgmt_cmd_status(sk, hdev->id,
4181 MGMT_OP_SET_PHY_CONFIGURATION,
4182 MGMT_STATUS_FAILED);
4185 mgmt_pending_remove(cmd);
4189 hci_dev_unlock(hdev);
4194 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
4197 int err = MGMT_STATUS_SUCCESS;
4198 struct mgmt_cp_set_blocked_keys *keys = data;
4199 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
4200 sizeof(struct mgmt_blocked_key_info));
4201 u16 key_count, expected_len;
4204 bt_dev_dbg(hdev, "sock %p", sk);
4206 key_count = __le16_to_cpu(keys->key_count);
4207 if (key_count > max_key_count) {
4208 bt_dev_err(hdev, "too big key_count value %u", key_count);
4209 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4210 MGMT_STATUS_INVALID_PARAMS);
4213 expected_len = struct_size(keys, keys, key_count);
4214 if (expected_len != len) {
4215 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
4217 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4218 MGMT_STATUS_INVALID_PARAMS);
4223 hci_blocked_keys_clear(hdev);
4225 for (i = 0; i < key_count; ++i) {
4226 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
4229 err = MGMT_STATUS_NO_RESOURCES;
4233 b->type = keys->keys[i].type;
4234 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
4235 list_add_rcu(&b->list, &hdev->blocked_keys);
4237 hci_dev_unlock(hdev);
4239 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
4243 static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
4244 void *data, u16 len)
4246 struct mgmt_mode *cp = data;
4248 bool changed = false;
4250 bt_dev_dbg(hdev, "sock %p", sk);
4252 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
4253 return mgmt_cmd_status(sk, hdev->id,
4254 MGMT_OP_SET_WIDEBAND_SPEECH,
4255 MGMT_STATUS_NOT_SUPPORTED);
4257 if (cp->val != 0x00 && cp->val != 0x01)
4258 return mgmt_cmd_status(sk, hdev->id,
4259 MGMT_OP_SET_WIDEBAND_SPEECH,
4260 MGMT_STATUS_INVALID_PARAMS);
4264 if (hdev_is_powered(hdev) &&
4265 !!cp->val != hci_dev_test_flag(hdev,
4266 HCI_WIDEBAND_SPEECH_ENABLED)) {
4267 err = mgmt_cmd_status(sk, hdev->id,
4268 MGMT_OP_SET_WIDEBAND_SPEECH,
4269 MGMT_STATUS_REJECTED);
4274 changed = !hci_dev_test_and_set_flag(hdev,
4275 HCI_WIDEBAND_SPEECH_ENABLED);
4277 changed = hci_dev_test_and_clear_flag(hdev,
4278 HCI_WIDEBAND_SPEECH_ENABLED);
4280 err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
4285 err = new_settings(hdev, sk);
4288 hci_dev_unlock(hdev);
4292 static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
4293 void *data, u16 data_len)
4296 struct mgmt_rp_read_controller_cap *rp = (void *)buf;
4299 u8 tx_power_range[2];
4301 bt_dev_dbg(hdev, "sock %p", sk);
4303 memset(&buf, 0, sizeof(buf));
4307 /* When the Read Simple Pairing Options command is supported, then
4308 * the remote public key validation is supported.
4310 * Alternatively, when Microsoft extensions are available, they can
4311 * indicate support for public key validation as well.
4313 if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
4314 flags |= 0x01; /* Remote public key validation (BR/EDR) */
4316 flags |= 0x02; /* Remote public key validation (LE) */
4318 /* When the Read Encryption Key Size command is supported, then the
4319 * encryption key size is enforced.
4321 if (hdev->commands[20] & 0x10)
4322 flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
4324 flags |= 0x08; /* Encryption key size enforcement (LE) */
4326 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
4329 /* When the Read Simple Pairing Options command is supported, then
4330 * also max encryption key size information is provided.
4332 if (hdev->commands[41] & 0x08)
4333 cap_len = eir_append_le16(rp->cap, cap_len,
4334 MGMT_CAP_MAX_ENC_KEY_SIZE,
4335 hdev->max_enc_key_size);
4337 cap_len = eir_append_le16(rp->cap, cap_len,
4338 MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
4339 SMP_MAX_ENC_KEY_SIZE);
4341 /* Append the min/max LE tx power parameters if we were able to fetch
4342 * it from the controller
4344 if (hdev->commands[38] & 0x80) {
4345 memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
4346 memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
4347 cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
4351 rp->cap_len = cpu_to_le16(cap_len);
4353 hci_dev_unlock(hdev);
4355 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
4356 rp, sizeof(*rp) + cap_len);
4359 #ifdef CONFIG_BT_FEATURE_DEBUG
4360 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
4361 static const u8 debug_uuid[16] = {
4362 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
4363 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
4367 /* 330859bc-7506-492d-9370-9a6f0614037f */
4368 static const u8 quality_report_uuid[16] = {
4369 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
4370 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
4373 /* a6695ace-ee7f-4fb9-881a-5fac66c629af */
4374 static const u8 offload_codecs_uuid[16] = {
4375 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
4376 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
4379 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
4380 static const u8 le_simultaneous_roles_uuid[16] = {
4381 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
4382 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
4385 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
4386 static const u8 rpa_resolution_uuid[16] = {
4387 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
4388 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
4391 /* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
4392 static const u8 iso_socket_uuid[16] = {
4393 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
4394 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
4397 /* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
4398 static const u8 mgmt_mesh_uuid[16] = {
4399 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
4400 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
4403 static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
4404 void *data, u16 data_len)
4406 struct mgmt_rp_read_exp_features_info *rp;
4412 bt_dev_dbg(hdev, "sock %p", sk);
4414 /* Enough space for 7 features */
4415 len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
4416 rp = kzalloc(len, GFP_KERNEL);
4420 #ifdef CONFIG_BT_FEATURE_DEBUG
4422 flags = bt_dbg_get() ? BIT(0) : 0;
4424 memcpy(rp->features[idx].uuid, debug_uuid, 16);
4425 rp->features[idx].flags = cpu_to_le32(flags);
4430 if (hdev && hci_dev_le_state_simultaneous(hdev)) {
4431 if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
4436 memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
4437 rp->features[idx].flags = cpu_to_le32(flags);
4441 if (hdev && ll_privacy_capable(hdev)) {
4442 if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
4443 flags = BIT(0) | BIT(1);
4447 memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
4448 rp->features[idx].flags = cpu_to_le32(flags);
4452 if (hdev && (aosp_has_quality_report(hdev) ||
4453 hdev->set_quality_report)) {
4454 if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
4459 memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
4460 rp->features[idx].flags = cpu_to_le32(flags);
4464 if (hdev && hdev->get_data_path_id) {
4465 if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
4470 memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
4471 rp->features[idx].flags = cpu_to_le32(flags);
4475 if (IS_ENABLED(CONFIG_BT_LE)) {
4476 flags = iso_enabled() ? BIT(0) : 0;
4477 memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
4478 rp->features[idx].flags = cpu_to_le32(flags);
4482 if (hdev && lmp_le_capable(hdev)) {
4483 if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
4488 memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
4489 rp->features[idx].flags = cpu_to_le32(flags);
4493 rp->feature_count = cpu_to_le16(idx);
4495 /* After reading the experimental features information, enable
4496 * the events to update client on any future change.
4498 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4500 status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4501 MGMT_OP_READ_EXP_FEATURES_INFO,
4502 0, rp, sizeof(*rp) + (20 * idx));
4508 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4511 struct mgmt_ev_exp_feature_changed ev;
4513 memset(&ev, 0, sizeof(ev));
4514 memcpy(ev.uuid, rpa_resolution_uuid, 16);
4515 ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4517 // Do we need to be atomic with the conn_flags?
4518 if (enabled && privacy_mode_capable(hdev))
4519 hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
4521 hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
4523 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4525 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4529 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4530 bool enabled, struct sock *skip)
4532 struct mgmt_ev_exp_feature_changed ev;
4534 memset(&ev, 0, sizeof(ev));
4535 memcpy(ev.uuid, uuid, 16);
4536 ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4538 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4540 HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4543 #define EXP_FEAT(_uuid, _set_func) \
4546 .set_func = _set_func, \
4549 /* The zero key uuid is special. Multiple exp features are set through it. */
4550 static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
4551 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4553 struct mgmt_rp_set_exp_feature rp;
4555 memset(rp.uuid, 0, 16);
4556 rp.flags = cpu_to_le32(0);
4558 #ifdef CONFIG_BT_FEATURE_DEBUG
4560 bool changed = bt_dbg_get();
4565 exp_feature_changed(NULL, ZERO_KEY, false, sk);
4569 if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
4572 changed = hci_dev_test_and_clear_flag(hdev,
4573 HCI_ENABLE_LL_PRIVACY);
4575 exp_feature_changed(hdev, rpa_resolution_uuid, false,
4579 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4581 return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4582 MGMT_OP_SET_EXP_FEATURE, 0,
4586 #ifdef CONFIG_BT_FEATURE_DEBUG
4587 static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
4588 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4590 struct mgmt_rp_set_exp_feature rp;
4595 /* Command requires to use the non-controller index */
4597 return mgmt_cmd_status(sk, hdev->id,
4598 MGMT_OP_SET_EXP_FEATURE,
4599 MGMT_STATUS_INVALID_INDEX);
4601 /* Parameters are limited to a single octet */
4602 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4603 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4604 MGMT_OP_SET_EXP_FEATURE,
4605 MGMT_STATUS_INVALID_PARAMS);
4607 /* Only boolean on/off is supported */
4608 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4609 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4610 MGMT_OP_SET_EXP_FEATURE,
4611 MGMT_STATUS_INVALID_PARAMS);
4613 val = !!cp->param[0];
4614 changed = val ? !bt_dbg_get() : bt_dbg_get();
4617 memcpy(rp.uuid, debug_uuid, 16);
4618 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4620 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4622 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4623 MGMT_OP_SET_EXP_FEATURE, 0,
4627 exp_feature_changed(hdev, debug_uuid, val, sk);
4633 static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
4634 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4636 struct mgmt_rp_set_exp_feature rp;
4640 /* Command requires to use the controller index */
4642 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4643 MGMT_OP_SET_EXP_FEATURE,
4644 MGMT_STATUS_INVALID_INDEX);
4646 /* Parameters are limited to a single octet */
4647 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4648 return mgmt_cmd_status(sk, hdev->id,
4649 MGMT_OP_SET_EXP_FEATURE,
4650 MGMT_STATUS_INVALID_PARAMS);
4652 /* Only boolean on/off is supported */
4653 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4654 return mgmt_cmd_status(sk, hdev->id,
4655 MGMT_OP_SET_EXP_FEATURE,
4656 MGMT_STATUS_INVALID_PARAMS);
4658 val = !!cp->param[0];
4661 changed = !hci_dev_test_and_set_flag(hdev,
4662 HCI_MESH_EXPERIMENTAL);
4664 hci_dev_clear_flag(hdev, HCI_MESH);
4665 changed = hci_dev_test_and_clear_flag(hdev,
4666 HCI_MESH_EXPERIMENTAL);
4669 memcpy(rp.uuid, mgmt_mesh_uuid, 16);
4670 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4672 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4674 err = mgmt_cmd_complete(sk, hdev->id,
4675 MGMT_OP_SET_EXP_FEATURE, 0,
4679 exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
4684 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4685 struct mgmt_cp_set_exp_feature *cp,
4688 struct mgmt_rp_set_exp_feature rp;
4693 /* Command requires to use the controller index */
4695 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4696 MGMT_OP_SET_EXP_FEATURE,
4697 MGMT_STATUS_INVALID_INDEX);
4699 /* Changes can only be made when controller is powered down */
4700 if (hdev_is_powered(hdev))
4701 return mgmt_cmd_status(sk, hdev->id,
4702 MGMT_OP_SET_EXP_FEATURE,
4703 MGMT_STATUS_REJECTED);
4705 /* Parameters are limited to a single octet */
4706 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4707 return mgmt_cmd_status(sk, hdev->id,
4708 MGMT_OP_SET_EXP_FEATURE,
4709 MGMT_STATUS_INVALID_PARAMS);
4711 /* Only boolean on/off is supported */
4712 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4713 return mgmt_cmd_status(sk, hdev->id,
4714 MGMT_OP_SET_EXP_FEATURE,
4715 MGMT_STATUS_INVALID_PARAMS);
4717 val = !!cp->param[0];
4720 changed = !hci_dev_test_and_set_flag(hdev,
4721 HCI_ENABLE_LL_PRIVACY);
4722 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4724 /* Enable LL privacy + supported settings changed */
4725 flags = BIT(0) | BIT(1);
4727 changed = hci_dev_test_and_clear_flag(hdev,
4728 HCI_ENABLE_LL_PRIVACY);
4730 /* Disable LL privacy + supported settings changed */
4734 memcpy(rp.uuid, rpa_resolution_uuid, 16);
4735 rp.flags = cpu_to_le32(flags);
4737 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4739 err = mgmt_cmd_complete(sk, hdev->id,
4740 MGMT_OP_SET_EXP_FEATURE, 0,
4744 exp_ll_privacy_feature_changed(val, hdev, sk);
4749 static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
4750 struct mgmt_cp_set_exp_feature *cp,
4753 struct mgmt_rp_set_exp_feature rp;
4757 /* Command requires to use a valid controller index */
4759 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4760 MGMT_OP_SET_EXP_FEATURE,
4761 MGMT_STATUS_INVALID_INDEX);
4763 /* Parameters are limited to a single octet */
4764 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4765 return mgmt_cmd_status(sk, hdev->id,
4766 MGMT_OP_SET_EXP_FEATURE,
4767 MGMT_STATUS_INVALID_PARAMS);
4769 /* Only boolean on/off is supported */
4770 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4771 return mgmt_cmd_status(sk, hdev->id,
4772 MGMT_OP_SET_EXP_FEATURE,
4773 MGMT_STATUS_INVALID_PARAMS);
4775 hci_req_sync_lock(hdev);
4777 val = !!cp->param[0];
4778 changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
4780 if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
4781 err = mgmt_cmd_status(sk, hdev->id,
4782 MGMT_OP_SET_EXP_FEATURE,
4783 MGMT_STATUS_NOT_SUPPORTED);
4784 goto unlock_quality_report;
4788 if (hdev->set_quality_report)
4789 err = hdev->set_quality_report(hdev, val);
4791 err = aosp_set_quality_report(hdev, val);
4794 err = mgmt_cmd_status(sk, hdev->id,
4795 MGMT_OP_SET_EXP_FEATURE,
4796 MGMT_STATUS_FAILED);
4797 goto unlock_quality_report;
4801 hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
4803 hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
4806 bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
4808 memcpy(rp.uuid, quality_report_uuid, 16);
4809 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4810 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4812 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
4816 exp_feature_changed(hdev, quality_report_uuid, val, sk);
4818 unlock_quality_report:
4819 hci_req_sync_unlock(hdev);
4823 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4824 struct mgmt_cp_set_exp_feature *cp,
4829 struct mgmt_rp_set_exp_feature rp;
4831 /* Command requires to use a valid controller index */
4833 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4834 MGMT_OP_SET_EXP_FEATURE,
4835 MGMT_STATUS_INVALID_INDEX);
4837 /* Parameters are limited to a single octet */
4838 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4839 return mgmt_cmd_status(sk, hdev->id,
4840 MGMT_OP_SET_EXP_FEATURE,
4841 MGMT_STATUS_INVALID_PARAMS);
4843 /* Only boolean on/off is supported */
4844 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4845 return mgmt_cmd_status(sk, hdev->id,
4846 MGMT_OP_SET_EXP_FEATURE,
4847 MGMT_STATUS_INVALID_PARAMS);
4849 val = !!cp->param[0];
4850 changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4852 if (!hdev->get_data_path_id) {
4853 return mgmt_cmd_status(sk, hdev->id,
4854 MGMT_OP_SET_EXP_FEATURE,
4855 MGMT_STATUS_NOT_SUPPORTED);
4860 hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4862 hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4865 bt_dev_info(hdev, "offload codecs enable %d changed %d",
4868 memcpy(rp.uuid, offload_codecs_uuid, 16);
4869 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4870 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4871 err = mgmt_cmd_complete(sk, hdev->id,
4872 MGMT_OP_SET_EXP_FEATURE, 0,
4876 exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4881 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4882 struct mgmt_cp_set_exp_feature *cp,
4887 struct mgmt_rp_set_exp_feature rp;
4889 /* Command requires to use a valid controller index */
4891 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4892 MGMT_OP_SET_EXP_FEATURE,
4893 MGMT_STATUS_INVALID_INDEX);
4895 /* Parameters are limited to a single octet */
4896 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4897 return mgmt_cmd_status(sk, hdev->id,
4898 MGMT_OP_SET_EXP_FEATURE,
4899 MGMT_STATUS_INVALID_PARAMS);
4901 /* Only boolean on/off is supported */
4902 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4903 return mgmt_cmd_status(sk, hdev->id,
4904 MGMT_OP_SET_EXP_FEATURE,
4905 MGMT_STATUS_INVALID_PARAMS);
4907 val = !!cp->param[0];
4908 changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4910 if (!hci_dev_le_state_simultaneous(hdev)) {
4911 return mgmt_cmd_status(sk, hdev->id,
4912 MGMT_OP_SET_EXP_FEATURE,
4913 MGMT_STATUS_NOT_SUPPORTED);
4918 hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4920 hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4923 bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4926 memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4927 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4928 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4929 err = mgmt_cmd_complete(sk, hdev->id,
4930 MGMT_OP_SET_EXP_FEATURE, 0,
4934 exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4940 static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
4941 struct mgmt_cp_set_exp_feature *cp, u16 data_len)
4943 struct mgmt_rp_set_exp_feature rp;
4944 bool val, changed = false;
4947 /* Command requires to use the non-controller index */
4949 return mgmt_cmd_status(sk, hdev->id,
4950 MGMT_OP_SET_EXP_FEATURE,
4951 MGMT_STATUS_INVALID_INDEX);
4953 /* Parameters are limited to a single octet */
4954 if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4955 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4956 MGMT_OP_SET_EXP_FEATURE,
4957 MGMT_STATUS_INVALID_PARAMS);
4959 /* Only boolean on/off is supported */
4960 if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4961 return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4962 MGMT_OP_SET_EXP_FEATURE,
4963 MGMT_STATUS_INVALID_PARAMS);
4965 val = cp->param[0] ? true : false;
4974 memcpy(rp.uuid, iso_socket_uuid, 16);
4975 rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4977 hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4979 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
4980 MGMT_OP_SET_EXP_FEATURE, 0,
4984 exp_feature_changed(hdev, iso_socket_uuid, val, sk);
4990 static const struct mgmt_exp_feature {
4992 int (*set_func)(struct sock *sk, struct hci_dev *hdev,
4993 struct mgmt_cp_set_exp_feature *cp, u16 data_len);
4994 } exp_features[] = {
4995 EXP_FEAT(ZERO_KEY, set_zero_key_func),
4996 #ifdef CONFIG_BT_FEATURE_DEBUG
4997 EXP_FEAT(debug_uuid, set_debug_func),
4999 EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
5000 EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
5001 EXP_FEAT(quality_report_uuid, set_quality_report_func),
5002 EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
5003 EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
5005 EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
5008 /* end with a null feature */
5009 EXP_FEAT(NULL, NULL)
5012 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
5013 void *data, u16 data_len)
5015 struct mgmt_cp_set_exp_feature *cp = data;
5018 bt_dev_dbg(hdev, "sock %p", sk);
5020 for (i = 0; exp_features[i].uuid; i++) {
5021 if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
5022 return exp_features[i].set_func(sk, hdev, cp, data_len);
5025 return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
5026 MGMT_OP_SET_EXP_FEATURE,
5027 MGMT_STATUS_NOT_SUPPORTED);
5030 static u32 get_params_flags(struct hci_dev *hdev,
5031 struct hci_conn_params *params)
5033 u32 flags = hdev->conn_flags;
5035 /* Devices using RPAs can only be programmed in the acceptlist if
5036 * LL Privacy has been enable otherwise they cannot mark
5037 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5039 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5040 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type))
5041 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
5046 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5049 struct mgmt_cp_get_device_flags *cp = data;
5050 struct mgmt_rp_get_device_flags rp;
5051 struct bdaddr_list_with_flags *br_params;
5052 struct hci_conn_params *params;
5053 u32 supported_flags;
5054 u32 current_flags = 0;
5055 u8 status = MGMT_STATUS_INVALID_PARAMS;
5057 bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
5058 &cp->addr.bdaddr, cp->addr.type);
5062 supported_flags = hdev->conn_flags;
5064 memset(&rp, 0, sizeof(rp));
5066 if (cp->addr.type == BDADDR_BREDR) {
5067 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5073 current_flags = br_params->flags;
5075 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5076 le_addr_type(cp->addr.type));
5080 supported_flags = get_params_flags(hdev, params);
5081 current_flags = params->flags;
5084 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5085 rp.addr.type = cp->addr.type;
5086 rp.supported_flags = cpu_to_le32(supported_flags);
5087 rp.current_flags = cpu_to_le32(current_flags);
5089 status = MGMT_STATUS_SUCCESS;
5092 hci_dev_unlock(hdev);
5094 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
5098 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
5099 bdaddr_t *bdaddr, u8 bdaddr_type,
5100 u32 supported_flags, u32 current_flags)
5102 struct mgmt_ev_device_flags_changed ev;
5104 bacpy(&ev.addr.bdaddr, bdaddr);
5105 ev.addr.type = bdaddr_type;
5106 ev.supported_flags = cpu_to_le32(supported_flags);
5107 ev.current_flags = cpu_to_le32(current_flags);
5109 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
5112 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
5115 struct mgmt_cp_set_device_flags *cp = data;
5116 struct bdaddr_list_with_flags *br_params;
5117 struct hci_conn_params *params;
5118 u8 status = MGMT_STATUS_INVALID_PARAMS;
5119 u32 supported_flags;
5120 u32 current_flags = __le32_to_cpu(cp->current_flags);
5122 bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
5123 &cp->addr.bdaddr, cp->addr.type, current_flags);
5125 // We should take hci_dev_lock() early, I think.. conn_flags can change
5126 supported_flags = hdev->conn_flags;
5128 if ((supported_flags | current_flags) != supported_flags) {
5129 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5130 current_flags, supported_flags);
5136 if (cp->addr.type == BDADDR_BREDR) {
5137 br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
5142 br_params->flags = current_flags;
5143 status = MGMT_STATUS_SUCCESS;
5145 bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
5146 &cp->addr.bdaddr, cp->addr.type);
5152 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5153 le_addr_type(cp->addr.type));
5155 bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
5156 &cp->addr.bdaddr, le_addr_type(cp->addr.type));
5160 supported_flags = get_params_flags(hdev, params);
5162 if ((supported_flags | current_flags) != supported_flags) {
5163 bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
5164 current_flags, supported_flags);
5168 params->flags = current_flags;
5169 status = MGMT_STATUS_SUCCESS;
5171 /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
5174 if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
5175 hci_update_passive_scan(hdev);
5178 hci_dev_unlock(hdev);
5181 if (status == MGMT_STATUS_SUCCESS)
5182 device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
5183 supported_flags, current_flags);
5185 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
5186 &cp->addr, sizeof(cp->addr));
5189 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
5192 struct mgmt_ev_adv_monitor_added ev;
5194 ev.monitor_handle = cpu_to_le16(handle);
5196 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
5199 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
5201 struct mgmt_ev_adv_monitor_removed ev;
5202 struct mgmt_pending_cmd *cmd;
5203 struct sock *sk_skip = NULL;
5204 struct mgmt_cp_remove_adv_monitor *cp;
5206 cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
5210 if (cp->monitor_handle)
5214 ev.monitor_handle = cpu_to_le16(handle);
5216 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
5219 static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
5220 void *data, u16 len)
5222 struct adv_monitor *monitor = NULL;
5223 struct mgmt_rp_read_adv_monitor_features *rp = NULL;
5226 __u32 supported = 0;
5228 __u16 num_handles = 0;
5229 __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
5231 BT_DBG("request for %s", hdev->name);
5235 if (msft_monitor_supported(hdev))
5236 supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
5238 idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
5239 handles[num_handles++] = monitor->handle;
5241 hci_dev_unlock(hdev);
5243 rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
5244 rp = kmalloc(rp_size, GFP_KERNEL);
5248 /* All supported features are currently enabled */
5249 enabled = supported;
5251 rp->supported_features = cpu_to_le32(supported);
5252 rp->enabled_features = cpu_to_le32(enabled);
5253 rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
5254 rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
5255 rp->num_handles = cpu_to_le16(num_handles);
5257 memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
5259 err = mgmt_cmd_complete(sk, hdev->id,
5260 MGMT_OP_READ_ADV_MONITOR_FEATURES,
5261 MGMT_STATUS_SUCCESS, rp, rp_size);
5268 static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
5269 void *data, int status)
5271 struct mgmt_rp_add_adv_patterns_monitor rp;
5272 struct mgmt_pending_cmd *cmd = data;
5273 struct adv_monitor *monitor = cmd->user_data;
5277 rp.monitor_handle = cpu_to_le16(monitor->handle);
5280 mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
5281 hdev->adv_monitors_cnt++;
5282 if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
5283 monitor->state = ADV_MONITOR_STATE_REGISTERED;
5284 hci_update_passive_scan(hdev);
5287 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5288 mgmt_status(status), &rp, sizeof(rp));
5289 mgmt_pending_remove(cmd);
5291 hci_dev_unlock(hdev);
5292 bt_dev_dbg(hdev, "add monitor %d complete, status %d",
5293 rp.monitor_handle, status);
5296 static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
5298 struct mgmt_pending_cmd *cmd = data;
5299 struct adv_monitor *monitor = cmd->user_data;
5301 return hci_add_adv_monitor(hdev, monitor);
5304 static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5305 struct adv_monitor *m, u8 status,
5306 void *data, u16 len, u16 op)
5308 struct mgmt_pending_cmd *cmd;
5316 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5317 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5318 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
5319 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
5320 status = MGMT_STATUS_BUSY;
5324 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5326 status = MGMT_STATUS_NO_RESOURCES;
5331 err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
5332 mgmt_add_adv_patterns_monitor_complete);
5335 status = MGMT_STATUS_NO_RESOURCES;
5337 status = MGMT_STATUS_FAILED;
5342 hci_dev_unlock(hdev);
5347 hci_free_adv_monitor(hdev, m);
5348 hci_dev_unlock(hdev);
5349 return mgmt_cmd_status(sk, hdev->id, op, status);
5352 static void parse_adv_monitor_rssi(struct adv_monitor *m,
5353 struct mgmt_adv_rssi_thresholds *rssi)
5356 m->rssi.low_threshold = rssi->low_threshold;
5357 m->rssi.low_threshold_timeout =
5358 __le16_to_cpu(rssi->low_threshold_timeout);
5359 m->rssi.high_threshold = rssi->high_threshold;
5360 m->rssi.high_threshold_timeout =
5361 __le16_to_cpu(rssi->high_threshold_timeout);
5362 m->rssi.sampling_period = rssi->sampling_period;
5364 /* Default values. These numbers are the least constricting
5365 * parameters for MSFT API to work, so it behaves as if there
5366 * are no rssi parameter to consider. May need to be changed
5367 * if other API are to be supported.
5369 m->rssi.low_threshold = -127;
5370 m->rssi.low_threshold_timeout = 60;
5371 m->rssi.high_threshold = -127;
5372 m->rssi.high_threshold_timeout = 0;
5373 m->rssi.sampling_period = 0;
5377 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
5378 struct mgmt_adv_pattern *patterns)
5380 u8 offset = 0, length = 0;
5381 struct adv_pattern *p = NULL;
5384 for (i = 0; i < pattern_count; i++) {
5385 offset = patterns[i].offset;
5386 length = patterns[i].length;
5387 if (offset >= HCI_MAX_AD_LENGTH ||
5388 length > HCI_MAX_AD_LENGTH ||
5389 (offset + length) > HCI_MAX_AD_LENGTH)
5390 return MGMT_STATUS_INVALID_PARAMS;
5392 p = kmalloc(sizeof(*p), GFP_KERNEL);
5394 return MGMT_STATUS_NO_RESOURCES;
5396 p->ad_type = patterns[i].ad_type;
5397 p->offset = patterns[i].offset;
5398 p->length = patterns[i].length;
5399 memcpy(p->value, patterns[i].value, p->length);
5401 INIT_LIST_HEAD(&p->list);
5402 list_add(&p->list, &m->patterns);
5405 return MGMT_STATUS_SUCCESS;
5408 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
5409 void *data, u16 len)
5411 struct mgmt_cp_add_adv_patterns_monitor *cp = data;
5412 struct adv_monitor *m = NULL;
5413 u8 status = MGMT_STATUS_SUCCESS;
5414 size_t expected_size = sizeof(*cp);
5416 BT_DBG("request for %s", hdev->name);
5418 if (len <= sizeof(*cp)) {
5419 status = MGMT_STATUS_INVALID_PARAMS;
5423 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5424 if (len != expected_size) {
5425 status = MGMT_STATUS_INVALID_PARAMS;
5429 m = kzalloc(sizeof(*m), GFP_KERNEL);
5431 status = MGMT_STATUS_NO_RESOURCES;
5435 INIT_LIST_HEAD(&m->patterns);
5437 parse_adv_monitor_rssi(m, NULL);
5438 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5441 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5442 MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
5445 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
5446 void *data, u16 len)
5448 struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
5449 struct adv_monitor *m = NULL;
5450 u8 status = MGMT_STATUS_SUCCESS;
5451 size_t expected_size = sizeof(*cp);
5453 BT_DBG("request for %s", hdev->name);
5455 if (len <= sizeof(*cp)) {
5456 status = MGMT_STATUS_INVALID_PARAMS;
5460 expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
5461 if (len != expected_size) {
5462 status = MGMT_STATUS_INVALID_PARAMS;
5466 m = kzalloc(sizeof(*m), GFP_KERNEL);
5468 status = MGMT_STATUS_NO_RESOURCES;
5472 INIT_LIST_HEAD(&m->patterns);
5474 parse_adv_monitor_rssi(m, &cp->rssi);
5475 status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
5478 return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
5479 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
5482 static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
5483 void *data, int status)
5485 struct mgmt_rp_remove_adv_monitor rp;
5486 struct mgmt_pending_cmd *cmd = data;
5487 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5491 rp.monitor_handle = cp->monitor_handle;
5494 hci_update_passive_scan(hdev);
5496 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
5497 mgmt_status(status), &rp, sizeof(rp));
5498 mgmt_pending_remove(cmd);
5500 hci_dev_unlock(hdev);
5501 bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
5502 rp.monitor_handle, status);
5505 static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
5507 struct mgmt_pending_cmd *cmd = data;
5508 struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
5509 u16 handle = __le16_to_cpu(cp->monitor_handle);
5512 return hci_remove_all_adv_monitor(hdev);
5514 return hci_remove_single_adv_monitor(hdev, handle);
5517 static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
5518 void *data, u16 len)
5520 struct mgmt_pending_cmd *cmd;
5525 if (pending_find(MGMT_OP_SET_LE, hdev) ||
5526 pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
5527 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
5528 pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
5529 status = MGMT_STATUS_BUSY;
5533 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
5535 status = MGMT_STATUS_NO_RESOURCES;
5539 err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
5540 mgmt_remove_adv_monitor_complete);
5543 mgmt_pending_remove(cmd);
5546 status = MGMT_STATUS_NO_RESOURCES;
5548 status = MGMT_STATUS_FAILED;
5553 hci_dev_unlock(hdev);
5558 hci_dev_unlock(hdev);
5559 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
5563 static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
5565 struct mgmt_rp_read_local_oob_data mgmt_rp;
5566 size_t rp_size = sizeof(mgmt_rp);
5567 struct mgmt_pending_cmd *cmd = data;
5568 struct sk_buff *skb = cmd->skb;
5569 u8 status = mgmt_status(err);
5573 status = MGMT_STATUS_FAILED;
5574 else if (IS_ERR(skb))
5575 status = mgmt_status(PTR_ERR(skb));
5577 status = mgmt_status(skb->data[0]);
5580 bt_dev_dbg(hdev, "status %d", status);
5583 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
5587 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
5589 if (!bredr_sc_enabled(hdev)) {
5590 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
5592 if (skb->len < sizeof(*rp)) {
5593 mgmt_cmd_status(cmd->sk, hdev->id,
5594 MGMT_OP_READ_LOCAL_OOB_DATA,
5595 MGMT_STATUS_FAILED);
5599 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
5600 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
5602 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
5604 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
5606 if (skb->len < sizeof(*rp)) {
5607 mgmt_cmd_status(cmd->sk, hdev->id,
5608 MGMT_OP_READ_LOCAL_OOB_DATA,
5609 MGMT_STATUS_FAILED);
5613 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
5614 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
5616 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
5617 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
5620 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5621 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
5624 if (skb && !IS_ERR(skb))
5627 mgmt_pending_free(cmd);
5630 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5632 struct mgmt_pending_cmd *cmd = data;
5634 if (bredr_sc_enabled(hdev))
5635 cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5637 cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5639 if (IS_ERR(cmd->skb))
5640 return PTR_ERR(cmd->skb);
5645 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
5646 void *data, u16 data_len)
5648 struct mgmt_pending_cmd *cmd;
5651 bt_dev_dbg(hdev, "sock %p", sk);
5655 if (!hdev_is_powered(hdev)) {
5656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5657 MGMT_STATUS_NOT_POWERED);
5661 if (!lmp_ssp_capable(hdev)) {
5662 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5663 MGMT_STATUS_NOT_SUPPORTED);
5667 cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
5671 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
5672 read_local_oob_data_complete);
5675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5676 MGMT_STATUS_FAILED);
5679 mgmt_pending_free(cmd);
5683 hci_dev_unlock(hdev);
5687 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5688 void *data, u16 len)
5690 struct mgmt_addr_info *addr = data;
5693 bt_dev_dbg(hdev, "sock %p", sk);
5695 if (!bdaddr_type_is_valid(addr->type))
5696 return mgmt_cmd_complete(sk, hdev->id,
5697 MGMT_OP_ADD_REMOTE_OOB_DATA,
5698 MGMT_STATUS_INVALID_PARAMS,
5699 addr, sizeof(*addr));
5703 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
5704 struct mgmt_cp_add_remote_oob_data *cp = data;
5707 if (cp->addr.type != BDADDR_BREDR) {
5708 err = mgmt_cmd_complete(sk, hdev->id,
5709 MGMT_OP_ADD_REMOTE_OOB_DATA,
5710 MGMT_STATUS_INVALID_PARAMS,
5711 &cp->addr, sizeof(cp->addr));
5715 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5716 cp->addr.type, cp->hash,
5717 cp->rand, NULL, NULL);
5719 status = MGMT_STATUS_FAILED;
5721 status = MGMT_STATUS_SUCCESS;
5723 err = mgmt_cmd_complete(sk, hdev->id,
5724 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
5725 &cp->addr, sizeof(cp->addr));
5726 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
5727 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
5728 u8 *rand192, *hash192, *rand256, *hash256;
5731 if (bdaddr_type_is_le(cp->addr.type)) {
5732 /* Enforce zero-valued 192-bit parameters as
5733 * long as legacy SMP OOB isn't implemented.
5735 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
5736 memcmp(cp->hash192, ZERO_KEY, 16)) {
5737 err = mgmt_cmd_complete(sk, hdev->id,
5738 MGMT_OP_ADD_REMOTE_OOB_DATA,
5739 MGMT_STATUS_INVALID_PARAMS,
5740 addr, sizeof(*addr));
5747 /* In case one of the P-192 values is set to zero,
5748 * then just disable OOB data for P-192.
5750 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
5751 !memcmp(cp->hash192, ZERO_KEY, 16)) {
5755 rand192 = cp->rand192;
5756 hash192 = cp->hash192;
5760 /* In case one of the P-256 values is set to zero, then just
5761 * disable OOB data for P-256.
5763 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
5764 !memcmp(cp->hash256, ZERO_KEY, 16)) {
5768 rand256 = cp->rand256;
5769 hash256 = cp->hash256;
5772 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
5773 cp->addr.type, hash192, rand192,
5776 status = MGMT_STATUS_FAILED;
5778 status = MGMT_STATUS_SUCCESS;
5780 err = mgmt_cmd_complete(sk, hdev->id,
5781 MGMT_OP_ADD_REMOTE_OOB_DATA,
5782 status, &cp->addr, sizeof(cp->addr));
5784 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
5786 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
5787 MGMT_STATUS_INVALID_PARAMS);
5791 hci_dev_unlock(hdev);
5795 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5796 void *data, u16 len)
5798 struct mgmt_cp_remove_remote_oob_data *cp = data;
5802 bt_dev_dbg(hdev, "sock %p", sk);
5804 if (cp->addr.type != BDADDR_BREDR)
5805 return mgmt_cmd_complete(sk, hdev->id,
5806 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5807 MGMT_STATUS_INVALID_PARAMS,
5808 &cp->addr, sizeof(cp->addr));
5812 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5813 hci_remote_oob_data_clear(hdev);
5814 status = MGMT_STATUS_SUCCESS;
5818 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5820 status = MGMT_STATUS_INVALID_PARAMS;
5822 status = MGMT_STATUS_SUCCESS;
5825 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5826 status, &cp->addr, sizeof(cp->addr));
5828 hci_dev_unlock(hdev);
5832 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5834 struct mgmt_pending_cmd *cmd;
5836 bt_dev_dbg(hdev, "status %u", status);
5840 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5842 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5845 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5848 cmd->cmd_complete(cmd, mgmt_status(status));
5849 mgmt_pending_remove(cmd);
5852 hci_dev_unlock(hdev);
5855 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5856 uint8_t *mgmt_status)
5859 case DISCOV_TYPE_LE:
5860 *mgmt_status = mgmt_le_support(hdev);
5864 case DISCOV_TYPE_INTERLEAVED:
5865 *mgmt_status = mgmt_le_support(hdev);
5869 case DISCOV_TYPE_BREDR:
5870 *mgmt_status = mgmt_bredr_support(hdev);
5875 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5882 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5884 struct mgmt_pending_cmd *cmd = data;
5886 if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5887 cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5888 cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5891 bt_dev_dbg(hdev, "err %d", err);
5893 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5895 mgmt_pending_remove(cmd);
5897 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5901 static int start_discovery_sync(struct hci_dev *hdev, void *data)
5903 return hci_start_discovery_sync(hdev);
5906 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
5907 u16 op, void *data, u16 len)
5909 struct mgmt_cp_start_discovery *cp = data;
5910 struct mgmt_pending_cmd *cmd;
5914 bt_dev_dbg(hdev, "sock %p", sk);
5918 if (!hdev_is_powered(hdev)) {
5919 err = mgmt_cmd_complete(sk, hdev->id, op,
5920 MGMT_STATUS_NOT_POWERED,
5921 &cp->type, sizeof(cp->type));
5925 if (hdev->discovery.state != DISCOVERY_STOPPED ||
5926 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
5927 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5928 &cp->type, sizeof(cp->type));
5932 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
5933 err = mgmt_cmd_complete(sk, hdev->id, op, status,
5934 &cp->type, sizeof(cp->type));
5938 /* Can't start discovery when it is paused */
5939 if (hdev->discovery_paused) {
5940 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
5941 &cp->type, sizeof(cp->type));
5945 /* Clear the discovery filter first to free any previously
5946 * allocated memory for the UUID list.
5948 hci_discovery_filter_clear(hdev);
5950 hdev->discovery.type = cp->type;
5951 hdev->discovery.report_invalid_rssi = false;
5952 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
5953 hdev->discovery.limited = true;
5955 hdev->discovery.limited = false;
5957 cmd = mgmt_pending_add(sk, op, hdev, data, len);
5963 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
5964 start_discovery_complete);
5966 mgmt_pending_remove(cmd);
5970 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
5973 hci_dev_unlock(hdev);
5977 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
5978 void *data, u16 len)
5980 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
5984 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
5985 void *data, u16 len)
5987 return start_discovery_internal(sk, hdev,
5988 MGMT_OP_START_LIMITED_DISCOVERY,
5992 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
5993 void *data, u16 len)
5995 struct mgmt_cp_start_service_discovery *cp = data;
5996 struct mgmt_pending_cmd *cmd;
5997 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
5998 u16 uuid_count, expected_len;
6002 bt_dev_dbg(hdev, "sock %p", sk);
6006 if (!hdev_is_powered(hdev)) {
6007 err = mgmt_cmd_complete(sk, hdev->id,
6008 MGMT_OP_START_SERVICE_DISCOVERY,
6009 MGMT_STATUS_NOT_POWERED,
6010 &cp->type, sizeof(cp->type));
6014 if (hdev->discovery.state != DISCOVERY_STOPPED ||
6015 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
6016 err = mgmt_cmd_complete(sk, hdev->id,
6017 MGMT_OP_START_SERVICE_DISCOVERY,
6018 MGMT_STATUS_BUSY, &cp->type,
6023 if (hdev->discovery_paused) {
6024 err = mgmt_cmd_complete(sk, hdev->id,
6025 MGMT_OP_START_SERVICE_DISCOVERY,
6026 MGMT_STATUS_BUSY, &cp->type,
6031 uuid_count = __le16_to_cpu(cp->uuid_count);
6032 if (uuid_count > max_uuid_count) {
6033 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
6035 err = mgmt_cmd_complete(sk, hdev->id,
6036 MGMT_OP_START_SERVICE_DISCOVERY,
6037 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6042 expected_len = sizeof(*cp) + uuid_count * 16;
6043 if (expected_len != len) {
6044 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
6046 err = mgmt_cmd_complete(sk, hdev->id,
6047 MGMT_OP_START_SERVICE_DISCOVERY,
6048 MGMT_STATUS_INVALID_PARAMS, &cp->type,
6053 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
6054 err = mgmt_cmd_complete(sk, hdev->id,
6055 MGMT_OP_START_SERVICE_DISCOVERY,
6056 status, &cp->type, sizeof(cp->type));
6060 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
6067 /* Clear the discovery filter first to free any previously
6068 * allocated memory for the UUID list.
6070 hci_discovery_filter_clear(hdev);
6072 hdev->discovery.result_filtering = true;
6073 hdev->discovery.type = cp->type;
6074 hdev->discovery.rssi = cp->rssi;
6075 hdev->discovery.uuid_count = uuid_count;
6077 if (uuid_count > 0) {
6078 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
6080 if (!hdev->discovery.uuids) {
6081 err = mgmt_cmd_complete(sk, hdev->id,
6082 MGMT_OP_START_SERVICE_DISCOVERY,
6084 &cp->type, sizeof(cp->type));
6085 mgmt_pending_remove(cmd);
6090 err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
6091 start_discovery_complete);
6093 mgmt_pending_remove(cmd);
6097 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6100 hci_dev_unlock(hdev);
6104 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
6106 struct mgmt_pending_cmd *cmd;
6108 bt_dev_dbg(hdev, "status %u", status);
6112 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6114 cmd->cmd_complete(cmd, mgmt_status(status));
6115 mgmt_pending_remove(cmd);
6118 hci_dev_unlock(hdev);
6121 static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
6123 struct mgmt_pending_cmd *cmd = data;
6125 if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
6128 bt_dev_dbg(hdev, "err %d", err);
6130 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
6132 mgmt_pending_remove(cmd);
6135 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
6138 static int stop_discovery_sync(struct hci_dev *hdev, void *data)
6140 return hci_stop_discovery_sync(hdev);
6143 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
6146 struct mgmt_cp_stop_discovery *mgmt_cp = data;
6147 struct mgmt_pending_cmd *cmd;
6150 bt_dev_dbg(hdev, "sock %p", sk);
6154 if (!hci_discovery_active(hdev)) {
6155 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6156 MGMT_STATUS_REJECTED, &mgmt_cp->type,
6157 sizeof(mgmt_cp->type));
6161 if (hdev->discovery.type != mgmt_cp->type) {
6162 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
6163 MGMT_STATUS_INVALID_PARAMS,
6164 &mgmt_cp->type, sizeof(mgmt_cp->type));
6168 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
6174 err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
6175 stop_discovery_complete);
6177 mgmt_pending_remove(cmd);
6181 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
6184 hci_dev_unlock(hdev);
6188 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
6191 struct mgmt_cp_confirm_name *cp = data;
6192 struct inquiry_entry *e;
6195 bt_dev_dbg(hdev, "sock %p", sk);
6199 if (!hci_discovery_active(hdev)) {
6200 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6201 MGMT_STATUS_FAILED, &cp->addr,
6206 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
6208 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
6209 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
6214 if (cp->name_known) {
6215 e->name_state = NAME_KNOWN;
6218 e->name_state = NAME_NEEDED;
6219 hci_inquiry_cache_update_resolve(hdev, e);
6222 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
6223 &cp->addr, sizeof(cp->addr));
6226 hci_dev_unlock(hdev);
6230 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
6233 struct mgmt_cp_block_device *cp = data;
6237 bt_dev_dbg(hdev, "sock %p", sk);
6239 if (!bdaddr_type_is_valid(cp->addr.type))
6240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
6241 MGMT_STATUS_INVALID_PARAMS,
6242 &cp->addr, sizeof(cp->addr));
6246 err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
6249 status = MGMT_STATUS_FAILED;
6253 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6255 status = MGMT_STATUS_SUCCESS;
6258 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
6259 &cp->addr, sizeof(cp->addr));
6261 hci_dev_unlock(hdev);
6266 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
6269 struct mgmt_cp_unblock_device *cp = data;
6273 bt_dev_dbg(hdev, "sock %p", sk);
6275 if (!bdaddr_type_is_valid(cp->addr.type))
6276 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
6277 MGMT_STATUS_INVALID_PARAMS,
6278 &cp->addr, sizeof(cp->addr));
6282 err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
6285 status = MGMT_STATUS_INVALID_PARAMS;
6289 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
6291 status = MGMT_STATUS_SUCCESS;
6294 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
6295 &cp->addr, sizeof(cp->addr));
6297 hci_dev_unlock(hdev);
6302 static int set_device_id_sync(struct hci_dev *hdev, void *data)
6304 return hci_update_eir_sync(hdev);
6307 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
6310 struct mgmt_cp_set_device_id *cp = data;
6314 bt_dev_dbg(hdev, "sock %p", sk);
6316 source = __le16_to_cpu(cp->source);
6318 if (source > 0x0002)
6319 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
6320 MGMT_STATUS_INVALID_PARAMS);
6324 hdev->devid_source = source;
6325 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
6326 hdev->devid_product = __le16_to_cpu(cp->product);
6327 hdev->devid_version = __le16_to_cpu(cp->version);
6329 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
6332 hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);
6334 hci_dev_unlock(hdev);
6339 static void enable_advertising_instance(struct hci_dev *hdev, int err)
6342 bt_dev_err(hdev, "failed to re-configure advertising %d", err);
6344 bt_dev_dbg(hdev, "status %d", err);
6347 static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
6349 struct cmd_lookup match = { NULL, hdev };
6351 struct adv_info *adv_instance;
6352 u8 status = mgmt_status(err);
6355 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
6356 cmd_status_rsp, &status);
6360 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
6361 hci_dev_set_flag(hdev, HCI_ADVERTISING);
6363 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
6365 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
6368 new_settings(hdev, match.sk);
6373 /* If "Set Advertising" was just disabled and instance advertising was
6374 * set up earlier, then re-enable multi-instance advertising.
6376 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6377 list_empty(&hdev->adv_instances))
6380 instance = hdev->cur_adv_instance;
6382 adv_instance = list_first_entry_or_null(&hdev->adv_instances,
6383 struct adv_info, list);
6387 instance = adv_instance->instance;
6390 err = hci_schedule_adv_instance_sync(hdev, instance, true);
6392 enable_advertising_instance(hdev, err);
6395 static int set_adv_sync(struct hci_dev *hdev, void *data)
6397 struct mgmt_pending_cmd *cmd = data;
6398 struct mgmt_mode *cp = cmd->param;
6401 if (cp->val == 0x02)
6402 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6404 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6406 cancel_adv_timeout(hdev);
6409 /* Switch to instance "0" for the Set Advertising setting.
6410 * We cannot use update_[adv|scan_rsp]_data() here as the
6411 * HCI_ADVERTISING flag is not yet set.
6413 hdev->cur_adv_instance = 0x00;
6415 if (ext_adv_capable(hdev)) {
6416 hci_start_ext_adv_sync(hdev, 0x00);
6418 hci_update_adv_data_sync(hdev, 0x00);
6419 hci_update_scan_rsp_data_sync(hdev, 0x00);
6420 hci_enable_advertising_sync(hdev);
6423 hci_disable_advertising_sync(hdev);
6429 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
6432 struct mgmt_mode *cp = data;
6433 struct mgmt_pending_cmd *cmd;
6437 bt_dev_dbg(hdev, "sock %p", sk);
6439 status = mgmt_le_support(hdev);
6441 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6444 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6445 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6446 MGMT_STATUS_INVALID_PARAMS);
6448 if (hdev->advertising_paused)
6449 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6456 /* The following conditions are ones which mean that we should
6457 * not do any HCI communication but directly send a mgmt
6458 * response to user space (after toggling the flag if
6461 if (!hdev_is_powered(hdev) ||
6462 (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
6463 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
6464 hci_dev_test_flag(hdev, HCI_MESH) ||
6465 hci_conn_num(hdev, LE_LINK) > 0 ||
6466 (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6467 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
6471 hdev->cur_adv_instance = 0x00;
6472 changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
6473 if (cp->val == 0x02)
6474 hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6476 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6478 changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
6479 hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
6482 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
6487 err = new_settings(hdev, sk);
6492 if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
6493 pending_find(MGMT_OP_SET_LE, hdev)) {
6494 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
6499 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
6503 err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
6504 set_advertising_complete);
6507 mgmt_pending_remove(cmd);
6510 hci_dev_unlock(hdev);
6514 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
6515 void *data, u16 len)
6517 struct mgmt_cp_set_static_address *cp = data;
6520 bt_dev_dbg(hdev, "sock %p", sk);
6522 if (!lmp_le_capable(hdev))
6523 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6524 MGMT_STATUS_NOT_SUPPORTED);
6526 if (hdev_is_powered(hdev))
6527 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
6528 MGMT_STATUS_REJECTED);
6530 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
6531 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
6532 return mgmt_cmd_status(sk, hdev->id,
6533 MGMT_OP_SET_STATIC_ADDRESS,
6534 MGMT_STATUS_INVALID_PARAMS);
6536 /* Two most significant bits shall be set */
6537 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
6538 return mgmt_cmd_status(sk, hdev->id,
6539 MGMT_OP_SET_STATIC_ADDRESS,
6540 MGMT_STATUS_INVALID_PARAMS);
6545 bacpy(&hdev->static_addr, &cp->bdaddr);
6547 err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
6551 err = new_settings(hdev, sk);
6554 hci_dev_unlock(hdev);
6558 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
6559 void *data, u16 len)
6561 struct mgmt_cp_set_scan_params *cp = data;
6562 __u16 interval, window;
6565 bt_dev_dbg(hdev, "sock %p", sk);
6567 if (!lmp_le_capable(hdev))
6568 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6569 MGMT_STATUS_NOT_SUPPORTED);
6571 interval = __le16_to_cpu(cp->interval);
6573 if (interval < 0x0004 || interval > 0x4000)
6574 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6575 MGMT_STATUS_INVALID_PARAMS);
6577 window = __le16_to_cpu(cp->window);
6579 if (window < 0x0004 || window > 0x4000)
6580 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6581 MGMT_STATUS_INVALID_PARAMS);
6583 if (window > interval)
6584 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
6585 MGMT_STATUS_INVALID_PARAMS);
6589 hdev->le_scan_interval = interval;
6590 hdev->le_scan_window = window;
6592 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
6595 /* If background scan is running, restart it so new parameters are
6598 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6599 hdev->discovery.state == DISCOVERY_STOPPED)
6600 hci_update_passive_scan(hdev);
6602 hci_dev_unlock(hdev);
6607 static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
6609 struct mgmt_pending_cmd *cmd = data;
6611 bt_dev_dbg(hdev, "err %d", err);
6614 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6617 struct mgmt_mode *cp = cmd->param;
6620 hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
6622 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6624 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6625 new_settings(hdev, cmd->sk);
6628 mgmt_pending_free(cmd);
6631 static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
6633 struct mgmt_pending_cmd *cmd = data;
6634 struct mgmt_mode *cp = cmd->param;
6636 return hci_write_fast_connectable_sync(hdev, cp->val);
6639 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
6640 void *data, u16 len)
6642 struct mgmt_mode *cp = data;
6643 struct mgmt_pending_cmd *cmd;
6646 bt_dev_dbg(hdev, "sock %p", sk);
6648 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
6649 hdev->hci_ver < BLUETOOTH_VER_1_2)
6650 return mgmt_cmd_status(sk, hdev->id,
6651 MGMT_OP_SET_FAST_CONNECTABLE,
6652 MGMT_STATUS_NOT_SUPPORTED);
6654 if (cp->val != 0x00 && cp->val != 0x01)
6655 return mgmt_cmd_status(sk, hdev->id,
6656 MGMT_OP_SET_FAST_CONNECTABLE,
6657 MGMT_STATUS_INVALID_PARAMS);
6661 if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
6662 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6666 if (!hdev_is_powered(hdev)) {
6667 hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
6668 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
6669 new_settings(hdev, sk);
6673 cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
6678 err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
6679 fast_connectable_complete);
6682 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
6683 MGMT_STATUS_FAILED);
6686 mgmt_pending_free(cmd);
6690 hci_dev_unlock(hdev);
6695 static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
6697 struct mgmt_pending_cmd *cmd = data;
6699 bt_dev_dbg(hdev, "err %d", err);
6702 u8 mgmt_err = mgmt_status(err);
6704 /* We need to restore the flag if related HCI commands
6707 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
6709 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6711 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
6712 new_settings(hdev, cmd->sk);
6715 mgmt_pending_free(cmd);
6718 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6722 status = hci_write_fast_connectable_sync(hdev, false);
6725 status = hci_update_scan_sync(hdev);
6727 /* Since only the advertising data flags will change, there
6728 * is no need to update the scan response data.
6731 status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6736 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
6738 struct mgmt_mode *cp = data;
6739 struct mgmt_pending_cmd *cmd;
6742 bt_dev_dbg(hdev, "sock %p", sk);
6744 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
6745 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6746 MGMT_STATUS_NOT_SUPPORTED);
6748 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6749 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6750 MGMT_STATUS_REJECTED);
6752 if (cp->val != 0x00 && cp->val != 0x01)
6753 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6754 MGMT_STATUS_INVALID_PARAMS);
6758 if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6759 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6763 if (!hdev_is_powered(hdev)) {
6765 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
6766 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
6767 hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
6768 hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
6769 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
6772 hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
6774 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
6778 err = new_settings(hdev, sk);
6782 /* Reject disabling when powered on */
6784 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6785 MGMT_STATUS_REJECTED);
6788 /* When configuring a dual-mode controller to operate
6789 * with LE only and using a static address, then switching
6790 * BR/EDR back on is not allowed.
6792 * Dual-mode controllers shall operate with the public
6793 * address as its identity address for BR/EDR and LE. So
6794 * reject the attempt to create an invalid configuration.
6796 * The same restrictions applies when secure connections
6797 * has been enabled. For BR/EDR this is a controller feature
6798 * while for LE it is a host stack feature. This means that
6799 * switching BR/EDR back on when secure connections has been
6800 * enabled is not a supported transaction.
6802 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6803 (bacmp(&hdev->static_addr, BDADDR_ANY) ||
6804 hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
6805 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6806 MGMT_STATUS_REJECTED);
6811 cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
6815 err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
6816 set_bredr_complete);
6819 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
6820 MGMT_STATUS_FAILED);
6822 mgmt_pending_free(cmd);
6827 /* We need to flip the bit already here so that
6828 * hci_req_update_adv_data generates the correct flags.
6830 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
6833 hci_dev_unlock(hdev);
6837 static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
6839 struct mgmt_pending_cmd *cmd = data;
6840 struct mgmt_mode *cp;
6842 bt_dev_dbg(hdev, "err %d", err);
6845 u8 mgmt_err = mgmt_status(err);
6847 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
6855 hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
6856 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6859 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6860 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6863 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6864 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6868 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
6869 new_settings(hdev, cmd->sk);
6872 mgmt_pending_free(cmd);
6875 static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
6877 struct mgmt_pending_cmd *cmd = data;
6878 struct mgmt_mode *cp = cmd->param;
6881 /* Force write of val */
6882 hci_dev_set_flag(hdev, HCI_SC_ENABLED);
6884 return hci_write_sc_support_sync(hdev, val);
6887 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
6888 void *data, u16 len)
6890 struct mgmt_mode *cp = data;
6891 struct mgmt_pending_cmd *cmd;
6895 bt_dev_dbg(hdev, "sock %p", sk);
6897 if (!lmp_sc_capable(hdev) &&
6898 !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
6899 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6900 MGMT_STATUS_NOT_SUPPORTED);
6902 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6903 lmp_sc_capable(hdev) &&
6904 !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
6905 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6906 MGMT_STATUS_REJECTED);
6908 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6910 MGMT_STATUS_INVALID_PARAMS);
6914 if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
6915 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
6919 changed = !hci_dev_test_and_set_flag(hdev,
6921 if (cp->val == 0x02)
6922 hci_dev_set_flag(hdev, HCI_SC_ONLY);
6924 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6926 changed = hci_dev_test_and_clear_flag(hdev,
6928 hci_dev_clear_flag(hdev, HCI_SC_ONLY);
6931 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6936 err = new_settings(hdev, sk);
6943 if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6944 (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6945 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
6949 cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
6953 err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
6954 set_secure_conn_complete);
6957 mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
6958 MGMT_STATUS_FAILED);
6960 mgmt_pending_free(cmd);
6964 hci_dev_unlock(hdev);
6968 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6969 void *data, u16 len)
6971 struct mgmt_mode *cp = data;
6972 bool changed, use_changed;
6975 bt_dev_dbg(hdev, "sock %p", sk);
6977 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6978 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6979 MGMT_STATUS_INVALID_PARAMS);
6984 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6986 changed = hci_dev_test_and_clear_flag(hdev,
6987 HCI_KEEP_DEBUG_KEYS);
6989 if (cp->val == 0x02)
6990 use_changed = !hci_dev_test_and_set_flag(hdev,
6991 HCI_USE_DEBUG_KEYS);
6993 use_changed = hci_dev_test_and_clear_flag(hdev,
6994 HCI_USE_DEBUG_KEYS);
6996 if (hdev_is_powered(hdev) && use_changed &&
6997 hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6998 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6999 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7000 sizeof(mode), &mode);
7003 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7008 err = new_settings(hdev, sk);
7011 hci_dev_unlock(hdev);
7015 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7018 struct mgmt_cp_set_privacy *cp = cp_data;
7022 bt_dev_dbg(hdev, "sock %p", sk);
7024 if (!lmp_le_capable(hdev))
7025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7026 MGMT_STATUS_NOT_SUPPORTED);
7028 if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7029 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7030 MGMT_STATUS_INVALID_PARAMS);
7033 /* commenting out since set privacy command is always rejected
7034 * if this condition is enabled.
7036 if (hdev_is_powered(hdev))
7037 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7038 MGMT_STATUS_REJECTED);
7043 /* If user space supports this command it is also expected to
7044 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7046 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7049 changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7050 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7051 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7052 hci_adv_instances_set_rpa_expired(hdev, true);
7053 if (cp->privacy == 0x02)
7054 hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7056 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7058 changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7059 memset(hdev->irk, 0, sizeof(hdev->irk));
7060 hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7061 hci_adv_instances_set_rpa_expired(hdev, false);
7062 hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7065 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7070 err = new_settings(hdev, sk);
7073 hci_dev_unlock(hdev);
7077 static bool irk_is_valid(struct mgmt_irk_info *irk)
7079 switch (irk->addr.type) {
7080 case BDADDR_LE_PUBLIC:
7083 case BDADDR_LE_RANDOM:
7084 /* Two most significant bits shall be set */
7085 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
7093 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7096 struct mgmt_cp_load_irks *cp = cp_data;
7097 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7098 sizeof(struct mgmt_irk_info));
7099 u16 irk_count, expected_len;
7102 bt_dev_dbg(hdev, "sock %p", sk);
7104 if (!lmp_le_capable(hdev))
7105 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7106 MGMT_STATUS_NOT_SUPPORTED);
7108 irk_count = __le16_to_cpu(cp->irk_count);
7109 if (irk_count > max_irk_count) {
7110 bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7112 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7113 MGMT_STATUS_INVALID_PARAMS);
7116 expected_len = struct_size(cp, irks, irk_count);
7117 if (expected_len != len) {
7118 bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7120 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7121 MGMT_STATUS_INVALID_PARAMS);
7124 bt_dev_dbg(hdev, "irk_count %u", irk_count);
7126 for (i = 0; i < irk_count; i++) {
7127 struct mgmt_irk_info *key = &cp->irks[i];
7129 if (!irk_is_valid(key))
7130 return mgmt_cmd_status(sk, hdev->id,
7132 MGMT_STATUS_INVALID_PARAMS);
7137 hci_smp_irks_clear(hdev);
7139 for (i = 0; i < irk_count; i++) {
7140 struct mgmt_irk_info *irk = &cp->irks[i];
7142 if (hci_is_blocked_key(hdev,
7143 HCI_BLOCKED_KEY_TYPE_IRK,
7145 bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7150 hci_add_irk(hdev, &irk->addr.bdaddr,
7151 le_addr_type(irk->addr.type), irk->val,
7155 hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7157 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7159 hci_dev_unlock(hdev);
7165 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7166 void *data, u16 len)
7168 struct mgmt_cp_set_advertising_params *cp = data;
7173 BT_DBG("%s", hdev->name);
7175 if (!lmp_le_capable(hdev))
7176 return mgmt_cmd_status(sk, hdev->id,
7177 MGMT_OP_SET_ADVERTISING_PARAMS,
7178 MGMT_STATUS_NOT_SUPPORTED);
7180 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7181 return mgmt_cmd_status(sk, hdev->id,
7182 MGMT_OP_SET_ADVERTISING_PARAMS,
7185 min_interval = __le16_to_cpu(cp->interval_min);
7186 max_interval = __le16_to_cpu(cp->interval_max);
7188 if (min_interval > max_interval ||
7189 min_interval < 0x0020 || max_interval > 0x4000)
7190 return mgmt_cmd_status(sk, hdev->id,
7191 MGMT_OP_SET_ADVERTISING_PARAMS,
7192 MGMT_STATUS_INVALID_PARAMS);
7196 hdev->le_adv_min_interval = min_interval;
7197 hdev->le_adv_max_interval = max_interval;
7198 hdev->adv_filter_policy = cp->filter_policy;
7199 hdev->adv_type = cp->type;
7201 err = mgmt_cmd_complete(sk, hdev->id,
7202 MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7204 hci_dev_unlock(hdev);
7209 static void set_advertising_data_complete(struct hci_dev *hdev,
7210 u8 status, u16 opcode)
7212 struct mgmt_cp_set_advertising_data *cp;
7213 struct mgmt_pending_cmd *cmd;
7215 BT_DBG("status 0x%02x", status);
7219 cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7226 mgmt_cmd_status(cmd->sk, hdev->id,
7227 MGMT_OP_SET_ADVERTISING_DATA,
7228 mgmt_status(status));
7230 mgmt_cmd_complete(cmd->sk, hdev->id,
7231 MGMT_OP_SET_ADVERTISING_DATA, 0,
7234 mgmt_pending_remove(cmd);
7237 hci_dev_unlock(hdev);
7240 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7241 void *data, u16 len)
7243 struct mgmt_pending_cmd *cmd;
7244 struct hci_request req;
7245 struct mgmt_cp_set_advertising_data *cp = data;
7246 struct hci_cp_le_set_adv_data adv;
7249 BT_DBG("%s", hdev->name);
7251 if (!lmp_le_capable(hdev)) {
7252 return mgmt_cmd_status(sk, hdev->id,
7253 MGMT_OP_SET_ADVERTISING_DATA,
7254 MGMT_STATUS_NOT_SUPPORTED);
7259 if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7260 err = mgmt_cmd_status(sk, hdev->id,
7261 MGMT_OP_SET_ADVERTISING_DATA,
7266 if (len > HCI_MAX_AD_LENGTH) {
7267 err = mgmt_cmd_status(sk, hdev->id,
7268 MGMT_OP_SET_ADVERTISING_DATA,
7269 MGMT_STATUS_INVALID_PARAMS);
7273 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7280 hci_req_init(&req, hdev);
7282 memset(&adv, 0, sizeof(adv));
7283 memcpy(adv.data, cp->data, len);
7286 hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7288 err = hci_req_run(&req, set_advertising_data_complete);
7290 mgmt_pending_remove(cmd);
7293 hci_dev_unlock(hdev);
7298 /* Adv White List feature */
7299 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7301 struct mgmt_cp_add_dev_white_list *cp;
7302 struct mgmt_pending_cmd *cmd;
7304 BT_DBG("status 0x%02x", status);
7308 cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7315 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7316 mgmt_status(status));
7318 mgmt_cmd_complete(cmd->sk, hdev->id,
7319 MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7321 mgmt_pending_remove(cmd);
7324 hci_dev_unlock(hdev);
7327 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7328 void *data, u16 len)
7330 struct mgmt_pending_cmd *cmd;
7331 struct mgmt_cp_add_dev_white_list *cp = data;
7332 struct hci_request req;
7335 BT_DBG("%s", hdev->name);
7337 if (!lmp_le_capable(hdev))
7338 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7339 MGMT_STATUS_NOT_SUPPORTED);
7341 if (!hdev_is_powered(hdev))
7342 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7343 MGMT_STATUS_REJECTED);
7347 if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7348 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7353 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7359 hci_req_init(&req, hdev);
7361 hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
7363 err = hci_req_run(&req, add_white_list_complete);
7365 mgmt_pending_remove(cmd);
7370 hci_dev_unlock(hdev);
7375 static void remove_from_white_list_complete(struct hci_dev *hdev,
7376 u8 status, u16 opcode)
7378 struct mgmt_cp_remove_dev_from_white_list *cp;
7379 struct mgmt_pending_cmd *cmd;
7381 BT_DBG("status 0x%02x", status);
7385 cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7392 mgmt_cmd_status(cmd->sk, hdev->id,
7393 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7394 mgmt_status(status));
7396 mgmt_cmd_complete(cmd->sk, hdev->id,
7397 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7400 mgmt_pending_remove(cmd);
7403 hci_dev_unlock(hdev);
7406 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7407 void *data, u16 len)
7409 struct mgmt_pending_cmd *cmd;
7410 struct mgmt_cp_remove_dev_from_white_list *cp = data;
7411 struct hci_request req;
7414 BT_DBG("%s", hdev->name);
7416 if (!lmp_le_capable(hdev))
7417 return mgmt_cmd_status(sk, hdev->id,
7418 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7419 MGMT_STATUS_NOT_SUPPORTED);
7421 if (!hdev_is_powered(hdev))
7422 return mgmt_cmd_status(sk, hdev->id,
7423 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7424 MGMT_STATUS_REJECTED);
7428 if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7429 err = mgmt_cmd_status(sk, hdev->id,
7430 MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7435 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7442 hci_req_init(&req, hdev);
7444 hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
7446 err = hci_req_run(&req, remove_from_white_list_complete);
7448 mgmt_pending_remove(cmd);
7453 hci_dev_unlock(hdev);
7458 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7461 struct mgmt_pending_cmd *cmd;
7463 BT_DBG("status 0x%02x", status);
7467 cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7472 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7473 mgmt_status(status));
7475 mgmt_cmd_complete(cmd->sk, hdev->id,
7476 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7479 mgmt_pending_remove(cmd);
7482 hci_dev_unlock(hdev);
7485 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7486 void *data, u16 len)
7488 struct mgmt_pending_cmd *cmd;
7489 struct hci_request req;
7492 BT_DBG("%s", hdev->name);
7494 if (!lmp_le_capable(hdev))
7495 return mgmt_cmd_status(sk, hdev->id,
7496 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7497 MGMT_STATUS_NOT_SUPPORTED);
7499 if (!hdev_is_powered(hdev))
7500 return mgmt_cmd_status(sk, hdev->id,
7501 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7502 MGMT_STATUS_REJECTED);
7506 if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7507 err = mgmt_cmd_status(sk, hdev->id,
7508 MGMT_OP_CLEAR_DEV_WHITE_LIST,
7513 cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7520 hci_req_init(&req, hdev);
7522 hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
7524 err = hci_req_run(&req, clear_white_list_complete);
7526 mgmt_pending_remove(cmd);
7531 hci_dev_unlock(hdev);
7536 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7539 struct mgmt_cp_set_scan_rsp_data *cp;
7540 struct mgmt_pending_cmd *cmd;
7542 BT_DBG("status 0x%02x", status);
7546 cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7553 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7554 mgmt_status(status));
7556 mgmt_cmd_complete(cmd->sk, hdev->id,
7557 MGMT_OP_SET_SCAN_RSP_DATA, 0,
7560 mgmt_pending_remove(cmd);
7563 hci_dev_unlock(hdev);
7566 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7569 struct mgmt_pending_cmd *cmd;
7570 struct hci_request req;
7571 struct mgmt_cp_set_scan_rsp_data *cp = data;
7572 struct hci_cp_le_set_scan_rsp_data rsp;
7575 BT_DBG("%s", hdev->name);
7577 if (!lmp_le_capable(hdev))
7578 return mgmt_cmd_status(sk, hdev->id,
7579 MGMT_OP_SET_SCAN_RSP_DATA,
7580 MGMT_STATUS_NOT_SUPPORTED);
7584 if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7585 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7590 if (len > HCI_MAX_AD_LENGTH) {
7591 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7592 MGMT_STATUS_INVALID_PARAMS);
7596 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7602 hci_req_init(&req, hdev);
7604 memset(&rsp, 0, sizeof(rsp));
7605 memcpy(rsp.data, cp->data, len);
7608 hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7610 err = hci_req_run(&req, set_scan_rsp_data_complete);
7612 mgmt_pending_remove(cmd);
7615 hci_dev_unlock(hdev);
7620 static void set_rssi_threshold_complete(struct hci_dev *hdev,
7621 u8 status, u16 opcode)
7623 struct mgmt_pending_cmd *cmd;
7625 BT_DBG("status 0x%02x", status);
7629 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7634 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7635 mgmt_status(status));
7637 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7640 mgmt_pending_remove(cmd);
7643 hci_dev_unlock(hdev);
7646 static void set_rssi_disable_complete(struct hci_dev *hdev,
7647 u8 status, u16 opcode)
7649 struct mgmt_pending_cmd *cmd;
7651 BT_DBG("status 0x%02x", status);
7655 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7660 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7661 mgmt_status(status));
7663 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7666 mgmt_pending_remove(cmd);
7669 hci_dev_unlock(hdev);
7672 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
7673 void *data, u16 len)
7676 struct hci_cp_set_rssi_threshold th = { 0, };
7677 struct mgmt_cp_set_enable_rssi *cp = data;
7678 struct hci_conn *conn;
7679 struct mgmt_pending_cmd *cmd;
7680 struct hci_request req;
7685 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7687 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7688 MGMT_STATUS_FAILED);
7692 if (!lmp_le_capable(hdev)) {
7693 mgmt_pending_remove(cmd);
7694 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7695 MGMT_STATUS_NOT_SUPPORTED);
7699 if (!hdev_is_powered(hdev)) {
7700 BT_DBG("%s", hdev->name);
7701 mgmt_pending_remove(cmd);
7702 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7703 MGMT_STATUS_NOT_POWERED);
7707 if (cp->link_type == 0x01)
7708 dest_type = LE_LINK;
7710 dest_type = ACL_LINK;
7712 /* Get LE/ACL link handle info */
7713 conn = hci_conn_hash_lookup_ba(hdev,
7714 dest_type, &cp->bdaddr);
7717 err = mgmt_cmd_complete(sk, hdev->id,
7718 MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
7719 mgmt_pending_remove(cmd);
7723 hci_req_init(&req, hdev);
7725 th.hci_le_ext_opcode = 0x0B;
7727 th.conn_handle = conn->handle;
7728 th.alert_mask = 0x07;
7729 th.low_th = cp->low_th;
7730 th.in_range_th = cp->in_range_th;
7731 th.high_th = cp->high_th;
7733 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7734 err = hci_req_run(&req, set_rssi_threshold_complete);
7737 mgmt_pending_remove(cmd);
7738 BT_ERR("Error in requesting hci_req_run");
7743 hci_dev_unlock(hdev);
7747 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
7748 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7750 struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
7751 struct mgmt_cp_set_enable_rssi *cp = data;
7752 struct mgmt_pending_cmd *cmd;
7757 mgmt_rp.status = rp->status;
7758 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7759 mgmt_rp.bt_address = cp->bdaddr;
7760 mgmt_rp.link_type = cp->link_type;
7762 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7763 MGMT_STATUS_SUCCESS, &mgmt_rp,
7764 sizeof(struct mgmt_cc_rsp_enable_rssi));
7766 mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
7767 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7769 hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
7770 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7771 &mgmt_rp.bt_address, true);
7775 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7777 mgmt_pending_remove(cmd);
7779 hci_dev_unlock(hdev);
7782 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
7783 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7785 struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
7786 struct mgmt_cp_disable_rssi *cp = data;
7787 struct mgmt_pending_cmd *cmd;
7792 mgmt_rp.status = rp->status;
7793 mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7794 mgmt_rp.bt_address = cp->bdaddr;
7795 mgmt_rp.link_type = cp->link_type;
7797 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7798 MGMT_STATUS_SUCCESS, &mgmt_rp,
7799 sizeof(struct mgmt_cc_rsp_enable_rssi));
7801 mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
7802 sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7804 hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7805 &mgmt_rp.bt_address, false);
7809 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7811 mgmt_pending_remove(cmd);
7813 hci_dev_unlock(hdev);
7816 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
7817 void *data, u16 len)
7819 struct mgmt_pending_cmd *cmd;
7820 struct hci_request req;
7821 struct hci_cp_set_enable_rssi cp_en = { 0, };
7824 BT_DBG("Set Disable RSSI.");
7826 cp_en.hci_le_ext_opcode = 0x01;
7827 cp_en.le_enable_cs_Features = 0x00;
7828 cp_en.data[0] = 0x00;
7829 cp_en.data[1] = 0x00;
7830 cp_en.data[2] = 0x00;
7834 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7836 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7837 MGMT_STATUS_FAILED);
7841 if (!lmp_le_capable(hdev)) {
7842 mgmt_pending_remove(cmd);
7843 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7844 MGMT_STATUS_NOT_SUPPORTED);
7848 if (!hdev_is_powered(hdev)) {
7849 BT_DBG("%s", hdev->name);
7850 mgmt_pending_remove(cmd);
7851 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7852 MGMT_STATUS_NOT_POWERED);
7856 hci_req_init(&req, hdev);
7858 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7859 sizeof(struct hci_cp_set_enable_rssi),
7860 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7861 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7863 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7864 err = hci_req_run(&req, set_rssi_disable_complete);
7867 mgmt_pending_remove(cmd);
7868 BT_ERR("Error in requesting hci_req_run");
7873 hci_dev_unlock(hdev);
7877 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
7879 struct hci_cc_rsp_enable_rssi *rp = response;
7880 struct mgmt_pending_cmd *cmd_enable = NULL;
7881 struct mgmt_pending_cmd *cmd_disable = NULL;
7882 struct mgmt_cp_set_enable_rssi *cp_en;
7883 struct mgmt_cp_disable_rssi *cp_dis;
7886 cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7887 cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7888 hci_dev_unlock(hdev);
7891 BT_DBG("Enable Request");
7894 BT_DBG("Disable Request");
7897 cp_en = cmd_enable->param;
7902 switch (rp->le_ext_opcode) {
7904 BT_DBG("RSSI enabled.. Setting Threshold...");
7905 mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
7906 cp_en, sizeof(*cp_en));
7910 BT_DBG("Sending RSSI enable success");
7911 mgmt_rssi_enable_success(cmd_enable->sk, hdev,
7912 cp_en, rp, rp->status);
7916 } else if (cmd_disable) {
7917 cp_dis = cmd_disable->param;
7922 switch (rp->le_ext_opcode) {
7924 BT_DBG("Sending RSSI disable success");
7925 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7926 cp_dis, rp, rp->status);
7931 * Only unset RSSI Threshold values for the Link if
7932 * RSSI is monitored for other BREDR or LE Links
7934 if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
7935 BT_DBG("Unset Threshold. Other links being monitored");
7936 mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7937 cp_dis, rp, rp->status);
7939 BT_DBG("Unset Threshold. Disabling...");
7940 mgmt_set_disable_rssi(cmd_disable->sk, hdev,
7941 cp_dis, sizeof(*cp_dis));
7948 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7951 struct mgmt_pending_cmd *cmd;
7953 BT_DBG("status 0x%02x", status);
7957 cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7962 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7963 mgmt_status(status));
7965 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7968 mgmt_pending_remove(cmd);
7971 hci_dev_unlock(hdev);
7974 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7975 void *data, u16 len)
7977 struct mgmt_pending_cmd *cmd;
7978 struct hci_request req;
7979 struct mgmt_cp_set_enable_rssi *cp = data;
7980 struct hci_cp_set_enable_rssi cp_en = { 0, };
7983 BT_DBG("Set Enable RSSI.");
7985 cp_en.hci_le_ext_opcode = 0x01;
7986 cp_en.le_enable_cs_Features = 0x04;
7987 cp_en.data[0] = 0x00;
7988 cp_en.data[1] = 0x00;
7989 cp_en.data[2] = 0x00;
7993 if (!lmp_le_capable(hdev)) {
7994 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7995 MGMT_STATUS_NOT_SUPPORTED);
7999 if (!hdev_is_powered(hdev)) {
8000 BT_DBG("%s", hdev->name);
8001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8002 MGMT_STATUS_NOT_POWERED);
8006 if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
8007 BT_DBG("%s", hdev->name);
8008 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8013 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
8016 BT_DBG("%s", hdev->name);
8021 /* If RSSI is already enabled directly set Threshold values */
8022 if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
8023 hci_dev_unlock(hdev);
8024 BT_DBG("RSSI Enabled. Directly set Threshold");
8025 err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
8029 hci_req_init(&req, hdev);
8031 BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
8032 sizeof(struct hci_cp_set_enable_rssi),
8033 cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
8034 cp_en.data[0], cp_en.data[1], cp_en.data[2]);
8036 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
8037 err = hci_req_run(&req, set_rssi_enable_complete);
8040 mgmt_pending_remove(cmd);
8041 BT_ERR("Error in requesting hci_req_run");
8046 hci_dev_unlock(hdev);
8051 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8053 struct mgmt_pending_cmd *cmd;
8055 BT_DBG("status 0x%02x", status);
8059 cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
8063 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8064 MGMT_STATUS_SUCCESS, &status, 1);
8066 mgmt_pending_remove(cmd);
8069 hci_dev_unlock(hdev);
8072 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
8075 struct mgmt_pending_cmd *cmd;
8076 struct hci_request req;
8077 struct mgmt_cp_get_raw_rssi *cp = data;
8078 struct hci_cp_get_raw_rssi hci_cp;
8080 struct hci_conn *conn;
8084 BT_DBG("Get Raw RSSI.");
8088 if (!lmp_le_capable(hdev)) {
8089 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8090 MGMT_STATUS_NOT_SUPPORTED);
8094 if (cp->link_type == 0x01)
8095 dest_type = LE_LINK;
8097 dest_type = ACL_LINK;
8099 /* Get LE/BREDR link handle info */
8100 conn = hci_conn_hash_lookup_ba(hdev,
8101 dest_type, &cp->bt_address);
8103 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8104 MGMT_STATUS_NOT_CONNECTED);
8107 hci_cp.conn_handle = conn->handle;
8109 if (!hdev_is_powered(hdev)) {
8110 BT_DBG("%s", hdev->name);
8111 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8112 MGMT_STATUS_NOT_POWERED);
8116 if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
8117 BT_DBG("%s", hdev->name);
8118 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8123 cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
8125 BT_DBG("%s", hdev->name);
8130 hci_req_init(&req, hdev);
8132 BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
8133 hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
8134 err = hci_req_run(&req, get_raw_rssi_complete);
8137 mgmt_pending_remove(cmd);
8138 BT_ERR("Error in requesting hci_req_run");
8142 hci_dev_unlock(hdev);
8147 void mgmt_raw_rssi_response(struct hci_dev *hdev,
8148 struct hci_cc_rp_get_raw_rssi *rp, int success)
8150 struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
8151 struct hci_conn *conn;
8153 mgmt_rp.status = rp->status;
8154 mgmt_rp.rssi_dbm = rp->rssi_dbm;
8156 conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
8160 bacpy(&mgmt_rp.bt_address, &conn->dst);
8161 if (conn->type == LE_LINK)
8162 mgmt_rp.link_type = 0x01;
8164 mgmt_rp.link_type = 0x00;
8166 mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
8167 sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
8170 static void set_disable_threshold_complete(struct hci_dev *hdev,
8171 u8 status, u16 opcode)
8173 struct mgmt_pending_cmd *cmd;
8175 BT_DBG("status 0x%02x", status);
8179 cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
8183 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8184 MGMT_STATUS_SUCCESS, &status, 1);
8186 mgmt_pending_remove(cmd);
8189 hci_dev_unlock(hdev);
8192 /** Removes monitoring for a link*/
8193 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
8194 void *data, u16 len)
8197 struct hci_cp_set_rssi_threshold th = { 0, };
8198 struct mgmt_cp_disable_rssi *cp = data;
8199 struct hci_conn *conn;
8200 struct mgmt_pending_cmd *cmd;
8201 struct hci_request req;
8204 BT_DBG("Set Disable RSSI.");
8208 if (!lmp_le_capable(hdev)) {
8209 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8210 MGMT_STATUS_NOT_SUPPORTED);
8214 /* Get LE/ACL link handle info*/
8215 if (cp->link_type == 0x01)
8216 dest_type = LE_LINK;
8218 dest_type = ACL_LINK;
8220 conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
8222 err = mgmt_cmd_complete(sk, hdev->id,
8223 MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
8227 th.hci_le_ext_opcode = 0x0B;
8229 th.conn_handle = conn->handle;
8230 th.alert_mask = 0x00;
8232 th.in_range_th = 0x00;
8235 if (!hdev_is_powered(hdev)) {
8236 BT_DBG("%s", hdev->name);
8237 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8242 if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
8243 BT_DBG("%s", hdev->name);
8244 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8249 cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
8252 BT_DBG("%s", hdev->name);
8257 hci_req_init(&req, hdev);
8259 hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
8260 err = hci_req_run(&req, set_disable_threshold_complete);
8262 mgmt_pending_remove(cmd);
8263 BT_ERR("Error in requesting hci_req_run");
8268 hci_dev_unlock(hdev);
8273 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
8274 s8 alert_type, s8 rssi_dbm)
8276 struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
8277 struct hci_conn *conn;
8279 BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
8280 conn_handle, alert_type, rssi_dbm);
8282 conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
8285 BT_ERR("RSSI alert Error: Device not found for handle");
8288 bacpy(&mgmt_ev.bdaddr, &conn->dst);
8290 if (conn->type == LE_LINK)
8291 mgmt_ev.link_type = 0x01;
8293 mgmt_ev.link_type = 0x00;
8295 mgmt_ev.alert_type = alert_type;
8296 mgmt_ev.rssi_dbm = rssi_dbm;
8298 mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
8299 sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
8303 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
8305 struct mgmt_pending_cmd *cmd;
8309 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8311 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8315 type = hdev->le_discovery.type;
8317 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8318 mgmt_status(status), &type, sizeof(type));
8319 mgmt_pending_remove(cmd);
8324 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
8327 unsigned long timeout = 0;
8329 BT_DBG("status %d", status);
8333 mgmt_start_le_discovery_failed(hdev, status);
8334 hci_dev_unlock(hdev);
8339 hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
8340 hci_dev_unlock(hdev);
8342 if (hdev->le_discovery.type != DISCOV_TYPE_LE)
8343 BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
8348 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
8351 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
8352 void *data, u16 len)
8354 struct mgmt_cp_start_le_discovery *cp = data;
8355 struct mgmt_pending_cmd *cmd;
8356 struct hci_cp_le_set_scan_param param_cp;
8357 struct hci_cp_le_set_scan_enable enable_cp;
8358 struct hci_request req;
8359 u8 status, own_addr_type;
8362 BT_DBG("%s", hdev->name);
8364 if (!hdev_is_powered(hdev)) {
8365 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8366 MGMT_STATUS_NOT_POWERED);
8370 if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
8371 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8376 if (cp->type != DISCOV_TYPE_LE) {
8377 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8378 MGMT_STATUS_INVALID_PARAMS);
8382 cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
8388 hdev->le_discovery.type = cp->type;
8390 hci_req_init(&req, hdev);
8392 status = mgmt_le_support(hdev);
8394 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8396 mgmt_pending_remove(cmd);
8400 /* If controller is scanning, it means the background scanning
8401 * is running. Thus, we should temporarily stop it in order to
8402 * set the discovery scanning parameters.
8404 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
8405 hci_req_add_le_scan_disable(&req, false);
8407 memset(¶m_cp, 0, sizeof(param_cp));
8409 /* All active scans will be done with either a resolvable
8410 * private address (when privacy feature has been enabled)
8411 * or unresolvable private address.
8413 err = hci_update_random_address_sync(hdev, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
8415 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8416 MGMT_STATUS_FAILED);
8417 mgmt_pending_remove(cmd);
8421 param_cp.type = hdev->le_scan_type;
8422 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
8423 param_cp.window = cpu_to_le16(hdev->le_scan_window);
8424 param_cp.own_address_type = own_addr_type;
8425 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
8428 memset(&enable_cp, 0, sizeof(enable_cp));
8429 enable_cp.enable = LE_SCAN_ENABLE;
8430 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
8432 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
8435 err = hci_req_run(&req, start_le_discovery_complete);
8437 mgmt_pending_remove(cmd);
8439 hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
8445 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
8447 struct mgmt_pending_cmd *cmd;
8450 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8454 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8455 mgmt_status(status), &hdev->le_discovery.type,
8456 sizeof(hdev->le_discovery.type));
8457 mgmt_pending_remove(cmd);
8462 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
8465 BT_DBG("status %d", status);
8470 mgmt_stop_le_discovery_failed(hdev, status);
8474 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8477 hci_dev_unlock(hdev);
8480 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
8481 void *data, u16 len)
8483 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
8484 struct mgmt_pending_cmd *cmd;
8485 struct hci_request req;
8488 BT_DBG("%s", hdev->name);
8492 if (!hci_le_discovery_active(hdev)) {
8493 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8494 MGMT_STATUS_REJECTED, &mgmt_cp->type,
8495 sizeof(mgmt_cp->type));
8499 if (hdev->le_discovery.type != mgmt_cp->type) {
8500 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8501 MGMT_STATUS_INVALID_PARAMS,
8502 &mgmt_cp->type, sizeof(mgmt_cp->type));
8506 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
8512 hci_req_init(&req, hdev);
8514 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
8515 BT_DBG("unknown le discovery state %u",
8516 hdev->le_discovery.state);
8518 mgmt_pending_remove(cmd);
8519 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8520 MGMT_STATUS_FAILED, &mgmt_cp->type,
8521 sizeof(mgmt_cp->type));
8525 cancel_delayed_work(&hdev->le_scan_disable);
8526 hci_req_add_le_scan_disable(&req, false);
8528 err = hci_req_run(&req, stop_le_discovery_complete);
8530 mgmt_pending_remove(cmd);
8532 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
8535 hci_dev_unlock(hdev);
8539 /* Separate LE discovery */
8540 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
8542 struct mgmt_ev_discovering ev;
8543 struct mgmt_pending_cmd *cmd;
8545 BT_DBG("%s le discovering %u", hdev->name, discovering);
8548 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8550 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8553 u8 type = hdev->le_discovery.type;
8555 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
8557 mgmt_pending_remove(cmd);
8560 memset(&ev, 0, sizeof(ev));
8561 ev.type = hdev->le_discovery.type;
8562 ev.discovering = discovering;
8564 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8567 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
8568 void *data, u16 len)
8572 BT_DBG("%s", hdev->name);
8576 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
8578 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
8580 hci_dev_unlock(hdev);
8585 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
8590 if (min > max || min < 6 || max > 3200)
8593 if (to_multiplier < 10 || to_multiplier > 3200)
8596 if (max >= to_multiplier * 8)
8599 max_latency = (to_multiplier * 8 / max) - 1;
8601 if (latency > 499 || latency > max_latency)
8607 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
8610 struct mgmt_cp_le_conn_update *cp = data;
8612 struct hci_conn *conn;
8613 u16 min, max, latency, supervision_timeout;
8616 if (!hdev_is_powered(hdev))
8617 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8618 MGMT_STATUS_NOT_POWERED);
8620 min = __le16_to_cpu(cp->conn_interval_min);
8621 max = __le16_to_cpu(cp->conn_interval_max);
8622 latency = __le16_to_cpu(cp->conn_latency);
8623 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
8625 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
8626 min, max, latency, supervision_timeout);
8628 err = check_le_conn_update_param(min, max, latency,
8629 supervision_timeout);
8632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8633 MGMT_STATUS_INVALID_PARAMS);
8637 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
8639 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8640 MGMT_STATUS_NOT_CONNECTED);
8641 hci_dev_unlock(hdev);
8645 hci_dev_unlock(hdev);
8647 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
8649 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
8653 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
8656 struct mgmt_cp_set_manufacturer_data *cp;
8657 struct mgmt_pending_cmd *cmd;
8659 BT_DBG("status 0x%02x", status);
8663 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
8670 mgmt_cmd_status(cmd->sk, hdev->id,
8671 MGMT_OP_SET_MANUFACTURER_DATA,
8672 mgmt_status(status));
8674 mgmt_cmd_complete(cmd->sk, hdev->id,
8675 MGMT_OP_SET_MANUFACTURER_DATA, 0,
8678 mgmt_pending_remove(cmd);
8681 hci_dev_unlock(hdev);
8684 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
8685 void *data, u16 len)
8687 struct mgmt_pending_cmd *cmd;
8688 struct hci_request req;
8689 struct mgmt_cp_set_manufacturer_data *cp = data;
8690 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
8694 BT_DBG("%s", hdev->name);
8696 if (!lmp_bredr_capable(hdev))
8697 return mgmt_cmd_status(sk, hdev->id,
8698 MGMT_OP_SET_MANUFACTURER_DATA,
8699 MGMT_STATUS_NOT_SUPPORTED);
8701 if (cp->data[0] == 0 ||
8702 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
8703 return mgmt_cmd_status(sk, hdev->id,
8704 MGMT_OP_SET_MANUFACTURER_DATA,
8705 MGMT_STATUS_INVALID_PARAMS);
8707 if (cp->data[1] != 0xFF)
8708 return mgmt_cmd_status(sk, hdev->id,
8709 MGMT_OP_SET_MANUFACTURER_DATA,
8710 MGMT_STATUS_NOT_SUPPORTED);
8714 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
8715 err = mgmt_cmd_status(sk, hdev->id,
8716 MGMT_OP_SET_MANUFACTURER_DATA,
8721 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
8728 hci_req_init(&req, hdev);
8730 /* if new data is same as previous data then return command
8733 if (hdev->manufacturer_len == cp->data[0] - 1 &&
8734 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
8735 mgmt_pending_remove(cmd);
8736 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
8737 0, cp, sizeof(*cp));
8742 old_len = hdev->manufacturer_len;
8744 memcpy(old_data, hdev->manufacturer_data, old_len);
8746 hdev->manufacturer_len = cp->data[0] - 1;
8747 if (hdev->manufacturer_len > 0)
8748 memcpy(hdev->manufacturer_data, cp->data + 2,
8749 hdev->manufacturer_len);
8751 hci_update_eir_sync(hdev);
8753 err = hci_req_run(&req, set_manufacturer_data_complete);
8755 mgmt_pending_remove(cmd);
8760 hci_dev_unlock(hdev);
8765 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
8766 hdev->manufacturer_len = old_len;
8767 if (hdev->manufacturer_len > 0)
8768 memcpy(hdev->manufacturer_data, old_data,
8769 hdev->manufacturer_len);
8770 hci_dev_unlock(hdev);
8774 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
8775 void *data, u16 len)
8777 struct mgmt_cp_le_set_scan_params *cp = data;
8778 __u16 interval, window;
8781 BT_DBG("%s", hdev->name);
8783 if (!lmp_le_capable(hdev))
8784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8785 MGMT_STATUS_NOT_SUPPORTED);
8787 interval = __le16_to_cpu(cp->interval);
8789 if (interval < 0x0004 || interval > 0x4000)
8790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8791 MGMT_STATUS_INVALID_PARAMS);
8793 window = __le16_to_cpu(cp->window);
8795 if (window < 0x0004 || window > 0x4000)
8796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8797 MGMT_STATUS_INVALID_PARAMS);
8799 if (window > interval)
8800 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8801 MGMT_STATUS_INVALID_PARAMS);
8805 hdev->le_scan_type = cp->type;
8806 hdev->le_scan_interval = interval;
8807 hdev->le_scan_window = window;
8809 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
8812 /* If background scan is running, restart it so new parameters are
8815 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
8816 hdev->discovery.state == DISCOVERY_STOPPED) {
8817 struct hci_request req;
8819 hci_req_init(&req, hdev);
8821 hci_req_add_le_scan_disable(&req, false);
8822 hci_req_add_le_passive_scan(&req);
8824 hci_req_run(&req, NULL);
8827 hci_dev_unlock(hdev);
8832 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
8833 void *data, u16 len)
8835 struct mgmt_cp_set_voice_setting *cp = data;
8836 struct hci_conn *conn;
8837 struct hci_conn *sco_conn;
8841 BT_DBG("%s", hdev->name);
8843 if (!lmp_bredr_capable(hdev)) {
8844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
8845 MGMT_STATUS_NOT_SUPPORTED);
8850 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
8852 err = mgmt_cmd_complete(sk, hdev->id,
8853 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
8857 conn->voice_setting = cp->voice_setting;
8858 conn->sco_role = cp->sco_role;
8860 sco_conn = hci_conn_hash_lookup_sco(hdev);
8861 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
8862 BT_ERR("There is other SCO connection.");
8866 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
8867 if (conn->voice_setting == 0x0063)
8868 sco_connect_set_wbc(hdev);
8870 sco_connect_set_nbc(hdev);
8872 if (conn->voice_setting == 0x0063)
8873 sco_connect_set_gw_wbc(hdev);
8875 sco_connect_set_gw_nbc(hdev);
8879 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
8883 hci_dev_unlock(hdev);
8887 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
8888 void *data, u16 len)
8890 struct mgmt_rp_get_adv_tx_power *rp;
8894 BT_DBG("%s", hdev->name);
8898 rp_len = sizeof(*rp);
8899 rp = kmalloc(rp_len, GFP_KERNEL);
8905 rp->adv_tx_power = hdev->adv_tx_power;
8907 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
8913 hci_dev_unlock(hdev);
8918 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
8920 struct mgmt_ev_hardware_error ev;
8922 ev.error_code = err_code;
8923 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
8926 void mgmt_tx_timeout_error(struct hci_dev *hdev)
8928 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
8931 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
8932 u8 state_change_reason, u16 connection_handle)
8934 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
8936 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
8937 adv_instance, state_change_reason, connection_handle);
8939 mgmt_ev.adv_instance = adv_instance;
8940 mgmt_ev.state_change_reason = state_change_reason;
8941 mgmt_ev.connection_handle = connection_handle;
8943 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
8944 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
8948 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8949 void *data, u16 len)
8952 struct mgmt_cp_enable_6lowpan *cp = data;
8954 BT_DBG("%s", hdev->name);
8958 if (!hdev_is_powered(hdev)) {
8959 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8960 MGMT_STATUS_NOT_POWERED);
8964 if (!lmp_le_capable(hdev)) {
8965 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8966 MGMT_STATUS_NOT_SUPPORTED);
8970 if (cp->enable_6lowpan)
8971 bt_6lowpan_enable();
8973 bt_6lowpan_disable();
8975 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8976 MGMT_STATUS_SUCCESS, NULL, 0);
8978 hci_dev_unlock(hdev);
8982 static int connect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8983 void *data, u16 len)
8985 struct mgmt_cp_connect_6lowpan *cp = data;
8986 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8989 BT_DBG("%s", hdev->name);
8993 if (!lmp_le_capable(hdev)) {
8994 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8995 MGMT_STATUS_NOT_SUPPORTED);
8999 if (!hdev_is_powered(hdev)) {
9000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9001 MGMT_STATUS_REJECTED);
9005 if (bdaddr_type_is_le(cp->addr.type)) {
9006 if (cp->addr.type == BDADDR_LE_PUBLIC)
9007 addr_type = ADDR_LE_DEV_PUBLIC;
9009 addr_type = ADDR_LE_DEV_RANDOM;
9011 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9012 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9016 hci_dev_unlock(hdev);
9018 /* 6lowpan Connect */
9019 err = _bt_6lowpan_connect(&cp->addr.bdaddr, cp->addr.type);
9024 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9025 MGMT_STATUS_REJECTED, NULL, 0);
9030 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
9033 hci_dev_unlock(hdev);
9037 static int disconnect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
9038 void *data, u16 len)
9040 struct mgmt_cp_disconnect_6lowpan *cp = data;
9041 struct hci_conn *conn = NULL;
9042 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
9045 BT_DBG("%s", hdev->name);
9049 if (!lmp_le_capable(hdev)) {
9050 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
9051 MGMT_STATUS_NOT_SUPPORTED);
9055 if (!hdev_is_powered(hdev)) {
9056 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
9057 MGMT_STATUS_REJECTED);
9061 if (bdaddr_type_is_le(cp->addr.type)) {
9062 if (cp->addr.type == BDADDR_LE_PUBLIC)
9063 addr_type = ADDR_LE_DEV_PUBLIC;
9065 addr_type = ADDR_LE_DEV_RANDOM;
9067 err = mgmt_cmd_complete(sk, hdev->id,
9068 MGMT_OP_DISCONNECT_6LOWPAN,
9069 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9073 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9075 err = mgmt_cmd_complete(sk, hdev->id,
9076 MGMT_OP_DISCONNECT_6LOWPAN,
9077 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
9081 if (conn->dst_type != addr_type) {
9082 err = mgmt_cmd_complete(sk, hdev->id,
9083 MGMT_OP_DISCONNECT_6LOWPAN,
9084 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9088 if (conn->state != BT_CONNECTED) {
9089 err = mgmt_cmd_complete(sk, hdev->id,
9090 MGMT_OP_DISCONNECT_6LOWPAN,
9091 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
9095 /* 6lowpan Disconnect */
9096 err = _bt_6lowpan_disconnect(conn->l2cap_data, cp->addr.type);
9098 err = mgmt_cmd_complete(sk, hdev->id,
9099 MGMT_OP_DISCONNECT_6LOWPAN,
9100 MGMT_STATUS_REJECTED, NULL, 0);
9104 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
9108 hci_dev_unlock(hdev);
9112 void mgmt_6lowpan_conn_changed(struct hci_dev *hdev, char if_name[16],
9113 bdaddr_t *bdaddr, u8 addr_type, bool connected)
9116 struct mgmt_ev_6lowpan_conn_state_changed *ev = (void *)buf;
9119 memset(buf, 0, sizeof(buf));
9120 bacpy(&ev->addr.bdaddr, bdaddr);
9121 ev->addr.type = addr_type;
9122 ev->connected = connected;
9123 memcpy(ev->ifname, (__u8 *)if_name, 16);
9125 ev_size = sizeof(*ev);
9127 mgmt_event(MGMT_EV_6LOWPAN_CONN_STATE_CHANGED, hdev, ev, ev_size, NULL);
9130 void mgmt_le_read_maximum_data_length_complete(struct hci_dev *hdev, u8 status)
9132 struct mgmt_pending_cmd *cmd;
9133 struct mgmt_rp_le_read_maximum_data_length rp;
9135 BT_DBG("%s status %u", hdev->name, status);
9137 cmd = pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev);
9142 mgmt_cmd_status(cmd->sk, hdev->id,
9143 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9144 mgmt_status(status));
9146 memset(&rp, 0, sizeof(rp));
9148 rp.max_tx_octets = cpu_to_le16(hdev->le_max_tx_len);
9149 rp.max_tx_time = cpu_to_le16(hdev->le_max_tx_time);
9150 rp.max_rx_octets = cpu_to_le16(hdev->le_max_rx_len);
9151 rp.max_rx_time = cpu_to_le16(hdev->le_max_rx_time);
9153 mgmt_cmd_complete(cmd->sk, hdev->id,
9154 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, 0,
9157 mgmt_pending_remove(cmd);
9160 static int read_maximum_le_data_length(struct sock *sk,
9161 struct hci_dev *hdev, void *data, u16 len)
9163 struct mgmt_pending_cmd *cmd;
9166 BT_DBG("read_maximum_le_data_length %s", hdev->name);
9170 if (!hdev_is_powered(hdev)) {
9171 err = mgmt_cmd_status(sk, hdev->id,
9172 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9173 MGMT_STATUS_NOT_POWERED);
9177 if (!lmp_le_capable(hdev)) {
9178 err = mgmt_cmd_status(sk, hdev->id,
9179 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9180 MGMT_STATUS_NOT_SUPPORTED);
9184 if (pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev)) {
9185 err = mgmt_cmd_status(sk, hdev->id,
9186 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9191 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9198 err = hci_send_cmd(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
9200 mgmt_pending_remove(cmd);
9203 hci_dev_unlock(hdev);
9207 void mgmt_le_write_host_suggested_data_length_complete(struct hci_dev *hdev,
9210 struct mgmt_pending_cmd *cmd;
9212 BT_DBG("status 0x%02x", status);
9216 cmd = pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev);
9218 BT_ERR("cmd not found in the pending list");
9223 mgmt_cmd_status(cmd->sk, hdev->id,
9224 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9225 mgmt_status(status));
9227 mgmt_cmd_complete(cmd->sk, hdev->id,
9228 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9231 mgmt_pending_remove(cmd);
9234 hci_dev_unlock(hdev);
9237 static int write_host_suggested_le_data_length(struct sock *sk,
9238 struct hci_dev *hdev, void *data, u16 len)
9240 struct mgmt_pending_cmd *cmd;
9241 struct mgmt_cp_le_write_host_suggested_data_length *cp = data;
9242 struct hci_cp_le_write_def_data_len hci_data;
9245 BT_DBG("Write host suggested data length request for %s", hdev->name);
9249 if (!hdev_is_powered(hdev)) {
9250 err = mgmt_cmd_status(sk, hdev->id,
9251 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9252 MGMT_STATUS_NOT_POWERED);
9256 if (!lmp_le_capable(hdev)) {
9257 err = mgmt_cmd_status(sk, hdev->id,
9258 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9259 MGMT_STATUS_NOT_SUPPORTED);
9263 if (pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
9264 err = mgmt_cmd_status(sk, hdev->id,
9265 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9270 cmd = mgmt_pending_add(sk, MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9277 hci_data.tx_len = cp->def_tx_octets;
9278 hci_data.tx_time = cp->def_tx_time;
9280 err = hci_send_cmd(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
9281 sizeof(hci_data), &hci_data);
9283 mgmt_pending_remove(cmd);
9286 hci_dev_unlock(hdev);
9290 #endif /* TIZEN_BT */
9292 static bool ltk_is_valid(struct mgmt_ltk_info *key)
9294 if (key->initiator != 0x00 && key->initiator != 0x01)
9297 switch (key->addr.type) {
9298 case BDADDR_LE_PUBLIC:
9301 case BDADDR_LE_RANDOM:
9302 /* Two most significant bits shall be set */
9303 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
9311 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
9312 void *cp_data, u16 len)
9314 struct mgmt_cp_load_long_term_keys *cp = cp_data;
9315 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
9316 sizeof(struct mgmt_ltk_info));
9317 u16 key_count, expected_len;
9320 bt_dev_dbg(hdev, "sock %p", sk);
9322 if (!lmp_le_capable(hdev))
9323 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9324 MGMT_STATUS_NOT_SUPPORTED);
9326 key_count = __le16_to_cpu(cp->key_count);
9327 if (key_count > max_key_count) {
9328 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
9330 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9331 MGMT_STATUS_INVALID_PARAMS);
9334 expected_len = struct_size(cp, keys, key_count);
9335 if (expected_len != len) {
9336 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
9338 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9339 MGMT_STATUS_INVALID_PARAMS);
9342 bt_dev_dbg(hdev, "key_count %u", key_count);
9344 for (i = 0; i < key_count; i++) {
9345 struct mgmt_ltk_info *key = &cp->keys[i];
9347 if (!ltk_is_valid(key))
9348 return mgmt_cmd_status(sk, hdev->id,
9349 MGMT_OP_LOAD_LONG_TERM_KEYS,
9350 MGMT_STATUS_INVALID_PARAMS);
9355 hci_smp_ltks_clear(hdev);
9357 for (i = 0; i < key_count; i++) {
9358 struct mgmt_ltk_info *key = &cp->keys[i];
9359 u8 type, authenticated;
9361 if (hci_is_blocked_key(hdev,
9362 HCI_BLOCKED_KEY_TYPE_LTK,
9364 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
9369 switch (key->type) {
9370 case MGMT_LTK_UNAUTHENTICATED:
9371 authenticated = 0x00;
9372 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9374 case MGMT_LTK_AUTHENTICATED:
9375 authenticated = 0x01;
9376 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9378 case MGMT_LTK_P256_UNAUTH:
9379 authenticated = 0x00;
9380 type = SMP_LTK_P256;
9382 case MGMT_LTK_P256_AUTH:
9383 authenticated = 0x01;
9384 type = SMP_LTK_P256;
9386 case MGMT_LTK_P256_DEBUG:
9387 authenticated = 0x00;
9388 type = SMP_LTK_P256_DEBUG;
9394 hci_add_ltk(hdev, &key->addr.bdaddr,
9395 le_addr_type(key->addr.type), type, authenticated,
9396 key->val, key->enc_size, key->ediv, key->rand);
9399 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
9402 hci_dev_unlock(hdev);
9407 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
9409 struct mgmt_pending_cmd *cmd = data;
9410 struct hci_conn *conn = cmd->user_data;
9411 struct mgmt_cp_get_conn_info *cp = cmd->param;
9412 struct mgmt_rp_get_conn_info rp;
9415 bt_dev_dbg(hdev, "err %d", err);
9417 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
9419 status = mgmt_status(err);
9420 if (status == MGMT_STATUS_SUCCESS) {
9421 rp.rssi = conn->rssi;
9422 rp.tx_power = conn->tx_power;
9423 rp.max_tx_power = conn->max_tx_power;
9425 rp.rssi = HCI_RSSI_INVALID;
9426 rp.tx_power = HCI_TX_POWER_INVALID;
9427 rp.max_tx_power = HCI_TX_POWER_INVALID;
9430 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
9433 mgmt_pending_free(cmd);
9436 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
9438 struct mgmt_pending_cmd *cmd = data;
9439 struct mgmt_cp_get_conn_info *cp = cmd->param;
9440 struct hci_conn *conn;
9444 /* Make sure we are still connected */
9445 if (cp->addr.type == BDADDR_BREDR)
9446 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9449 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9451 if (!conn || conn->state != BT_CONNECTED)
9452 return MGMT_STATUS_NOT_CONNECTED;
9454 cmd->user_data = conn;
9455 handle = cpu_to_le16(conn->handle);
9457 /* Refresh RSSI each time */
9458 err = hci_read_rssi_sync(hdev, handle);
9460 /* For LE links TX power does not change thus we don't need to
9461 * query for it once value is known.
9463 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
9464 conn->tx_power == HCI_TX_POWER_INVALID))
9465 err = hci_read_tx_power_sync(hdev, handle, 0x00);
9467 /* Max TX power needs to be read only once per connection */
9468 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
9469 err = hci_read_tx_power_sync(hdev, handle, 0x01);
9474 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
9477 struct mgmt_cp_get_conn_info *cp = data;
9478 struct mgmt_rp_get_conn_info rp;
9479 struct hci_conn *conn;
9480 unsigned long conn_info_age;
9483 bt_dev_dbg(hdev, "sock %p", sk);
9485 memset(&rp, 0, sizeof(rp));
9486 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9487 rp.addr.type = cp->addr.type;
9489 if (!bdaddr_type_is_valid(cp->addr.type))
9490 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9491 MGMT_STATUS_INVALID_PARAMS,
9496 if (!hdev_is_powered(hdev)) {
9497 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9498 MGMT_STATUS_NOT_POWERED, &rp,
9503 if (cp->addr.type == BDADDR_BREDR)
9504 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9507 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9509 if (!conn || conn->state != BT_CONNECTED) {
9510 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9511 MGMT_STATUS_NOT_CONNECTED, &rp,
9516 /* To avoid client trying to guess when to poll again for information we
9517 * calculate conn info age as random value between min/max set in hdev.
9519 conn_info_age = hdev->conn_info_min_age +
9520 prandom_u32_max(hdev->conn_info_max_age -
9521 hdev->conn_info_min_age);
9523 /* Query controller to refresh cached values if they are too old or were
9526 if (time_after(jiffies, conn->conn_info_timestamp +
9527 msecs_to_jiffies(conn_info_age)) ||
9528 !conn->conn_info_timestamp) {
9529 struct mgmt_pending_cmd *cmd;
9531 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
9536 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
9537 cmd, get_conn_info_complete);
9541 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9542 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9545 mgmt_pending_free(cmd);
9550 conn->conn_info_timestamp = jiffies;
9552 /* Cache is valid, just reply with values cached in hci_conn */
9553 rp.rssi = conn->rssi;
9554 rp.tx_power = conn->tx_power;
9555 rp.max_tx_power = conn->max_tx_power;
9557 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9558 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9562 hci_dev_unlock(hdev);
9566 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
9568 struct mgmt_pending_cmd *cmd = data;
9569 struct mgmt_cp_get_clock_info *cp = cmd->param;
9570 struct mgmt_rp_get_clock_info rp;
9571 struct hci_conn *conn = cmd->user_data;
9572 u8 status = mgmt_status(err);
9574 bt_dev_dbg(hdev, "err %d", err);
9576 memset(&rp, 0, sizeof(rp));
9577 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9578 rp.addr.type = cp->addr.type;
9583 rp.local_clock = cpu_to_le32(hdev->clock);
9586 rp.piconet_clock = cpu_to_le32(conn->clock);
9587 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
9591 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
9594 mgmt_pending_free(cmd);
9597 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
9599 struct mgmt_pending_cmd *cmd = data;
9600 struct mgmt_cp_get_clock_info *cp = cmd->param;
9601 struct hci_cp_read_clock hci_cp;
9602 struct hci_conn *conn;
9604 memset(&hci_cp, 0, sizeof(hci_cp));
9605 hci_read_clock_sync(hdev, &hci_cp);
9607 /* Make sure connection still exists */
9608 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
9609 if (!conn || conn->state != BT_CONNECTED)
9610 return MGMT_STATUS_NOT_CONNECTED;
9612 cmd->user_data = conn;
9613 hci_cp.handle = cpu_to_le16(conn->handle);
9614 hci_cp.which = 0x01; /* Piconet clock */
9616 return hci_read_clock_sync(hdev, &hci_cp);
9619 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
9622 struct mgmt_cp_get_clock_info *cp = data;
9623 struct mgmt_rp_get_clock_info rp;
9624 struct mgmt_pending_cmd *cmd;
9625 struct hci_conn *conn;
9628 bt_dev_dbg(hdev, "sock %p", sk);
9630 memset(&rp, 0, sizeof(rp));
9631 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9632 rp.addr.type = cp->addr.type;
9634 if (cp->addr.type != BDADDR_BREDR)
9635 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9636 MGMT_STATUS_INVALID_PARAMS,
9641 if (!hdev_is_powered(hdev)) {
9642 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9643 MGMT_STATUS_NOT_POWERED, &rp,
9648 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9649 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9651 if (!conn || conn->state != BT_CONNECTED) {
9652 err = mgmt_cmd_complete(sk, hdev->id,
9653 MGMT_OP_GET_CLOCK_INFO,
9654 MGMT_STATUS_NOT_CONNECTED,
9662 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
9666 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
9667 get_clock_info_complete);
9670 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9671 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9674 mgmt_pending_free(cmd);
9679 hci_dev_unlock(hdev);
9683 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
9685 struct hci_conn *conn;
9687 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
9691 if (conn->dst_type != type)
9694 if (conn->state != BT_CONNECTED)
9700 /* This function requires the caller holds hdev->lock */
9701 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
9702 u8 addr_type, u8 auto_connect)
9704 struct hci_conn_params *params;
9706 params = hci_conn_params_add(hdev, addr, addr_type);
9710 if (params->auto_connect == auto_connect)
9713 list_del_init(¶ms->action);
9715 switch (auto_connect) {
9716 case HCI_AUTO_CONN_DISABLED:
9717 case HCI_AUTO_CONN_LINK_LOSS:
9718 /* If auto connect is being disabled when we're trying to
9719 * connect to device, keep connecting.
9721 if (params->explicit_connect)
9722 list_add(¶ms->action, &hdev->pend_le_conns);
9724 case HCI_AUTO_CONN_REPORT:
9725 if (params->explicit_connect)
9726 list_add(¶ms->action, &hdev->pend_le_conns);
9728 list_add(¶ms->action, &hdev->pend_le_reports);
9730 case HCI_AUTO_CONN_DIRECT:
9731 case HCI_AUTO_CONN_ALWAYS:
9732 if (!is_connected(hdev, addr, addr_type))
9733 list_add(¶ms->action, &hdev->pend_le_conns);
9737 params->auto_connect = auto_connect;
9739 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
9740 addr, addr_type, auto_connect);
9745 static void device_added(struct sock *sk, struct hci_dev *hdev,
9746 bdaddr_t *bdaddr, u8 type, u8 action)
9748 struct mgmt_ev_device_added ev;
9750 bacpy(&ev.addr.bdaddr, bdaddr);
9751 ev.addr.type = type;
9754 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
9757 static int add_device_sync(struct hci_dev *hdev, void *data)
9759 return hci_update_passive_scan_sync(hdev);
9762 static int add_device(struct sock *sk, struct hci_dev *hdev,
9763 void *data, u16 len)
9765 struct mgmt_cp_add_device *cp = data;
9766 u8 auto_conn, addr_type;
9767 struct hci_conn_params *params;
9769 u32 current_flags = 0;
9770 u32 supported_flags;
9772 bt_dev_dbg(hdev, "sock %p", sk);
9774 if (!bdaddr_type_is_valid(cp->addr.type) ||
9775 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
9776 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9777 MGMT_STATUS_INVALID_PARAMS,
9778 &cp->addr, sizeof(cp->addr));
9780 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
9781 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9782 MGMT_STATUS_INVALID_PARAMS,
9783 &cp->addr, sizeof(cp->addr));
9787 if (cp->addr.type == BDADDR_BREDR) {
9788 /* Only incoming connections action is supported for now */
9789 if (cp->action != 0x01) {
9790 err = mgmt_cmd_complete(sk, hdev->id,
9792 MGMT_STATUS_INVALID_PARAMS,
9793 &cp->addr, sizeof(cp->addr));
9797 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
9803 hci_update_scan(hdev);
9808 addr_type = le_addr_type(cp->addr.type);
9810 if (cp->action == 0x02)
9811 auto_conn = HCI_AUTO_CONN_ALWAYS;
9812 else if (cp->action == 0x01)
9813 auto_conn = HCI_AUTO_CONN_DIRECT;
9815 auto_conn = HCI_AUTO_CONN_REPORT;
9817 /* Kernel internally uses conn_params with resolvable private
9818 * address, but Add Device allows only identity addresses.
9819 * Make sure it is enforced before calling
9820 * hci_conn_params_lookup.
9822 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9823 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9824 MGMT_STATUS_INVALID_PARAMS,
9825 &cp->addr, sizeof(cp->addr));
9829 /* If the connection parameters don't exist for this device,
9830 * they will be created and configured with defaults.
9832 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
9834 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9835 MGMT_STATUS_FAILED, &cp->addr,
9839 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9842 current_flags = params->flags;
9845 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
9850 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
9851 supported_flags = hdev->conn_flags;
9852 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
9853 supported_flags, current_flags);
9855 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9856 MGMT_STATUS_SUCCESS, &cp->addr,
9860 hci_dev_unlock(hdev);
9864 static void device_removed(struct sock *sk, struct hci_dev *hdev,
9865 bdaddr_t *bdaddr, u8 type)
9867 struct mgmt_ev_device_removed ev;
9869 bacpy(&ev.addr.bdaddr, bdaddr);
9870 ev.addr.type = type;
9872 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
9875 static int remove_device_sync(struct hci_dev *hdev, void *data)
9877 return hci_update_passive_scan_sync(hdev);
9880 static int remove_device(struct sock *sk, struct hci_dev *hdev,
9881 void *data, u16 len)
9883 struct mgmt_cp_remove_device *cp = data;
9886 bt_dev_dbg(hdev, "sock %p", sk);
9890 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9891 struct hci_conn_params *params;
9894 if (!bdaddr_type_is_valid(cp->addr.type)) {
9895 err = mgmt_cmd_complete(sk, hdev->id,
9896 MGMT_OP_REMOVE_DEVICE,
9897 MGMT_STATUS_INVALID_PARAMS,
9898 &cp->addr, sizeof(cp->addr));
9902 if (cp->addr.type == BDADDR_BREDR) {
9903 err = hci_bdaddr_list_del(&hdev->accept_list,
9907 err = mgmt_cmd_complete(sk, hdev->id,
9908 MGMT_OP_REMOVE_DEVICE,
9909 MGMT_STATUS_INVALID_PARAMS,
9915 hci_update_scan(hdev);
9917 device_removed(sk, hdev, &cp->addr.bdaddr,
9922 addr_type = le_addr_type(cp->addr.type);
9924 /* Kernel internally uses conn_params with resolvable private
9925 * address, but Remove Device allows only identity addresses.
9926 * Make sure it is enforced before calling
9927 * hci_conn_params_lookup.
9929 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9930 err = mgmt_cmd_complete(sk, hdev->id,
9931 MGMT_OP_REMOVE_DEVICE,
9932 MGMT_STATUS_INVALID_PARAMS,
9933 &cp->addr, sizeof(cp->addr));
9937 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
9940 err = mgmt_cmd_complete(sk, hdev->id,
9941 MGMT_OP_REMOVE_DEVICE,
9942 MGMT_STATUS_INVALID_PARAMS,
9943 &cp->addr, sizeof(cp->addr));
9947 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
9948 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
9949 err = mgmt_cmd_complete(sk, hdev->id,
9950 MGMT_OP_REMOVE_DEVICE,
9951 MGMT_STATUS_INVALID_PARAMS,
9952 &cp->addr, sizeof(cp->addr));
9956 list_del(¶ms->action);
9957 list_del(¶ms->list);
9960 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
9962 struct hci_conn_params *p, *tmp;
9963 struct bdaddr_list *b, *btmp;
9965 if (cp->addr.type) {
9966 err = mgmt_cmd_complete(sk, hdev->id,
9967 MGMT_OP_REMOVE_DEVICE,
9968 MGMT_STATUS_INVALID_PARAMS,
9969 &cp->addr, sizeof(cp->addr));
9973 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
9974 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
9979 hci_update_scan(hdev);
9981 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
9982 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
9984 device_removed(sk, hdev, &p->addr, p->addr_type);
9985 if (p->explicit_connect) {
9986 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
9989 list_del(&p->action);
9994 bt_dev_dbg(hdev, "All LE connection parameters were removed");
9997 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
10000 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
10001 MGMT_STATUS_SUCCESS, &cp->addr,
10004 hci_dev_unlock(hdev);
10008 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
10011 struct mgmt_cp_load_conn_param *cp = data;
10012 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
10013 sizeof(struct mgmt_conn_param));
10014 u16 param_count, expected_len;
10017 if (!lmp_le_capable(hdev))
10018 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10019 MGMT_STATUS_NOT_SUPPORTED);
10021 param_count = __le16_to_cpu(cp->param_count);
10022 if (param_count > max_param_count) {
10023 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
10025 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10026 MGMT_STATUS_INVALID_PARAMS);
10029 expected_len = struct_size(cp, params, param_count);
10030 if (expected_len != len) {
10031 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
10032 expected_len, len);
10033 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10034 MGMT_STATUS_INVALID_PARAMS);
10037 bt_dev_dbg(hdev, "param_count %u", param_count);
10039 hci_dev_lock(hdev);
10041 hci_conn_params_clear_disabled(hdev);
10043 for (i = 0; i < param_count; i++) {
10044 struct mgmt_conn_param *param = &cp->params[i];
10045 struct hci_conn_params *hci_param;
10046 u16 min, max, latency, timeout;
10049 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
10052 if (param->addr.type == BDADDR_LE_PUBLIC) {
10053 addr_type = ADDR_LE_DEV_PUBLIC;
10054 } else if (param->addr.type == BDADDR_LE_RANDOM) {
10055 addr_type = ADDR_LE_DEV_RANDOM;
10057 bt_dev_err(hdev, "ignoring invalid connection parameters");
10061 min = le16_to_cpu(param->min_interval);
10062 max = le16_to_cpu(param->max_interval);
10063 latency = le16_to_cpu(param->latency);
10064 timeout = le16_to_cpu(param->timeout);
10066 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
10067 min, max, latency, timeout);
10069 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
10070 bt_dev_err(hdev, "ignoring invalid connection parameters");
10074 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
10077 bt_dev_err(hdev, "failed to add connection parameters");
10081 hci_param->conn_min_interval = min;
10082 hci_param->conn_max_interval = max;
10083 hci_param->conn_latency = latency;
10084 hci_param->supervision_timeout = timeout;
10087 hci_dev_unlock(hdev);
10089 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
10093 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
10094 void *data, u16 len)
10096 struct mgmt_cp_set_external_config *cp = data;
10100 bt_dev_dbg(hdev, "sock %p", sk);
10102 if (hdev_is_powered(hdev))
10103 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10104 MGMT_STATUS_REJECTED);
10106 if (cp->config != 0x00 && cp->config != 0x01)
10107 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10108 MGMT_STATUS_INVALID_PARAMS);
10110 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
10111 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10112 MGMT_STATUS_NOT_SUPPORTED);
10114 hci_dev_lock(hdev);
10117 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
10119 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
10121 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
10128 err = new_options(hdev, sk);
10130 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
10131 mgmt_index_removed(hdev);
10133 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
10134 hci_dev_set_flag(hdev, HCI_CONFIG);
10135 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
10137 queue_work(hdev->req_workqueue, &hdev->power_on);
10139 set_bit(HCI_RAW, &hdev->flags);
10140 mgmt_index_added(hdev);
10145 hci_dev_unlock(hdev);
10149 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
10150 void *data, u16 len)
10152 struct mgmt_cp_set_public_address *cp = data;
10156 bt_dev_dbg(hdev, "sock %p", sk);
10158 if (hdev_is_powered(hdev))
10159 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10160 MGMT_STATUS_REJECTED);
10162 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
10163 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10164 MGMT_STATUS_INVALID_PARAMS);
10166 if (!hdev->set_bdaddr)
10167 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10168 MGMT_STATUS_NOT_SUPPORTED);
10170 hci_dev_lock(hdev);
10172 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
10173 bacpy(&hdev->public_addr, &cp->bdaddr);
10175 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
10182 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
10183 err = new_options(hdev, sk);
10185 if (is_configured(hdev)) {
10186 mgmt_index_removed(hdev);
10188 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
10190 hci_dev_set_flag(hdev, HCI_CONFIG);
10191 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
10193 queue_work(hdev->req_workqueue, &hdev->power_on);
10197 hci_dev_unlock(hdev);
10202 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
10206 struct mgmt_ev_device_name_update *ev = (void *)buf;
10212 bacpy(&ev->addr.bdaddr, bdaddr);
10213 ev->addr.type = BDADDR_BREDR;
10215 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
10218 ev->eir_len = cpu_to_le16(eir_len);
10220 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
10221 sizeof(*ev) + eir_len, NULL);
10224 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10225 u8 link_type, u8 addr_type, u8 status)
10227 struct mgmt_ev_conn_update_failed ev;
10229 bacpy(&ev.addr.bdaddr, bdaddr);
10230 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10231 ev.status = status;
10233 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
10234 &ev, sizeof(ev), NULL);
10237 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
10238 u8 link_type, u8 addr_type, u16 conn_interval,
10239 u16 conn_latency, u16 supervision_timeout)
10241 struct mgmt_ev_conn_updated ev;
10243 bacpy(&ev.addr.bdaddr, bdaddr);
10244 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10245 ev.conn_interval = cpu_to_le16(conn_interval);
10246 ev.conn_latency = cpu_to_le16(conn_latency);
10247 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
10249 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
10250 &ev, sizeof(ev), NULL);
10253 /* le device found event - Pass adv type */
10254 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10255 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
10256 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
10259 struct mgmt_ev_le_device_found *ev = (void *)buf;
10262 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
10265 /* Make sure that the buffer is big enough. The 5 extra bytes
10266 * are for the potential CoD field.
10268 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
10271 memset(buf, 0, sizeof(buf));
10273 bacpy(&ev->addr.bdaddr, bdaddr);
10274 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10276 ev->flags = cpu_to_le32(flags);
10277 ev->adv_type = adv_type;
10280 memcpy(ev->eir, eir, eir_len);
10282 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
10283 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10286 if (scan_rsp_len > 0)
10287 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
10289 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10290 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
10292 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
10296 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
10299 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
10300 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
10301 u8 *h192, *r192, *h256, *r256;
10302 struct mgmt_pending_cmd *cmd = data;
10303 struct sk_buff *skb = cmd->skb;
10304 u8 status = mgmt_status(err);
10307 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
10312 status = MGMT_STATUS_FAILED;
10313 else if (IS_ERR(skb))
10314 status = mgmt_status(PTR_ERR(skb));
10316 status = mgmt_status(skb->data[0]);
10319 bt_dev_dbg(hdev, "status %u", status);
10321 mgmt_cp = cmd->param;
10324 status = mgmt_status(status);
10331 } else if (!bredr_sc_enabled(hdev)) {
10332 struct hci_rp_read_local_oob_data *rp;
10334 if (skb->len != sizeof(*rp)) {
10335 status = MGMT_STATUS_FAILED;
10338 status = MGMT_STATUS_SUCCESS;
10339 rp = (void *)skb->data;
10341 eir_len = 5 + 18 + 18;
10348 struct hci_rp_read_local_oob_ext_data *rp;
10350 if (skb->len != sizeof(*rp)) {
10351 status = MGMT_STATUS_FAILED;
10354 status = MGMT_STATUS_SUCCESS;
10355 rp = (void *)skb->data;
10357 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
10358 eir_len = 5 + 18 + 18;
10362 eir_len = 5 + 18 + 18 + 18 + 18;
10363 h192 = rp->hash192;
10364 r192 = rp->rand192;
10367 h256 = rp->hash256;
10368 r256 = rp->rand256;
10372 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
10379 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
10380 hdev->dev_class, 3);
10382 if (h192 && r192) {
10383 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10384 EIR_SSP_HASH_C192, h192, 16);
10385 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10386 EIR_SSP_RAND_R192, r192, 16);
10389 if (h256 && r256) {
10390 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10391 EIR_SSP_HASH_C256, h256, 16);
10392 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10393 EIR_SSP_RAND_R256, r256, 16);
10397 mgmt_rp->type = mgmt_cp->type;
10398 mgmt_rp->eir_len = cpu_to_le16(eir_len);
10400 err = mgmt_cmd_complete(cmd->sk, hdev->id,
10401 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
10402 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
10403 if (err < 0 || status)
10406 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
10408 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10409 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
10410 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
10412 if (skb && !IS_ERR(skb))
10416 mgmt_pending_remove(cmd);
10419 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
10420 struct mgmt_cp_read_local_oob_ext_data *cp)
10422 struct mgmt_pending_cmd *cmd;
10425 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
10430 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
10431 read_local_oob_ext_data_complete);
10434 mgmt_pending_remove(cmd);
10441 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
10442 void *data, u16 data_len)
10444 struct mgmt_cp_read_local_oob_ext_data *cp = data;
10445 struct mgmt_rp_read_local_oob_ext_data *rp;
10448 u8 status, flags, role, addr[7], hash[16], rand[16];
10451 bt_dev_dbg(hdev, "sock %p", sk);
10453 if (hdev_is_powered(hdev)) {
10454 switch (cp->type) {
10455 case BIT(BDADDR_BREDR):
10456 status = mgmt_bredr_support(hdev);
10462 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10463 status = mgmt_le_support(hdev);
10467 eir_len = 9 + 3 + 18 + 18 + 3;
10470 status = MGMT_STATUS_INVALID_PARAMS;
10475 status = MGMT_STATUS_NOT_POWERED;
10479 rp_len = sizeof(*rp) + eir_len;
10480 rp = kmalloc(rp_len, GFP_ATOMIC);
10484 if (!status && !lmp_ssp_capable(hdev)) {
10485 status = MGMT_STATUS_NOT_SUPPORTED;
10492 hci_dev_lock(hdev);
10495 switch (cp->type) {
10496 case BIT(BDADDR_BREDR):
10497 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10498 err = read_local_ssp_oob_req(hdev, sk, cp);
10499 hci_dev_unlock(hdev);
10503 status = MGMT_STATUS_FAILED;
10506 eir_len = eir_append_data(rp->eir, eir_len,
10508 hdev->dev_class, 3);
10511 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10512 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
10513 smp_generate_oob(hdev, hash, rand) < 0) {
10514 hci_dev_unlock(hdev);
10515 status = MGMT_STATUS_FAILED;
10519 /* This should return the active RPA, but since the RPA
10520 * is only programmed on demand, it is really hard to fill
10521 * this in at the moment. For now disallow retrieving
10522 * local out-of-band data when privacy is in use.
10524 * Returning the identity address will not help here since
10525 * pairing happens before the identity resolving key is
10526 * known and thus the connection establishment happens
10527 * based on the RPA and not the identity address.
10529 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
10530 hci_dev_unlock(hdev);
10531 status = MGMT_STATUS_REJECTED;
10535 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
10536 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
10537 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
10538 bacmp(&hdev->static_addr, BDADDR_ANY))) {
10539 memcpy(addr, &hdev->static_addr, 6);
10542 memcpy(addr, &hdev->bdaddr, 6);
10546 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
10547 addr, sizeof(addr));
10549 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
10554 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
10555 &role, sizeof(role));
10557 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
10558 eir_len = eir_append_data(rp->eir, eir_len,
10560 hash, sizeof(hash));
10562 eir_len = eir_append_data(rp->eir, eir_len,
10564 rand, sizeof(rand));
10567 flags = mgmt_get_adv_discov_flags(hdev);
10569 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
10570 flags |= LE_AD_NO_BREDR;
10572 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
10573 &flags, sizeof(flags));
10577 hci_dev_unlock(hdev);
10579 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
10581 status = MGMT_STATUS_SUCCESS;
10584 rp->type = cp->type;
10585 rp->eir_len = cpu_to_le16(eir_len);
10587 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
10588 status, rp, sizeof(*rp) + eir_len);
10589 if (err < 0 || status)
10592 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10593 rp, sizeof(*rp) + eir_len,
10594 HCI_MGMT_OOB_DATA_EVENTS, sk);
10602 static u32 get_supported_adv_flags(struct hci_dev *hdev)
10606 flags |= MGMT_ADV_FLAG_CONNECTABLE;
10607 flags |= MGMT_ADV_FLAG_DISCOV;
10608 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
10609 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
10610 flags |= MGMT_ADV_FLAG_APPEARANCE;
10611 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
10612 flags |= MGMT_ADV_PARAM_DURATION;
10613 flags |= MGMT_ADV_PARAM_TIMEOUT;
10614 flags |= MGMT_ADV_PARAM_INTERVALS;
10615 flags |= MGMT_ADV_PARAM_TX_POWER;
10616 flags |= MGMT_ADV_PARAM_SCAN_RSP;
10618 /* In extended adv TX_POWER returned from Set Adv Param
10619 * will be always valid.
10621 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
10622 flags |= MGMT_ADV_FLAG_TX_POWER;
10624 if (ext_adv_capable(hdev)) {
10625 flags |= MGMT_ADV_FLAG_SEC_1M;
10626 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
10627 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
10629 if (hdev->le_features[1] & HCI_LE_PHY_2M)
10630 flags |= MGMT_ADV_FLAG_SEC_2M;
10632 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
10633 flags |= MGMT_ADV_FLAG_SEC_CODED;
10639 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
10640 void *data, u16 data_len)
10642 struct mgmt_rp_read_adv_features *rp;
10645 struct adv_info *adv_instance;
10646 u32 supported_flags;
10649 bt_dev_dbg(hdev, "sock %p", sk);
10651 if (!lmp_le_capable(hdev))
10652 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10653 MGMT_STATUS_REJECTED);
10655 hci_dev_lock(hdev);
10657 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
10658 rp = kmalloc(rp_len, GFP_ATOMIC);
10660 hci_dev_unlock(hdev);
10664 supported_flags = get_supported_adv_flags(hdev);
10666 rp->supported_flags = cpu_to_le32(supported_flags);
10667 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
10668 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
10669 rp->max_instances = hdev->le_num_of_adv_sets;
10670 rp->num_instances = hdev->adv_instance_cnt;
10672 instance = rp->instance;
10673 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
10674 /* Only instances 1-le_num_of_adv_sets are externally visible */
10675 if (adv_instance->instance <= hdev->adv_instance_cnt) {
10676 *instance = adv_instance->instance;
10679 rp->num_instances--;
10684 hci_dev_unlock(hdev);
10686 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10687 MGMT_STATUS_SUCCESS, rp, rp_len);
10694 static u8 calculate_name_len(struct hci_dev *hdev)
10696 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
10698 return eir_append_local_name(hdev, buf, 0);
10701 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
10704 u8 max_len = HCI_MAX_AD_LENGTH;
10707 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
10708 MGMT_ADV_FLAG_LIMITED_DISCOV |
10709 MGMT_ADV_FLAG_MANAGED_FLAGS))
10712 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
10715 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
10716 max_len -= calculate_name_len(hdev);
10718 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
10725 static bool flags_managed(u32 adv_flags)
10727 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
10728 MGMT_ADV_FLAG_LIMITED_DISCOV |
10729 MGMT_ADV_FLAG_MANAGED_FLAGS);
10732 static bool tx_power_managed(u32 adv_flags)
10734 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
10737 static bool name_managed(u32 adv_flags)
10739 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
10742 static bool appearance_managed(u32 adv_flags)
10744 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
10747 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
10748 u8 len, bool is_adv_data)
10753 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
10758 /* Make sure that the data is correctly formatted. */
10759 for (i = 0; i < len; i += (cur_len + 1)) {
10765 if (data[i + 1] == EIR_FLAGS &&
10766 (!is_adv_data || flags_managed(adv_flags)))
10769 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
10772 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
10775 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
10778 if (data[i + 1] == EIR_APPEARANCE &&
10779 appearance_managed(adv_flags))
10782 /* If the current field length would exceed the total data
10783 * length, then it's invalid.
10785 if (i + cur_len >= len)
10792 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
10794 u32 supported_flags, phy_flags;
10796 /* The current implementation only supports a subset of the specified
10797 * flags. Also need to check mutual exclusiveness of sec flags.
10799 supported_flags = get_supported_adv_flags(hdev);
10800 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
10801 if (adv_flags & ~supported_flags ||
10802 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
10808 static bool adv_busy(struct hci_dev *hdev)
10810 return pending_find(MGMT_OP_SET_LE, hdev);
10813 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
10816 struct adv_info *adv, *n;
10818 bt_dev_dbg(hdev, "err %d", err);
10820 hci_dev_lock(hdev);
10822 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
10829 adv->pending = false;
10833 instance = adv->instance;
10835 if (hdev->cur_adv_instance == instance)
10836 cancel_adv_timeout(hdev);
10838 hci_remove_adv_instance(hdev, instance);
10839 mgmt_advertising_removed(sk, hdev, instance);
10842 hci_dev_unlock(hdev);
10845 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
10847 struct mgmt_pending_cmd *cmd = data;
10848 struct mgmt_cp_add_advertising *cp = cmd->param;
10849 struct mgmt_rp_add_advertising rp;
10851 memset(&rp, 0, sizeof(rp));
10853 rp.instance = cp->instance;
10856 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
10859 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
10860 mgmt_status(err), &rp, sizeof(rp));
10862 add_adv_complete(hdev, cmd->sk, cp->instance, err);
10864 mgmt_pending_free(cmd);
10867 static int add_advertising_sync(struct hci_dev *hdev, void *data)
10869 struct mgmt_pending_cmd *cmd = data;
10870 struct mgmt_cp_add_advertising *cp = cmd->param;
10872 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
10875 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10876 void *data, u16 data_len)
10878 struct mgmt_cp_add_advertising *cp = data;
10879 struct mgmt_rp_add_advertising rp;
10882 u16 timeout, duration;
10883 unsigned int prev_instance_cnt;
10884 u8 schedule_instance = 0;
10885 struct adv_info *adv, *next_instance;
10887 struct mgmt_pending_cmd *cmd;
10889 bt_dev_dbg(hdev, "sock %p", sk);
10891 status = mgmt_le_support(hdev);
10893 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10896 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10897 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10898 MGMT_STATUS_INVALID_PARAMS);
10900 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
10901 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10902 MGMT_STATUS_INVALID_PARAMS);
10904 flags = __le32_to_cpu(cp->flags);
10905 timeout = __le16_to_cpu(cp->timeout);
10906 duration = __le16_to_cpu(cp->duration);
10908 if (!requested_adv_flags_are_valid(hdev, flags))
10909 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10910 MGMT_STATUS_INVALID_PARAMS);
10912 hci_dev_lock(hdev);
10914 if (timeout && !hdev_is_powered(hdev)) {
10915 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10916 MGMT_STATUS_REJECTED);
10920 if (adv_busy(hdev)) {
10921 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10926 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
10927 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
10928 cp->scan_rsp_len, false)) {
10929 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10930 MGMT_STATUS_INVALID_PARAMS);
10934 prev_instance_cnt = hdev->adv_instance_cnt;
10936 adv = hci_add_adv_instance(hdev, cp->instance, flags,
10937 cp->adv_data_len, cp->data,
10939 cp->data + cp->adv_data_len,
10941 HCI_ADV_TX_POWER_NO_PREFERENCE,
10942 hdev->le_adv_min_interval,
10943 hdev->le_adv_max_interval, 0);
10945 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10946 MGMT_STATUS_FAILED);
10950 /* Only trigger an advertising added event if a new instance was
10953 if (hdev->adv_instance_cnt > prev_instance_cnt)
10954 mgmt_advertising_added(sk, hdev, cp->instance);
10956 if (hdev->cur_adv_instance == cp->instance) {
10957 /* If the currently advertised instance is being changed then
10958 * cancel the current advertising and schedule the next
10959 * instance. If there is only one instance then the overridden
10960 * advertising data will be visible right away.
10962 cancel_adv_timeout(hdev);
10964 next_instance = hci_get_next_instance(hdev, cp->instance);
10966 schedule_instance = next_instance->instance;
10967 } else if (!hdev->adv_instance_timeout) {
10968 /* Immediately advertise the new instance if no other
10969 * instance is currently being advertised.
10971 schedule_instance = cp->instance;
10974 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
10975 * there is no instance to be advertised then we have no HCI
10976 * communication to make. Simply return.
10978 if (!hdev_is_powered(hdev) ||
10979 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
10980 !schedule_instance) {
10981 rp.instance = cp->instance;
10982 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10983 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
10987 /* We're good to go, update advertising data, parameters, and start
10990 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
10997 cp->instance = schedule_instance;
10999 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
11000 add_advertising_complete);
11002 mgmt_pending_free(cmd);
11005 hci_dev_unlock(hdev);
11010 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
11013 struct mgmt_pending_cmd *cmd = data;
11014 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
11015 struct mgmt_rp_add_ext_adv_params rp;
11016 struct adv_info *adv;
11019 BT_DBG("%s", hdev->name);
11021 hci_dev_lock(hdev);
11023 adv = hci_find_adv_instance(hdev, cp->instance);
11027 rp.instance = cp->instance;
11028 rp.tx_power = adv->tx_power;
11030 /* While we're at it, inform userspace of the available space for this
11031 * advertisement, given the flags that will be used.
11033 flags = __le32_to_cpu(cp->flags);
11034 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11035 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11038 /* If this advertisement was previously advertising and we
11039 * failed to update it, we signal that it has been removed and
11040 * delete its structure
11043 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
11045 hci_remove_adv_instance(hdev, cp->instance);
11047 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11050 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11051 mgmt_status(err), &rp, sizeof(rp));
11056 mgmt_pending_free(cmd);
11058 hci_dev_unlock(hdev);
11061 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
11063 struct mgmt_pending_cmd *cmd = data;
11064 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
11066 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
11069 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
11070 void *data, u16 data_len)
11072 struct mgmt_cp_add_ext_adv_params *cp = data;
11073 struct mgmt_rp_add_ext_adv_params rp;
11074 struct mgmt_pending_cmd *cmd = NULL;
11075 struct adv_info *adv;
11076 u32 flags, min_interval, max_interval;
11077 u16 timeout, duration;
11082 BT_DBG("%s", hdev->name);
11084 status = mgmt_le_support(hdev);
11086 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11089 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11090 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11091 MGMT_STATUS_INVALID_PARAMS);
11093 /* The purpose of breaking add_advertising into two separate MGMT calls
11094 * for params and data is to allow more parameters to be added to this
11095 * structure in the future. For this reason, we verify that we have the
11096 * bare minimum structure we know of when the interface was defined. Any
11097 * extra parameters we don't know about will be ignored in this request.
11099 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
11100 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11101 MGMT_STATUS_INVALID_PARAMS);
11103 flags = __le32_to_cpu(cp->flags);
11105 if (!requested_adv_flags_are_valid(hdev, flags))
11106 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11107 MGMT_STATUS_INVALID_PARAMS);
11109 hci_dev_lock(hdev);
11111 /* In new interface, we require that we are powered to register */
11112 if (!hdev_is_powered(hdev)) {
11113 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11114 MGMT_STATUS_REJECTED);
11118 if (adv_busy(hdev)) {
11119 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11124 /* Parse defined parameters from request, use defaults otherwise */
11125 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
11126 __le16_to_cpu(cp->timeout) : 0;
11128 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
11129 __le16_to_cpu(cp->duration) :
11130 hdev->def_multi_adv_rotation_duration;
11132 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
11133 __le32_to_cpu(cp->min_interval) :
11134 hdev->le_adv_min_interval;
11136 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
11137 __le32_to_cpu(cp->max_interval) :
11138 hdev->le_adv_max_interval;
11140 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
11142 HCI_ADV_TX_POWER_NO_PREFERENCE;
11144 /* Create advertising instance with no advertising or response data */
11145 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
11146 timeout, duration, tx_power, min_interval,
11150 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11151 MGMT_STATUS_FAILED);
11155 /* Submit request for advertising params if ext adv available */
11156 if (ext_adv_capable(hdev)) {
11157 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
11161 hci_remove_adv_instance(hdev, cp->instance);
11165 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
11166 add_ext_adv_params_complete);
11168 mgmt_pending_free(cmd);
11170 rp.instance = cp->instance;
11171 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
11172 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11173 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11174 err = mgmt_cmd_complete(sk, hdev->id,
11175 MGMT_OP_ADD_EXT_ADV_PARAMS,
11176 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11180 hci_dev_unlock(hdev);
11185 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
11187 struct mgmt_pending_cmd *cmd = data;
11188 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
11189 struct mgmt_rp_add_advertising rp;
11191 add_adv_complete(hdev, cmd->sk, cp->instance, err);
11193 memset(&rp, 0, sizeof(rp));
11195 rp.instance = cp->instance;
11198 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11201 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11202 mgmt_status(err), &rp, sizeof(rp));
11204 mgmt_pending_free(cmd);
11207 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
11209 struct mgmt_pending_cmd *cmd = data;
11210 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
11213 if (ext_adv_capable(hdev)) {
11214 err = hci_update_adv_data_sync(hdev, cp->instance);
11218 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
11222 return hci_enable_ext_advertising_sync(hdev, cp->instance);
11225 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
11228 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
11231 struct mgmt_cp_add_ext_adv_data *cp = data;
11232 struct mgmt_rp_add_ext_adv_data rp;
11233 u8 schedule_instance = 0;
11234 struct adv_info *next_instance;
11235 struct adv_info *adv_instance;
11237 struct mgmt_pending_cmd *cmd;
11239 BT_DBG("%s", hdev->name);
11241 hci_dev_lock(hdev);
11243 adv_instance = hci_find_adv_instance(hdev, cp->instance);
11245 if (!adv_instance) {
11246 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11247 MGMT_STATUS_INVALID_PARAMS);
11251 /* In new interface, we require that we are powered to register */
11252 if (!hdev_is_powered(hdev)) {
11253 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11254 MGMT_STATUS_REJECTED);
11255 goto clear_new_instance;
11258 if (adv_busy(hdev)) {
11259 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11261 goto clear_new_instance;
11264 /* Validate new data */
11265 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
11266 cp->adv_data_len, true) ||
11267 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
11268 cp->adv_data_len, cp->scan_rsp_len, false)) {
11269 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11270 MGMT_STATUS_INVALID_PARAMS);
11271 goto clear_new_instance;
11274 /* Set the data in the advertising instance */
11275 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
11276 cp->data, cp->scan_rsp_len,
11277 cp->data + cp->adv_data_len);
11279 /* If using software rotation, determine next instance to use */
11280 if (hdev->cur_adv_instance == cp->instance) {
11281 /* If the currently advertised instance is being changed
11282 * then cancel the current advertising and schedule the
11283 * next instance. If there is only one instance then the
11284 * overridden advertising data will be visible right
11287 cancel_adv_timeout(hdev);
11289 next_instance = hci_get_next_instance(hdev, cp->instance);
11291 schedule_instance = next_instance->instance;
11292 } else if (!hdev->adv_instance_timeout) {
11293 /* Immediately advertise the new instance if no other
11294 * instance is currently being advertised.
11296 schedule_instance = cp->instance;
11299 /* If the HCI_ADVERTISING flag is set or there is no instance to
11300 * be advertised then we have no HCI communication to make.
11303 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
11304 if (adv_instance->pending) {
11305 mgmt_advertising_added(sk, hdev, cp->instance);
11306 adv_instance->pending = false;
11308 rp.instance = cp->instance;
11309 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11310 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11314 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
11318 goto clear_new_instance;
11321 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
11322 add_ext_adv_data_complete);
11324 mgmt_pending_free(cmd);
11325 goto clear_new_instance;
11328 /* We were successful in updating data, so trigger advertising_added
11329 * event if this is an instance that wasn't previously advertising. If
11330 * a failure occurs in the requests we initiated, we will remove the
11331 * instance again in add_advertising_complete
11333 if (adv_instance->pending)
11334 mgmt_advertising_added(sk, hdev, cp->instance);
11338 clear_new_instance:
11339 hci_remove_adv_instance(hdev, cp->instance);
11342 hci_dev_unlock(hdev);
11347 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
11350 struct mgmt_pending_cmd *cmd = data;
11351 struct mgmt_cp_remove_advertising *cp = cmd->param;
11352 struct mgmt_rp_remove_advertising rp;
11354 bt_dev_dbg(hdev, "err %d", err);
11356 memset(&rp, 0, sizeof(rp));
11357 rp.instance = cp->instance;
11360 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11363 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11364 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11366 mgmt_pending_free(cmd);
11369 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
11371 struct mgmt_pending_cmd *cmd = data;
11372 struct mgmt_cp_remove_advertising *cp = cmd->param;
11375 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
11379 if (list_empty(&hdev->adv_instances))
11380 err = hci_disable_advertising_sync(hdev);
11385 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
11386 void *data, u16 data_len)
11388 struct mgmt_cp_remove_advertising *cp = data;
11389 struct mgmt_pending_cmd *cmd;
11392 bt_dev_dbg(hdev, "sock %p", sk);
11394 hci_dev_lock(hdev);
11396 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
11397 err = mgmt_cmd_status(sk, hdev->id,
11398 MGMT_OP_REMOVE_ADVERTISING,
11399 MGMT_STATUS_INVALID_PARAMS);
11403 if (pending_find(MGMT_OP_SET_LE, hdev)) {
11404 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11409 if (list_empty(&hdev->adv_instances)) {
11410 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11411 MGMT_STATUS_INVALID_PARAMS);
11415 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
11422 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
11423 remove_advertising_complete);
11425 mgmt_pending_free(cmd);
11428 hci_dev_unlock(hdev);
11433 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
11434 void *data, u16 data_len)
11436 struct mgmt_cp_get_adv_size_info *cp = data;
11437 struct mgmt_rp_get_adv_size_info rp;
11438 u32 flags, supported_flags;
11440 bt_dev_dbg(hdev, "sock %p", sk);
11442 if (!lmp_le_capable(hdev))
11443 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11444 MGMT_STATUS_REJECTED);
11446 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11447 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11448 MGMT_STATUS_INVALID_PARAMS);
11450 flags = __le32_to_cpu(cp->flags);
11452 /* The current implementation only supports a subset of the specified
11455 supported_flags = get_supported_adv_flags(hdev);
11456 if (flags & ~supported_flags)
11457 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11458 MGMT_STATUS_INVALID_PARAMS);
11460 rp.instance = cp->instance;
11461 rp.flags = cp->flags;
11462 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11463 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11466 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11469 static const struct hci_mgmt_handler mgmt_handlers[] = {
11470 { NULL }, /* 0x0000 (no command) */
11471 { read_version, MGMT_READ_VERSION_SIZE,
11473 HCI_MGMT_UNTRUSTED },
11474 { read_commands, MGMT_READ_COMMANDS_SIZE,
11476 HCI_MGMT_UNTRUSTED },
11477 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
11479 HCI_MGMT_UNTRUSTED },
11480 { read_controller_info, MGMT_READ_INFO_SIZE,
11481 HCI_MGMT_UNTRUSTED },
11482 { set_powered, MGMT_SETTING_SIZE },
11483 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
11484 { set_connectable, MGMT_SETTING_SIZE },
11485 { set_fast_connectable, MGMT_SETTING_SIZE },
11486 { set_bondable, MGMT_SETTING_SIZE },
11487 { set_link_security, MGMT_SETTING_SIZE },
11488 { set_ssp, MGMT_SETTING_SIZE },
11489 { set_hs, MGMT_SETTING_SIZE },
11490 { set_le, MGMT_SETTING_SIZE },
11491 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
11492 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
11493 { add_uuid, MGMT_ADD_UUID_SIZE },
11494 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
11495 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
11496 HCI_MGMT_VAR_LEN },
11497 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
11498 HCI_MGMT_VAR_LEN },
11499 { disconnect, MGMT_DISCONNECT_SIZE },
11500 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
11501 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
11502 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
11503 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
11504 { pair_device, MGMT_PAIR_DEVICE_SIZE },
11505 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
11506 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
11507 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
11508 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
11509 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
11510 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
11511 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
11512 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
11513 HCI_MGMT_VAR_LEN },
11514 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
11515 { start_discovery, MGMT_START_DISCOVERY_SIZE },
11516 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
11517 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
11518 { block_device, MGMT_BLOCK_DEVICE_SIZE },
11519 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
11520 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
11521 { set_advertising, MGMT_SETTING_SIZE },
11522 { set_bredr, MGMT_SETTING_SIZE },
11523 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
11524 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
11525 { set_secure_conn, MGMT_SETTING_SIZE },
11526 { set_debug_keys, MGMT_SETTING_SIZE },
11527 { set_privacy, MGMT_SET_PRIVACY_SIZE },
11528 { load_irks, MGMT_LOAD_IRKS_SIZE,
11529 HCI_MGMT_VAR_LEN },
11530 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
11531 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
11532 { add_device, MGMT_ADD_DEVICE_SIZE },
11533 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
11534 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
11535 HCI_MGMT_VAR_LEN },
11536 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
11538 HCI_MGMT_UNTRUSTED },
11539 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
11540 HCI_MGMT_UNCONFIGURED |
11541 HCI_MGMT_UNTRUSTED },
11542 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
11543 HCI_MGMT_UNCONFIGURED },
11544 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
11545 HCI_MGMT_UNCONFIGURED },
11546 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
11547 HCI_MGMT_VAR_LEN },
11548 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
11549 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
11551 HCI_MGMT_UNTRUSTED },
11552 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
11553 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
11554 HCI_MGMT_VAR_LEN },
11555 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
11556 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
11557 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
11558 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
11559 HCI_MGMT_UNTRUSTED },
11560 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
11561 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
11562 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
11563 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
11564 HCI_MGMT_VAR_LEN },
11565 { set_wideband_speech, MGMT_SETTING_SIZE },
11566 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
11567 HCI_MGMT_UNTRUSTED },
11568 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
11569 HCI_MGMT_UNTRUSTED |
11570 HCI_MGMT_HDEV_OPTIONAL },
11571 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
11573 HCI_MGMT_HDEV_OPTIONAL },
11574 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
11575 HCI_MGMT_UNTRUSTED },
11576 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
11577 HCI_MGMT_VAR_LEN },
11578 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
11579 HCI_MGMT_UNTRUSTED },
11580 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
11581 HCI_MGMT_VAR_LEN },
11582 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
11583 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
11584 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
11585 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
11586 HCI_MGMT_VAR_LEN },
11587 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
11588 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
11589 HCI_MGMT_VAR_LEN },
11590 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
11591 HCI_MGMT_VAR_LEN },
11592 { add_adv_patterns_monitor_rssi,
11593 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
11594 HCI_MGMT_VAR_LEN },
11595 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
11596 HCI_MGMT_VAR_LEN },
11597 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
11598 { mesh_send, MGMT_MESH_SEND_SIZE,
11599 HCI_MGMT_VAR_LEN },
11600 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
11604 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
11605 { NULL }, /* 0x0000 (no command) */
11606 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
11607 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
11608 HCI_MGMT_VAR_LEN },
11609 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
11610 HCI_MGMT_VAR_LEN },
11611 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
11612 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
11613 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
11614 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
11615 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
11616 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
11617 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
11618 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
11619 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
11620 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
11621 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
11622 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
11623 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
11624 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
11625 { enable_bt_6lowpan, MGMT_ENABLE_BT_6LOWPAN_SIZE },
11626 { connect_bt_6lowpan, MGMT_CONNECT_6LOWPAN_SIZE },
11627 { disconnect_bt_6lowpan, MGMT_DISCONNECT_6LOWPAN_SIZE },
11628 { read_maximum_le_data_length,
11629 MGMT_LE_READ_MAXIMUM_DATA_LENGTH_SIZE },
11630 { write_host_suggested_le_data_length,
11631 MGMT_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH_SIZE },
11635 void mgmt_index_added(struct hci_dev *hdev)
11637 struct mgmt_ev_ext_index ev;
11639 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11642 switch (hdev->dev_type) {
11644 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11645 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
11646 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11649 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
11650 HCI_MGMT_INDEX_EVENTS);
11661 ev.bus = hdev->bus;
11663 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
11664 HCI_MGMT_EXT_INDEX_EVENTS);
11667 void mgmt_index_removed(struct hci_dev *hdev)
11669 struct mgmt_ev_ext_index ev;
11670 u8 status = MGMT_STATUS_INVALID_INDEX;
11672 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11675 switch (hdev->dev_type) {
11677 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11679 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11680 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
11681 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11684 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
11685 HCI_MGMT_INDEX_EVENTS);
11696 ev.bus = hdev->bus;
11698 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
11699 HCI_MGMT_EXT_INDEX_EVENTS);
11701 /* Cancel any remaining timed work */
11702 if (!hci_dev_test_flag(hdev, HCI_MGMT))
11704 cancel_delayed_work_sync(&hdev->discov_off);
11705 cancel_delayed_work_sync(&hdev->service_cache);
11706 cancel_delayed_work_sync(&hdev->rpa_expired);
11709 void mgmt_power_on(struct hci_dev *hdev, int err)
11711 struct cmd_lookup match = { NULL, hdev };
11713 bt_dev_dbg(hdev, "err %d", err);
11715 hci_dev_lock(hdev);
11718 restart_le_actions(hdev);
11719 hci_update_passive_scan(hdev);
11722 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11724 new_settings(hdev, match.sk);
11727 sock_put(match.sk);
11729 hci_dev_unlock(hdev);
11732 void __mgmt_power_off(struct hci_dev *hdev)
11734 struct cmd_lookup match = { NULL, hdev };
11735 u8 status, zero_cod[] = { 0, 0, 0 };
11737 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11739 /* If the power off is because of hdev unregistration let
11740 * use the appropriate INVALID_INDEX status. Otherwise use
11741 * NOT_POWERED. We cover both scenarios here since later in
11742 * mgmt_index_removed() any hci_conn callbacks will have already
11743 * been triggered, potentially causing misleading DISCONNECTED
11744 * status responses.
11746 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
11747 status = MGMT_STATUS_INVALID_INDEX;
11749 status = MGMT_STATUS_NOT_POWERED;
11751 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11753 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
11754 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
11755 zero_cod, sizeof(zero_cod),
11756 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11757 ext_info_changed(hdev, NULL);
11760 new_settings(hdev, match.sk);
11763 sock_put(match.sk);
11766 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
11768 struct mgmt_pending_cmd *cmd;
11771 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11775 if (err == -ERFKILL)
11776 status = MGMT_STATUS_RFKILLED;
11778 status = MGMT_STATUS_FAILED;
11780 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11782 mgmt_pending_remove(cmd);
11785 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11788 struct mgmt_ev_new_link_key ev;
11790 memset(&ev, 0, sizeof(ev));
11792 ev.store_hint = persistent;
11793 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11794 ev.key.addr.type = BDADDR_BREDR;
11795 ev.key.type = key->type;
11796 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11797 ev.key.pin_len = key->pin_len;
11799 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
11802 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11804 switch (ltk->type) {
11806 case SMP_LTK_RESPONDER:
11807 if (ltk->authenticated)
11808 return MGMT_LTK_AUTHENTICATED;
11809 return MGMT_LTK_UNAUTHENTICATED;
11811 if (ltk->authenticated)
11812 return MGMT_LTK_P256_AUTH;
11813 return MGMT_LTK_P256_UNAUTH;
11814 case SMP_LTK_P256_DEBUG:
11815 return MGMT_LTK_P256_DEBUG;
11818 return MGMT_LTK_UNAUTHENTICATED;
11821 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11823 struct mgmt_ev_new_long_term_key ev;
11825 memset(&ev, 0, sizeof(ev));
11827 /* Devices using resolvable or non-resolvable random addresses
11828 * without providing an identity resolving key don't require
11829 * to store long term keys. Their addresses will change the
11830 * next time around.
11832 * Only when a remote device provides an identity address
11833 * make sure the long term key is stored. If the remote
11834 * identity is known, the long term keys are internally
11835 * mapped to the identity address. So allow static random
11836 * and public addresses here.
11838 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11839 (key->bdaddr.b[5] & 0xc0) != 0xc0)
11840 ev.store_hint = 0x00;
11842 ev.store_hint = persistent;
11844 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11845 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
11846 ev.key.type = mgmt_ltk_type(key);
11847 ev.key.enc_size = key->enc_size;
11848 ev.key.ediv = key->ediv;
11849 ev.key.rand = key->rand;
11851 if (key->type == SMP_LTK)
11852 ev.key.initiator = 1;
11854 /* Make sure we copy only the significant bytes based on the
11855 * encryption key size, and set the rest of the value to zeroes.
11857 memcpy(ev.key.val, key->val, key->enc_size);
11858 memset(ev.key.val + key->enc_size, 0,
11859 sizeof(ev.key.val) - key->enc_size);
11861 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
11864 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
11866 struct mgmt_ev_new_irk ev;
11868 memset(&ev, 0, sizeof(ev));
11870 ev.store_hint = persistent;
11872 bacpy(&ev.rpa, &irk->rpa);
11873 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
11874 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
11875 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
11877 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
11880 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
11883 struct mgmt_ev_new_csrk ev;
11885 memset(&ev, 0, sizeof(ev));
11887 /* Devices using resolvable or non-resolvable random addresses
11888 * without providing an identity resolving key don't require
11889 * to store signature resolving keys. Their addresses will change
11890 * the next time around.
11892 * Only when a remote device provides an identity address
11893 * make sure the signature resolving key is stored. So allow
11894 * static random and public addresses here.
11896 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
11897 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
11898 ev.store_hint = 0x00;
11900 ev.store_hint = persistent;
11902 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
11903 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
11904 ev.key.type = csrk->type;
11905 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
11907 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
11910 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
11911 u8 bdaddr_type, u8 store_hint, u16 min_interval,
11912 u16 max_interval, u16 latency, u16 timeout)
11914 struct mgmt_ev_new_conn_param ev;
11916 if (!hci_is_identity_address(bdaddr, bdaddr_type))
11919 memset(&ev, 0, sizeof(ev));
11920 bacpy(&ev.addr.bdaddr, bdaddr);
11921 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
11922 ev.store_hint = store_hint;
11923 ev.min_interval = cpu_to_le16(min_interval);
11924 ev.max_interval = cpu_to_le16(max_interval);
11925 ev.latency = cpu_to_le16(latency);
11926 ev.timeout = cpu_to_le16(timeout);
11928 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
11931 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
11932 u8 *name, u8 name_len)
11934 struct sk_buff *skb;
11935 struct mgmt_ev_device_connected *ev;
11939 /* allocate buff for LE or BR/EDR adv */
11940 if (conn->le_adv_data_len > 0)
11941 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11942 sizeof(*ev) + conn->le_adv_data_len);
11944 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
11945 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
11946 eir_precalc_len(sizeof(conn->dev_class)));
11948 ev = skb_put(skb, sizeof(*ev));
11949 bacpy(&ev->addr.bdaddr, &conn->dst);
11950 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
11953 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
11955 ev->flags = __cpu_to_le32(flags);
11957 /* We must ensure that the EIR Data fields are ordered and
11958 * unique. Keep it simple for now and avoid the problem by not
11959 * adding any BR/EDR data to the LE adv.
11961 if (conn->le_adv_data_len > 0) {
11962 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
11963 eir_len = conn->le_adv_data_len;
11966 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
11968 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
11969 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
11970 conn->dev_class, sizeof(conn->dev_class));
11973 ev->eir_len = cpu_to_le16(eir_len);
11975 mgmt_event_skb(skb, NULL);
11978 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
11980 struct sock **sk = data;
11982 cmd->cmd_complete(cmd, 0);
11987 mgmt_pending_remove(cmd);
11990 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
11992 struct hci_dev *hdev = data;
11993 struct mgmt_cp_unpair_device *cp = cmd->param;
11995 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
11997 cmd->cmd_complete(cmd, 0);
11998 mgmt_pending_remove(cmd);
12001 bool mgmt_powering_down(struct hci_dev *hdev)
12003 struct mgmt_pending_cmd *cmd;
12004 struct mgmt_mode *cp;
12006 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
12017 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
12018 u8 link_type, u8 addr_type, u8 reason,
12019 bool mgmt_connected)
12021 struct mgmt_ev_device_disconnected ev;
12022 struct sock *sk = NULL;
12024 /* The connection is still in hci_conn_hash so test for 1
12025 * instead of 0 to know if this is the last one.
12027 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
12028 cancel_delayed_work(&hdev->power_off);
12029 queue_work(hdev->req_workqueue, &hdev->power_off.work);
12032 if (!mgmt_connected)
12035 if (link_type != ACL_LINK && link_type != LE_LINK)
12038 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
12040 bacpy(&ev.addr.bdaddr, bdaddr);
12041 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12042 ev.reason = reason;
12044 /* Report disconnects due to suspend */
12045 if (hdev->suspended)
12046 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
12048 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
12053 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
12057 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
12058 u8 link_type, u8 addr_type, u8 status)
12060 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
12061 struct mgmt_cp_disconnect *cp;
12062 struct mgmt_pending_cmd *cmd;
12064 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
12067 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
12073 if (bacmp(bdaddr, &cp->addr.bdaddr))
12076 if (cp->addr.type != bdaddr_type)
12079 cmd->cmd_complete(cmd, mgmt_status(status));
12080 mgmt_pending_remove(cmd);
12083 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12084 u8 addr_type, u8 status)
12086 struct mgmt_ev_connect_failed ev;
12088 /* The connection is still in hci_conn_hash so test for 1
12089 * instead of 0 to know if this is the last one.
12091 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
12092 cancel_delayed_work(&hdev->power_off);
12093 queue_work(hdev->req_workqueue, &hdev->power_off.work);
12096 bacpy(&ev.addr.bdaddr, bdaddr);
12097 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12098 ev.status = mgmt_status(status);
12100 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
12103 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
12105 struct mgmt_ev_pin_code_request ev;
12107 bacpy(&ev.addr.bdaddr, bdaddr);
12108 ev.addr.type = BDADDR_BREDR;
12109 ev.secure = secure;
12111 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
12114 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12117 struct mgmt_pending_cmd *cmd;
12119 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
12123 cmd->cmd_complete(cmd, mgmt_status(status));
12124 mgmt_pending_remove(cmd);
12127 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12130 struct mgmt_pending_cmd *cmd;
12132 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
12136 cmd->cmd_complete(cmd, mgmt_status(status));
12137 mgmt_pending_remove(cmd);
12140 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
12141 u8 link_type, u8 addr_type, u32 value,
12144 struct mgmt_ev_user_confirm_request ev;
12146 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12148 bacpy(&ev.addr.bdaddr, bdaddr);
12149 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12150 ev.confirm_hint = confirm_hint;
12151 ev.value = cpu_to_le32(value);
12153 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
12157 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
12158 u8 link_type, u8 addr_type)
12160 struct mgmt_ev_user_passkey_request ev;
12162 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12164 bacpy(&ev.addr.bdaddr, bdaddr);
12165 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12167 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
12171 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12172 u8 link_type, u8 addr_type, u8 status,
12175 struct mgmt_pending_cmd *cmd;
12177 cmd = pending_find(opcode, hdev);
12181 cmd->cmd_complete(cmd, mgmt_status(status));
12182 mgmt_pending_remove(cmd);
12187 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12188 u8 link_type, u8 addr_type, u8 status)
12190 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12191 status, MGMT_OP_USER_CONFIRM_REPLY);
12194 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12195 u8 link_type, u8 addr_type, u8 status)
12197 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12199 MGMT_OP_USER_CONFIRM_NEG_REPLY);
12202 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12203 u8 link_type, u8 addr_type, u8 status)
12205 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12206 status, MGMT_OP_USER_PASSKEY_REPLY);
12209 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12210 u8 link_type, u8 addr_type, u8 status)
12212 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12214 MGMT_OP_USER_PASSKEY_NEG_REPLY);
12217 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
12218 u8 link_type, u8 addr_type, u32 passkey,
12221 struct mgmt_ev_passkey_notify ev;
12223 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12225 bacpy(&ev.addr.bdaddr, bdaddr);
12226 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12227 ev.passkey = __cpu_to_le32(passkey);
12228 ev.entered = entered;
12230 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
12233 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
12235 struct mgmt_ev_auth_failed ev;
12236 struct mgmt_pending_cmd *cmd;
12237 u8 status = mgmt_status(hci_status);
12239 bacpy(&ev.addr.bdaddr, &conn->dst);
12240 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
12241 ev.status = status;
12243 cmd = find_pairing(conn);
12245 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
12246 cmd ? cmd->sk : NULL);
12249 cmd->cmd_complete(cmd, status);
12250 mgmt_pending_remove(cmd);
12254 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
12256 struct cmd_lookup match = { NULL, hdev };
12260 u8 mgmt_err = mgmt_status(status);
12261 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
12262 cmd_status_rsp, &mgmt_err);
12266 if (test_bit(HCI_AUTH, &hdev->flags))
12267 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
12269 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
12271 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
12275 new_settings(hdev, match.sk);
12278 sock_put(match.sk);
12281 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
12283 struct cmd_lookup *match = data;
12285 if (match->sk == NULL) {
12286 match->sk = cmd->sk;
12287 sock_hold(match->sk);
12291 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
12294 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
12296 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
12297 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
12298 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
12301 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
12302 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
12303 ext_info_changed(hdev, NULL);
12307 sock_put(match.sk);
12310 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
12312 struct mgmt_cp_set_local_name ev;
12313 struct mgmt_pending_cmd *cmd;
12318 memset(&ev, 0, sizeof(ev));
12319 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
12320 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
12322 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
12324 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
12326 /* If this is a HCI command related to powering on the
12327 * HCI dev don't send any mgmt signals.
12329 if (pending_find(MGMT_OP_SET_POWERED, hdev))
12333 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
12334 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
12335 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
12338 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
12342 for (i = 0; i < uuid_count; i++) {
12343 if (!memcmp(uuid, uuids[i], 16))
12350 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
12354 while (parsed < eir_len) {
12355 u8 field_len = eir[0];
12359 if (field_len == 0)
12362 if (eir_len - parsed < field_len + 1)
12366 case EIR_UUID16_ALL:
12367 case EIR_UUID16_SOME:
12368 for (i = 0; i + 3 <= field_len; i += 2) {
12369 memcpy(uuid, bluetooth_base_uuid, 16);
12370 uuid[13] = eir[i + 3];
12371 uuid[12] = eir[i + 2];
12372 if (has_uuid(uuid, uuid_count, uuids))
12376 case EIR_UUID32_ALL:
12377 case EIR_UUID32_SOME:
12378 for (i = 0; i + 5 <= field_len; i += 4) {
12379 memcpy(uuid, bluetooth_base_uuid, 16);
12380 uuid[15] = eir[i + 5];
12381 uuid[14] = eir[i + 4];
12382 uuid[13] = eir[i + 3];
12383 uuid[12] = eir[i + 2];
12384 if (has_uuid(uuid, uuid_count, uuids))
12388 case EIR_UUID128_ALL:
12389 case EIR_UUID128_SOME:
12390 for (i = 0; i + 17 <= field_len; i += 16) {
12391 memcpy(uuid, eir + i + 2, 16);
12392 if (has_uuid(uuid, uuid_count, uuids))
12398 parsed += field_len + 1;
12399 eir += field_len + 1;
12405 static void restart_le_scan(struct hci_dev *hdev)
12407 /* If controller is not scanning we are done. */
12408 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
12411 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
12412 hdev->discovery.scan_start +
12413 hdev->discovery.scan_duration))
12416 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
12417 DISCOV_LE_RESTART_DELAY);
12420 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
12421 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
12423 /* If a RSSI threshold has been specified, and
12424 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
12425 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
12426 * is set, let it through for further processing, as we might need to
12427 * restart the scan.
12429 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
12430 * the results are also dropped.
12432 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12433 (rssi == HCI_RSSI_INVALID ||
12434 (rssi < hdev->discovery.rssi &&
12435 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
12438 if (hdev->discovery.uuid_count != 0) {
12439 /* If a list of UUIDs is provided in filter, results with no
12440 * matching UUID should be dropped.
12442 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
12443 hdev->discovery.uuids) &&
12444 !eir_has_uuids(scan_rsp, scan_rsp_len,
12445 hdev->discovery.uuid_count,
12446 hdev->discovery.uuids))
12450 /* If duplicate filtering does not report RSSI changes, then restart
12451 * scanning to ensure updated result with updated RSSI values.
12453 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
12454 restart_le_scan(hdev);
12456 /* Validate RSSI value against the RSSI threshold once more. */
12457 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12458 rssi < hdev->discovery.rssi)
12465 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
12466 bdaddr_t *bdaddr, u8 addr_type)
12468 struct mgmt_ev_adv_monitor_device_lost ev;
12470 ev.monitor_handle = cpu_to_le16(handle);
12471 bacpy(&ev.addr.bdaddr, bdaddr);
12472 ev.addr.type = addr_type;
12474 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
12478 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
12479 struct sk_buff *skb,
12480 struct sock *skip_sk,
12483 struct sk_buff *advmon_skb;
12484 size_t advmon_skb_len;
12485 __le16 *monitor_handle;
12490 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
12491 sizeof(struct mgmt_ev_device_found)) + skb->len;
12492 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
12497 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
12498 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
12499 * store monitor_handle of the matched monitor.
12501 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
12502 *monitor_handle = cpu_to_le16(handle);
12503 skb_put_data(advmon_skb, skb->data, skb->len);
12505 mgmt_event_skb(advmon_skb, skip_sk);
12508 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
12509 bdaddr_t *bdaddr, bool report_device,
12510 struct sk_buff *skb,
12511 struct sock *skip_sk)
12513 struct monitored_device *dev, *tmp;
12514 bool matched = false;
12515 bool notified = false;
12517 /* We have received the Advertisement Report because:
12518 * 1. the kernel has initiated active discovery
12519 * 2. if not, we have pend_le_reports > 0 in which case we are doing
12521 * 3. if none of the above is true, we have one or more active
12522 * Advertisement Monitor
12524 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
12525 * and report ONLY one advertisement per device for the matched Monitor
12526 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12528 * For case 3, since we are not active scanning and all advertisements
12529 * received are due to a matched Advertisement Monitor, report all
12530 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12532 if (report_device && !hdev->advmon_pend_notify) {
12533 mgmt_event_skb(skb, skip_sk);
12537 hdev->advmon_pend_notify = false;
12539 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
12540 if (!bacmp(&dev->bdaddr, bdaddr)) {
12543 if (!dev->notified) {
12544 mgmt_send_adv_monitor_device_found(hdev, skb,
12548 dev->notified = true;
12552 if (!dev->notified)
12553 hdev->advmon_pend_notify = true;
12556 if (!report_device &&
12557 ((matched && !notified) || !msft_monitor_supported(hdev))) {
12558 /* Handle 0 indicates that we are not active scanning and this
12559 * is a subsequent advertisement report for an already matched
12560 * Advertisement Monitor or the controller offloading support
12561 * is not available.
12563 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
12567 mgmt_event_skb(skb, skip_sk);
12572 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
12573 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
12574 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12577 struct sk_buff *skb;
12578 struct mgmt_ev_mesh_device_found *ev;
12581 if (!hdev->mesh_ad_types[0])
12584 /* Scan for requested AD types */
12586 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
12587 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12588 if (!hdev->mesh_ad_types[j])
12591 if (hdev->mesh_ad_types[j] == eir[i + 1])
12597 if (scan_rsp_len > 0) {
12598 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
12599 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12600 if (!hdev->mesh_ad_types[j])
12603 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
12612 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
12613 sizeof(*ev) + eir_len + scan_rsp_len);
12617 ev = skb_put(skb, sizeof(*ev));
12619 bacpy(&ev->addr.bdaddr, bdaddr);
12620 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
12622 ev->flags = cpu_to_le32(flags);
12623 ev->instant = cpu_to_le64(instant);
12626 /* Copy EIR or advertising data into event */
12627 skb_put_data(skb, eir, eir_len);
12629 if (scan_rsp_len > 0)
12630 /* Append scan response data to event */
12631 skb_put_data(skb, scan_rsp, scan_rsp_len);
12633 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12635 mgmt_event_skb(skb, NULL);
12638 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12639 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
12640 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12643 struct sk_buff *skb;
12644 struct mgmt_ev_device_found *ev;
12645 bool report_device = hci_discovery_active(hdev);
12647 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
12648 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
12649 eir, eir_len, scan_rsp, scan_rsp_len,
12652 /* Don't send events for a non-kernel initiated discovery. With
12653 * LE one exception is if we have pend_le_reports > 0 in which
12654 * case we're doing passive scanning and want these events.
12656 if (!hci_discovery_active(hdev)) {
12657 if (link_type == ACL_LINK)
12659 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
12660 report_device = true;
12661 else if (!hci_is_adv_monitoring(hdev))
12665 if (hdev->discovery.result_filtering) {
12666 /* We are using service discovery */
12667 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
12672 if (hdev->discovery.limited) {
12673 /* Check for limited discoverable bit */
12675 if (!(dev_class[1] & 0x20))
12678 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
12679 if (!flags || !(flags[0] & LE_AD_LIMITED))
12684 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
12685 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12686 sizeof(*ev) + eir_len + scan_rsp_len + 5);
12690 ev = skb_put(skb, sizeof(*ev));
12692 /* In case of device discovery with BR/EDR devices (pre 1.2), the
12693 * RSSI value was reported as 0 when not available. This behavior
12694 * is kept when using device discovery. This is required for full
12695 * backwards compatibility with the API.
12697 * However when using service discovery, the value 127 will be
12698 * returned when the RSSI is not available.
12700 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
12701 link_type == ACL_LINK)
12704 bacpy(&ev->addr.bdaddr, bdaddr);
12705 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12707 ev->flags = cpu_to_le32(flags);
12710 /* Copy EIR or advertising data into event */
12711 skb_put_data(skb, eir, eir_len);
12713 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
12716 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
12718 skb_put_data(skb, eir_cod, sizeof(eir_cod));
12721 if (scan_rsp_len > 0)
12722 /* Append scan response data to event */
12723 skb_put_data(skb, scan_rsp, scan_rsp_len);
12725 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12727 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
12730 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12731 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
12733 struct sk_buff *skb;
12734 struct mgmt_ev_device_found *ev;
12738 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12739 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
12741 ev = skb_put(skb, sizeof(*ev));
12742 bacpy(&ev->addr.bdaddr, bdaddr);
12743 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12747 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12749 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
12751 ev->eir_len = cpu_to_le16(eir_len);
12752 ev->flags = cpu_to_le32(flags);
12754 mgmt_event_skb(skb, NULL);
12757 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
12759 struct mgmt_ev_discovering ev;
12761 bt_dev_dbg(hdev, "discovering %u", discovering);
12763 memset(&ev, 0, sizeof(ev));
12764 ev.type = hdev->discovery.type;
12765 ev.discovering = discovering;
12767 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
12770 void mgmt_suspending(struct hci_dev *hdev, u8 state)
12772 struct mgmt_ev_controller_suspend ev;
12774 ev.suspend_state = state;
12775 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
12778 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
12781 struct mgmt_ev_controller_resume ev;
12783 ev.wake_reason = reason;
12785 bacpy(&ev.addr.bdaddr, bdaddr);
12786 ev.addr.type = addr_type;
12788 memset(&ev.addr, 0, sizeof(ev.addr));
12791 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
12794 static struct hci_mgmt_chan chan = {
12795 .channel = HCI_CHANNEL_CONTROL,
12796 .handler_count = ARRAY_SIZE(mgmt_handlers),
12797 .handlers = mgmt_handlers,
12799 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
12800 .tizen_handlers = tizen_mgmt_handlers,
12802 .hdev_init = mgmt_init_hdev,
12805 int mgmt_init(void)
12807 return hci_mgmt_chan_register(&chan);
12810 void mgmt_exit(void)
12812 hci_mgmt_chan_unregister(&chan);
12815 void mgmt_cleanup(struct sock *sk)
12817 struct mgmt_mesh_tx *mesh_tx;
12818 struct hci_dev *hdev;
12820 read_lock(&hci_dev_list_lock);
12822 list_for_each_entry(hdev, &hci_dev_list, list) {
12824 mesh_tx = mgmt_mesh_next(hdev, sk);
12827 mesh_send_complete(hdev, mesh_tx, true);
12831 read_unlock(&hci_dev_list_lock);