2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include <net/bluetooth/mgmt_tizen.h>
37 #include <net/bluetooth/sco.h>
40 #include "hci_request.h"
42 #include "mgmt_util.h"
43 #include "mgmt_config.h"
48 #define MGMT_VERSION 1
49 #define MGMT_REVISION 22
51 static const u16 mgmt_commands[] = {
52 MGMT_OP_READ_INDEX_LIST,
55 MGMT_OP_SET_DISCOVERABLE,
56 MGMT_OP_SET_CONNECTABLE,
57 MGMT_OP_SET_FAST_CONNECTABLE,
59 MGMT_OP_SET_LINK_SECURITY,
63 MGMT_OP_SET_DEV_CLASS,
64 MGMT_OP_SET_LOCAL_NAME,
67 MGMT_OP_LOAD_LINK_KEYS,
68 MGMT_OP_LOAD_LONG_TERM_KEYS,
70 MGMT_OP_GET_CONNECTIONS,
71 MGMT_OP_PIN_CODE_REPLY,
72 MGMT_OP_PIN_CODE_NEG_REPLY,
73 MGMT_OP_SET_IO_CAPABILITY,
75 MGMT_OP_CANCEL_PAIR_DEVICE,
76 MGMT_OP_UNPAIR_DEVICE,
77 MGMT_OP_USER_CONFIRM_REPLY,
78 MGMT_OP_USER_CONFIRM_NEG_REPLY,
79 MGMT_OP_USER_PASSKEY_REPLY,
80 MGMT_OP_USER_PASSKEY_NEG_REPLY,
81 MGMT_OP_READ_LOCAL_OOB_DATA,
82 MGMT_OP_ADD_REMOTE_OOB_DATA,
83 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
84 MGMT_OP_START_DISCOVERY,
85 MGMT_OP_STOP_DISCOVERY,
88 MGMT_OP_UNBLOCK_DEVICE,
89 MGMT_OP_SET_DEVICE_ID,
90 MGMT_OP_SET_ADVERTISING,
92 MGMT_OP_SET_STATIC_ADDRESS,
93 MGMT_OP_SET_SCAN_PARAMS,
94 MGMT_OP_SET_SECURE_CONN,
95 MGMT_OP_SET_DEBUG_KEYS,
98 MGMT_OP_GET_CONN_INFO,
99 MGMT_OP_GET_CLOCK_INFO,
101 MGMT_OP_REMOVE_DEVICE,
102 MGMT_OP_LOAD_CONN_PARAM,
103 MGMT_OP_READ_UNCONF_INDEX_LIST,
104 MGMT_OP_READ_CONFIG_INFO,
105 MGMT_OP_SET_EXTERNAL_CONFIG,
106 MGMT_OP_SET_PUBLIC_ADDRESS,
107 MGMT_OP_START_SERVICE_DISCOVERY,
108 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
109 MGMT_OP_READ_EXT_INDEX_LIST,
110 MGMT_OP_READ_ADV_FEATURES,
111 MGMT_OP_ADD_ADVERTISING,
112 MGMT_OP_REMOVE_ADVERTISING,
113 MGMT_OP_GET_ADV_SIZE_INFO,
114 MGMT_OP_START_LIMITED_DISCOVERY,
115 MGMT_OP_READ_EXT_INFO,
116 MGMT_OP_SET_APPEARANCE,
117 MGMT_OP_GET_PHY_CONFIGURATION,
118 MGMT_OP_SET_PHY_CONFIGURATION,
119 MGMT_OP_SET_BLOCKED_KEYS,
120 MGMT_OP_SET_WIDEBAND_SPEECH,
121 MGMT_OP_READ_CONTROLLER_CAP,
122 MGMT_OP_READ_EXP_FEATURES_INFO,
123 MGMT_OP_SET_EXP_FEATURE,
124 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
125 MGMT_OP_SET_DEF_SYSTEM_CONFIG,
126 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
127 MGMT_OP_SET_DEF_RUNTIME_CONFIG,
128 MGMT_OP_GET_DEVICE_FLAGS,
129 MGMT_OP_SET_DEVICE_FLAGS,
130 MGMT_OP_READ_ADV_MONITOR_FEATURES,
131 MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
132 MGMT_OP_REMOVE_ADV_MONITOR,
133 MGMT_OP_ADD_EXT_ADV_PARAMS,
134 MGMT_OP_ADD_EXT_ADV_DATA,
135 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
136 MGMT_OP_SET_MESH_RECEIVER,
137 MGMT_OP_MESH_READ_FEATURES,
139 MGMT_OP_MESH_SEND_CANCEL,
142 static const u16 mgmt_events[] = {
143 MGMT_EV_CONTROLLER_ERROR,
145 MGMT_EV_INDEX_REMOVED,
146 MGMT_EV_NEW_SETTINGS,
147 MGMT_EV_CLASS_OF_DEV_CHANGED,
148 MGMT_EV_LOCAL_NAME_CHANGED,
149 MGMT_EV_NEW_LINK_KEY,
150 MGMT_EV_NEW_LONG_TERM_KEY,
151 MGMT_EV_DEVICE_CONNECTED,
152 MGMT_EV_DEVICE_DISCONNECTED,
153 MGMT_EV_CONNECT_FAILED,
154 MGMT_EV_PIN_CODE_REQUEST,
155 MGMT_EV_USER_CONFIRM_REQUEST,
156 MGMT_EV_USER_PASSKEY_REQUEST,
158 MGMT_EV_DEVICE_FOUND,
160 MGMT_EV_DEVICE_BLOCKED,
161 MGMT_EV_DEVICE_UNBLOCKED,
162 MGMT_EV_DEVICE_UNPAIRED,
163 MGMT_EV_PASSKEY_NOTIFY,
166 MGMT_EV_DEVICE_ADDED,
167 MGMT_EV_DEVICE_REMOVED,
168 MGMT_EV_NEW_CONN_PARAM,
169 MGMT_EV_UNCONF_INDEX_ADDED,
170 MGMT_EV_UNCONF_INDEX_REMOVED,
171 MGMT_EV_NEW_CONFIG_OPTIONS,
172 MGMT_EV_EXT_INDEX_ADDED,
173 MGMT_EV_EXT_INDEX_REMOVED,
174 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
175 MGMT_EV_ADVERTISING_ADDED,
176 MGMT_EV_ADVERTISING_REMOVED,
177 MGMT_EV_EXT_INFO_CHANGED,
178 MGMT_EV_PHY_CONFIGURATION_CHANGED,
179 MGMT_EV_EXP_FEATURE_CHANGED,
180 MGMT_EV_DEVICE_FLAGS_CHANGED,
181 MGMT_EV_ADV_MONITOR_ADDED,
182 MGMT_EV_ADV_MONITOR_REMOVED,
183 MGMT_EV_CONTROLLER_SUSPEND,
184 MGMT_EV_CONTROLLER_RESUME,
185 MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
186 MGMT_EV_ADV_MONITOR_DEVICE_LOST,
189 static const u16 mgmt_untrusted_commands[] = {
190 MGMT_OP_READ_INDEX_LIST,
192 MGMT_OP_READ_UNCONF_INDEX_LIST,
193 MGMT_OP_READ_CONFIG_INFO,
194 MGMT_OP_READ_EXT_INDEX_LIST,
195 MGMT_OP_READ_EXT_INFO,
196 MGMT_OP_READ_CONTROLLER_CAP,
197 MGMT_OP_READ_EXP_FEATURES_INFO,
198 MGMT_OP_READ_DEF_SYSTEM_CONFIG,
199 MGMT_OP_READ_DEF_RUNTIME_CONFIG,
202 static const u16 mgmt_untrusted_events[] = {
204 MGMT_EV_INDEX_REMOVED,
205 MGMT_EV_NEW_SETTINGS,
206 MGMT_EV_CLASS_OF_DEV_CHANGED,
207 MGMT_EV_LOCAL_NAME_CHANGED,
208 MGMT_EV_UNCONF_INDEX_ADDED,
209 MGMT_EV_UNCONF_INDEX_REMOVED,
210 MGMT_EV_NEW_CONFIG_OPTIONS,
211 MGMT_EV_EXT_INDEX_ADDED,
212 MGMT_EV_EXT_INDEX_REMOVED,
213 MGMT_EV_EXT_INFO_CHANGED,
214 MGMT_EV_EXP_FEATURE_CHANGED,
217 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
219 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
220 "\x00\x00\x00\x00\x00\x00\x00\x00"
222 /* HCI to MGMT error code conversion table */
223 static const u8 mgmt_status_table[] = {
225 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
226 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
227 MGMT_STATUS_FAILED, /* Hardware Failure */
228 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
229 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
230 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
231 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
232 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
233 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
234 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
235 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
236 MGMT_STATUS_BUSY, /* Command Disallowed */
237 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
238 MGMT_STATUS_REJECTED, /* Rejected Security */
239 MGMT_STATUS_REJECTED, /* Rejected Personal */
240 MGMT_STATUS_TIMEOUT, /* Host Timeout */
241 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
242 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
243 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
244 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
245 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
246 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
247 MGMT_STATUS_BUSY, /* Repeated Attempts */
248 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
249 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
250 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
251 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
252 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
253 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
254 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
255 MGMT_STATUS_FAILED, /* Unspecified Error */
256 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
257 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
258 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
259 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
260 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
261 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
262 MGMT_STATUS_FAILED, /* Unit Link Key Used */
263 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
264 MGMT_STATUS_TIMEOUT, /* Instant Passed */
265 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
266 MGMT_STATUS_FAILED, /* Transaction Collision */
267 MGMT_STATUS_FAILED, /* Reserved for future use */
268 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
269 MGMT_STATUS_REJECTED, /* QoS Rejected */
270 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
271 MGMT_STATUS_REJECTED, /* Insufficient Security */
272 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
273 MGMT_STATUS_FAILED, /* Reserved for future use */
274 MGMT_STATUS_BUSY, /* Role Switch Pending */
275 MGMT_STATUS_FAILED, /* Reserved for future use */
276 MGMT_STATUS_FAILED, /* Slot Violation */
277 MGMT_STATUS_FAILED, /* Role Switch Failed */
278 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
279 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
280 MGMT_STATUS_BUSY, /* Host Busy Pairing */
281 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
282 MGMT_STATUS_BUSY, /* Controller Busy */
283 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
284 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
285 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
286 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
287 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
290 static u8 mgmt_errno_status(int err)
294 return MGMT_STATUS_SUCCESS;
296 return MGMT_STATUS_REJECTED;
298 return MGMT_STATUS_INVALID_PARAMS;
300 return MGMT_STATUS_NOT_SUPPORTED;
302 return MGMT_STATUS_BUSY;
304 return MGMT_STATUS_AUTH_FAILED;
306 return MGMT_STATUS_NO_RESOURCES;
308 return MGMT_STATUS_ALREADY_CONNECTED;
310 return MGMT_STATUS_DISCONNECTED;
313 return MGMT_STATUS_FAILED;
316 static u8 mgmt_status(int err)
319 return mgmt_errno_status(err);
321 if (err < ARRAY_SIZE(mgmt_status_table))
322 return mgmt_status_table[err];
324 return MGMT_STATUS_FAILED;
327 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
330 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
334 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
335 u16 len, int flag, struct sock *skip_sk)
337 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
341 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
342 struct sock *skip_sk)
344 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
345 HCI_SOCK_TRUSTED, skip_sk);
348 static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
350 return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
354 static u8 le_addr_type(u8 mgmt_addr_type)
356 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
357 return ADDR_LE_DEV_PUBLIC;
359 return ADDR_LE_DEV_RANDOM;
362 void mgmt_fill_version_info(void *ver)
364 struct mgmt_rp_read_version *rp = ver;
366 rp->version = MGMT_VERSION;
367 rp->revision = cpu_to_le16(MGMT_REVISION);
370 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
373 struct mgmt_rp_read_version rp;
375 bt_dev_dbg(hdev, "sock %p", sk);
377 mgmt_fill_version_info(&rp);
379 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
383 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
386 struct mgmt_rp_read_commands *rp;
387 u16 num_commands, num_events;
391 bt_dev_dbg(hdev, "sock %p", sk);
393 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
394 num_commands = ARRAY_SIZE(mgmt_commands);
395 num_events = ARRAY_SIZE(mgmt_events);
397 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
398 num_events = ARRAY_SIZE(mgmt_untrusted_events);
401 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
403 rp = kmalloc(rp_size, GFP_KERNEL);
407 rp->num_commands = cpu_to_le16(num_commands);
408 rp->num_events = cpu_to_le16(num_events);
410 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
411 __le16 *opcode = rp->opcodes;
413 for (i = 0; i < num_commands; i++, opcode++)
414 put_unaligned_le16(mgmt_commands[i], opcode);
416 for (i = 0; i < num_events; i++, opcode++)
417 put_unaligned_le16(mgmt_events[i], opcode);
419 __le16 *opcode = rp->opcodes;
421 for (i = 0; i < num_commands; i++, opcode++)
422 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
424 for (i = 0; i < num_events; i++, opcode++)
425 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
428 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
435 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
438 struct mgmt_rp_read_index_list *rp;
444 bt_dev_dbg(hdev, "sock %p", sk);
446 read_lock(&hci_dev_list_lock);
449 list_for_each_entry(d, &hci_dev_list, list) {
450 if (d->dev_type == HCI_PRIMARY &&
451 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
455 rp_len = sizeof(*rp) + (2 * count);
456 rp = kmalloc(rp_len, GFP_ATOMIC);
458 read_unlock(&hci_dev_list_lock);
463 list_for_each_entry(d, &hci_dev_list, list) {
464 if (hci_dev_test_flag(d, HCI_SETUP) ||
465 hci_dev_test_flag(d, HCI_CONFIG) ||
466 hci_dev_test_flag(d, HCI_USER_CHANNEL))
469 /* Devices marked as raw-only are neither configured
470 * nor unconfigured controllers.
472 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
475 if (d->dev_type == HCI_PRIMARY &&
476 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
477 rp->index[count++] = cpu_to_le16(d->id);
478 bt_dev_dbg(hdev, "Added hci%u", d->id);
482 rp->num_controllers = cpu_to_le16(count);
483 rp_len = sizeof(*rp) + (2 * count);
485 read_unlock(&hci_dev_list_lock);
487 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
495 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
496 void *data, u16 data_len)
498 struct mgmt_rp_read_unconf_index_list *rp;
504 bt_dev_dbg(hdev, "sock %p", sk);
506 read_lock(&hci_dev_list_lock);
509 list_for_each_entry(d, &hci_dev_list, list) {
510 if (d->dev_type == HCI_PRIMARY &&
511 hci_dev_test_flag(d, HCI_UNCONFIGURED))
515 rp_len = sizeof(*rp) + (2 * count);
516 rp = kmalloc(rp_len, GFP_ATOMIC);
518 read_unlock(&hci_dev_list_lock);
523 list_for_each_entry(d, &hci_dev_list, list) {
524 if (hci_dev_test_flag(d, HCI_SETUP) ||
525 hci_dev_test_flag(d, HCI_CONFIG) ||
526 hci_dev_test_flag(d, HCI_USER_CHANNEL))
529 /* Devices marked as raw-only are neither configured
530 * nor unconfigured controllers.
532 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
535 if (d->dev_type == HCI_PRIMARY &&
536 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
537 rp->index[count++] = cpu_to_le16(d->id);
538 bt_dev_dbg(hdev, "Added hci%u", d->id);
542 rp->num_controllers = cpu_to_le16(count);
543 rp_len = sizeof(*rp) + (2 * count);
545 read_unlock(&hci_dev_list_lock);
547 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
548 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
555 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
556 void *data, u16 data_len)
558 struct mgmt_rp_read_ext_index_list *rp;
563 bt_dev_dbg(hdev, "sock %p", sk);
565 read_lock(&hci_dev_list_lock);
568 list_for_each_entry(d, &hci_dev_list, list) {
569 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
573 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
575 read_unlock(&hci_dev_list_lock);
580 list_for_each_entry(d, &hci_dev_list, list) {
581 if (hci_dev_test_flag(d, HCI_SETUP) ||
582 hci_dev_test_flag(d, HCI_CONFIG) ||
583 hci_dev_test_flag(d, HCI_USER_CHANNEL))
586 /* Devices marked as raw-only are neither configured
587 * nor unconfigured controllers.
589 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
592 if (d->dev_type == HCI_PRIMARY) {
593 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
594 rp->entry[count].type = 0x01;
596 rp->entry[count].type = 0x00;
597 } else if (d->dev_type == HCI_AMP) {
598 rp->entry[count].type = 0x02;
603 rp->entry[count].bus = d->bus;
604 rp->entry[count++].index = cpu_to_le16(d->id);
605 bt_dev_dbg(hdev, "Added hci%u", d->id);
608 rp->num_controllers = cpu_to_le16(count);
610 read_unlock(&hci_dev_list_lock);
612 /* If this command is called at least once, then all the
613 * default index and unconfigured index events are disabled
614 * and from now on only extended index events are used.
616 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
617 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
618 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
620 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
621 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
622 struct_size(rp, entry, count));
629 static bool is_configured(struct hci_dev *hdev)
631 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
632 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
635 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
636 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
637 !bacmp(&hdev->public_addr, BDADDR_ANY))
643 static __le32 get_missing_options(struct hci_dev *hdev)
647 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
648 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
649 options |= MGMT_OPTION_EXTERNAL_CONFIG;
651 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
652 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
653 !bacmp(&hdev->public_addr, BDADDR_ANY))
654 options |= MGMT_OPTION_PUBLIC_ADDRESS;
656 return cpu_to_le32(options);
659 static int new_options(struct hci_dev *hdev, struct sock *skip)
661 __le32 options = get_missing_options(hdev);
663 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
664 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
667 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
669 __le32 options = get_missing_options(hdev);
671 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
675 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
676 void *data, u16 data_len)
678 struct mgmt_rp_read_config_info rp;
681 bt_dev_dbg(hdev, "sock %p", sk);
685 memset(&rp, 0, sizeof(rp));
686 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
688 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
689 options |= MGMT_OPTION_EXTERNAL_CONFIG;
691 if (hdev->set_bdaddr)
692 options |= MGMT_OPTION_PUBLIC_ADDRESS;
694 rp.supported_options = cpu_to_le32(options);
695 rp.missing_options = get_missing_options(hdev);
697 hci_dev_unlock(hdev);
699 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
703 static u32 get_supported_phys(struct hci_dev *hdev)
705 u32 supported_phys = 0;
707 if (lmp_bredr_capable(hdev)) {
708 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
710 if (hdev->features[0][0] & LMP_3SLOT)
711 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
713 if (hdev->features[0][0] & LMP_5SLOT)
714 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
716 if (lmp_edr_2m_capable(hdev)) {
717 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
719 if (lmp_edr_3slot_capable(hdev))
720 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
722 if (lmp_edr_5slot_capable(hdev))
723 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
725 if (lmp_edr_3m_capable(hdev)) {
726 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
728 if (lmp_edr_3slot_capable(hdev))
729 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
731 if (lmp_edr_5slot_capable(hdev))
732 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
737 if (lmp_le_capable(hdev)) {
738 supported_phys |= MGMT_PHY_LE_1M_TX;
739 supported_phys |= MGMT_PHY_LE_1M_RX;
741 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
742 supported_phys |= MGMT_PHY_LE_2M_TX;
743 supported_phys |= MGMT_PHY_LE_2M_RX;
746 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
747 supported_phys |= MGMT_PHY_LE_CODED_TX;
748 supported_phys |= MGMT_PHY_LE_CODED_RX;
752 return supported_phys;
755 static u32 get_selected_phys(struct hci_dev *hdev)
757 u32 selected_phys = 0;
759 if (lmp_bredr_capable(hdev)) {
760 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
762 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
763 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
765 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
766 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
768 if (lmp_edr_2m_capable(hdev)) {
769 if (!(hdev->pkt_type & HCI_2DH1))
770 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
772 if (lmp_edr_3slot_capable(hdev) &&
773 !(hdev->pkt_type & HCI_2DH3))
774 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
776 if (lmp_edr_5slot_capable(hdev) &&
777 !(hdev->pkt_type & HCI_2DH5))
778 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
780 if (lmp_edr_3m_capable(hdev)) {
781 if (!(hdev->pkt_type & HCI_3DH1))
782 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
784 if (lmp_edr_3slot_capable(hdev) &&
785 !(hdev->pkt_type & HCI_3DH3))
786 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
788 if (lmp_edr_5slot_capable(hdev) &&
789 !(hdev->pkt_type & HCI_3DH5))
790 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
795 if (lmp_le_capable(hdev)) {
796 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
797 selected_phys |= MGMT_PHY_LE_1M_TX;
799 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
800 selected_phys |= MGMT_PHY_LE_1M_RX;
802 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
803 selected_phys |= MGMT_PHY_LE_2M_TX;
805 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
806 selected_phys |= MGMT_PHY_LE_2M_RX;
808 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
809 selected_phys |= MGMT_PHY_LE_CODED_TX;
811 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
812 selected_phys |= MGMT_PHY_LE_CODED_RX;
815 return selected_phys;
818 static u32 get_configurable_phys(struct hci_dev *hdev)
820 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
821 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
824 static u32 get_supported_settings(struct hci_dev *hdev)
828 settings |= MGMT_SETTING_POWERED;
829 settings |= MGMT_SETTING_BONDABLE;
830 settings |= MGMT_SETTING_DEBUG_KEYS;
831 settings |= MGMT_SETTING_CONNECTABLE;
832 settings |= MGMT_SETTING_DISCOVERABLE;
834 if (lmp_bredr_capable(hdev)) {
835 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
836 settings |= MGMT_SETTING_FAST_CONNECTABLE;
837 settings |= MGMT_SETTING_BREDR;
838 settings |= MGMT_SETTING_LINK_SECURITY;
840 if (lmp_ssp_capable(hdev)) {
841 settings |= MGMT_SETTING_SSP;
842 if (IS_ENABLED(CONFIG_BT_HS))
843 settings |= MGMT_SETTING_HS;
846 if (lmp_sc_capable(hdev))
847 settings |= MGMT_SETTING_SECURE_CONN;
849 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
851 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
854 if (lmp_le_capable(hdev)) {
855 settings |= MGMT_SETTING_LE;
856 settings |= MGMT_SETTING_SECURE_CONN;
857 settings |= MGMT_SETTING_PRIVACY;
858 settings |= MGMT_SETTING_STATIC_ADDRESS;
859 settings |= MGMT_SETTING_ADVERTISING;
862 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
864 settings |= MGMT_SETTING_CONFIGURATION;
866 if (cis_central_capable(hdev))
867 settings |= MGMT_SETTING_CIS_CENTRAL;
869 if (cis_peripheral_capable(hdev))
870 settings |= MGMT_SETTING_CIS_PERIPHERAL;
872 settings |= MGMT_SETTING_PHY_CONFIGURATION;
877 static u32 get_current_settings(struct hci_dev *hdev)
881 if (hdev_is_powered(hdev))
882 settings |= MGMT_SETTING_POWERED;
884 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
885 settings |= MGMT_SETTING_CONNECTABLE;
887 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
888 settings |= MGMT_SETTING_FAST_CONNECTABLE;
890 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
891 settings |= MGMT_SETTING_DISCOVERABLE;
893 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
894 settings |= MGMT_SETTING_BONDABLE;
896 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
897 settings |= MGMT_SETTING_BREDR;
899 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
900 settings |= MGMT_SETTING_LE;
902 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
903 settings |= MGMT_SETTING_LINK_SECURITY;
905 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
906 settings |= MGMT_SETTING_SSP;
908 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
909 settings |= MGMT_SETTING_HS;
911 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
912 settings |= MGMT_SETTING_ADVERTISING;
914 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
915 settings |= MGMT_SETTING_SECURE_CONN;
917 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
918 settings |= MGMT_SETTING_DEBUG_KEYS;
920 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
921 settings |= MGMT_SETTING_PRIVACY;
923 /* The current setting for static address has two purposes. The
924 * first is to indicate if the static address will be used and
925 * the second is to indicate if it is actually set.
927 * This means if the static address is not configured, this flag
928 * will never be set. If the address is configured, then if the
929 * address is actually used decides if the flag is set or not.
931 * For single mode LE only controllers and dual-mode controllers
932 * with BR/EDR disabled, the existence of the static address will
935 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
936 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
937 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
938 if (bacmp(&hdev->static_addr, BDADDR_ANY))
939 settings |= MGMT_SETTING_STATIC_ADDRESS;
942 if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
943 settings |= MGMT_SETTING_WIDEBAND_SPEECH;
945 if (cis_central_capable(hdev))
946 settings |= MGMT_SETTING_CIS_CENTRAL;
948 if (cis_peripheral_capable(hdev))
949 settings |= MGMT_SETTING_CIS_PERIPHERAL;
954 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
956 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
959 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
961 struct mgmt_pending_cmd *cmd;
963 /* If there's a pending mgmt command the flags will not yet have
964 * their final values, so check for this first.
966 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
968 struct mgmt_mode *cp = cmd->param;
970 return LE_AD_GENERAL;
971 else if (cp->val == 0x02)
972 return LE_AD_LIMITED;
974 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
975 return LE_AD_LIMITED;
976 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
977 return LE_AD_GENERAL;
983 bool mgmt_get_connectable(struct hci_dev *hdev)
985 struct mgmt_pending_cmd *cmd;
987 /* If there's a pending mgmt command the flag will not yet have
988 * it's final value, so check for this first.
990 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
992 struct mgmt_mode *cp = cmd->param;
997 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
/* hci_cmd_sync callback: flush the cached EIR and class-of-device state
 * out to the controller.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
1008 static void service_cache_off(struct work_struct *work)
1010 struct hci_dev *hdev = container_of(work, struct hci_dev,
1011 service_cache.work);
1013 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1016 hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
1019 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1021 /* The generation of a new RPA and programming it into the
1022 * controller happens in the hci_req_enable_advertising()
1025 if (ext_adv_capable(hdev))
1026 return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1028 return hci_enable_advertising_sync(hdev);
1031 static void rpa_expired(struct work_struct *work)
1033 struct hci_dev *hdev = container_of(work, struct hci_dev,
1036 bt_dev_dbg(hdev, "");
1038 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1040 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
1043 hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
1046 static void discov_off(struct work_struct *work)
1048 struct hci_dev *hdev = container_of(work, struct hci_dev,
1051 bt_dev_dbg(hdev, "");
1055 /* When discoverable timeout triggers, then just make sure
1056 * the limited discoverable flag is cleared. Even in the case
1057 * of a timeout triggered from general discoverable, it is
1058 * safe to unconditionally clear the flag.
1060 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1061 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1062 hdev->discov_timeout = 0;
1064 hci_update_discoverable(hdev);
1066 mgmt_new_settings(hdev);
1068 hci_dev_unlock(hdev);
1071 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev);
1073 static void mesh_send_complete(struct hci_dev *hdev,
1074 struct mgmt_mesh_tx *mesh_tx, bool silent)
1076 u8 handle = mesh_tx->handle;
1079 mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle,
1080 sizeof(handle), NULL);
1082 mgmt_mesh_remove(mesh_tx);
1085 static int mesh_send_done_sync(struct hci_dev *hdev, void *data)
1087 struct mgmt_mesh_tx *mesh_tx;
1089 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
1090 hci_disable_advertising_sync(hdev);
1091 mesh_tx = mgmt_mesh_next(hdev, NULL);
1094 mesh_send_complete(hdev, mesh_tx, false);
1099 static int mesh_send_sync(struct hci_dev *hdev, void *data);
1100 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err);
1101 static void mesh_next(struct hci_dev *hdev, void *data, int err)
1103 struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL);
1108 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
1109 mesh_send_start_complete);
1112 mesh_send_complete(hdev, mesh_tx, false);
1114 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
1117 static void mesh_send_done(struct work_struct *work)
1119 struct hci_dev *hdev = container_of(work, struct hci_dev,
1120 mesh_send_done.work);
1122 if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING))
1125 hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next);
1128 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1130 if (hci_dev_test_flag(hdev, HCI_MGMT))
1133 BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION);
1135 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
1136 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1137 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1138 INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);
1140 /* Non-mgmt controlled devices get this bit set
1141 * implicitly so that pairing works for them, however
1142 * for mgmt we require user-space to explicitly enable
1145 hci_dev_clear_flag(hdev, HCI_BONDABLE);
1147 hci_dev_set_flag(hdev, HCI_MGMT);
1150 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1151 void *data, u16 data_len)
1153 struct mgmt_rp_read_info rp;
1155 bt_dev_dbg(hdev, "sock %p", sk);
1159 memset(&rp, 0, sizeof(rp));
1161 bacpy(&rp.bdaddr, &hdev->bdaddr);
1163 rp.version = hdev->hci_ver;
1164 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1166 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1167 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1169 memcpy(rp.dev_class, hdev->dev_class, 3);
1171 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1172 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1174 hci_dev_unlock(hdev);
1176 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1180 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1185 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1186 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1187 hdev->dev_class, 3);
1189 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1190 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1193 name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name));
1194 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1195 hdev->dev_name, name_len);
1197 name_len = strnlen(hdev->short_name, sizeof(hdev->short_name));
1198 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1199 hdev->short_name, name_len);
/* MGMT_OP_READ_EXT_INFO handler: like read_controller_info but the
 * class/name data is carried as EIR-formatted TLVs appended after the
 * fixed header. Also switches this socket over to the extended info
 * event, disabling the legacy class/name change events.
 * NOTE(review): the buf[] declaration and hci_dev_lock() are elided.
 */
1204 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1205 void *data, u16 data_len)
1208 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1211 bt_dev_dbg(hdev, "sock %p", sk);
1213 memset(&buf, 0, sizeof(buf));
1217 bacpy(&rp->bdaddr, &hdev->bdaddr);
1219 rp->version = hdev->hci_ver;
1220 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1222 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1223 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1226 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1227 rp->eir_len = cpu_to_le16(eir_len);
1229 hci_dev_unlock(hdev);
1231 /* If this command is called at least once, then the events
1232 * for class of device and local name changes are disabled
1233 * and only the new extended controller information event
1236 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1237 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1238 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
/* Reply is the fixed struct plus the variable-length EIR blob. */
1240 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1241 sizeof(*rp) + eir_len);
/* Emit MGMT_EV_EXT_INFO_CHANGED (current EIR data) to every socket
 * that opted into extended info events, except @skip.
 * NOTE(review): the buf[] declaration is elided in this extract.
 */
1244 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1247 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1250 memset(buf, 0, sizeof(buf));
1252 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1253 ev->eir_len = cpu_to_le16(eir_len);
1255 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1256 sizeof(*ev) + eir_len,
1257 HCI_MGMT_EXT_INFO_EVENTS, skip);
/* Complete @opcode on @sk with the controller's current settings
 * bitmask (little-endian) as the response payload.
 */
1260 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1262 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1264 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* Broadcast MGMT_EV_ADVERTISING_ADDED for @instance; @sk (the
 * originator) is skipped by mgmt_event().
 */
1268 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1270 struct mgmt_ev_advertising_added ev;
1272 ev.instance = instance;
1274 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADVERTISING_REMOVED for @instance; @sk (the
 * originator) is skipped by mgmt_event().
 */
1277 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1280 struct mgmt_ev_advertising_removed ev;
1282 ev.instance = instance;
1284 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Cancel the per-instance advertising expiry timer, if armed, and
 * clear the recorded timeout so no stale expiry fires later.
 */
1287 static void cancel_adv_timeout(struct hci_dev *hdev)
1289 if (hdev->adv_instance_timeout) {
1290 hdev->adv_instance_timeout = 0;
1291 cancel_delayed_work(&hdev->adv_instance_expire);
1295 /* This function requires the caller holds hdev->lock */
/* Re-sort every LE connection parameter entry back onto the pending
 * connect/report lists according to its auto_connect policy. Entries
 * are first unlinked so AUTO_OFF power cycles cannot leave them on a
 * stale list. NOTE(review): the default/other switch cases and the
 * break statements are elided in this extract.
 */
1296 static void restart_le_actions(struct hci_dev *hdev)
1298 struct hci_conn_params *p;
1300 list_for_each_entry(p, &hdev->le_conn_params, list) {
1301 /* Needed for AUTO_OFF case where might not "really"
1302 * have been powered off.
1304 list_del_init(&p->action);
1306 switch (p->auto_connect) {
1307 case HCI_AUTO_CONN_DIRECT:
1308 case HCI_AUTO_CONN_ALWAYS:
1309 list_add(&p->action, &hdev->pend_le_conns);
1311 case HCI_AUTO_CONN_REPORT:
1312 list_add(&p->action, &hdev->pend_le_reports);
/* Broadcast MGMT_EV_NEW_SETTINGS (current settings bitmask) to all
 * sockets with HCI_MGMT_SETTING_EVENTS set, except @skip.
 */
1320 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1322 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1324 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1325 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
/* Completion callback for SET_POWERED queued via hci_cmd_sync_queue().
 * On success restarts LE actions and passive scanning, replies with
 * the new settings and (for power-on) broadcasts NEW_SETTINGS; on
 * error replies with a status. NOTE(review): several lines (error
 * branch structure, goto labels, locking) are elided in this extract,
 * so the exact success/error control flow is not fully visible here.
 */
1328 static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
1330 struct mgmt_pending_cmd *cmd = data;
1331 struct mgmt_mode *cp;
1333 /* Make sure cmd still outstanding. */
1334 if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
1339 bt_dev_dbg(hdev, "err %d", err);
1344 restart_le_actions(hdev);
1345 hci_update_passive_scan(hdev);
1346 hci_dev_unlock(hdev);
1349 send_settings_rsp(cmd->sk, cmd->opcode, hdev);
1351 /* Only call new_setting for power on as power off is deferred
1352 * to hdev->power_off work which does call hci_dev_do_close.
1355 new_settings(hdev, cmd->sk);
1357 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
1361 mgmt_pending_remove(cmd);
/* hci_sync worker for SET_POWERED: apply the requested power state
 * (cp->val) from the hdev cmd-sync context.
 */
1364 static int set_powered_sync(struct hci_dev *hdev, void *data)
1366 struct mgmt_pending_cmd *cmd = data;
1367 struct mgmt_mode *cp = cmd->param;
1369 BT_DBG("%s", hdev->name);
1371 return hci_set_powered_sync(hdev, cp->val);
/* MGMT_OP_SET_POWERED handler: validate the mode byte, reject if a
 * power change is already pending, short-circuit when the state is
 * unchanged, otherwise queue set_powered_sync with
 * mgmt_set_powered_complete as completion. NOTE(review): the BUSY
 * status argument, the !cmd error path and goto labels are elided.
 */
1374 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1377 struct mgmt_mode *cp = data;
1378 struct mgmt_pending_cmd *cmd;
1381 bt_dev_dbg(hdev, "sock %p", sk);
/* Only 0x00 (off) and 0x01 (on) are valid mode values. */
1383 if (cp->val != 0x00 && cp->val != 0x01)
1384 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1385 MGMT_STATUS_INVALID_PARAMS);
1389 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1390 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
/* Requested state already current: just echo the settings back. */
1395 if (!!cp->val == hdev_is_powered(hdev)) {
1396 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1400 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1406 err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
1407 mgmt_set_powered_complete);
1410 mgmt_pending_remove(cmd);
1413 hci_dev_unlock(hdev);
/* Public wrapper: broadcast NEW_SETTINGS to all listeners (no skip). */
1417 int mgmt_new_settings(struct hci_dev *hdev)
1419 return new_settings(hdev, NULL);
1424 struct hci_dev *hdev;
/* mgmt_pending_foreach callback: answer each pending command with the
 * current settings, remember the first socket in the match (holding a
 * reference so it survives until the caller drops it), and free the
 * pending command.
 */
1428 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1430 struct cmd_lookup *match = data;
1432 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1434 list_del(&cmd->list);
1436 if (match->sk == NULL) {
1437 match->sk = cmd->sk;
1438 sock_hold(match->sk);
1441 mgmt_pending_free(cmd);
/* mgmt_pending_foreach callback: fail each pending command with the
 * status pointed to by @data and remove it.
 */
1444 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1448 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1449 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: prefer the command's own
 * cmd_complete handler when set; otherwise fall back to a plain
 * status response via cmd_status_rsp.
 */
1452 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1454 if (cmd->cmd_complete) {
1457 cmd->cmd_complete(cmd, *status);
1458 mgmt_pending_remove(cmd);
1463 cmd_status_rsp(cmd, data);
/* Generic cmd_complete: echo the full original parameter blob back. */
1466 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1468 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1469 cmd->param, cmd->param_len);
/* cmd_complete for address-based commands: echo only the leading
 * mgmt_addr_info portion of the parameters back.
 */
1472 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1474 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1475 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR capability/enablement to an mgmt status:
 * NOT_SUPPORTED if the controller lacks BR/EDR, REJECTED if BR/EDR is
 * disabled, SUCCESS otherwise.
 */
1478 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1480 if (!lmp_bredr_capable(hdev))
1481 return MGMT_STATUS_NOT_SUPPORTED;
1482 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1483 return MGMT_STATUS_REJECTED;
1485 return MGMT_STATUS_SUCCESS;
/* Map LE capability/enablement to an mgmt status:
 * NOT_SUPPORTED if the controller lacks LE, REJECTED if LE is
 * disabled, SUCCESS otherwise.
 */
1488 static u8 mgmt_le_support(struct hci_dev *hdev)
1490 if (!lmp_le_capable(hdev))
1491 return MGMT_STATUS_NOT_SUPPORTED;
1492 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1493 return MGMT_STATUS_REJECTED;
1495 return MGMT_STATUS_SUCCESS;
/* Completion for SET_DISCOVERABLE: on error report the status and
 * clear limited-discoverable; on success (re)arm the discoverable
 * timeout if one is configured, then reply with the new settings and
 * broadcast NEW_SETTINGS. NOTE(review): the error-branch structure
 * (if (err) { ... goto ... }) and hci_dev_lock() are elided here.
 */
1498 static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
1501 struct mgmt_pending_cmd *cmd = data;
1503 bt_dev_dbg(hdev, "err %d", err);
1505 /* Make sure cmd still outstanding. */
1506 if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
1512 u8 mgmt_err = mgmt_status(err);
1513 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1514 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* discov_timeout is kept in seconds; the delayed work wants jiffies. */
1518 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1519 hdev->discov_timeout > 0) {
1520 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1521 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1524 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1525 new_settings(hdev, cmd->sk);
1528 mgmt_pending_remove(cmd);
1529 hci_dev_unlock(hdev);
/* hci_sync worker for SET_DISCOVERABLE: push the already-updated
 * discoverable flags to the controller.
 */
1532 static int set_discoverable_sync(struct hci_dev *hdev, void *data)
1534 BT_DBG("%s", hdev->name);
1536 return hci_update_discoverable_sync(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. Validates the mode (0x00 off,
 * 0x01 general, 0x02 limited) and timeout combination, handles the
 * powered-off and no-HCI-needed cases inline, and otherwise updates
 * the DISCOVERABLE/LIMITED_DISCOVERABLE flags and queues
 * set_discoverable_sync. NOTE(review): several lines (variable
 * declarations, BUSY/REJECTED status arguments, goto labels, braces)
 * are elided in this extract.
 */
1539 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1542 struct mgmt_cp_set_discoverable *cp = data;
1543 struct mgmt_pending_cmd *cmd;
1547 bt_dev_dbg(hdev, "sock %p", sk);
/* Discoverable requires at least one of BR/EDR or LE to be enabled. */
1549 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1550 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1551 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1552 MGMT_STATUS_REJECTED);
1554 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1555 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1556 MGMT_STATUS_INVALID_PARAMS);
1558 timeout = __le16_to_cpu(cp->timeout);
1560 /* Disabling discoverable requires that no timeout is set,
1561 * and enabling limited discoverable requires a timeout.
1563 if ((cp->val == 0x00 && timeout > 0) ||
1564 (cp->val == 0x02 && timeout == 0))
1565 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense while powered: it could not be armed. */
1570 if (!hdev_is_powered(hdev) && timeout > 0) {
1571 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_NOT_POWERED);
1576 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1577 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1578 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable; reject if not connectable. */
1583 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1584 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1585 MGMT_STATUS_REJECTED);
1589 if (hdev->advertising_paused) {
1590 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1595 if (!hdev_is_powered(hdev)) {
1596 bool changed = false;
1598 /* Setting limited discoverable when powered off is
1599 * not a valid operation since it requires a timeout
1600 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1602 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1603 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1607 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1612 err = new_settings(hdev, sk);
1617 /* If the current mode is the same, then just update the timeout
1618 * value with the new value. And if only the timeout gets updated,
1619 * then no need for any HCI transactions.
1621 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1622 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1623 HCI_LIMITED_DISCOVERABLE)) {
1624 cancel_delayed_work(&hdev->discov_off);
1625 hdev->discov_timeout = timeout;
1627 if (cp->val && hdev->discov_timeout > 0) {
1628 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1629 queue_delayed_work(hdev->req_workqueue,
1630 &hdev->discov_off, to);
1633 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1637 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1643 /* Cancel any potential discoverable timeout that might be
1644 * still active and store new timeout value. The arming of
1645 * the timeout happens in the complete handler.
1647 cancel_delayed_work(&hdev->discov_off);
1648 hdev->discov_timeout = timeout;
1651 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1653 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1655 /* Limited discoverable mode */
1656 if (cp->val == 0x02)
1657 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1659 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1661 err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
1662 mgmt_set_discoverable_complete);
1665 mgmt_pending_remove(cmd);
1668 hci_dev_unlock(hdev);
/* Completion for SET_CONNECTABLE: on error report the status,
 * otherwise reply with the new settings and broadcast NEW_SETTINGS.
 * NOTE(review): the error-branch structure and hci_dev_lock() are
 * elided in this extract.
 */
1672 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1675 struct mgmt_pending_cmd *cmd = data;
1677 bt_dev_dbg(hdev, "err %d", err);
1679 /* Make sure cmd still outstanding. */
1680 if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1686 u8 mgmt_err = mgmt_status(err);
1687 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1691 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1692 new_settings(hdev, cmd->sk);
1696 mgmt_pending_remove(cmd);
1698 hci_dev_unlock(hdev);
/* Flag-only path for SET_CONNECTABLE (used when powered off): flip
 * HCI_CONNECTABLE, also dropping HCI_DISCOVERABLE when disabling,
 * reply with settings, refresh scan state, and broadcast NEW_SETTINGS
 * if anything changed. NOTE(review): the if (val)/else structure and
 * the "if (changed)" guard lines are elided in this extract.
 */
1701 static int set_connectable_update_settings(struct hci_dev *hdev,
1702 struct sock *sk, u8 val)
1704 bool changed = false;
1707 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1711 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1713 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
/* Non-connectable implies non-discoverable as well. */
1714 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1717 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1722 hci_update_scan(hdev);
1723 hci_update_passive_scan(hdev);
1724 return new_settings(hdev, sk);
/* hci_sync worker for SET_CONNECTABLE: push the already-updated
 * connectable flags to the controller.
 */
1730 static int set_connectable_sync(struct hci_dev *hdev, void *data)
1732 BT_DBG("%s", hdev->name);
1734 return hci_update_connectable_sync(hdev);
/* MGMT_OP_SET_CONNECTABLE handler: validate the mode, take the
 * settings-only path when powered off, reject when a conflicting
 * discoverable/connectable change is pending, otherwise update the
 * flags and queue set_connectable_sync. NOTE(review): variable
 * declarations, BUSY status argument, if/else structure around the
 * flag updates and goto labels are elided in this extract.
 */
1737 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1740 struct mgmt_mode *cp = data;
1741 struct mgmt_pending_cmd *cmd;
1744 bt_dev_dbg(hdev, "sock %p", sk);
1746 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1747 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1748 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1749 MGMT_STATUS_REJECTED);
1751 if (cp->val != 0x00 && cp->val != 0x01)
1752 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1753 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: no HCI traffic needed, just update the flags. */
1757 if (!hdev_is_powered(hdev)) {
1758 err = set_connectable_update_settings(hdev, sk, cp->val);
1762 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1763 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1764 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1769 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1776 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Disabling: stop any discoverable timeout and clear related flags. */
1778 if (hdev->discov_timeout > 0)
1779 cancel_delayed_work(&hdev->discov_off);
1781 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1782 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1783 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1786 err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
1787 mgmt_set_connectable_complete);
1790 mgmt_pending_remove(cmd);
1793 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag (no HCI
 * traffic needed), reply with settings, and on an actual change
 * refresh discoverable state (limited-privacy advertising address may
 * depend on bondable) and broadcast NEW_SETTINGS. NOTE(review): the
 * if (cp->val)/else and "if (changed)" structure is elided here.
 */
1797 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1800 struct mgmt_mode *cp = data;
1804 bt_dev_dbg(hdev, "sock %p", sk);
1806 if (cp->val != 0x00 && cp->val != 0x01)
1807 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1808 MGMT_STATUS_INVALID_PARAMS);
/* test_and_set/clear returns the previous state, so "changed" is
 * true only when the flag actually flipped. */
1813 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1815 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1817 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1822 /* In limited privacy mode the change of bondable mode
1823 * may affect the local advertising address.
1825 hci_update_discoverable(hdev);
1827 err = new_settings(hdev, sk);
1831 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler: requires BR/EDR; when powered
 * off just flips HCI_LINK_SECURITY, otherwise sends
 * HCI_OP_WRITE_AUTH_ENABLE (legacy hci_send_cmd path, not cmd_sync).
 * NOTE(review): declarations (status, val, err), the status check
 * after mgmt_bredr_support(), BUSY argument and goto labels are
 * elided in this extract.
 */
1835 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1838 struct mgmt_mode *cp = data;
1839 struct mgmt_pending_cmd *cmd;
1843 bt_dev_dbg(hdev, "sock %p", sk);
1845 status = mgmt_bredr_support(hdev);
1847 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1850 if (cp->val != 0x00 && cp->val != 0x01)
1851 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1852 MGMT_STATUS_INVALID_PARAMS);
1856 if (!hdev_is_powered(hdev)) {
1857 bool changed = false;
1859 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1860 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1864 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1869 err = new_settings(hdev, sk);
1874 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1875 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller auth state already matches: nothing to send. */
1882 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1883 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1887 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1893 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1895 mgmt_pending_remove(cmd);
1900 hci_dev_unlock(hdev);
/* Completion for SET_SSP: on error roll back the SSP/HS flags (the
 * enable was applied optimistically) and fail every pending SET_SSP;
 * on success reconcile HCI_SSP_ENABLED with the requested value,
 * answer all pending SET_SSP commands, broadcast NEW_SETTINGS if the
 * flag changed, and refresh the EIR. NOTE(review): the if (err)/else
 * structure, "if (enable)" branches and locking are elided here.
 */
1904 static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
1906 struct cmd_lookup match = { NULL, hdev };
1907 struct mgmt_pending_cmd *cmd = data;
1908 struct mgmt_mode *cp = cmd->param;
1909 u8 enable = cp->val;
1912 /* Make sure cmd still outstanding. */
1913 if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
1917 u8 mgmt_err = mgmt_status(err);
1919 if (enable && hci_dev_test_and_clear_flag(hdev,
1921 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1922 new_settings(hdev, NULL);
1925 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
1931 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1933 changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
1936 changed = hci_dev_test_and_clear_flag(hdev,
/* Disabling SSP also forces High Speed off. */
1939 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1942 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
1945 new_settings(hdev, match.sk);
1950 hci_update_eir_sync(hdev);
/* hci_sync worker for SET_SSP: optimistically set HCI_SSP_ENABLED
 * when enabling, write the SSP mode to the controller, and undo the
 * flag if the write failed. NOTE(review): the "if (cp->val)" guard
 * and err declaration are elided in this extract.
 */
1953 static int set_ssp_sync(struct hci_dev *hdev, void *data)
1955 struct mgmt_pending_cmd *cmd = data;
1956 struct mgmt_mode *cp = cmd->param;
1957 bool changed = false;
1961 changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
1963 err = hci_write_ssp_mode_sync(hdev, cp->val);
1965 if (!err && changed)
1966 hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
/* MGMT_OP_SET_SSP handler: requires BR/EDR + SSP capability. When
 * powered off just flips the flags (clearing HS when disabling);
 * otherwise rejects if a SET_SSP is pending, short-circuits on
 * no-change, and queues set_ssp_sync. NOTE(review): declarations,
 * the status check, BUSY argument, branch structure in the
 * powered-off path, the completion argument of hci_cmd_sync_queue,
 * and goto labels are elided in this extract.
 */
1971 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1973 struct mgmt_mode *cp = data;
1974 struct mgmt_pending_cmd *cmd;
1978 bt_dev_dbg(hdev, "sock %p", sk);
1980 status = mgmt_bredr_support(hdev);
1982 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1984 if (!lmp_ssp_capable(hdev))
1985 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1986 MGMT_STATUS_NOT_SUPPORTED);
1988 if (cp->val != 0x00 && cp->val != 0x01)
1989 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1990 MGMT_STATUS_INVALID_PARAMS);
1994 if (!hdev_is_powered(hdev)) {
1998 changed = !hci_dev_test_and_set_flag(hdev,
2001 changed = hci_dev_test_and_clear_flag(hdev,
2004 changed = hci_dev_test_and_clear_flag(hdev,
2007 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
2010 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2015 err = new_settings(hdev, sk);
2020 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2021 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2026 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
2027 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2031 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
2035 err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
2039 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2040 MGMT_STATUS_FAILED);
2043 mgmt_pending_remove(cmd);
2047 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS (High Speed) handler: gated on CONFIG_BT_HS, BR/EDR,
 * SSP capability and SSP being enabled. Purely a flag toggle — no HCI
 * traffic; disabling while powered is rejected. NOTE(review):
 * declarations, the status check after mgmt_bredr_support(), BUSY
 * argument, if (cp->val)/else structure and goto labels are elided.
 */
2051 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2053 struct mgmt_mode *cp = data;
2058 bt_dev_dbg(hdev, "sock %p", sk);
2060 if (!IS_ENABLED(CONFIG_BT_HS))
2061 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2062 MGMT_STATUS_NOT_SUPPORTED);
2064 status = mgmt_bredr_support(hdev);
2066 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2068 if (!lmp_ssp_capable(hdev))
2069 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2070 MGMT_STATUS_NOT_SUPPORTED);
/* HS rides on top of SSP; without SSP enabled it is meaningless. */
2072 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
2073 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2074 MGMT_STATUS_REJECTED);
2076 if (cp->val != 0x00 && cp->val != 0x01)
2077 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2078 MGMT_STATUS_INVALID_PARAMS);
2082 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
2083 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2089 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
2091 if (hdev_is_powered(hdev)) {
2092 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2093 MGMT_STATUS_REJECTED);
2097 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
2100 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2105 err = new_settings(hdev, sk);
2108 hci_dev_unlock(hdev);
/* Completion for SET_LE: on error fail all pending SET_LE commands
 * with the mapped status; on success answer them with the new
 * settings and broadcast NEW_SETTINGS to the remaining listeners.
 * NOTE(review): the if (status)/else structure is elided here.
 */
2112 static void set_le_complete(struct hci_dev *hdev, void *data, int err)
2114 struct cmd_lookup match = { NULL, hdev };
2115 u8 status = mgmt_status(err);
2117 bt_dev_dbg(hdev, "err %d", err);
2120 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2125 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2127 new_settings(hdev, match.sk);
/* hci_sync worker for SET_LE: when disabling, tear down advertising
 * (instances, legacy adv, ext-adv set 0); when enabling, set the flag
 * and refresh the default advertising/scan-response data; writes the
 * LE-host-supported setting either way and finally refreshes passive
 * scanning. NOTE(review): the enable/disable branch structure, the
 * val/status declarations and several guards are elided here.
 */
2133 static int set_le_sync(struct hci_dev *hdev, void *data)
2135 struct mgmt_pending_cmd *cmd = data;
2136 struct mgmt_mode *cp = cmd->param;
2141 hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
2143 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
2144 hci_disable_advertising_sync(hdev);
2146 if (ext_adv_capable(hdev))
2147 hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk)
2149 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
2152 err = hci_write_le_host_supported_sync(hdev, val, 0);
2154 /* Make sure the controller has a good default for
2155 * advertising data. Restrict the update to when LE
2156 * has actually been enabled. During power on, the
2157 * update in powered_update_hci will take care of it.
2159 if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2160 if (ext_adv_capable(hdev)) {
2163 status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
2165 hci_update_scan_rsp_data_sync(hdev, 0x00);
2167 hci_update_adv_data_sync(hdev, 0x00);
2168 hci_update_scan_rsp_data_sync(hdev, 0x00);
2171 hci_update_passive_scan(hdev);
/* Completion for SET_MESH_RECEIVER: on error fail every pending
 * SET_MESH_RECEIVER with the mapped status, otherwise remove this
 * pending command and complete it with success. NOTE(review): the
 * if (status)/else structure is elided in this extract.
 */
2177 static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
2179 struct mgmt_pending_cmd *cmd = data;
2180 u8 status = mgmt_status(err);
2181 struct sock *sk = cmd->sk;
2184 mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev,
2185 cmd_status_rsp, &status);
2189 mgmt_pending_remove(cmd);
2190 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
/* hci_sync worker for SET_MESH_RECEIVER: set/clear HCI_MESH per
 * cp->enable, copy the caller's AD-type filter list (forwarding all
 * adv packets when the list would not fit), then refresh passive
 * scanning. NOTE(review): the enable branch and the adjustment of
 * len to the ad_types portion are elided in this extract.
 */
2193 static int set_mesh_sync(struct hci_dev *hdev, void *data)
2195 struct mgmt_pending_cmd *cmd = data;
2196 struct mgmt_cp_set_mesh *cp = cmd->param;
2197 size_t len = cmd->param_len;
2199 memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
2202 hci_dev_set_flag(hdev, HCI_MESH);
2204 hci_dev_clear_flag(hdev, HCI_MESH);
2208 /* If filters don't fit, forward all adv pkts */
2209 if (len <= sizeof(hdev->mesh_ad_types))
2210 memcpy(hdev->mesh_ad_types, cp->ad_types, len);
2212 hci_update_passive_scan_sync(hdev);
/* MGMT_OP_SET_MESH_RECEIVER handler: requires LE capability and the
 * mesh-experimental flag; validates the enable byte and queues
 * set_mesh_sync with set_mesh_complete. NOTE(review): declarations,
 * the completion argument of hci_cmd_sync_queue, the !cmd path and
 * goto labels are elided in this extract.
 */
2216 static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2218 struct mgmt_cp_set_mesh *cp = data;
2219 struct mgmt_pending_cmd *cmd;
2222 bt_dev_dbg(hdev, "sock %p", sk);
2224 if (!lmp_le_capable(hdev) ||
2225 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2226 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2227 MGMT_STATUS_NOT_SUPPORTED);
2229 if (cp->enable != 0x00 && cp->enable != 0x01)
2230 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2231 MGMT_STATUS_INVALID_PARAMS);
2235 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len);
2239 err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd,
2243 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
2244 MGMT_STATUS_FAILED);
2247 mgmt_pending_remove(cmd);
2250 hci_dev_unlock(hdev);
/* Completion for a queued mesh transmission start: on error clear the
 * sending flag and complete the TX with failure; on success schedule
 * mesh_send_done after roughly cnt * 25 ms (the advertising air time
 * for the requested repeat count). NOTE(review): the error branch
 * structure is elided in this extract.
 */
2254 static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err)
2256 struct mgmt_mesh_tx *mesh_tx = data;
2257 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2258 unsigned long mesh_send_interval;
2259 u8 mgmt_err = mgmt_status(err);
2261 /* Report any errors here, but don't report completion */
2264 hci_dev_clear_flag(hdev, HCI_MESH_SENDING);
2265 /* Send Complete Error Code for handle */
2266 mesh_send_complete(hdev, mesh_tx, false);
2270 mesh_send_interval = msecs_to_jiffies((send->cnt) * 25);
2271 queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done,
2272 mesh_send_interval);
/* hci_sync worker that starts a mesh transmission: installs the mesh
 * payload as a temporary advertising instance (one past the
 * controller's adv-set count) and schedules it, preempting the
 * current instance when necessary. Returns MGMT_STATUS_BUSY when no
 * free adv set exists. NOTE(review): several argument lines of
 * hci_add_adv_instance, the !adv error path, timeout setup and
 * branch closings are elided in this extract.
 */
2275 static int mesh_send_sync(struct hci_dev *hdev, void *data)
2277 struct mgmt_mesh_tx *mesh_tx = data;
2278 struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param;
2279 struct adv_info *adv, *next_instance;
2280 u8 instance = hdev->le_num_of_adv_sets + 1;
2281 u16 timeout, duration;
2284 if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt)
2285 return MGMT_STATUS_BUSY;
2288 duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval);
2289 adv = hci_add_adv_instance(hdev, instance, 0,
2290 send->adv_data_len, send->adv_data,
2293 HCI_ADV_TX_POWER_NO_PREFERENCE,
2294 hdev->le_adv_min_interval,
2295 hdev->le_adv_max_interval,
2299 mesh_tx->instance = instance;
2303 if (hdev->cur_adv_instance == instance) {
2304 /* If the currently advertised instance is being changed then
2305 * cancel the current advertising and schedule the next
2306 * instance. If there is only one instance then the overridden
2307 * advertising data will be visible right away.
2309 cancel_adv_timeout(hdev);
2311 next_instance = hci_get_next_instance(hdev, instance);
2313 instance = next_instance->instance;
2316 } else if (hdev->adv_instance_timeout) {
2317 /* Immediately advertise the new instance if no other, or
2318 * let it go naturally from queue if ADV is already happening
2324 return hci_schedule_adv_instance_sync(hdev, instance, true);
2329 static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data)
2331 struct mgmt_rp_mesh_read_features *rp = data;
2333 if (rp->used_handles >= rp->max_handles)
2336 rp->handles[rp->used_handles++] = mesh_tx->handle;
/* MGMT_OP_MESH_READ_FEATURES handler: reply with the controller
 * index, max handle count (only when LE is enabled) and the list of
 * currently outstanding TX handles belonging to this socket. The
 * reply is truncated to the used portion of the handles array.
 * NOTE(review): hci_dev_lock() and a return line are elided here.
 */
2339 static int mesh_features(struct sock *sk, struct hci_dev *hdev,
2340 void *data, u16 len)
2342 struct mgmt_rp_mesh_read_features rp;
2344 if (!lmp_le_capable(hdev) ||
2345 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2346 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES,
2347 MGMT_STATUS_NOT_SUPPORTED);
2349 memset(&rp, 0, sizeof(rp));
2350 rp.index = cpu_to_le16(hdev->id);
2351 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2352 rp.max_handles = MESH_HANDLES_MAX;
2357 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2359 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp,
2360 rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX);
2362 hci_dev_unlock(hdev);
/* hci_sync worker for MESH_SEND_CANCEL: handle 0 cancels every
 * outstanding TX owned by this socket; a non-zero handle cancels only
 * the matching TX and only if it belongs to the caller. Then complete
 * the cancel command and free it. NOTE(review): the loop around
 * mgmt_mesh_next(), else branch and the status argument of
 * mgmt_cmd_complete are elided in this extract.
 */
2366 static int send_cancel(struct hci_dev *hdev, void *data)
2368 struct mgmt_pending_cmd *cmd = data;
2369 struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param;
2370 struct mgmt_mesh_tx *mesh_tx;
2372 if (!cancel->handle) {
2374 mesh_tx = mgmt_mesh_next(hdev, cmd->sk);
2377 mesh_send_complete(hdev, mesh_tx, false);
2380 mesh_tx = mgmt_mesh_find(hdev, cancel->handle);
/* Ownership check: only the socket that queued the TX may cancel it. */
2382 if (mesh_tx && mesh_tx->sk == cmd->sk)
2383 mesh_send_complete(hdev, mesh_tx, false);
2386 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2388 mgmt_pending_free(cmd);
/* MGMT_OP_MESH_SEND_CANCEL handler: requires LE + mesh-experimental
 * and LE enabled; allocates an unlisted pending command
 * (mgmt_pending_new) and queues the send_cancel worker.
 * NOTE(review): hci_dev_lock(), the !cmd path and goto labels are
 * elided in this extract.
 */
2393 static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev,
2394 void *data, u16 len)
2396 struct mgmt_pending_cmd *cmd;
2399 if (!lmp_le_capable(hdev) ||
2400 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2401 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2402 MGMT_STATUS_NOT_SUPPORTED);
2404 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
2405 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2406 MGMT_STATUS_REJECTED);
2409 cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len);
2413 err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL);
2416 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL,
2417 MGMT_STATUS_FAILED);
2420 mgmt_pending_free(cmd);
2423 hci_dev_unlock(hdev);
/* MGMT_OP_MESH_SEND handler: validates capability and payload length
 * (must carry 1..31 bytes of adv data beyond the fixed header),
 * refuses when all TX handles are in use, allocates a mgmt_mesh_tx
 * and queues mesh_send_sync. On success, sets HCI_MESH_SENDING (if
 * not already sending) and immediately completes the command with the
 * assigned handle. NOTE(review): declarations (err, sending), the
 * BUSY status, !mesh_tx path, the "if (!sending)" guard and goto
 * labels are elided in this extract.
 */
2427 static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2429 struct mgmt_mesh_tx *mesh_tx;
2430 struct mgmt_cp_mesh_send *send = data;
2431 struct mgmt_rp_mesh_read_features rp;
2435 if (!lmp_le_capable(hdev) ||
2436 !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
2437 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2438 MGMT_STATUS_NOT_SUPPORTED);
2439 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
2440 len <= MGMT_MESH_SEND_SIZE ||
2441 len > (MGMT_MESH_SEND_SIZE + 31))
2442 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2443 MGMT_STATUS_REJECTED);
/* Reuse the read_features reply struct just to count used handles. */
2447 memset(&rp, 0, sizeof(rp));
2448 rp.max_handles = MESH_HANDLES_MAX;
2450 mgmt_mesh_foreach(hdev, send_count, &rp, sk);
2452 if (rp.max_handles <= rp.used_handles) {
2453 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2458 sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING);
2459 mesh_tx = mgmt_mesh_add(sk, hdev, send, len);
2464 err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx,
2465 mesh_send_start_complete);
2468 bt_dev_err(hdev, "Send Mesh Failed %d", err);
2469 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND,
2470 MGMT_STATUS_FAILED);
2474 mgmt_mesh_remove(mesh_tx);
2477 hci_dev_set_flag(hdev, HCI_MESH_SENDING);
2479 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0,
2480 &mesh_tx->handle, 1);
2484 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler: rejects disabling LE on LE-only
 * controllers, handles the powered-off / no-change case as a pure
 * flag update (also clearing HCI_ADVERTISING when disabling), rejects
 * while SET_LE or SET_ADVERTISING is pending, otherwise queues
 * set_le_sync. NOTE(review): declarations (val, enabled, err),
 * hci_dev_lock(), BUSY status, the completion argument of
 * hci_cmd_sync_queue and goto labels are elided in this extract.
 */
2488 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2490 struct mgmt_mode *cp = data;
2491 struct mgmt_pending_cmd *cmd;
2495 bt_dev_dbg(hdev, "sock %p", sk);
2497 if (!lmp_le_capable(hdev))
2498 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2499 MGMT_STATUS_NOT_SUPPORTED);
2501 if (cp->val != 0x00 && cp->val != 0x01)
2502 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2503 MGMT_STATUS_INVALID_PARAMS);
2505 /* Bluetooth single mode LE only controllers or dual-mode
2506 * controllers configured as LE only devices, do not allow
2507 * switching LE off. These have either LE enabled explicitly
2508 * or BR/EDR has been previously switched off.
2510 * When trying to enable an already enabled LE, then gracefully
2511 * send a positive response. Trying to disable it however will
2512 * result into rejection.
2514 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
2515 if (cp->val == 0x01)
2516 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2518 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2519 MGMT_STATUS_REJECTED);
2525 enabled = lmp_host_le_capable(hdev);
2527 if (!hdev_is_powered(hdev) || val == enabled) {
2528 bool changed = false;
2530 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2531 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
/* Turning LE off also invalidates the advertising setting. */
2535 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
2536 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
2540 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2545 err = new_settings(hdev, sk);
2550 if (pending_find(MGMT_OP_SET_LE, hdev) ||
2551 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2552 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2557 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2561 err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
2565 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2566 MGMT_STATUS_FAILED);
2569 mgmt_pending_remove(cmd);
2573 hci_dev_unlock(hdev);
2577 /* This is a helper function to test for pending mgmt commands that can
2578 * cause CoD or EIR HCI commands. We can only allow one such pending
2579 * mgmt command at a time since otherwise we cannot easily track what
2580 * the current values are, will be, and based on that calculate if a new
2581 * HCI command needs to be sent and if yes with what value.
/* Returns true when any UUID/class/power change is already pending.
 * NOTE(review): the "return true"/"return false" lines and the
 * switch closing are elided in this extract.
 */
2583 static bool pending_eir_or_class(struct hci_dev *hdev)
2585 struct mgmt_pending_cmd *cmd;
2587 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2588 switch (cmd->opcode) {
2589 case MGMT_OP_ADD_UUID:
2590 case MGMT_OP_REMOVE_UUID:
2591 case MGMT_OP_SET_DEV_CLASS:
2592 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * little-endian; SIG-assigned 16/32-bit UUIDs share its last 12 bytes.
 */
2600 static const u8 bluetooth_base_uuid[] = {
2601 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2602 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID: if it is not derived from the Bluetooth
 * Base UUID it is a true 128-bit UUID; otherwise the top 32 bits
 * decide between a 16- and 32-bit short form. NOTE(review): the
 * return statements (128 / 32 / 16) are elided in this extract.
 */
2605 static u8 get_uuid_size(const u8 *uuid)
2609 if (memcmp(uuid, bluetooth_base_uuid, 12))
2612 val = get_unaligned_le32(&uuid[12]);
/* Shared completion for UUID/class commands: complete the pending
 * command with the (possibly updated) 3-byte class of device and free
 * the unlisted command (allocated via mgmt_pending_new).
 */
2619 static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
2621 struct mgmt_pending_cmd *cmd = data;
2623 bt_dev_dbg(hdev, "err %d", err);
2625 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2626 mgmt_status(err), hdev->dev_class, 3);
2628 mgmt_pending_free(cmd);
/* hci_sync worker for ADD_UUID: refresh class of device, then the
 * EIR. NOTE(review): the error check between the two calls is elided
 * in this extract.
 */
2631 static int add_uuid_sync(struct hci_dev *hdev, void *data)
2635 err = hci_update_class_sync(hdev);
2639 return hci_update_eir_sync(hdev);
/* MGMT_OP_ADD_UUID handler: reject while another EIR/class-affecting
 * command is pending, append the UUID (with service hint and computed
 * size) to hdev->uuids, and queue add_uuid_sync with
 * mgmt_class_complete. NOTE(review): hci_dev_lock(), the BUSY status,
 * !uuid/!cmd error paths and goto labels are elided in this extract.
 */
2642 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2644 struct mgmt_cp_add_uuid *cp = data;
2645 struct mgmt_pending_cmd *cmd;
2646 struct bt_uuid *uuid;
2649 bt_dev_dbg(hdev, "sock %p", sk);
2653 if (pending_eir_or_class(hdev)) {
2654 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2659 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2665 memcpy(uuid->uuid, cp->uuid, 16);
2666 uuid->svc_hint = cp->svc_hint;
2667 uuid->size = get_uuid_size(cp->uuid);
2669 list_add_tail(&uuid->list, &hdev->uuids);
2671 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2677 err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
2679 mgmt_pending_free(cmd);
2684 hci_dev_unlock(hdev);
/* Arm the service-cache delayed work the first time this is called
 * while powered; returns whether the cache mechanism applies (false
 * when powered off). NOTE(review): the return values and the delay
 * constant are elided in this extract.
 */
2688 static bool enable_service_cache(struct hci_dev *hdev)
2690 if (!hdev_is_powered(hdev))
2693 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2694 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* hci_sync worker for REMOVE_UUID: refresh class of device, then the
 * EIR. NOTE(review): the error check between the two calls is elided
 * in this extract.
 */
2702 static int remove_uuid_sync(struct hci_dev *hdev, void *data)
2706 err = hci_update_class_sync(hdev);
2710 return hci_update_eir_sync(hdev);
/* MGMT_OP_REMOVE_UUID handler: the all-zero UUID clears the whole
 * list (completing immediately if the service cache absorbs the
 * change); otherwise every matching entry is unlinked and freed, with
 * INVALID_PARAMS when nothing matched. Finishes by queueing
 * remove_uuid_sync. NOTE(review): hci_dev_lock(), BUSY status, the
 * "found" counter, kfree of matches, !cmd path and goto labels are
 * elided in this extract.
 */
2713 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2716 struct mgmt_cp_remove_uuid *cp = data;
2717 struct mgmt_pending_cmd *cmd;
2718 struct bt_uuid *match, *tmp;
2719 static const u8 bt_uuid_any[] = {
2720 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
2724 bt_dev_dbg(hdev, "sock %p", sk);
2728 if (pending_eir_or_class(hdev)) {
2729 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2734 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2735 hci_uuids_clear(hdev);
2737 if (enable_service_cache(hdev)) {
2738 err = mgmt_cmd_complete(sk, hdev->id,
2739 MGMT_OP_REMOVE_UUID,
2740 0, hdev->dev_class, 3);
2749 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2750 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2753 list_del(&match->list);
2759 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2760 MGMT_STATUS_INVALID_PARAMS);
2765 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2771 err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
2772 mgmt_class_complete);
2774 mgmt_pending_free(cmd);
2777 hci_dev_unlock(hdev);
/* set_class_sync - hci_cmd_sync work for SET_DEV_CLASS: if the service
 * cache was active, disarm it (cancelling its delayed work) and refresh
 * EIR first, then write the new class of device.
 */
2781 static int set_class_sync(struct hci_dev *hdev, void *data)
2785 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2786 cancel_delayed_work_sync(&hdev->service_cache);
2787 err = hci_update_eir_sync(hdev);
2793 return hci_update_class_sync(hdev);
/* set_dev_class - MGMT_OP_SET_DEV_CLASS handler: validate and store the
 * major/minor device class, and push it to the controller when powered.
 */
2796 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2799 struct mgmt_cp_set_dev_class *cp = data;
2800 struct mgmt_pending_cmd *cmd;
2803 bt_dev_dbg(hdev, "sock %p", sk);
/* Class of device only exists for BR/EDR-capable controllers. */
2805 if (!lmp_bredr_capable(hdev))
2806 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2807 MGMT_STATUS_NOT_SUPPORTED);
2811 if (pending_eir_or_class(hdev)) {
2812 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved bits: minor class low 2 bits and major class top 3 bits
 * must be zero per the CoD format. */
2817 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2818 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2819 MGMT_STATUS_INVALID_PARAMS);
2823 hdev->major_class = cp->major;
2824 hdev->minor_class = cp->minor;
/* Powered off: just store the values; they reach the controller on
 * power-on, so complete immediately. */
2826 if (!hdev_is_powered(hdev)) {
2827 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2828 hdev->dev_class, 3);
2832 cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2838 err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
2839 mgmt_class_complete);
2841 mgmt_pending_free(cmd);
2844 hci_dev_unlock(hdev);
/* load_link_keys - MGMT_OP_LOAD_LINK_KEYS handler: replace the stored
 * BR/EDR link keys with the user-space supplied set and update the
 * HCI_KEEP_DEBUG_KEYS policy flag.
 */
2848 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2851 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound that keeps struct_size() below U16_MAX (len is u16). */
2852 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2853 sizeof(struct mgmt_link_key_info));
2854 u16 key_count, expected_len;
2858 bt_dev_dbg(hdev, "sock %p", sk);
2860 if (!lmp_bredr_capable(hdev))
2861 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2862 MGMT_STATUS_NOT_SUPPORTED);
2864 key_count = __le16_to_cpu(cp->key_count);
2865 if (key_count > max_key_count) {
2866 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2868 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2869 MGMT_STATUS_INVALID_PARAMS);
/* The payload must be exactly header + key_count entries. */
2872 expected_len = struct_size(cp, keys, key_count);
2873 if (expected_len != len) {
2874 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2876 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2877 MGMT_STATUS_INVALID_PARAMS);
2880 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2881 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2882 MGMT_STATUS_INVALID_PARAMS);
2884 bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
/* First pass: validate every entry before mutating any state. */
2887 for (i = 0; i < key_count; i++) {
2888 struct mgmt_link_key_info *key = &cp->keys[i];
/* Link keys are BR/EDR only; 0x08 is the highest defined key type. */
2890 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2891 return mgmt_cmd_status(sk, hdev->id,
2892 MGMT_OP_LOAD_LINK_KEYS,
2893 MGMT_STATUS_INVALID_PARAMS);
2898 hci_link_keys_clear(hdev);
2901 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2903 changed = hci_dev_test_and_clear_flag(hdev,
2904 HCI_KEEP_DEBUG_KEYS);
2907 new_settings(hdev, NULL);
/* Second pass: store the keys, skipping blocked and debug keys. */
2909 for (i = 0; i < key_count; i++) {
2910 struct mgmt_link_key_info *key = &cp->keys[i];
2912 if (hci_is_blocked_key(hdev,
2913 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2915 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2920 /* Always ignore debug keys and require a new pairing if
2921 * the user wants to use them.
2923 if (key->type == HCI_LK_DEBUG_COMBINATION)
2926 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2927 key->type, key->pin_len, NULL);
2930 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2932 hci_dev_unlock(hdev);
/* device_unpaired - broadcast MGMT_EV_DEVICE_UNPAIRED for the given
 * address to all mgmt sockets except @skip_sk (usually the requester,
 * who gets a command reply instead).
 */
2937 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2938 u8 addr_type, struct sock *skip_sk)
2940 struct mgmt_ev_device_unpaired ev;
2942 bacpy(&ev.addr.bdaddr, bdaddr);
2943 ev.addr.type = addr_type;
2945 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* unpair_device_complete - hci_cmd_sync completion for UNPAIR_DEVICE:
 * emit the unpaired event (skipping the requester's socket), run the
 * stored cmd_complete callback and release the pending command.
 */
2949 static void unpair_device_complete(struct hci_dev *hdev, void *data, int err)
2951 struct mgmt_pending_cmd *cmd = data;
2952 struct mgmt_cp_unpair_device *cp = cmd->param;
2955 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
2957 cmd->cmd_complete(cmd, err);
2958 mgmt_pending_free(cmd);
/* unpair_device_sync - hci_cmd_sync work for UNPAIR_DEVICE: if the
 * device is still connected (BR/EDR or LE depending on the address
 * type), terminate the link with "remote user terminated".
 */
2961 static int unpair_device_sync(struct hci_dev *hdev, void *data)
2963 struct mgmt_pending_cmd *cmd = data;
2964 struct mgmt_cp_unpair_device *cp = cmd->param;
2965 struct hci_conn *conn;
2967 if (cp->addr.type == BDADDR_BREDR)
2968 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2971 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2972 le_addr_type(cp->addr.type));
2977 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* unpair_device - MGMT_OP_UNPAIR_DEVICE handler: remove the pairing
 * material for a device (link key for BR/EDR; SMP LTK/IRK for LE) and,
 * if requested and the device is connected, disconnect it.
 * NOTE(review): elided chunk — several goto labels / early completions
 * between the visible lines are not shown.
 */
2980 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2983 struct mgmt_cp_unpair_device *cp = data;
2984 struct mgmt_rp_unpair_device rp;
2985 struct hci_conn_params *params;
2986 struct mgmt_pending_cmd *cmd;
2987 struct hci_conn *conn;
/* The reply always echoes the target address back to user space. */
2991 memset(&rp, 0, sizeof(rp));
2992 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2993 rp.addr.type = cp->addr.type;
2995 if (!bdaddr_type_is_valid(cp->addr.type))
2996 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2997 MGMT_STATUS_INVALID_PARAMS,
3000 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
3001 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3002 MGMT_STATUS_INVALID_PARAMS,
3007 if (!hdev_is_powered(hdev)) {
3008 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3009 MGMT_STATUS_NOT_POWERED, &rp,
3014 if (cp->addr.type == BDADDR_BREDR) {
3015 /* If disconnection is requested, then look up the
3016 * connection. If the remote device is connected, it
3017 * will be later used to terminate the link.
3019 * Setting it to NULL explicitly will cause no
3020 * termination of the link.
3023 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3028 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
3030 err = mgmt_cmd_complete(sk, hdev->id,
3031 MGMT_OP_UNPAIR_DEVICE,
3032 MGMT_STATUS_NOT_PAIRED, &rp,
3040 /* LE address type */
3041 addr_type = le_addr_type(cp->addr.type);
3043 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
3044 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
3046 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
3047 MGMT_STATUS_NOT_PAIRED, &rp,
3052 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
/* Not connected: the stored connection parameters can go right away. */
3054 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
3059 /* Defer clearing up the connection parameters until closing to
3060 * give a chance of keeping them if a repairing happens.
3062 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
3064 /* Disable auto-connection parameters if present */
3065 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
3067 if (params->explicit_connect)
3068 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3070 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3073 /* If disconnection is not requested, then clear the connection
3074 * variable so that the link is not terminated.
3076 if (!cp->disconnect)
3080 /* If the connection variable is set, then termination of the
3081 * link is requested.
3084 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
3086 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
3090 cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
3097 cmd->cmd_complete = addr_cmd_complete;
3099 err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd,
3100 unpair_device_complete);
3102 mgmt_pending_free(cmd);
3105 hci_dev_unlock(hdev);
/* disconnect - MGMT_OP_DISCONNECT handler: validate the request, find
 * the matching BR/EDR or LE connection, and issue an HCI disconnect
 * with "remote user terminated" as the reason.
 */
3109 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
3112 struct mgmt_cp_disconnect *cp = data;
3113 struct mgmt_rp_disconnect rp;
3114 struct mgmt_pending_cmd *cmd;
3115 struct hci_conn *conn;
3118 bt_dev_dbg(hdev, "sock %p", sk);
3120 memset(&rp, 0, sizeof(rp));
3121 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3122 rp.addr.type = cp->addr.type;
3124 if (!bdaddr_type_is_valid(cp->addr.type))
3125 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3126 MGMT_STATUS_INVALID_PARAMS,
3131 if (!test_bit(HCI_UP, &hdev->flags)) {
3132 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3133 MGMT_STATUS_NOT_POWERED, &rp,
/* Only a single DISCONNECT may be pending per adapter. */
3138 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
3139 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3140 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3144 if (cp->addr.type == BDADDR_BREDR)
3145 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
3148 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
3149 le_addr_type(cp->addr.type));
/* BT_OPEN/BT_CLOSED states mean there is no live link to tear down. */
3151 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
3152 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
3153 MGMT_STATUS_NOT_CONNECTED, &rp,
3158 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
3164 cmd->cmd_complete = generic_cmd_complete;
3166 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
3168 mgmt_pending_remove(cmd);
3171 hci_dev_unlock(hdev);
/* link_to_bdaddr - translate an HCI link type + LE address type into
 * the mgmt BDADDR_* address-type encoding used on the wire.
 */
3175 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
3177 switch (link_type) {
3179 switch (addr_type) {
3180 case ADDR_LE_DEV_PUBLIC:
3181 return BDADDR_LE_PUBLIC;
3184 /* Fallback to LE Random address type */
3185 return BDADDR_LE_RANDOM;
3189 /* Fallback to BR/EDR type */
3190 return BDADDR_BREDR;
/* get_connections - MGMT_OP_GET_CONNECTIONS handler: report the address
 * of every mgmt-visible connection.  SCO/eSCO links are filtered out of
 * the reply (they share the peer's ACL address).
 */
3194 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
3197 struct mgmt_rp_get_connections *rp;
3202 bt_dev_dbg(hdev, "sock %p", sk);
3206 if (!hdev_is_powered(hdev)) {
3207 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
3208 MGMT_STATUS_NOT_POWERED);
/* First pass: count eligible connections to size the reply buffer. */
3213 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3214 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3218 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO entries. */
3225 list_for_each_entry(c, &hdev->conn_hash.list, list) {
3226 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
3228 bacpy(&rp->addr[i].bdaddr, &c->dst);
3229 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
3230 if (c->type == SCO_LINK || c->type == ESCO_LINK)
3235 rp->conn_count = cpu_to_le16(i);
3237 /* Recalculate length in case of filtered SCO connections, etc */
3238 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
3239 struct_size(rp, addr, i));
3244 hci_dev_unlock(hdev);
/* send_pin_code_neg_reply - queue a pending PIN_CODE_NEG_REPLY command
 * and send the corresponding HCI negative reply for the given address.
 */
3248 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3249 struct mgmt_cp_pin_code_neg_reply *cp)
3251 struct mgmt_pending_cmd *cmd;
3254 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
3259 cmd->cmd_complete = addr_cmd_complete;
3261 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3262 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
/* On send failure the pending entry must not linger. */
3264 mgmt_pending_remove(cmd);
/* pin_code_reply - MGMT_OP_PIN_CODE_REPLY handler: forward a PIN code
 * to the controller for the connected BR/EDR peer.  If the link
 * requires a 16-byte PIN but a shorter one was supplied, a negative
 * reply is sent instead and the command fails with INVALID_PARAMS.
 */
3269 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3272 struct hci_conn *conn;
3273 struct mgmt_cp_pin_code_reply *cp = data;
3274 struct hci_cp_pin_code_reply reply;
3275 struct mgmt_pending_cmd *cmd;
3278 bt_dev_dbg(hdev, "sock %p", sk);
3282 if (!hdev_is_powered(hdev)) {
3283 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3284 MGMT_STATUS_NOT_POWERED);
3288 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
3290 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3291 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-digit PIN; reject shorter ones. */
3295 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
3296 struct mgmt_cp_pin_code_neg_reply ncp;
3298 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
3300 bt_dev_err(hdev, "PIN code is not 16 bytes long");
3302 err = send_pin_code_neg_reply(sk, hdev, &ncp);
3304 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3305 MGMT_STATUS_INVALID_PARAMS);
3310 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
3316 cmd->cmd_complete = addr_cmd_complete;
3318 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
3319 reply.pin_len = cp->pin_len;
3320 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
3322 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
3324 mgmt_pending_remove(cmd);
3327 hci_dev_unlock(hdev);
/* set_io_capability - MGMT_OP_SET_IO_CAPABILITY handler: store the
 * adapter's IO capability used for future pairings.
 */
3331 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3334 struct mgmt_cp_set_io_capability *cp = data;
3336 bt_dev_dbg(hdev, "sock %p", sk);
/* KEYBOARD_DISPLAY is the highest defined capability value. */
3338 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3339 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3340 MGMT_STATUS_INVALID_PARAMS);
3344 hdev->io_capability = cp->io_capability;
3346 bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
3348 hci_dev_unlock(hdev);
3350 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* find_pairing - locate the pending PAIR_DEVICE command whose user_data
 * points at @conn, or NULL if this connection has no pairing in flight.
 */
3354 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
3356 struct hci_dev *hdev = conn->hdev;
3357 struct mgmt_pending_cmd *cmd;
3359 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3360 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3363 if (cmd->user_data != conn)
/* pairing_complete - finish a PAIR_DEVICE command: reply to user space
 * with the peer address and status, detach the pairing callbacks from
 * the connection, and drop the reference taken when pairing started.
 */
3372 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
3374 struct mgmt_rp_pair_device rp;
3375 struct hci_conn *conn = cmd->user_data;
3378 bacpy(&rp.addr.bdaddr, &conn->dst);
3379 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3381 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
3382 status, &rp, sizeof(rp));
3384 /* So we don't get further callbacks for this connection */
3385 conn->connect_cfm_cb = NULL;
3386 conn->security_cfm_cb = NULL;
3387 conn->disconn_cfm_cb = NULL;
3389 hci_conn_drop(conn);
3391 /* The device is paired so there is no need to remove
3392 * its connection parameters anymore.
3394 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* mgmt_smp_complete - called by SMP when a pairing attempt ends; maps
 * the boolean outcome to a mgmt status and completes any pending
 * PAIR_DEVICE command for this connection.
 */
3401 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3403 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3404 struct mgmt_pending_cmd *cmd;
3406 cmd = find_pairing(conn);
3408 cmd->cmd_complete(cmd, status);
3409 mgmt_pending_remove(cmd);
/* pairing_complete_cb - BR/EDR connection callback: complete the
 * pending PAIR_DEVICE command with the HCI status translated to a
 * mgmt status.
 */
3413 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3415 struct mgmt_pending_cmd *cmd;
3417 BT_DBG("status %u", status);
3419 cmd = find_pairing(conn);
3421 BT_DBG("Unable to find a pending command");
3425 cmd->cmd_complete(cmd, mgmt_status(status));
3426 mgmt_pending_remove(cmd);
/* le_pairing_complete_cb - LE connection callback: like
 * pairing_complete_cb but for LE links.  NOTE(review): an early-return
 * condition between the visible lines (original lines 3434-3437) is
 * elided here.
 */
3429 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3431 struct mgmt_pending_cmd *cmd;
3433 BT_DBG("status %u", status);
3438 cmd = find_pairing(conn);
3440 BT_DBG("Unable to find a pending command");
3444 cmd->cmd_complete(cmd, mgmt_status(status));
3445 mgmt_pending_remove(cmd);
/* pair_device - MGMT_OP_PAIR_DEVICE handler: initiate dedicated bonding
 * with a remote device over BR/EDR or LE, wiring pairing-completion
 * callbacks into the resulting connection.
 */
3448 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3451 struct mgmt_cp_pair_device *cp = data;
3452 struct mgmt_rp_pair_device rp;
3453 struct mgmt_pending_cmd *cmd;
3454 u8 sec_level, auth_type;
3455 struct hci_conn *conn;
3458 bt_dev_dbg(hdev, "sock %p", sk);
3460 memset(&rp, 0, sizeof(rp));
3461 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3462 rp.addr.type = cp->addr.type;
3464 if (!bdaddr_type_is_valid(cp->addr.type))
3465 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3466 MGMT_STATUS_INVALID_PARAMS,
3469 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3470 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3471 MGMT_STATUS_INVALID_PARAMS,
3476 if (!hdev_is_powered(hdev)) {
3477 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3478 MGMT_STATUS_NOT_POWERED, &rp,
3483 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3484 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3485 MGMT_STATUS_ALREADY_PAIRED, &rp,
3490 sec_level = BT_SECURITY_MEDIUM;
3491 auth_type = HCI_AT_DEDICATED_BONDING;
3493 if (cp->addr.type == BDADDR_BREDR) {
3494 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3495 auth_type, CONN_REASON_PAIR_DEVICE);
3497 u8 addr_type = le_addr_type(cp->addr.type);
3498 struct hci_conn_params *p;
3500 /* When pairing a new device, it is expected to remember
3501 * this device for future connections. Adding the connection
3502 * parameter information ahead of time allows tracking
3503 * of the peripheral preferred values and will speed up any
3504 * further connection establishment.
3506 * If connection parameters already exist, then they
3507 * will be kept and this function does nothing.
3509 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3511 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3512 p->auto_connect = HCI_AUTO_CONN_DISABLED;
3514 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3515 sec_level, HCI_LE_CONN_TIMEOUT,
3516 CONN_REASON_PAIR_DEVICE);
/* Connection attempt failed: translate errno to a mgmt status. */
3522 if (PTR_ERR(conn) == -EBUSY)
3523 status = MGMT_STATUS_BUSY;
3524 else if (PTR_ERR(conn) == -EOPNOTSUPP)
3525 status = MGMT_STATUS_NOT_SUPPORTED;
3526 else if (PTR_ERR(conn) == -ECONNREFUSED)
3527 status = MGMT_STATUS_REJECTED;
3529 status = MGMT_STATUS_CONNECT_FAILED;
3531 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3532 status, &rp, sizeof(rp));
/* A non-NULL connect callback means another pairing owns this link. */
3536 if (conn->connect_cfm_cb) {
3537 hci_conn_drop(conn);
3538 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3539 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3543 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3546 hci_conn_drop(conn);
3550 cmd->cmd_complete = pairing_complete;
3552 /* For LE, just connecting isn't a proof that the pairing finished */
3553 if (cp->addr.type == BDADDR_BREDR) {
3554 conn->connect_cfm_cb = pairing_complete_cb;
3555 conn->security_cfm_cb = pairing_complete_cb;
3556 conn->disconn_cfm_cb = pairing_complete_cb;
3558 conn->connect_cfm_cb = le_pairing_complete_cb;
3559 conn->security_cfm_cb = le_pairing_complete_cb;
3560 conn->disconn_cfm_cb = le_pairing_complete_cb;
3563 conn->io_capability = cp->io_cap;
/* Hold a reference for the pending command; released on completion. */
3564 cmd->user_data = hci_conn_get(conn);
3566 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3567 hci_conn_security(conn, sec_level, auth_type, true)) {
3568 cmd->cmd_complete(cmd, 0);
3569 mgmt_pending_remove(cmd);
3575 hci_dev_unlock(hdev);
/* abort_conn_sync - hci_cmd_sync work: abort the connection identified
 * by the handle smuggled through @data via ERR_PTR()/PTR_ERR().
 */
3579 static int abort_conn_sync(struct hci_dev *hdev, void *data)
3581 struct hci_conn *conn;
/* data is not a pointer: the caller encoded the u16 handle in it. */
3582 u16 handle = PTR_ERR(data);
3584 conn = hci_conn_hash_lookup_handle(hdev, handle);
3588 return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
/* cancel_pair_device - MGMT_OP_CANCEL_PAIR_DEVICE handler: abort the
 * in-flight PAIR_DEVICE for the given address, remove any partial
 * pairing material, and tear down the link if pairing created it.
 */
3591 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3594 struct mgmt_addr_info *addr = data;
3595 struct mgmt_pending_cmd *cmd;
3596 struct hci_conn *conn;
3599 bt_dev_dbg(hdev, "sock %p", sk);
3603 if (!hdev_is_powered(hdev)) {
3604 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3605 MGMT_STATUS_NOT_POWERED);
3609 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3611 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3612 MGMT_STATUS_INVALID_PARAMS);
3616 conn = cmd->user_data;
/* The supplied address must match the pairing actually in progress. */
3618 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3619 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3620 MGMT_STATUS_INVALID_PARAMS);
3624 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3625 mgmt_pending_remove(cmd);
3627 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3628 addr, sizeof(*addr));
3630 /* Since user doesn't want to proceed with the connection, abort any
3631 * ongoing pairing and then terminate the link if it was created
3632 * because of the pair device action.
3634 if (addr->type == BDADDR_BREDR)
3635 hci_remove_link_key(hdev, &addr->bdaddr);
3637 smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
3638 le_addr_type(addr->type));
3640 if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
/* Handle is passed by value inside the pointer (see abort_conn_sync). */
3641 hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle),
3645 hci_dev_unlock(hdev);
/* user_pairing_resp - common backend for all user confirmation/passkey
 * (neg-)replies: for LE addresses route the answer through SMP, for
 * BR/EDR send the corresponding HCI command to the controller.
 * @mgmt_op: mgmt opcode being answered; @hci_op: HCI opcode to send;
 * @passkey: only used when @hci_op is HCI_OP_USER_PASSKEY_REPLY.
 */
3649 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3650 struct mgmt_addr_info *addr, u16 mgmt_op,
3651 u16 hci_op, __le32 passkey)
3653 struct mgmt_pending_cmd *cmd;
3654 struct hci_conn *conn;
3659 if (!hdev_is_powered(hdev)) {
3660 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3661 MGMT_STATUS_NOT_POWERED, addr,
3666 if (addr->type == BDADDR_BREDR)
3667 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3669 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3670 le_addr_type(addr->type));
3673 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3674 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing answers are handled entirely by SMP, not by HCI. */
3679 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3680 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3682 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3683 MGMT_STATUS_SUCCESS, addr,
3686 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3687 MGMT_STATUS_FAILED, addr,
3693 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3699 cmd->cmd_complete = addr_cmd_complete;
3701 /* Continue with pairing via HCI */
/* Passkey replies carry an extra 4-byte payload; all other ops send
 * just the bdaddr. */
3702 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3703 struct hci_cp_user_passkey_reply cp;
3705 bacpy(&cp.bdaddr, &addr->bdaddr);
3706 cp.passkey = passkey;
3707 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3709 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3713 mgmt_pending_remove(cmd);
3716 hci_dev_unlock(hdev);
/* Thin mgmt wrappers: each unpacks its command parameters and delegates
 * to user_pairing_resp() with the matching mgmt/HCI opcode pair.
 */
3720 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3721 void *data, u16 len)
3723 struct mgmt_cp_pin_code_neg_reply *cp = data;
3725 bt_dev_dbg(hdev, "sock %p", sk);
3727 return user_pairing_resp(sk, hdev, &cp->addr,
3728 MGMT_OP_PIN_CODE_NEG_REPLY,
3729 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3732 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3735 struct mgmt_cp_user_confirm_reply *cp = data;
3737 bt_dev_dbg(hdev, "sock %p", sk);
/* Unlike its siblings this handler length-checks explicitly; the exact
 * reason is not visible in this chunk. */
3739 if (len != sizeof(*cp))
3740 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3741 MGMT_STATUS_INVALID_PARAMS);
3743 return user_pairing_resp(sk, hdev, &cp->addr,
3744 MGMT_OP_USER_CONFIRM_REPLY,
3745 HCI_OP_USER_CONFIRM_REPLY, 0);
3748 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3749 void *data, u16 len)
3751 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3753 bt_dev_dbg(hdev, "sock %p", sk);
3755 return user_pairing_resp(sk, hdev, &cp->addr,
3756 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3757 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3760 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3763 struct mgmt_cp_user_passkey_reply *cp = data;
3765 bt_dev_dbg(hdev, "sock %p", sk);
3767 return user_pairing_resp(sk, hdev, &cp->addr,
3768 MGMT_OP_USER_PASSKEY_REPLY,
3769 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3772 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3773 void *data, u16 len)
3775 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3777 bt_dev_dbg(hdev, "sock %p", sk);
3779 return user_pairing_resp(sk, hdev, &cp->addr,
3780 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3781 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* adv_expire_sync - if the current advertising instance carries any of
 * the given @flags, cancel its timeout and advance to the next
 * instance so the updated data (name/appearance) gets advertised.
 */
3784 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3786 struct adv_info *adv_instance;
3788 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3792 /* stop if current instance doesn't need to be changed */
3793 if (!(adv_instance->flags & flags))
3796 cancel_adv_timeout(hdev);
3798 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3802 hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
/* name_changed_sync - hci_cmd_sync work: expire advertising instances
 * that embed the local name after a name change.
 */
3807 static int name_changed_sync(struct hci_dev *hdev, void *data)
3809 return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
/* set_name_complete - hci_cmd_sync completion for SET_LOCAL_NAME:
 * reply to the requester and refresh name-bearing advertising data.
 */
3812 static void set_name_complete(struct hci_dev *hdev, void *data, int err)
3814 struct mgmt_pending_cmd *cmd = data;
3815 struct mgmt_cp_set_local_name *cp = cmd->param;
3816 u8 status = mgmt_status(err);
3818 bt_dev_dbg(hdev, "err %d", err);
/* Bail if the command was already cancelled/replaced meanwhile. */
3820 if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
3824 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3827 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* If LE advertising is live, rotate instances that carry the name. */
3830 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3831 hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
3834 mgmt_pending_remove(cmd);
/* set_name_sync - hci_cmd_sync work: push the new local name to the
 * controller (BR/EDR name + EIR) and refresh LE scan response data.
 */
3837 static int set_name_sync(struct hci_dev *hdev, void *data)
3839 if (lmp_bredr_capable(hdev)) {
3840 hci_update_name_sync(hdev);
3841 hci_update_eir_sync(hdev);
3844 /* The name is stored in the scan response data and so
3845 * no need to update the advertising data here.
3847 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3848 hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
/* set_local_name - MGMT_OP_SET_LOCAL_NAME handler: store the new long
 * and short names; when powered, queue the controller update, otherwise
 * just notify listeners of the change.
 */
3853 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3856 struct mgmt_cp_set_local_name *cp = data;
3857 struct mgmt_pending_cmd *cmd;
3860 bt_dev_dbg(hdev, "sock %p", sk);
3864 /* If the old values are the same as the new ones just return a
3865 * direct command complete event.
3867 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3868 !memcmp(hdev->short_name, cp->short_name,
3869 sizeof(hdev->short_name))) {
3870 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name never reaches the controller directly, so it can be
 * stored unconditionally. */
3875 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3877 if (!hdev_is_powered(hdev)) {
3878 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3880 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3885 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3886 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3887 ext_info_changed(hdev, sk);
3892 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3896 err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
3900 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3901 MGMT_STATUS_FAILED);
3904 mgmt_pending_remove(cmd);
/* Only commit the long name once the update was queued successfully. */
3909 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3912 hci_dev_unlock(hdev);
/* appearance_changed_sync - hci_cmd_sync work: expire advertising
 * instances that embed the appearance value after it changed.
 */
3916 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3918 return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
/* set_appearance - MGMT_OP_SET_APPEARANCE handler: store the LE
 * appearance value and refresh advertising/extended info when it
 * actually changed.
 */
3921 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3924 struct mgmt_cp_set_appearance *cp = data;
3928 bt_dev_dbg(hdev, "sock %p", sk);
/* Appearance is an LE (GAP) concept only. */
3930 if (!lmp_le_capable(hdev))
3931 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3932 MGMT_STATUS_NOT_SUPPORTED);
3934 appearance = le16_to_cpu(cp->appearance);
3938 if (hdev->appearance != appearance) {
3939 hdev->appearance = appearance;
3941 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3942 hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3945 ext_info_changed(hdev, sk);
3948 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3951 hci_dev_unlock(hdev);
/* get_phy_configuration - MGMT_OP_GET_PHY_CONFIGURATION handler: report
 * the supported, selected and configurable PHY bitmasks.
 */
3956 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3957 void *data, u16 len)
3959 struct mgmt_rp_get_phy_configuration rp;
3961 bt_dev_dbg(hdev, "sock %p", sk);
3965 memset(&rp, 0, sizeof(rp));
3967 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3968 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3969 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3971 hci_dev_unlock(hdev);
3973 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
/* mgmt_phy_configuration_changed - broadcast the new selected-PHY mask
 * to all mgmt sockets except @skip.
 */
3977 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3979 struct mgmt_ev_phy_configuration_changed ev;
3981 memset(&ev, 0, sizeof(ev));
3983 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3985 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
/* set_default_phy_complete - hci_cmd_sync completion for the LE Set
 * Default PHY command: derive the final status from err / the command
 * reply skb, answer the requester and broadcast the change on success.
 */
3989 static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
3991 struct mgmt_pending_cmd *cmd = data;
3992 struct sk_buff *skb = cmd->skb;
3993 u8 status = mgmt_status(err);
3995 if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
/* Status sources, in order: queueing error, skb-level error, then the
 * controller's own status byte in the reply. */
4000 status = MGMT_STATUS_FAILED;
4001 else if (IS_ERR(skb))
4002 status = mgmt_status(PTR_ERR(skb));
4004 status = mgmt_status(skb->data[0]);
4007 bt_dev_dbg(hdev, "status %d", status);
4010 mgmt_cmd_status(cmd->sk, hdev->id,
4011 MGMT_OP_SET_PHY_CONFIGURATION, status);
4013 mgmt_cmd_complete(cmd->sk, hdev->id,
4014 MGMT_OP_SET_PHY_CONFIGURATION, 0,
4017 mgmt_phy_configuration_changed(hdev, cmd->sk);
4020 if (skb && !IS_ERR(skb))
4023 mgmt_pending_remove(cmd);
/* set_default_phy_sync - hci_cmd_sync work: translate the mgmt PHY
 * selection mask into an HCI LE Set Default PHY command and send it
 * synchronously, stashing the reply skb on the pending command.
 */
4026 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
4028 struct mgmt_pending_cmd *cmd = data;
4029 struct mgmt_cp_set_phy_configuration *cp = cmd->param;
4030 struct hci_cp_le_set_default_phy cp_phy;
4031 u32 selected_phys = __le32_to_cpu(cp->selected_phys);
4033 memset(&cp_phy, 0, sizeof(cp_phy));
/* all_phys bits tell the controller "no preference" per direction
 * when the selection contains no TX / no RX PHY. */
4035 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
4036 cp_phy.all_phys |= 0x01;
4038 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
4039 cp_phy.all_phys |= 0x02;
4041 if (selected_phys & MGMT_PHY_LE_1M_TX)
4042 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
4044 if (selected_phys & MGMT_PHY_LE_2M_TX)
4045 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
4047 if (selected_phys & MGMT_PHY_LE_CODED_TX)
4048 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
4050 if (selected_phys & MGMT_PHY_LE_1M_RX)
4051 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
4053 if (selected_phys & MGMT_PHY_LE_2M_RX)
4054 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
4056 if (selected_phys & MGMT_PHY_LE_CODED_RX)
4057 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
/* The reply skb is inspected later in set_default_phy_complete(). */
4059 cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4060 sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
/* set_phy_configuration - MGMT_OP_SET_PHY_CONFIGURATION handler:
 * validate the requested PHY mask, apply the BR/EDR part by rewriting
 * hdev->pkt_type, and queue an LE Set Default PHY for the LE part.
 */
4065 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
4066 void *data, u16 len)
4068 struct mgmt_cp_set_phy_configuration *cp = data;
4069 struct mgmt_pending_cmd *cmd;
4070 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
/* DH1/DM1 (1-slot basic-rate) packets are always permitted. */
4071 u16 pkt_type = (HCI_DH1 | HCI_DM1);
4072 bool changed = false;
4075 bt_dev_dbg(hdev, "sock %p", sk);
4077 configurable_phys = get_configurable_phys(hdev);
4078 supported_phys = get_supported_phys(hdev);
4079 selected_phys = __le32_to_cpu(cp->selected_phys);
/* Reject selections outside the supported set... */
4081 if (selected_phys & ~supported_phys)
4082 return mgmt_cmd_status(sk, hdev->id,
4083 MGMT_OP_SET_PHY_CONFIGURATION,
4084 MGMT_STATUS_INVALID_PARAMS);
4086 unconfigure_phys = supported_phys & ~configurable_phys;
/* ...and selections that try to disable a mandatory (non-configurable)
 * PHY. */
4088 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
4089 return mgmt_cmd_status(sk, hdev->id,
4090 MGMT_OP_SET_PHY_CONFIGURATION,
4091 MGMT_STATUS_INVALID_PARAMS);
4093 if (selected_phys == get_selected_phys(hdev))
4094 return mgmt_cmd_complete(sk, hdev->id,
4095 MGMT_OP_SET_PHY_CONFIGURATION,
4100 if (!hdev_is_powered(hdev)) {
4101 err = mgmt_cmd_status(sk, hdev->id,
4102 MGMT_OP_SET_PHY_CONFIGURATION,
4103 MGMT_STATUS_REJECTED);
4107 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
4108 err = mgmt_cmd_status(sk, hdev->id,
4109 MGMT_OP_SET_PHY_CONFIGURATION,
/* BR/EDR part: map each PHY flag onto the ACL packet-type bits.
 * Note the EDR bits are inverted ("do not use") in pkt_type. */
4114 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
4115 pkt_type |= (HCI_DH3 | HCI_DM3);
4117 pkt_type &= ~(HCI_DH3 | HCI_DM3);
4119 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
4120 pkt_type |= (HCI_DH5 | HCI_DM5);
4122 pkt_type &= ~(HCI_DH5 | HCI_DM5);
4124 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
4125 pkt_type &= ~HCI_2DH1;
4127 pkt_type |= HCI_2DH1;
4129 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
4130 pkt_type &= ~HCI_2DH3;
4132 pkt_type |= HCI_2DH3;
4134 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
4135 pkt_type &= ~HCI_2DH5;
4137 pkt_type |= HCI_2DH5;
4139 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
4140 pkt_type &= ~HCI_3DH1;
4142 pkt_type |= HCI_3DH1;
4144 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
4145 pkt_type &= ~HCI_3DH3;
4147 pkt_type |= HCI_3DH3;
4149 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
4150 pkt_type &= ~HCI_3DH5;
4152 pkt_type |= HCI_3DH5;
4154 if (pkt_type != hdev->pkt_type) {
4155 hdev->pkt_type = pkt_type;
/* If only the BR/EDR part changed, no HCI command is needed —
 * broadcast and complete immediately. */
4159 if ((selected_phys & MGMT_PHY_LE_MASK) ==
4160 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
4162 mgmt_phy_configuration_changed(hdev, sk);
4164 err = mgmt_cmd_complete(sk, hdev->id,
4165 MGMT_OP_SET_PHY_CONFIGURATION,
4171 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
4176 err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
4177 set_default_phy_complete);
4180 err = mgmt_cmd_status(sk, hdev->id,
4181 MGMT_OP_SET_PHY_CONFIGURATION,
4182 MGMT_STATUS_FAILED);
4185 mgmt_pending_remove(cmd);
4189 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BLOCKED_KEYS: replace the device's list of blocked key
 * values (keys the kernel must refuse to use, e.g. known-weak LTKs/IRKs).
 *
 * NOTE(review): this extract is missing structural lines (the "len"
 * parameter line, closing braces, allocation-failure goto) — verify
 * control flow against the full source before editing.
 */
static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
	int err = MGMT_STATUS_SUCCESS;
	struct mgmt_cp_set_blocked_keys *keys = data;
	/* Upper bound on key_count so the payload size cannot exceed U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
				   sizeof(struct mgmt_blocked_key_info));
	u16 key_count, expected_len;
	bt_dev_dbg(hdev, "sock %p", sk);
	key_count = __le16_to_cpu(keys->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "too big key_count value %u", key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Payload length must match key_count exactly */
	expected_len = struct_size(keys, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Replace the whole list: clear it, then add each supplied entry */
	hci_blocked_keys_clear(hdev);
	for (i = 0; i < key_count; ++i) {
		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
			err = MGMT_STATUS_NO_RESOURCES;
		b->type = keys->keys[i].type;
		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
		/* RCU add: readers may walk hdev->blocked_keys locklessly */
		list_add_rcu(&b->list, &hdev->blocked_keys);
	hci_dev_unlock(hdev);
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
/* MGMT_OP_SET_WIDEBAND_SPEECH: toggle the HCI_WIDEBAND_SPEECH_ENABLED
 * flag. Only permitted when the controller quirk advertises support,
 * and rejected while powered if it would change the current setting.
 *
 * NOTE(review): extract is missing braces/else lines around the
 * set/clear pair and the new_settings() condition — verify upstream.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
	struct mgmt_mode *cp = data;
	bool changed = false;
	bt_dev_dbg(hdev, "sock %p", sk);
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);
	/* Only boolean on/off is a valid parameter */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Changing the setting while powered is rejected */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
	changed = !hci_dev_test_and_set_flag(hdev,
					     HCI_WIDEBAND_SPEECH_ENABLED);
	changed = hci_dev_test_and_clear_flag(hdev,
					      HCI_WIDEBAND_SPEECH_ENABLED);
	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	err = new_settings(hdev, sk);
	hci_dev_unlock(hdev);
/* MGMT_OP_READ_CONTROLLER_CAP: build an EIR-encoded list of controller
 * capabilities (security flags, max encryption key sizes, LE TX power
 * range) and return it to the caller.
 *
 * NOTE(review): extract is missing the local buf/flags/cap_len
 * declarations and some braces — verify against the full source.
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u8 tx_power_range[2];
	bt_dev_dbg(hdev, "sock %p", sk);
	memset(&buf, 0, sizeof(buf));
	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01; /* Remote public key validation (BR/EDR) */
	flags |= 0x02; /* Remote public key validation (LE) */
	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */
	flags |= 0x08; /* Encryption key size enforcement (LE) */
	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);
	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);
	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller.
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
	rp->cap_len = cpu_to_le16(cap_len);
	hci_dev_unlock(hdev);
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
/* 128-bit UUIDs identifying each experimental (exp) feature exposed via
 * MGMT_OP_READ/SET_EXP_FEATURE. Bytes are stored little-endian, i.e. in
 * reverse of the human-readable UUID string given above each table.
 *
 * NOTE(review): extract is missing the closing "};" of each array and
 * the #endif for CONFIG_BT_FEATURE_DEBUG — verify upstream.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */
static const u8 iso_socket_uuid[16] = {
	0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98,
	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,
/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */
static const u8 mgmt_mesh_uuid[16] = {
	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf,
	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c,
/* MGMT_OP_READ_EXP_FEATURES_INFO: list each available experimental
 * feature (UUID + flags, BIT(0) = enabled) for this controller, or the
 * global ones when hdev is NULL, then subscribe the socket to future
 * exp-feature-changed events.
 *
 * NOTE(review): extract is missing idx/flags declarations, else
 * branches and idx++ lines — verify against the full source.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
	struct mgmt_rp_read_exp_features_info *rp;
	bt_dev_dbg(hdev, "sock %p", sk);
	/* Enough space for 7 features */
	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7);
	rp = kzalloc(len, GFP_KERNEL);
#ifdef CONFIG_BT_FEATURE_DEBUG
	flags = bt_dbg_get() ? BIT(0) : 0;
	memcpy(rp->features[idx].uuid, debug_uuid, 16);
	rp->features[idx].flags = cpu_to_le32(flags);
	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
	/* Quality report is available via AOSP extension or a driver hook */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
	if (IS_ENABLED(CONFIG_BT_LE)) {
		flags = iso_enabled() ? BIT(0) : 0;
		memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
	if (hdev && lmp_le_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL))
		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
	rp->feature_count = cpu_to_le16(idx);
	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	/* Each feature entry is 20 bytes: 16-byte UUID + 4-byte flags */
	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				   MGMT_OP_READ_EXP_FEATURES_INFO,
				   0, rp, sizeof(*rp) + (20 * idx));
/* Emit MGMT_EV_EXP_FEATURE_CHANGED for the LL-privacy (RPA resolution)
 * experimental feature and keep hdev->conn_flags in sync: the
 * DEVICE_PRIVACY connection flag is only offered while the feature is
 * enabled on a privacy-mode-capable controller.
 */
static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
	struct mgmt_ev_exp_feature_changed ev;
	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, rpa_resolution_uuid, 16);
	/* BIT(1) signals that the supported settings changed as well */
	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
	// Do we need to be atomic with the conn_flags?
	if (enabled && privacy_mode_capable(hdev))
		hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY;
	hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY;
	/* Only sockets that opted into exp-feature events receive this */
	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Emit MGMT_EV_EXP_FEATURE_CHANGED for @uuid with BIT(0) reflecting the
 * new enabled state, to every socket subscribed to exp-feature events
 * except @skip (typically the requester, which gets a command reply).
 */
static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
			       bool enabled, struct sock *skip)
	struct mgmt_ev_exp_feature_changed ev;
	memset(&ev, 0, sizeof(ev));
	memcpy(ev.uuid, uuid, 16);
	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
/* Initializer for one struct mgmt_exp_feature entry in exp_features[].
 * NOTE(review): the continuation line setting .uuid = _uuid appears to
 * be missing from this extract — verify against the full source.
 */
#define EXP_FEAT(_uuid, _set_func) \
	.set_func = _set_func, \
/* The zero key uuid is special. Multiple exp features are set through it. */
/* Setting the all-zero UUID to "off" bulk-disables experimental
 * features: kernel debug (if built in) and LL privacy (only while the
 * controller is powered down), emitting a changed event for each.
 */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
	struct mgmt_rp_set_exp_feature rp;
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);
#ifdef CONFIG_BT_FEATURE_DEBUG
	bool changed = bt_dbg_get();
	exp_feature_changed(NULL, ZERO_KEY, false, sk);
	/* LL privacy can only be toggled while powered off */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		exp_feature_changed(hdev, rpa_resolution_uuid, false,
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle kernel Bluetooth debug logging via the debug_uuid exp feature.
 * Must be sent on the non-controller (MGMT_INDEX_NONE) index; parameter
 * is a single on/off octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
	struct mgmt_rp_set_exp_feature rp;
	/* Command requires to use the non-controller index */
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);
	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	val = !!cp->param[0];
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
	/* Notify other subscribers only if the value actually changed */
	exp_feature_changed(hdev, debug_uuid, val, sk);
/* Toggle the experimental mesh support flag (HCI_MESH_EXPERIMENTAL) for
 * a controller. Disabling also clears the active HCI_MESH flag.
 */
static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev,
			      struct mgmt_cp_set_exp_feature *cp, u16 data_len)
	struct mgmt_rp_set_exp_feature rp;
	/* Command requires to use the controller index */
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);
	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	val = !!cp->param[0];
	changed = !hci_dev_test_and_set_flag(hdev,
					     HCI_MESH_EXPERIMENTAL);
	/* Turning the experiment off also stops any active mesh mode */
	hci_dev_clear_flag(hdev, HCI_MESH);
	changed = hci_dev_test_and_clear_flag(hdev,
					      HCI_MESH_EXPERIMENTAL);
	memcpy(rp.uuid, mgmt_mesh_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
	exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk);
/* Toggle the LL privacy (RPA resolution) experimental feature. Only
 * allowed while the controller is powered off; enabling also clears the
 * HCI_ADVERTISING flag since advertising is re-programmed under LL
 * privacy rules.
 */
static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
	struct mgmt_rp_set_exp_feature rp;
	/* Command requires to use the controller index */
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);
	/* Changes can only be made when controller is powered down */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_REJECTED);
	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	val = !!cp->param[0];
	changed = !hci_dev_test_and_set_flag(hdev,
					     HCI_ENABLE_LL_PRIVACY);
	hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	/* Enable LL privacy + supported settings changed */
	flags = BIT(0) | BIT(1);
	changed = hci_dev_test_and_clear_flag(hdev,
					      HCI_ENABLE_LL_PRIVACY);
	/* Disable LL privacy + supported settings changed */
	memcpy(rp.uuid, rpa_resolution_uuid, 16);
	rp.flags = cpu_to_le32(flags);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
	/* Also updates hdev->conn_flags (DEVICE_PRIVACY) and notifies */
	exp_ll_privacy_feature_changed(val, hdev, sk);
/* Toggle controller quality-report generation. Uses the driver hook
 * hdev->set_quality_report when available, otherwise falls back to the
 * AOSP vendor extension. Serialized with other HCI work via
 * hci_req_sync_lock().
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
	struct mgmt_rp_set_exp_feature rp;
	/* Command requires to use a valid controller index */
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);
	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	hci_req_sync_lock(hdev);
	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	/* Prefer the driver-provided hook over the AOSP extension */
	if (hdev->set_quality_report)
		err = hdev->set_quality_report(hdev, val);
	err = aosp_set_quality_report(hdev, val);
	err = mgmt_cmd_status(sk, hdev->id,
			      MGMT_OP_SET_EXP_FEATURE,
			      MGMT_STATUS_FAILED);
	goto unlock_quality_report;
	hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
	hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);
	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
	exp_feature_changed(hdev, quality_report_uuid, val, sk);
unlock_quality_report:
	hci_req_sync_unlock(hdev);
/* Toggle use of offloaded (controller-side) codecs for audio data
 * paths. Requires the driver to provide hdev->get_data_path_id.
 */
static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
				  struct mgmt_cp_set_exp_feature *cp,
	struct mgmt_rp_set_exp_feature rp;
	/* Command requires to use a valid controller index */
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);
	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
	if (!hdev->get_data_path_id) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
	bt_dev_info(hdev, "offload codecs enable %d changed %d",
	memcpy(rp.uuid, offload_codecs_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
	exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
/* Toggle simultaneous LE central+peripheral roles. Requires the
 * controller LE states to support it (hci_dev_le_state_simultaneous).
 */
static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
					  struct mgmt_cp_set_exp_feature *cp,
	struct mgmt_rp_set_exp_feature rp;
	/* Command requires to use a valid controller index */
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);
	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
	if (!hci_dev_le_state_simultaneous(hdev)) {
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_NOT_SUPPORTED);
	hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_SET_EXP_FEATURE, 0,
	exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
/* Toggle the experimental ISO socket support globally. Must be sent on
 * the non-controller index; hdev is therefore expected to be NULL here.
 *
 * NOTE(review): extract is missing the iso_init()/iso_exit() calls
 * between the val check and the reply — verify upstream.
 */
static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev,
			       struct mgmt_cp_set_exp_feature *cp, u16 data_len)
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed = false;
	/* Command requires to use the non-controller index */
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);
	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);
	val = cp->param[0] ? true : false;
	memcpy(rp.uuid, iso_socket_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
	exp_feature_changed(hdev, iso_socket_uuid, val, sk);
/* Dispatch table mapping each experimental-feature UUID to its setter.
 * set_exp_feature() walks this table; the all-NULL sentinel terminates.
 */
static const struct mgmt_exp_feature {
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),
	EXP_FEAT(iso_socket_uuid, set_iso_socket_func),
	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
/* MGMT_OP_SET_EXP_FEATURE entry point: look up the requested UUID in
 * exp_features[] and delegate to its setter; unknown UUIDs get
 * MGMT_STATUS_NOT_SUPPORTED.
 */
static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
	struct mgmt_cp_set_exp_feature *cp = data;
	bt_dev_dbg(hdev, "sock %p", sk);
	for (i = 0; exp_features[i].uuid; i++) {
		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
			return exp_features[i].set_func(sk, hdev, cp, data_len);
	/* hdev may be NULL for commands sent on the non-controller index */
	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
			       MGMT_OP_SET_EXP_FEATURE,
			       MGMT_STATUS_NOT_SUPPORTED);
5030 static u32 get_params_flags(struct hci_dev *hdev,
5031 struct hci_conn_params *params)
5033 u32 flags = hdev->conn_flags;
5035 /* Devices using RPAs can only be programmed in the acceptlist if
5036 * LL Privacy has been enable otherwise they cannot mark
5037 * HCI_CONN_FLAG_REMOTE_WAKEUP.
5039 if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
5040 hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type))
5041 flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
/* MGMT_OP_GET_DEVICE_FLAGS: report supported and currently-set device
 * flags for a BR/EDR accept-list entry or an LE connection-parameters
 * entry identified by address + type.
 */
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_get_device_flags *cp = data;
	struct mgmt_rp_get_device_flags rp;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u32 supported_flags;
	u32 current_flags = 0;
	/* Stays INVALID_PARAMS unless a matching entry is found */
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
		   &cp->addr.bdaddr, cp->addr.type);
	supported_flags = hdev->conn_flags;
	memset(&rp, 0, sizeof(rp));
	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
		current_flags = br_params->flags;
	/* LE: look up stored connection parameters instead */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	supported_flags = get_params_flags(hdev, params);
	current_flags = params->flags;
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;
	rp.supported_flags = cpu_to_le32(supported_flags);
	rp.current_flags = cpu_to_le32(current_flags);
	status = MGMT_STATUS_SUCCESS;
	hci_dev_unlock(hdev);
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
/* Broadcast MGMT_EV_DEVICE_FLAGS_CHANGED for @bdaddr to all mgmt
 * sockets except @sk (the requester already gets a command reply).
 */
static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
				 bdaddr_t *bdaddr, u8 bdaddr_type,
				 u32 supported_flags, u32 current_flags)
	struct mgmt_ev_device_flags_changed ev;
	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = bdaddr_type;
	ev.supported_flags = cpu_to_le32(supported_flags);
	ev.current_flags = cpu_to_le32(current_flags);
	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
/* MGMT_OP_SET_DEVICE_FLAGS: update the flags on a BR/EDR accept-list
 * entry or LE conn-params entry, validating the requested flags against
 * what is supported, then notify other sockets on success.
 */
static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_set_device_flags *cp = data;
	struct bdaddr_list_with_flags *br_params;
	struct hci_conn_params *params;
	u8 status = MGMT_STATUS_INVALID_PARAMS;
	u32 supported_flags;
	u32 current_flags = __le32_to_cpu(cp->current_flags);
	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
		   &cp->addr.bdaddr, cp->addr.type, current_flags);
	// We should take hci_dev_lock() early, I think.. conn_flags can change
	supported_flags = hdev->conn_flags;
	/* Reject any flag outside the supported set */
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
	if (cp->addr.type == BDADDR_BREDR) {
		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
		br_params->flags = current_flags;
		status = MGMT_STATUS_SUCCESS;
		bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
			    &cp->addr.bdaddr, cp->addr.type);
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
					le_addr_type(cp->addr.type));
	bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
		    &cp->addr.bdaddr, le_addr_type(cp->addr.type));
	/* Per-device supported set may be narrower than hdev->conn_flags */
	supported_flags = get_params_flags(hdev, params);
	if ((supported_flags | current_flags) != supported_flags) {
		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
			    current_flags, supported_flags);
	params->flags = current_flags;
	status = MGMT_STATUS_SUCCESS;
	/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
	 */
	if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
		hci_update_passive_scan(hdev);
	hci_dev_unlock(hdev);
	if (status == MGMT_STATUS_SUCCESS)
		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
				     supported_flags, current_flags);
	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
				 &cp->addr, sizeof(cp->addr));
/* Broadcast MGMT_EV_ADV_MONITOR_ADDED for @handle to all mgmt sockets
 * except the requester @sk.
 */
static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
	struct mgmt_ev_adv_monitor_added ev;
	ev.monitor_handle = cpu_to_le16(handle);
	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
/* Broadcast MGMT_EV_ADV_MONITOR_REMOVED for @handle. If the removal was
 * requested over mgmt (a pending REMOVE_ADV_MONITOR command exists for
 * this handle), the requester's socket is skipped — it gets a command
 * reply instead of the event.
 */
void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
	struct mgmt_ev_adv_monitor_removed ev;
	struct mgmt_pending_cmd *cmd;
	struct sock *sk_skip = NULL;
	struct mgmt_cp_remove_adv_monitor *cp;
	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
	if (cp->monitor_handle)
	ev.monitor_handle = cpu_to_le16(handle);
	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
/* MGMT_OP_READ_ADV_MONITOR_FEATURES: report supported/enabled monitor
 * features, handle limits, and the handles of all registered monitors.
 *
 * NOTE(review): extract is missing the kmalloc-failure branch and
 * kfree(rp) — verify against the full source.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	__u32 supported = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];
	BT_DBG("request for %s", hdev->name);
	/* OR-patterns monitoring requires the MSFT vendor extension */
	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;
	/* Snapshot every registered monitor handle under the dev lock */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;
	hci_dev_unlock(hdev);
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	/* All supported features are currently enabled */
	enabled = supported;
	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);
/* Completion callback for the queued add-monitor work: on success,
 * notify other sockets, bump the monitor count, mark the monitor
 * registered and refresh passive scanning; always reply to the
 * requester and drop the pending command.
 */
static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
						   void *data, int status)
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;
	rp.monitor_handle = cpu_to_le16(monitor->handle);
	mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
	hdev->adv_monitors_cnt++;
	if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
		monitor->state = ADV_MONITOR_STATE_REGISTERED;
	hci_update_passive_scan(hdev);
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "add monitor %d complete, status %d",
		   rp.monitor_handle, status);
/* hci_cmd_sync work item: register the monitor carried in the pending
 * command's user_data with the controller.
 */
static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
	struct mgmt_pending_cmd *cmd = data;
	struct adv_monitor *monitor = cmd->user_data;
	return hci_add_adv_monitor(hdev, monitor);
/* Common tail for both add-monitor opcodes: reject if a conflicting
 * command is pending, create the pending command and queue the sync
 * work; on any failure free the monitor and return an error status.
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
	struct mgmt_pending_cmd *cmd;
	/* Serialize against LE toggling and other monitor add/remove ops */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
	cmd = mgmt_pending_add(sk, op, hdev, data, len);
		status = MGMT_STATUS_NO_RESOURCES;
	err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd,
				 mgmt_add_adv_patterns_monitor_complete);
		status = MGMT_STATUS_NO_RESOURCES;
	status = MGMT_STATUS_FAILED;
	hci_dev_unlock(hdev);
	/* Error path: the monitor is ours to free */
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
/* Fill @m->rssi from the caller-supplied thresholds, or with defaults
 * when @rssi is NULL (the non-RSSI add-monitor opcode).
 *
 * NOTE(review): the if (rssi) / else lines appear to be missing from
 * this extract — verify the branch structure upstream.
 */
static void parse_adv_monitor_rssi(struct adv_monitor *m,
				   struct mgmt_adv_rssi_thresholds *rssi)
	m->rssi.low_threshold = rssi->low_threshold;
	m->rssi.low_threshold_timeout =
		__le16_to_cpu(rssi->low_threshold_timeout);
	m->rssi.high_threshold = rssi->high_threshold;
	m->rssi.high_threshold_timeout =
		__le16_to_cpu(rssi->high_threshold_timeout);
	m->rssi.sampling_period = rssi->sampling_period;
	/* Default values. These numbers are the least constricting
	 * parameters for MSFT API to work, so it behaves as if there
	 * are no rssi parameter to consider. May need to be changed
	 * if other API are to be supported.
	 */
	m->rssi.low_threshold = -127;
	m->rssi.low_threshold_timeout = 60;
	m->rssi.high_threshold = -127;
	m->rssi.high_threshold_timeout = 0;
	m->rssi.sampling_period = 0;
/* Validate and copy @pattern_count caller-supplied patterns into
 * @m->patterns. Each pattern's offset+length must fit within a single
 * advertising data unit (HCI_MAX_AD_LENGTH). Returns an MGMT status.
 */
static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
				    struct mgmt_adv_pattern *patterns)
	u8 offset = 0, length = 0;
	struct adv_pattern *p = NULL;
	for (i = 0; i < pattern_count; i++) {
		offset = patterns[i].offset;
		length = patterns[i].length;
		if (offset >= HCI_MAX_AD_LENGTH ||
		    length > HCI_MAX_AD_LENGTH ||
		    (offset + length) > HCI_MAX_AD_LENGTH)
			return MGMT_STATUS_INVALID_PARAMS;
		p = kmalloc(sizeof(*p), GFP_KERNEL);
			return MGMT_STATUS_NO_RESOURCES;
		p->ad_type = patterns[i].ad_type;
		p->offset = patterns[i].offset;
		p->length = patterns[i].length;
		memcpy(p->value, patterns[i].value, p->length);
		INIT_LIST_HEAD(&p->list);
		list_add(&p->list, &m->patterns);
	return MGMT_STATUS_SUCCESS;
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR: validate the variable-length
 * pattern payload, allocate a monitor with default RSSI parameters
 * (rssi == NULL), and hand off to __add_adv_patterns_monitor().
 */
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 len)
	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);
	BT_DBG("request for %s", hdev->name);
	/* At least one pattern must follow the fixed header */
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
	m = kzalloc(sizeof(*m), GFP_KERNEL);
		status = MGMT_STATUS_NO_RESOURCES;
	INIT_LIST_HEAD(&m->patterns);
	/* NULL rssi selects the least-constricting default thresholds */
	parse_adv_monitor_rssi(m, NULL);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
/* MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI: same as
 * add_adv_patterns_monitor() but the command carries explicit RSSI
 * thresholds that are copied into the monitor.
 */
static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
					 void *data, u16 len)
	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
	struct adv_monitor *m = NULL;
	u8 status = MGMT_STATUS_SUCCESS;
	size_t expected_size = sizeof(*cp);
	BT_DBG("request for %s", hdev->name);
	if (len <= sizeof(*cp)) {
		status = MGMT_STATUS_INVALID_PARAMS;
	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
	if (len != expected_size) {
		status = MGMT_STATUS_INVALID_PARAMS;
	m = kzalloc(sizeof(*m), GFP_KERNEL);
		status = MGMT_STATUS_NO_RESOURCES;
	INIT_LIST_HEAD(&m->patterns);
	parse_adv_monitor_rssi(m, &cp->rssi);
	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
/* Completion callback for MGMT_OP_REMOVE_ADV_MONITOR: echo the removed
 * handle back to userspace, refresh passive scanning, and free the
 * pending command.
 */
static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev,
					     void *data, int status)
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;

	/* Reply carries the same handle the caller asked to remove */
	rp.monitor_handle = cp->monitor_handle;

	/* Monitor set changed; re-evaluate passive scan parameters */
	hci_update_passive_scan(hdev);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(status), &rp, sizeof(rp));
	mgmt_pending_remove(cmd);

	/* NOTE(review): matching hci_dev_lock() call elided in this view */
	hci_dev_unlock(hdev);
	bt_dev_dbg(hdev, "remove monitor %d complete, status %d",
		   rp.monitor_handle, status);
/* hci_cmd_sync work for MGMT_OP_REMOVE_ADV_MONITOR: a zero handle
 * removes all monitors, otherwise only the named one.
 */
static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data)
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_remove_adv_monitor *cp = cmd->param;
	u16 handle = __le16_to_cpu(cp->monitor_handle);

	/* NOTE(review): the "if (!handle)" style guard is elided here */
		return hci_remove_all_adv_monitor(hdev);

	return hci_remove_single_adv_monitor(hdev, handle);
/* MGMT_OP_REMOVE_ADV_MONITOR handler: rejects the request while any
 * conflicting monitor/LE operation is pending, then queues the removal
 * on the hci_sync workqueue.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
	struct mgmt_pending_cmd *cmd;

	/* Only one monitor/LE state-changing operation at a time */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	/* NOTE(review): allocation-failure guard elided in this view */
		status = MGMT_STATUS_NO_RESOURCES;

	err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
				 mgmt_remove_adv_monitor_complete);

	/* On queueing failure drop the pending cmd and map errno to status */
		mgmt_pending_remove(cmd);

		status = MGMT_STATUS_NO_RESOURCES;
		status = MGMT_STATUS_FAILED;

	hci_dev_unlock(hdev);

	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translate the
 * controller reply (legacy or Secure Connections extended format) into
 * the mgmt response.  For non-SC controllers only the P-192 hash/rand
 * are returned and the reply is shrunk accordingly.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Derive status from err, an ERR_PTR skb, or the HCI status byte.
	 * NOTE(review): the leading "if" conditions are elided in this view.
	 */
		status = MGMT_STATUS_FAILED;
	else if (IS_ERR(skb))
		status = mgmt_status(PTR_ERR(skb));
		status = mgmt_status(skb->data[0]);

	bt_dev_dbg(hdev, "status %d", status);

	/* On any failure reply with a bare status */
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Controller reply too short to be valid */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* No P-256 data on legacy controllers: trim the reply */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

	/* Free the controller reply skb if one was actually received */
	if (skb && !IS_ERR(skb))

	mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_READ_LOCAL_OOB_DATA: issue the extended
 * (Secure Connections) or legacy local OOB read and stash the reply skb
 * on the pending command.
 */
static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
	struct mgmt_pending_cmd *cmd = data;

	if (bredr_sc_enabled(hdev))
		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);

	if (IS_ERR(cmd->skb))
		return PTR_ERR(cmd->skb);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: requires a powered, SSP-capable
 * controller; queues the OOB read on the hci_sync workqueue.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	/* NOTE(review): allocation-failure guard elided in this view */

	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
				 read_local_oob_data_complete);

	/* Queueing failed: report failure and release the pending cmd */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler.  Two wire formats are accepted,
 * distinguished by length: the legacy form with only P-192 hash/rand
 * (BR/EDR addresses only) and the extended form that additionally
 * carries P-256 values.  All-zero key material disables the
 * corresponding OOB data set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
	struct mgmt_addr_info *addr = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;

		/* Legacy (P-192 only) form is valid for BR/EDR only */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
			status = MGMT_STATUS_FAILED;
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));

			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {

			rand192 = cp->rand192;
			hash192 = cp->hash192;

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {

		rand256 = cp->rand256;
		hash256 = cp->hash256;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
			status = MGMT_STATUS_FAILED;
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
		/* Neither known wire size: reject */
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);

	hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: BDADDR_ANY clears all stored
 * remote OOB data, otherwise only the entry for the given BR/EDR
 * address is removed.
 */
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
	struct mgmt_cp_remove_remote_oob_data *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Remote OOB data is only tracked for BR/EDR addresses */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		hci_remote_oob_data_clear(hdev);
		status = MGMT_STATUS_SUCCESS;

	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
	/* Removal of an unknown entry maps to INVALID_PARAMS */
		status = MGMT_STATUS_INVALID_PARAMS;
		status = MGMT_STATUS_SUCCESS;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
				status, &cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
/* Notify userspace that a start-discovery request finished: locate the
 * pending command for any of the three start-discovery opcodes and
 * complete it with the translated status.
 */
void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	/* Any of the three start-discovery variants may be pending */
	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);

		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);

		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* Check whether discovery type @type is usable on @hdev.  On failure
 * *mgmt_status is set to the MGMT status explaining why (LE/BR-EDR not
 * supported, or INVALID_PARAMS for an unknown type).
 */
static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
				    uint8_t *mgmt_status)
	case DISCOV_TYPE_LE:
		*mgmt_status = mgmt_le_support(hdev);

	case DISCOV_TYPE_INTERLEAVED:
		/* Interleaved discovery needs LE (and BR/EDR) support */
		*mgmt_status = mgmt_le_support(hdev);

	case DISCOV_TYPE_BREDR:
		*mgmt_status = mgmt_bredr_support(hdev);

		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
/* hci_cmd_sync completion for the start-discovery family: reply to the
 * socket that queued the request and update the discovery state machine
 * (FINDING on success, STOPPED on error).
 */
static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
	struct mgmt_pending_cmd *cmd = data;

	/* Ignore stale completions whose pending cmd was replaced */
	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
/* hci_cmd_sync work: kick off discovery with the parameters already
 * stored in hdev->discovery.
 */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
	return hci_start_discovery_sync(hdev);
/* Common implementation for MGMT_OP_START_DISCOVERY and
 * MGMT_OP_START_LIMITED_DISCOVERY (@op selects which).  Validates
 * power/busy/type/pause preconditions, resets the discovery filter,
 * records the requested type and queues the discovery work.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));

	/* Only one discovery session at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	/* NOTE(review): allocation-failure guard elided in this view */

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
		mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	hci_dev_unlock(hdev);
/* MGMT_OP_START_DISCOVERY handler: thin wrapper over
 * start_discovery_internal().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
/* MGMT_OP_START_LIMITED_DISCOVERY handler: thin wrapper over
 * start_discovery_internal() with the limited-discovery opcode.
 */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
/* MGMT_OP_START_SERVICE_DISCOVERY handler: like start_discovery but with
 * result filtering by RSSI and an optional list of 128-bit service UUIDs
 * appended to the command (uuid_count entries of 16 bytes each).
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound keeps expected_len below U16_MAX */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));

	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,

	/* Payload must exactly match the declared UUID count */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
	/* NOTE(review): allocation-failure guard elided in this view */

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Copy the UUID filter list out of the command buffer */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);

	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
		mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

	hci_dev_unlock(hdev);
/* Notify userspace that a stop-discovery request finished: complete the
 * pending MGMT_OP_STOP_DISCOVERY command with the translated status.
 */
void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "status %u", status);

	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
		cmd->cmd_complete(cmd, mgmt_status(status));
		mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* hci_cmd_sync completion for MGMT_OP_STOP_DISCOVERY: reply to the
 * requester and mark discovery stopped.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
	struct mgmt_pending_cmd *cmd = data;

	/* Ignore stale completions whose pending cmd was replaced */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
	mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
/* hci_cmd_sync work: stop the currently running discovery procedure. */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
	return hci_stop_discovery_sync(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: the requested type must match the
 * active discovery session; queues the stop on the hci_sync workqueue.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));

	/* Must stop the same type of discovery that was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	/* NOTE(review): allocation-failure guard elided in this view */

	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
		mgmt_pending_remove(cmd);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether the name of
 * a discovered device is already known.  Known names are removed from
 * the resolve list; unknown ones are queued for name resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_FAILED, &cp->addr,

	/* Device must be in the inquiry cache with an unknown name */
	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
					MGMT_STATUS_INVALID_PARAMS, &cp->addr,

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add the address to the reject list and
 * emit MGMT_EV_DEVICE_BLOCKED to other mgmt sockets.
 */
static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_block_device *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
		status = MGMT_STATUS_FAILED;

	/* Event is skipped for the socket that issued the command */
	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
	status = MGMT_STATUS_SUCCESS;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove the address from the reject
 * list and emit MGMT_EV_DEVICE_UNBLOCKED.  An address that was not
 * blocked yields INVALID_PARAMS.
 */
static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_unblock_device *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
		status = MGMT_STATUS_INVALID_PARAMS;

	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
	status = MGMT_STATUS_SUCCESS;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
				&cp->addr, sizeof(cp->addr));

	hci_dev_unlock(hdev);
/* hci_cmd_sync work: regenerate the EIR data so it carries the newly
 * configured Device ID record.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
	return hci_update_eir_sync(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the DI record (source, vendor,
 * product, version) and schedule an EIR update.  Source values above
 * 0x0002 are not defined and are rejected.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_cp_set_device_id *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,

	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);
/* Log-only helper: report whether re-enabling instance advertising
 * after "Set Advertising" was toggled succeeded.
 */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
	bt_dev_err(hdev, "failed to re-configure advertising %d", err);

	bt_dev_dbg(hdev, "status %d", err);
/* Completion for MGMT_OP_SET_ADVERTISING: sync the HCI_ADVERTISING flag
 * with the controller state, answer all pending commands, broadcast
 * new settings, and — if advertising was just turned off while
 * instances exist — re-enable multi-instance advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
	struct cmd_lookup match = { NULL, hdev };
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	/* On failure just fail every pending SET_ADVERTISING command */
	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
			     cmd_status_rsp, &status);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,

	new_settings(hdev, match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))

	instance = hdev->cur_adv_instance;
	/* Fall back to the first configured instance if none is current.
	 * NOTE(review): the "if (!instance)" style guard is elided here.
	 */
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);

		instance = adv_instance->instance;

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
/* hci_cmd_sync work for MGMT_OP_SET_ADVERTISING: apply the requested
 * advertising mode (0x02 means connectable) using instance 0.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	/* Switch to instance "0" for the Set Advertising setting.
	 * We cannot use update_[adv|scan_rsp]_data() here as the
	 * HCI_ADVERTISING flag is not yet set.
	 */
	hdev->cur_adv_instance = 0x00;

	if (ext_adv_capable(hdev)) {
		hci_start_ext_adv_sync(hdev, 0x00);
		hci_update_adv_data_sync(hdev, 0x00);
		hci_update_scan_rsp_data_sync(hdev, 0x00);
		hci_enable_advertising_sync(hdev);

	hci_disable_advertising_sync(hdev);
/* MGMT_OP_SET_ADVERTISING handler.  val 0x00 disables, 0x01 enables,
 * 0x02 enables connectable advertising.  When no HCI traffic is needed
 * (powered off, no change, mesh mode, LE connections present, or an
 * active LE scan) only the flags are toggled and settings broadcast;
 * otherwise the change is queued on the hci_sync workqueue.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_dev_test_flag(hdev, HCI_MESH) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {

		hdev->cur_adv_instance = 0x00;
		changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
		if (cp->val == 0x02)
			hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

		changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);

		/* Only broadcast when a flag actually changed */
		err = new_settings(hdev, sk);

	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	/* NOTE(review): allocation-failure guard elided in this view */

	err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
				 set_advertising_complete);

	mgmt_pending_remove(cmd);

	hci_dev_unlock(hdev);
/* MGMT_OP_SET_STATIC_ADDRESS handler: only allowed while powered off.
 * The address must either be BDADDR_ANY (clears it) or a valid LE
 * static random address (two most significant bits set, not
 * BDADDR_NONE).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
	struct mgmt_cp_set_static_address *cp = data;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_SET_STATIC_ADDRESS,
					       MGMT_STATUS_INVALID_PARAMS);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);

	err = new_settings(hdev, sk);

	hci_dev_unlock(hdev);
/* MGMT_OP_SET_SCAN_PARAMS handler: validate and store the LE scan
 * interval/window (both 0x0004..0x4000, window <= interval) and restart
 * background scanning so the new values take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The scan window may never exceed the scan interval */
	if (window > interval)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				       MGMT_STATUS_INVALID_PARAMS);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,

	/* If background scan is running, restart it so new parameters are
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	    hdev->discovery.state == DISCOVERY_STOPPED)
		hci_update_passive_scan(hdev);

	hci_dev_unlock(hdev);
/* Completion for MGMT_OP_SET_FAST_CONNECTABLE: on success sync the
 * HCI_FAST_CONNECTABLE flag with the requested value and broadcast
 * new settings; on failure reply with the error status.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
		struct mgmt_mode *cp = cmd->param;

		hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);

	mgmt_pending_free(cmd);
/* hci_cmd_sync work: program the page-scan parameters for the requested
 * fast-connectable mode.
 */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: requires BR/EDR enabled and a
 * controller newer than 1.1.  While powered off only the flag is
 * toggled; otherwise the page-scan change is queued on the hci_sync
 * workqueue.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: just answer with the current settings */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);

	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
	/* NOTE(review): allocation-failure guard elided in this view */

	err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
				 fast_connectable_complete);

		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
/* Completion for MGMT_OP_SET_BREDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and report the error; on
 * success answer with the settings and broadcast them.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);

	mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_SET_BREDR: disable fast connectable,
 * refresh page/inquiry scan, and update the advertising data flags
 * (scan response data is unaffected by the BR/EDR setting).
 */
static int set_bredr_sync(struct hci_dev *hdev, void *data)
	status = hci_write_fast_connectable_sync(hdev, false);

	status = hci_update_scan_sync(hdev);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
/* MGMT_OP_SET_BREDR handler: toggle BR/EDR support on a dual-mode
 * controller.  Disabling while powered is rejected, as is re-enabling
 * when a static address or Secure Connections would make the resulting
 * configuration invalid.  The flag is set before queueing so the
 * generated advertising data carries the right flags.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No change requested: reply with current settings */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all BR/EDR-only settings */
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);

		err = new_settings(hdev, sk);

	/* Reject disabling when powered on */
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);

	/* When configuring a dual-mode controller to operate
	 * with LE only and using a static address, then switching
	 * BR/EDR back on is not allowed.
	 *
	 * Dual-mode controllers shall operate with the public
	 * address as its identity address for BR/EDR and LE. So
	 * reject the attempt to create an invalid configuration.
	 *
	 * The same restrictions applies when secure connections
	 * has been enabled. For BR/EDR this is a controller feature
	 * while for LE it is a host stack feature. This means that
	 * switching BR/EDR back on when secure connections has been
	 * enabled is not a supported transaction.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
	     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	/* NOTE(review): allocation-failure guard elided in this view */

	err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
				 set_bredr_complete);

		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);

		mgmt_pending_free(cmd);

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

	hci_dev_unlock(hdev);
/* Completion for MGMT_OP_SET_SECURE_CONN: on success translate the
 * requested value (0x00/0x01/0x02) into the HCI_SC_ENABLED /
 * HCI_SC_ONLY flag pair and broadcast new settings.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);

	/* val 0x00: SC off; 0x01: SC on; 0x02: SC-only mode.
	 * NOTE(review): the switch/case lines are elided in this view.
	 */
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);

		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);

		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

	mgmt_pending_free(cmd);
/* hci_cmd_sync work for MGMT_OP_SET_SECURE_CONN: force a controller
 * write of the SC support value by pre-setting HCI_SC_ENABLED.
 */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
/* MGMT_OP_SET_SECURE_CONN handler.  val 0x00 disables, 0x01 enables
 * Secure Connections, 0x02 enables SC-only mode.  When powered off or
 * on an LE-only configuration the flags are toggled directly; otherwise
 * the controller write is queued on the hci_sync workqueue.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* SC on BR/EDR requires SSP to be enabled first */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	/* No controller write possible/needed: toggle flags directly */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {

		changed = !hci_dev_test_and_set_flag(hdev,
		if (cp->val == 0x02)
			hci_dev_set_flag(hdev, HCI_SC_ONLY);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);

		changed = hci_dev_test_and_clear_flag(hdev,
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);

		err = new_settings(hdev, sk);

	/* Requested state already active: just answer */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	/* NOTE(review): allocation-failure guard elided in this view */

	err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
				 set_secure_conn_complete);

		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);

		mgmt_pending_free(cmd);

	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS.  val: 0x00 = discard debug keys,
 * 0x01 = keep them (HCI_KEEP_DEBUG_KEYS), 0x02 = additionally use them
 * (HCI_USE_DEBUG_KEYS).  When the use-mode changed on a powered, SSP-enabled
 * device, it pushes HCI Write SSP Debug Mode; replies with settings and
 * emits New Settings if anything changed.
 */
6968 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
6969 void *data, u16 len)
6971 	struct mgmt_mode *cp = data;
6972 	bool changed, use_changed;
6975 	bt_dev_dbg(hdev, "sock %p", sk);
6977 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
6978 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
6979 				       MGMT_STATUS_INVALID_PARAMS);
6984 	changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
6986 	changed = hci_dev_test_and_clear_flag(hdev,
6987 					      HCI_KEEP_DEBUG_KEYS);
6989 	if (cp->val == 0x02)
6990 		use_changed = !hci_dev_test_and_set_flag(hdev,
6991 							 HCI_USE_DEBUG_KEYS);
6993 		use_changed = hci_dev_test_and_clear_flag(hdev,
6994 							  HCI_USE_DEBUG_KEYS);
/* Only touch the controller when the effective debug mode flipped */
6996 	if (hdev_is_powered(hdev) && use_changed &&
6997 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
6998 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
6999 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
7000 			     sizeof(mode), &mode);
7003 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
7008 	err = new_settings(hdev, sk);
7011 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY.  privacy: 0x00 off, 0x01 on, 0x02
 * limited privacy.  Enabling stores the caller-supplied IRK, marks the RPA
 * expired (forcing regeneration) and sets HCI_RPA_RESOLVING since userspace
 * that issues this command is expected to handle IRKs.  The powered-state
 * rejection is deliberately commented out in this (Tizen) tree.
 */
7015 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7018 	struct mgmt_cp_set_privacy *cp = cp_data;
7022 	bt_dev_dbg(hdev, "sock %p", sk);
7024 	if (!lmp_le_capable(hdev))
7025 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7026 				       MGMT_STATUS_NOT_SUPPORTED);
7028 	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
7029 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7030 				       MGMT_STATUS_INVALID_PARAMS);
7033 	/* commenting out since set privacy command is always rejected
7034 	 * if this condition is enabled.
7036 	if (hdev_is_powered(hdev))
7037 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
7038 				MGMT_STATUS_REJECTED);
7043 	/* If user space supports this command it is also expected to
7044 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
7046 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enable path: store IRK, force fresh RPA, pick limited/full privacy */
7049 	changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
7050 	memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
7051 	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
7052 	hci_adv_instances_set_rpa_expired(hdev, true);
7053 	if (cp->privacy == 0x02)
7054 		hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
7056 	hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disable path: wipe the stored IRK and clear all privacy state */
7058 	changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
7059 	memset(hdev->irk, 0, sizeof(hdev->irk));
7060 	hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
7061 	hci_adv_instances_set_rpa_expired(hdev, false);
7062 	hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
7065 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
7070 	err = new_settings(hdev, sk);
7073 	hci_dev_unlock(hdev);
/* Validates the address portion of an IRK entry: LE public addresses are
 * accepted; LE random addresses must be static (top two bits of the MSB
 * set, i.e. 0b11xxxxxx).  NOTE(review): trailing return lines elided in
 * this view.
 */
7077 static bool irk_is_valid(struct mgmt_irk_info *irk)
7079 	switch (irk->addr.type) {
7080 	case BDADDR_LE_PUBLIC:
7083 	case BDADDR_LE_RANDOM:
7084 		/* Two most significant bits shall be set */
7085 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS.  Bounds-checks irk_count against the
 * maximum that fits in a u16-sized payload, verifies the exact expected
 * struct_size() length, validates every entry, then replaces the whole SMP
 * IRK store (clear + re-add, skipping blocklisted keys) and enables RPA
 * resolving.
 */
7093 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
7096 	struct mgmt_cp_load_irks *cp = cp_data;
7097 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
7098 				   sizeof(struct mgmt_irk_info));
7099 	u16 irk_count, expected_len;
7102 	bt_dev_dbg(hdev, "sock %p", sk);
7104 	if (!lmp_le_capable(hdev))
7105 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7106 				       MGMT_STATUS_NOT_SUPPORTED);
7108 	irk_count = __le16_to_cpu(cp->irk_count);
7109 	if (irk_count > max_irk_count) {
7110 		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
7112 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7113 				       MGMT_STATUS_INVALID_PARAMS);
/* Length must match the declared count exactly — no trailing bytes */
7116 	expected_len = struct_size(cp, irks, irk_count);
7117 	if (expected_len != len) {
7118 		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
7120 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
7121 				       MGMT_STATUS_INVALID_PARAMS);
7124 	bt_dev_dbg(hdev, "irk_count %u", irk_count);
/* Validate everything before mutating state */
7126 	for (i = 0; i < irk_count; i++) {
7127 		struct mgmt_irk_info *key = &cp->irks[i];
7129 		if (!irk_is_valid(key))
7130 			return mgmt_cmd_status(sk, hdev->id,
7132 					       MGMT_STATUS_INVALID_PARAMS);
7137 	hci_smp_irks_clear(hdev);
7139 	for (i = 0; i < irk_count; i++) {
7140 		struct mgmt_irk_info *irk = &cp->irks[i];
7142 		if (hci_is_blocked_key(hdev,
7143 				       HCI_BLOCKED_KEY_TYPE_IRK,
7145 			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
7150 		hci_add_irk(hdev, &irk->addr.bdaddr,
7151 			    le_addr_type(irk->addr.type), irk->val,
7155 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
7157 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
7159 	hci_dev_unlock(hdev);
/* Tizen vendor handler: stores LE advertising parameters (interval range,
 * filter policy, adv type) into hdev for later use.  Rejected while
 * HCI_ADVERTISING is active.  Interval bounds 0x0020..0x4000 match the
 * HCI LE Set Advertising Parameters spec range.
 */
7165 static int set_advertising_params(struct sock *sk, struct hci_dev *hdev,
7166 void *data, u16 len)
7168 	struct mgmt_cp_set_advertising_params *cp = data;
7173 	BT_DBG("%s", hdev->name);
7175 	if (!lmp_le_capable(hdev))
7176 		return mgmt_cmd_status(sk, hdev->id,
7177 				       MGMT_OP_SET_ADVERTISING_PARAMS,
7178 				       MGMT_STATUS_NOT_SUPPORTED);
7180 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7181 		return mgmt_cmd_status(sk, hdev->id,
7182 				       MGMT_OP_SET_ADVERTISING_PARAMS,
7185 	min_interval = __le16_to_cpu(cp->interval_min);
7186 	max_interval = __le16_to_cpu(cp->interval_max);
7188 	if (min_interval > max_interval ||
7189 	    min_interval < 0x0020 || max_interval > 0x4000)
7190 		return mgmt_cmd_status(sk, hdev->id,
7191 				       MGMT_OP_SET_ADVERTISING_PARAMS,
7192 				       MGMT_STATUS_INVALID_PARAMS);
7196 	hdev->le_adv_min_interval = min_interval;
7197 	hdev->le_adv_max_interval = max_interval;
7198 	hdev->adv_filter_policy = cp->filter_policy;
7199 	hdev->adv_type = cp->type;
7201 	err = mgmt_cmd_complete(sk, hdev->id,
7202 				MGMT_OP_SET_ADVERTISING_PARAMS, 0, NULL, 0);
7204 	hci_dev_unlock(hdev);
/* hci_request completion for SET_ADVERTISING_DATA: looks up the pending
 * command and forwards either an error status or a success completion to
 * the requesting socket, then removes the pending entry.
 */
7209 static void set_advertising_data_complete(struct hci_dev *hdev,
7210 u8 status, u16 opcode)
7212 	struct mgmt_cp_set_advertising_data *cp;
7213 	struct mgmt_pending_cmd *cmd;
7215 	BT_DBG("status 0x%02x", status);
7219 	cmd = pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev);
7226 	mgmt_cmd_status(cmd->sk, hdev->id,
7227 			MGMT_OP_SET_ADVERTISING_DATA,
7228 			mgmt_status(status));
7230 	mgmt_cmd_complete(cmd->sk, hdev->id,
7231 			  MGMT_OP_SET_ADVERTISING_DATA, 0,
7234 	mgmt_pending_remove(cmd);
7237 	hci_dev_unlock(hdev);
/* Tizen vendor handler: pushes raw advertising data (max HCI_MAX_AD_LENGTH
 * bytes) to the controller via HCI LE Set Advertising Data, tracked through
 * a pending mgmt command; only one request may be in flight at a time.
 */
7240 static int set_advertising_data(struct sock *sk, struct hci_dev *hdev,
7241 void *data, u16 len)
7243 	struct mgmt_pending_cmd *cmd;
7244 	struct hci_request req;
7245 	struct mgmt_cp_set_advertising_data *cp = data;
7246 	struct hci_cp_le_set_adv_data adv;
7249 	BT_DBG("%s", hdev->name);
7251 	if (!lmp_le_capable(hdev)) {
7252 		return mgmt_cmd_status(sk, hdev->id,
7253 				       MGMT_OP_SET_ADVERTISING_DATA,
7254 				       MGMT_STATUS_NOT_SUPPORTED);
7259 	if (pending_find(MGMT_OP_SET_ADVERTISING_DATA, hdev)) {
7260 		err = mgmt_cmd_status(sk, hdev->id,
7261 				      MGMT_OP_SET_ADVERTISING_DATA,
/* Whole payload is the AD blob; cap at controller limit */
7266 	if (len > HCI_MAX_AD_LENGTH) {
7267 		err = mgmt_cmd_status(sk, hdev->id,
7268 				      MGMT_OP_SET_ADVERTISING_DATA,
7269 				      MGMT_STATUS_INVALID_PARAMS);
7273 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING_DATA,
7280 	hci_req_init(&req, hdev);
7282 	memset(&adv, 0, sizeof(adv));
7283 	memcpy(adv.data, cp->data, len);
7286 	hci_req_add(&req, HCI_OP_LE_SET_ADV_DATA, sizeof(adv), &adv);
7288 	err = hci_req_run(&req, set_advertising_data_complete);
7290 		mgmt_pending_remove(cmd);
7293 	hci_dev_unlock(hdev);
7298 /* Adv White List feature */
/* hci_request completion for ADD_DEV_WHITE_LIST: relays controller status
 * (error → mgmt_cmd_status, success → mgmt_cmd_complete echoing the
 * original parameters) and drops the pending command.
 */
7299 static void add_white_list_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7301 	struct mgmt_cp_add_dev_white_list *cp;
7302 	struct mgmt_pending_cmd *cmd;
7304 	BT_DBG("status 0x%02x", status);
7308 	cmd = pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev);
7315 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7316 			mgmt_status(status));
7318 	mgmt_cmd_complete(cmd->sk, hdev->id,
7319 			  MGMT_OP_ADD_DEV_WHITE_LIST, 0, cp, sizeof(*cp));
7321 	mgmt_pending_remove(cmd);
7324 	hci_dev_unlock(hdev);
/* Tizen vendor handler: adds a device to the controller's LE accept
 * (white) list via HCI_OP_LE_ADD_TO_WHITE_LIST.  Requires LE capability
 * and a powered adapter; one request in flight at a time.
 */
7327 static int add_white_list(struct sock *sk, struct hci_dev *hdev,
7328 void *data, u16 len)
7330 	struct mgmt_pending_cmd *cmd;
7331 	struct mgmt_cp_add_dev_white_list *cp = data;
7332 	struct hci_request req;
7335 	BT_DBG("%s", hdev->name);
7337 	if (!lmp_le_capable(hdev))
7338 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7339 				       MGMT_STATUS_NOT_SUPPORTED);
7341 	if (!hdev_is_powered(hdev))
7342 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7343 				       MGMT_STATUS_REJECTED);
7347 	if (pending_find(MGMT_OP_ADD_DEV_WHITE_LIST, hdev)) {
7348 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_DEV_WHITE_LIST,
7353 	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_DEV_WHITE_LIST, hdev, data, len);
7359 	hci_req_init(&req, hdev);
/* mgmt cp layout is forwarded to the HCI command as-is here */
7361 	hci_req_add(&req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(*cp), cp);
7363 	err = hci_req_run(&req, add_white_list_complete);
7365 		mgmt_pending_remove(cmd);
7370 	hci_dev_unlock(hdev);
/* hci_request completion for REMOVE_DEV_FROM_WHITE_LIST: forwards the
 * mapped status or a success completion to the requester and removes the
 * pending command.
 */
7375 static void remove_from_white_list_complete(struct hci_dev *hdev,
7376 u8 status, u16 opcode)
7378 	struct mgmt_cp_remove_dev_from_white_list *cp;
7379 	struct mgmt_pending_cmd *cmd;
7381 	BT_DBG("status 0x%02x", status);
7385 	cmd = pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev);
7392 	mgmt_cmd_status(cmd->sk, hdev->id,
7393 			MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7394 			mgmt_status(status));
7396 	mgmt_cmd_complete(cmd->sk, hdev->id,
7397 			  MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, 0,
7400 	mgmt_pending_remove(cmd);
7403 	hci_dev_unlock(hdev);
/* Tizen vendor handler: removes a device from the controller's LE accept
 * (white) list via HCI_OP_LE_DEL_FROM_WHITE_LIST.  Same preconditions as
 * add_white_list: LE-capable, powered, no duplicate pending request.
 */
7406 static int remove_from_white_list(struct sock *sk, struct hci_dev *hdev,
7407 void *data, u16 len)
7409 	struct mgmt_pending_cmd *cmd;
7410 	struct mgmt_cp_remove_dev_from_white_list *cp = data;
7411 	struct hci_request req;
7414 	BT_DBG("%s", hdev->name);
7416 	if (!lmp_le_capable(hdev))
7417 		return mgmt_cmd_status(sk, hdev->id,
7418 				       MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7419 				       MGMT_STATUS_NOT_SUPPORTED);
7421 	if (!hdev_is_powered(hdev))
7422 		return mgmt_cmd_status(sk, hdev->id,
7423 				       MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7424 				       MGMT_STATUS_REJECTED);
7428 	if (pending_find(MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST, hdev)) {
7429 		err = mgmt_cmd_status(sk, hdev->id,
7430 				      MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7435 	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_DEV_FROM_WHITE_LIST,
7442 	hci_req_init(&req, hdev);
7444 	hci_req_add(&req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(*cp), cp);
7446 	err = hci_req_run(&req, remove_from_white_list_complete);
7448 		mgmt_pending_remove(cmd);
7453 	hci_dev_unlock(hdev);
/* hci_request completion for CLEAR_DEV_WHITE_LIST: relays status/success
 * to the requester and removes the pending command.
 */
7458 static void clear_white_list_complete(struct hci_dev *hdev, u8 status,
7461 	struct mgmt_pending_cmd *cmd;
7463 	BT_DBG("status 0x%02x", status);
7467 	cmd = pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev);
7472 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7473 			mgmt_status(status));
7475 	mgmt_cmd_complete(cmd->sk, hdev->id,
7476 			  MGMT_OP_CLEAR_DEV_WHITE_LIST,
7479 	mgmt_pending_remove(cmd);
7482 	hci_dev_unlock(hdev);
/* Tizen vendor handler: clears the controller's entire LE accept (white)
 * list via HCI_OP_LE_CLEAR_WHITE_LIST (no parameters).  Preconditions
 * mirror the other white-list handlers.
 */
7485 static int clear_white_list(struct sock *sk, struct hci_dev *hdev,
7486 void *data, u16 len)
7488 	struct mgmt_pending_cmd *cmd;
7489 	struct hci_request req;
7492 	BT_DBG("%s", hdev->name);
7494 	if (!lmp_le_capable(hdev))
7495 		return mgmt_cmd_status(sk, hdev->id,
7496 				       MGMT_OP_CLEAR_DEV_WHITE_LIST,
7497 				       MGMT_STATUS_NOT_SUPPORTED);
7499 	if (!hdev_is_powered(hdev))
7500 		return mgmt_cmd_status(sk, hdev->id,
7501 				       MGMT_OP_CLEAR_DEV_WHITE_LIST,
7502 				       MGMT_STATUS_REJECTED);
7506 	if (pending_find(MGMT_OP_CLEAR_DEV_WHITE_LIST, hdev)) {
7507 		err = mgmt_cmd_status(sk, hdev->id,
7508 				      MGMT_OP_CLEAR_DEV_WHITE_LIST,
7513 	cmd = mgmt_pending_add(sk, MGMT_OP_CLEAR_DEV_WHITE_LIST,
7520 	hci_req_init(&req, hdev);
7522 	hci_req_add(&req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
7524 	err = hci_req_run(&req, clear_white_list_complete);
7526 		mgmt_pending_remove(cmd);
7531 	hci_dev_unlock(hdev);
/* hci_request completion for SET_SCAN_RSP_DATA: forwards error status or a
 * success completion to the requester and removes the pending command.
 */
7536 static void set_scan_rsp_data_complete(struct hci_dev *hdev, u8 status,
7539 	struct mgmt_cp_set_scan_rsp_data *cp;
7540 	struct mgmt_pending_cmd *cmd;
7542 	BT_DBG("status 0x%02x", status);
7546 	cmd = pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev);
7553 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7554 			mgmt_status(status));
7556 	mgmt_cmd_complete(cmd->sk, hdev->id,
7557 			  MGMT_OP_SET_SCAN_RSP_DATA, 0,
7560 	mgmt_pending_remove(cmd);
7563 	hci_dev_unlock(hdev);
/* Tizen vendor handler: pushes raw scan-response data (max
 * HCI_MAX_AD_LENGTH bytes) via HCI LE Set Scan Response Data; mirrors
 * set_advertising_data in structure and preconditions.
 */
7566 static int set_scan_rsp_data(struct sock *sk, struct hci_dev *hdev, void *data,
7569 	struct mgmt_pending_cmd *cmd;
7570 	struct hci_request req;
7571 	struct mgmt_cp_set_scan_rsp_data *cp = data;
7572 	struct hci_cp_le_set_scan_rsp_data rsp;
7575 	BT_DBG("%s", hdev->name);
7577 	if (!lmp_le_capable(hdev))
7578 		return mgmt_cmd_status(sk, hdev->id,
7579 				       MGMT_OP_SET_SCAN_RSP_DATA,
7580 				       MGMT_STATUS_NOT_SUPPORTED);
7584 	if (pending_find(MGMT_OP_SET_SCAN_RSP_DATA, hdev)) {
7585 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7590 	if (len > HCI_MAX_AD_LENGTH) {
7591 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_RSP_DATA,
7592 				      MGMT_STATUS_INVALID_PARAMS);
7596 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SCAN_RSP_DATA, hdev, data, len);
7602 	hci_req_init(&req, hdev);
7604 	memset(&rsp, 0, sizeof(rsp));
7605 	memcpy(rsp.data, cp->data, len);
7608 	hci_req_add(&req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(rsp), &rsp);
7610 	err = hci_req_run(&req, set_scan_rsp_data_complete);
7612 		mgmt_pending_remove(cmd);
7615 	hci_dev_unlock(hdev);
/* hci_request completion for the RSSI-threshold write issued on behalf of
 * MGMT_OP_SET_RSSI_ENABLE: relays status/success and removes the pending
 * command.
 */
7620 static void set_rssi_threshold_complete(struct hci_dev *hdev,
7621 u8 status, u16 opcode)
7623 	struct mgmt_pending_cmd *cmd;
7625 	BT_DBG("status 0x%02x", status);
7629 	cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7634 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7635 			mgmt_status(status));
7637 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7640 	mgmt_pending_remove(cmd);
7643 	hci_dev_unlock(hdev);
/* hci_request completion for MGMT_OP_SET_RSSI_DISABLE: relays status or a
 * success completion and removes the pending command.
 */
7646 static void set_rssi_disable_complete(struct hci_dev *hdev,
7647 u8 status, u16 opcode)
7649 	struct mgmt_pending_cmd *cmd;
7651 	BT_DBG("status 0x%02x", status);
7655 	cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7660 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7661 			mgmt_status(status));
7663 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7666 	mgmt_pending_remove(cmd);
7669 	hci_dev_unlock(hdev);
/* Second stage of RSSI enable (Tizen vendor path): resolves the LE/ACL
 * connection for cp->bdaddr and sends the vendor HCI_OP_ENABLE_RSSI command
 * with sub-opcode 0x0B carrying the low/in-range/high threshold values and
 * alert mask 0x07 (all alerts).  Expects an existing SET_RSSI_ENABLE
 * pending command; fails the request if none is found.
 */
7672 int mgmt_set_rssi_threshold(struct sock *sk, struct hci_dev *hdev,
7673 void *data, u16 len)
7676 	struct hci_cp_set_rssi_threshold th = { 0, };
7677 	struct mgmt_cp_set_enable_rssi *cp = data;
7678 	struct hci_conn *conn;
7679 	struct mgmt_pending_cmd *cmd;
7680 	struct hci_request req;
7685 	cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7687 	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7688 			      MGMT_STATUS_FAILED);
7692 	if (!lmp_le_capable(hdev)) {
7693 		mgmt_pending_remove(cmd);
7694 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7695 				      MGMT_STATUS_NOT_SUPPORTED);
7699 	if (!hdev_is_powered(hdev)) {
7700 		BT_DBG("%s", hdev->name);
7701 		mgmt_pending_remove(cmd);
7702 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7703 				      MGMT_STATUS_NOT_POWERED);
/* link_type 0x01 = LE, otherwise BR/EDR ACL */
7707 	if (cp->link_type == 0x01)
7708 		dest_type = LE_LINK;
7710 		dest_type = ACL_LINK;
7712 	/* Get LE/ACL link handle info */
7713 	conn = hci_conn_hash_lookup_ba(hdev,
7714 				       dest_type, &cp->bdaddr);
7717 	err = mgmt_cmd_complete(sk, hdev->id,
7718 				MGMT_OP_SET_RSSI_ENABLE, 1, NULL, 0);
7719 	mgmt_pending_remove(cmd);
7723 	hci_req_init(&req, hdev);
/* 0x0B = vendor sub-opcode for "set RSSI threshold"; 0x07 = all alerts */
7725 	th.hci_le_ext_opcode = 0x0B;
7727 	th.conn_handle = conn->handle;
7728 	th.alert_mask = 0x07;
7729 	th.low_th = cp->low_th;
7730 	th.in_range_th = cp->in_range_th;
7731 	th.high_th = cp->high_th;
7733 	hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
7734 	err = hci_req_run(&req, set_rssi_threshold_complete);
7737 	mgmt_pending_remove(cmd);
7738 	BT_ERR("Error in requesting hci_req_run");
7743 	hci_dev_unlock(hdev);
/* Reports successful RSSI enable back to userspace: completes the mgmt
 * command with the controller's reply, broadcasts MGMT_EV_RSSI_ENABLED,
 * resets per-link RSSI state for the link type, marks this connection as
 * monitored, and clears the pending SET_RSSI_ENABLE command.
 */
7747 void mgmt_rssi_enable_success(struct sock *sk, struct hci_dev *hdev,
7748 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7750 	struct mgmt_cc_rsp_enable_rssi mgmt_rp = { 0, };
7751 	struct mgmt_cp_set_enable_rssi *cp = data;
7752 	struct mgmt_pending_cmd *cmd;
7757 	mgmt_rp.status = rp->status;
7758 	mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7759 	mgmt_rp.bt_address = cp->bdaddr;
7760 	mgmt_rp.link_type = cp->link_type;
7762 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7763 			  MGMT_STATUS_SUCCESS, &mgmt_rp,
7764 			  sizeof(struct mgmt_cc_rsp_enable_rssi));
7766 	mgmt_event(MGMT_EV_RSSI_ENABLED, hdev, &mgmt_rp,
7767 		   sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
/* Only one monitored link per link type: unset others, then set this one */
7769 	hci_conn_rssi_unset_all(hdev, mgmt_rp.link_type);
7770 	hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7771 				&mgmt_rp.bt_address, true);
7775 	cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7777 	mgmt_pending_remove(cmd);
7779 	hci_dev_unlock(hdev);
/* Reports successful RSSI disable: completes the mgmt command, broadcasts
 * MGMT_EV_RSSI_DISABLED, marks the connection as no longer monitored and
 * clears the pending SET_RSSI_DISABLE command.  NOTE(review): the reply is
 * sized as mgmt_cc_rsp_enable_rssi although the local is
 * mgmt_cc_rp_disable_rssi — presumably the structs are layout-compatible;
 * verify against mgmt_tizen.h.
 */
7782 void mgmt_rssi_disable_success(struct sock *sk, struct hci_dev *hdev,
7783 void *data, struct hci_cc_rsp_enable_rssi *rp, int success)
7785 	struct mgmt_cc_rp_disable_rssi mgmt_rp = { 0, };
7786 	struct mgmt_cp_disable_rssi *cp = data;
7787 	struct mgmt_pending_cmd *cmd;
7792 	mgmt_rp.status = rp->status;
7793 	mgmt_rp.le_ext_opcode = rp->le_ext_opcode;
7794 	mgmt_rp.bt_address = cp->bdaddr;
7795 	mgmt_rp.link_type = cp->link_type;
7797 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7798 			  MGMT_STATUS_SUCCESS, &mgmt_rp,
7799 			  sizeof(struct mgmt_cc_rsp_enable_rssi));
7801 	mgmt_event(MGMT_EV_RSSI_DISABLED, hdev, &mgmt_rp,
7802 		   sizeof(struct mgmt_cc_rsp_enable_rssi), NULL);
7804 	hci_conn_rssi_state_set(hdev, mgmt_rp.link_type,
7805 				&mgmt_rp.bt_address, false);
7809 	cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7811 	mgmt_pending_remove(cmd);
7813 	hci_dev_unlock(hdev);
/* Sends the vendor "RSSI monitoring off" command: sub-opcode 0x01 with
 * feature mask 0x00 (disable) and zeroed payload, via HCI_OP_ENABLE_RSSI.
 * Requires an existing SET_RSSI_DISABLE pending command; fails the request
 * if none exists.
 */
7816 static int mgmt_set_disable_rssi(struct sock *sk, struct hci_dev *hdev,
7817 void *data, u16 len)
7819 	struct mgmt_pending_cmd *cmd;
7820 	struct hci_request req;
7821 	struct hci_cp_set_enable_rssi cp_en = { 0, };
7824 	BT_DBG("Set Disable RSSI.");
/* 0x01 = enable/disable sub-opcode; le_enable_cs_Features 0x00 = disable */
7826 	cp_en.hci_le_ext_opcode = 0x01;
7827 	cp_en.le_enable_cs_Features = 0x00;
7828 	cp_en.data[0] = 0x00;
7829 	cp_en.data[1] = 0x00;
7830 	cp_en.data[2] = 0x00;
7834 	cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7836 	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7837 			      MGMT_STATUS_FAILED);
7841 	if (!lmp_le_capable(hdev)) {
7842 		mgmt_pending_remove(cmd);
7843 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7844 				      MGMT_STATUS_NOT_SUPPORTED);
7848 	if (!hdev_is_powered(hdev)) {
7849 		BT_DBG("%s", hdev->name);
7850 		mgmt_pending_remove(cmd);
7851 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
7852 				      MGMT_STATUS_NOT_POWERED);
7856 	hci_req_init(&req, hdev);
7858 	BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
7859 	       sizeof(struct hci_cp_set_enable_rssi),
7860 	       cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
7861 	       cp_en.data[0], cp_en.data[1], cp_en.data[2]);
7863 	hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
7864 	err = hci_req_run(&req, set_rssi_disable_complete);
7867 	mgmt_pending_remove(cmd);
7868 	BT_ERR("Error in requesting hci_req_run");
7873 	hci_dev_unlock(hdev);
/* Command-complete dispatcher for the vendor RSSI enable/disable HCI
 * command.  Looks up whichever of SET_RSSI_ENABLE / SET_RSSI_DISABLE is
 * pending and, based on rp->le_ext_opcode, chains the next step: enable →
 * set threshold → report success; disable → report success, or either
 * unset the threshold only (other links still monitored) or fully disable
 * monitoring when this was the last monitored link.
 */
7877 void mgmt_enable_rssi_cc(struct hci_dev *hdev, void *response, u8 status)
7879 	struct hci_cc_rsp_enable_rssi *rp = response;
7880 	struct mgmt_pending_cmd *cmd_enable = NULL;
7881 	struct mgmt_pending_cmd *cmd_disable = NULL;
7882 	struct mgmt_cp_set_enable_rssi *cp_en;
7883 	struct mgmt_cp_disable_rssi *cp_dis;
7886 	cmd_enable = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7887 	cmd_disable = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
7888 	hci_dev_unlock(hdev);
7891 	BT_DBG("Enable Request");
7894 	BT_DBG("Disable Request");
7897 	cp_en = cmd_enable->param;
7902 	switch (rp->le_ext_opcode) {
/* enable ack: now program the threshold values */
7904 	BT_DBG("RSSI enabled.. Setting Threshold...");
7905 	mgmt_set_rssi_threshold(cmd_enable->sk, hdev,
7906 				cp_en, sizeof(*cp_en));
/* threshold ack: the enable sequence is done */
7910 	BT_DBG("Sending RSSI enable success");
7911 	mgmt_rssi_enable_success(cmd_enable->sk, hdev,
7912 				 cp_en, rp, rp->status);
7916 	} else if (cmd_disable) {
7917 	cp_dis = cmd_disable->param;
7922 	switch (rp->le_ext_opcode) {
7924 	BT_DBG("Sending RSSI disable success");
7925 	mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7926 				  cp_dis, rp, rp->status);
7931 	 * Only unset RSSI Threshold values for the Link if
7932 	 * RSSI is monitored for other BREDR or LE Links
7934 	if (hci_conn_hash_lookup_rssi_count(hdev) > 1) {
7935 	BT_DBG("Unset Threshold. Other links being monitored");
7936 	mgmt_rssi_disable_success(cmd_disable->sk, hdev,
7937 				  cp_dis, rp, rp->status);
7939 	BT_DBG("Unset Threshold. Disabling...");
7940 	mgmt_set_disable_rssi(cmd_disable->sk, hdev,
7941 			      cp_dis, sizeof(*cp_dis));
/* hci_request completion for the vendor RSSI-enable write: relays
 * status/success to the requester and removes the pending command.
 */
7948 static void set_rssi_enable_complete(struct hci_dev *hdev, u8 status,
7951 	struct mgmt_pending_cmd *cmd;
7953 	BT_DBG("status 0x%02x", status);
7957 	cmd = pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev);
7962 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7963 			mgmt_status(status));
7965 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE, 0,
7968 	mgmt_pending_remove(cmd);
7971 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_RSSI_ENABLE (Tizen vendor path).  Sends the
 * vendor enable command (sub-opcode 0x01, feature mask 0x04) unless RSSI
 * monitoring is already active on some link, in which case it skips
 * straight to programming thresholds via mgmt_set_rssi_threshold.
 */
7974 static int set_enable_rssi(struct sock *sk, struct hci_dev *hdev,
7975 void *data, u16 len)
7977 	struct mgmt_pending_cmd *cmd;
7978 	struct hci_request req;
7979 	struct mgmt_cp_set_enable_rssi *cp = data;
7980 	struct hci_cp_set_enable_rssi cp_en = { 0, };
7983 	BT_DBG("Set Enable RSSI.");
/* 0x01 = enable/disable sub-opcode; 0x04 = enable-RSSI feature bit */
7985 	cp_en.hci_le_ext_opcode = 0x01;
7986 	cp_en.le_enable_cs_Features = 0x04;
7987 	cp_en.data[0] = 0x00;
7988 	cp_en.data[1] = 0x00;
7989 	cp_en.data[2] = 0x00;
7993 	if (!lmp_le_capable(hdev)) {
7994 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
7995 				      MGMT_STATUS_NOT_SUPPORTED);
7999 	if (!hdev_is_powered(hdev)) {
8000 		BT_DBG("%s", hdev->name);
8001 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8002 				      MGMT_STATUS_NOT_POWERED);
8006 	if (pending_find(MGMT_OP_SET_RSSI_ENABLE, hdev)) {
8007 		BT_DBG("%s", hdev->name);
8008 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_ENABLE,
8013 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_ENABLE, hdev, cp,
8016 		BT_DBG("%s", hdev->name);
8021 	/* If RSSI is already enabled directly set Threshold values */
8022 	if (hci_conn_hash_lookup_rssi_count(hdev) > 0) {
8023 		hci_dev_unlock(hdev);
8024 		BT_DBG("RSSI Enabled. Directly set Threshold");
8025 		err = mgmt_set_rssi_threshold(sk, hdev, cp, sizeof(*cp));
8029 	hci_req_init(&req, hdev);
8031 	BT_DBG("Enable Len: %zu [%2.2X %2.2X %2.2X %2.2X %2.2X]",
8032 	       sizeof(struct hci_cp_set_enable_rssi),
8033 	       cp_en.hci_le_ext_opcode, cp_en.le_enable_cs_Features,
8034 	       cp_en.data[0], cp_en.data[1], cp_en.data[2]);
8036 	hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(cp_en), &cp_en);
8037 	err = hci_req_run(&req, set_rssi_enable_complete);
8040 	mgmt_pending_remove(cmd);
8041 	BT_ERR("Error in requesting hci_req_run");
8046 	hci_dev_unlock(hdev);
/* hci_request completion for GET_RAW_RSSI: completes the mgmt command with
 * the 1-byte HCI status as payload and removes the pending command (the
 * actual RSSI value is delivered separately via mgmt_raw_rssi_response).
 */
8051 static void get_raw_rssi_complete(struct hci_dev *hdev, u8 status, u16 opcode)
8053 	struct mgmt_pending_cmd *cmd;
8055 	BT_DBG("status 0x%02x", status);
8059 	cmd = pending_find(MGMT_OP_GET_RAW_RSSI, hdev);
8063 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8064 			  MGMT_STATUS_SUCCESS, &status, 1);
8066 	mgmt_pending_remove(cmd);
8069 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_RAW_RSSI (Tizen vendor path): resolves the LE or
 * BR/EDR connection for the given address and queries the controller for
 * the raw RSSI of that connection handle via HCI_OP_GET_RAW_RSSI.
 */
8072 static int get_raw_rssi(struct sock *sk, struct hci_dev *hdev, void *data,
8075 	struct mgmt_pending_cmd *cmd;
8076 	struct hci_request req;
8077 	struct mgmt_cp_get_raw_rssi *cp = data;
8078 	struct hci_cp_get_raw_rssi hci_cp;
8080 	struct hci_conn *conn;
8084 	BT_DBG("Get Raw RSSI.");
8088 	if (!lmp_le_capable(hdev)) {
8089 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8090 				      MGMT_STATUS_NOT_SUPPORTED);
/* link_type 0x01 = LE, otherwise BR/EDR ACL */
8094 	if (cp->link_type == 0x01)
8095 		dest_type = LE_LINK;
8097 		dest_type = ACL_LINK;
8099 	/* Get LE/BREDR link handle info */
8100 	conn = hci_conn_hash_lookup_ba(hdev,
8101 				       dest_type, &cp->bt_address);
8103 	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8104 			      MGMT_STATUS_NOT_CONNECTED);
8107 	hci_cp.conn_handle = conn->handle;
8109 	if (!hdev_is_powered(hdev)) {
8110 		BT_DBG("%s", hdev->name);
8111 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8112 				      MGMT_STATUS_NOT_POWERED);
8116 	if (pending_find(MGMT_OP_GET_RAW_RSSI, hdev)) {
8117 		BT_DBG("%s", hdev->name);
8118 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_RAW_RSSI,
8123 	cmd = mgmt_pending_add(sk, MGMT_OP_GET_RAW_RSSI, hdev, data, len);
8125 		BT_DBG("%s", hdev->name);
8130 	hci_req_init(&req, hdev);
8132 	BT_DBG("Connection Handle [%d]", hci_cp.conn_handle);
8133 	hci_req_add(&req, HCI_OP_GET_RAW_RSSI, sizeof(hci_cp), &hci_cp);
8134 	err = hci_req_run(&req, get_raw_rssi_complete);
8137 	mgmt_pending_remove(cmd);
8138 	BT_ERR("Error in requesting hci_req_run");
8142 	hci_dev_unlock(hdev);
/* Delivers the controller's raw-RSSI reading to userspace as a
 * MGMT_EV_RAW_RSSI event, resolving the connection handle back to a peer
 * address and link type (0x01 = LE, 0x00 = BR/EDR).
 */
8147 void mgmt_raw_rssi_response(struct hci_dev *hdev,
8148 struct hci_cc_rp_get_raw_rssi *rp, int success)
8150 	struct mgmt_cc_rp_get_raw_rssi mgmt_rp = { 0, };
8151 	struct hci_conn *conn;
8153 	mgmt_rp.status = rp->status;
8154 	mgmt_rp.rssi_dbm = rp->rssi_dbm;
8156 	conn = hci_conn_hash_lookup_handle(hdev, rp->conn_handle);
8160 	bacpy(&mgmt_rp.bt_address, &conn->dst);
8161 	if (conn->type == LE_LINK)
8162 		mgmt_rp.link_type = 0x01;
8164 		mgmt_rp.link_type = 0x00;
8166 	mgmt_event(MGMT_EV_RAW_RSSI, hdev, &mgmt_rp,
8167 		   sizeof(struct mgmt_cc_rp_get_raw_rssi), NULL);
/* hci_request completion for the per-link threshold-unset write issued by
 * set_disable_threshold: completes the SET_RSSI_DISABLE command with the
 * 1-byte status and removes the pending entry.
 */
8170 static void set_disable_threshold_complete(struct hci_dev *hdev,
8171 u8 status, u16 opcode)
8173 	struct mgmt_pending_cmd *cmd;
8175 	BT_DBG("status 0x%02x", status);
8179 	cmd = pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev);
8183 	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8184 			  MGMT_STATUS_SUCCESS, &status, 1);
8186 	mgmt_pending_remove(cmd);
8189 	hci_dev_unlock(hdev);
8192 /** Removes monitoring for a link*/
/* First stage of MGMT_OP_SET_RSSI_DISABLE: clears the vendor RSSI
 * threshold for the single connection identified by cp->bdaddr/link_type
 * (sub-opcode 0x0B with alert mask 0x00, i.e. no alerts).  Replies with a
 * one-status completion when the link is not connected.
 */
8193 static int set_disable_threshold(struct sock *sk, struct hci_dev *hdev,
8194 void *data, u16 len)
8197 	struct hci_cp_set_rssi_threshold th = { 0, };
8198 	struct mgmt_cp_disable_rssi *cp = data;
8199 	struct hci_conn *conn;
8200 	struct mgmt_pending_cmd *cmd;
8201 	struct hci_request req;
8204 	BT_DBG("Set Disable RSSI.");
8208 	if (!lmp_le_capable(hdev)) {
8209 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8210 				      MGMT_STATUS_NOT_SUPPORTED);
8214 	/* Get LE/ACL link handle info*/
8215 	if (cp->link_type == 0x01)
8216 		dest_type = LE_LINK;
8218 		dest_type = ACL_LINK;
8220 	conn = hci_conn_hash_lookup_ba(hdev, dest_type, &cp->bdaddr);
8222 	err = mgmt_cmd_complete(sk, hdev->id,
8223 				MGMT_OP_SET_RSSI_DISABLE, 1, NULL, 0);
/* alert_mask 0x00 disables all alerts for this handle */
8227 	th.hci_le_ext_opcode = 0x0B;
8229 	th.conn_handle = conn->handle;
8230 	th.alert_mask = 0x00;
8232 	th.in_range_th = 0x00;
8235 	if (!hdev_is_powered(hdev)) {
8236 		BT_DBG("%s", hdev->name);
8237 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8242 	if (pending_find(MGMT_OP_SET_RSSI_DISABLE, hdev)) {
8243 		BT_DBG("%s", hdev->name);
8244 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_RSSI_DISABLE,
8249 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_RSSI_DISABLE, hdev, cp,
8252 		BT_DBG("%s", hdev->name);
8257 	hci_req_init(&req, hdev);
8259 	hci_req_add(&req, HCI_OP_ENABLE_RSSI, sizeof(th), &th);
8260 	err = hci_req_run(&req, set_disable_threshold_complete);
8262 	mgmt_pending_remove(cmd);
8263 	BT_ERR("Error in requesting hci_req_run");
8268 	hci_dev_unlock(hdev);
/* Translates a vendor RSSI-alert HCI event into MGMT_EV_RSSI_ALERT for
 * userspace, mapping the connection handle to peer address and link type
 * (0x01 = LE, 0x00 = BR/EDR); drops the event if the handle is unknown.
 */
8273 void mgmt_rssi_alert_evt(struct hci_dev *hdev, u16 conn_handle,
8274 s8 alert_type, s8 rssi_dbm)
8276 	struct mgmt_ev_vendor_specific_rssi_alert mgmt_ev;
8277 	struct hci_conn *conn;
8279 	BT_DBG("RSSI alert [%2.2X %2.2X %2.2X]",
8280 	       conn_handle, alert_type, rssi_dbm);
8282 	conn = hci_conn_hash_lookup_handle(hdev, conn_handle);
8285 	BT_ERR("RSSI alert Error: Device not found for handle");
8288 	bacpy(&mgmt_ev.bdaddr, &conn->dst);
8290 	if (conn->type == LE_LINK)
8291 		mgmt_ev.link_type = 0x01;
8293 	mgmt_ev.link_type = 0x00;
8295 	mgmt_ev.alert_type = alert_type;
8296 	mgmt_ev.rssi_dbm = rssi_dbm;
8298 	mgmt_event(MGMT_EV_RSSI_ALERT, hdev, &mgmt_ev,
8299 		   sizeof(struct mgmt_ev_vendor_specific_rssi_alert),
/* Failure path for START_LE_DISCOVERY: resets LE discovery state to
 * STOPPED and completes the pending command with the mapped status and
 * the discovery type as payload.
 */
8303 static int mgmt_start_le_discovery_failed(struct hci_dev *hdev, u8 status)
8305 	struct mgmt_pending_cmd *cmd;
8309 	hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8311 	cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8315 	type = hdev->le_discovery.type;
8317 	err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8318 				mgmt_status(status), &type, sizeof(type));
8319 	mgmt_pending_remove(cmd);
/* hci_request completion for the LE-discovery start sequence: on error,
 * reports the failure; on success, moves discovery to FINDING and arms
 * the delayed le_scan_disable work (timeout visible as 0 here — the
 * assignment lines are elided in this view).
 */
8324 static void start_le_discovery_complete(struct hci_dev *hdev, u8 status,
8327 	unsigned long timeout = 0;
8329 	BT_DBG("status %d", status);
8333 	mgmt_start_le_discovery_failed(hdev, status);
8334 	hci_dev_unlock(hdev);
8339 	hci_le_discovery_set_state(hdev, DISCOVERY_FINDING);
8340 	hci_dev_unlock(hdev);
8342 	if (hdev->le_discovery.type != DISCOV_TYPE_LE)
8343 		BT_ERR("Invalid discovery type %d", hdev->le_discovery.type);
8348 	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* Handler for MGMT_OP_START_LE_DISCOVERY (Tizen separate-LE-scan path).
 * Validates power/state/type, then builds one hci_request that optionally
 * stops background scanning, selects a private own-address, programs the
 * LE scan parameters and enables active scanning without duplicate
 * filtering; on success the state becomes DISCOVERY_STARTING.
 */
8351 static int start_le_discovery(struct sock *sk, struct hci_dev *hdev,
8352 void *data, u16 len)
8354 	struct mgmt_cp_start_le_discovery *cp = data;
8355 	struct mgmt_pending_cmd *cmd;
8356 	struct hci_cp_le_set_scan_param param_cp;
8357 	struct hci_cp_le_set_scan_enable enable_cp;
8358 	struct hci_request req;
8359 	u8 status, own_addr_type;
8362 	BT_DBG("%s", hdev->name);
8364 	if (!hdev_is_powered(hdev)) {
8365 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8366 				      MGMT_STATUS_NOT_POWERED);
8370 	if (hdev->le_discovery.state != DISCOVERY_STOPPED) {
8371 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8376 	if (cp->type != DISCOV_TYPE_LE) {
8377 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8378 				      MGMT_STATUS_INVALID_PARAMS);
8382 	cmd = mgmt_pending_add(sk, MGMT_OP_START_LE_DISCOVERY, hdev, NULL, 0);
8388 	hdev->le_discovery.type = cp->type;
8390 	hci_req_init(&req, hdev);
8392 	status = mgmt_le_support(hdev);
8394 	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8396 	mgmt_pending_remove(cmd);
8400 	/* If controller is scanning, it means the background scanning
8401 	 * is running. Thus, we should temporarily stop it in order to
8402 	 * set the discovery scanning parameters.
8404 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
8405 		hci_req_add_le_scan_disable(&req, false);
8407 	memset(&param_cp, 0, sizeof(param_cp));
8409 	/* All active scans will be done with either a resolvable
8410 	 * private address (when privacy feature has been enabled)
8411 	 * or unresolvable private address.
8413 	err = hci_update_random_address_sync(hdev, true, hci_dev_test_flag(hdev, HCI_PRIVACY), &own_addr_type);
8415 	err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_START_LE_DISCOVERY,
8416 			      MGMT_STATUS_FAILED);
8417 	mgmt_pending_remove(cmd);
8421 	param_cp.type = hdev->le_scan_type;
8422 	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
8423 	param_cp.window = cpu_to_le16(hdev->le_scan_window);
8424 	param_cp.own_address_type = own_addr_type;
8425 	hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
/* Report every advertisement: duplicate filtering off for discovery */
8428 	memset(&enable_cp, 0, sizeof(enable_cp));
8429 	enable_cp.enable = LE_SCAN_ENABLE;
8430 	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
8432 	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
8435 	err = hci_req_run(&req, start_le_discovery_complete);
8437 	mgmt_pending_remove(cmd);
8439 	hci_le_discovery_set_state(hdev, DISCOVERY_STARTING);
8445 static int mgmt_stop_le_discovery_failed(struct hci_dev *hdev, u8 status)
8447 struct mgmt_pending_cmd *cmd;
8450 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8454 err = mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode,
8455 mgmt_status(status), &hdev->le_discovery.type,
8456 sizeof(hdev->le_discovery.type));
8457 mgmt_pending_remove(cmd);
8462 static void stop_le_discovery_complete(struct hci_dev *hdev, u8 status,
8465 BT_DBG("status %d", status);
8470 mgmt_stop_le_discovery_failed(hdev, status);
8474 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPED);
8477 hci_dev_unlock(hdev);
8480 static int stop_le_discovery(struct sock *sk, struct hci_dev *hdev,
8481 void *data, u16 len)
8483 struct mgmt_cp_stop_le_discovery *mgmt_cp = data;
8484 struct mgmt_pending_cmd *cmd;
8485 struct hci_request req;
8488 BT_DBG("%s", hdev->name);
8492 if (!hci_le_discovery_active(hdev)) {
8493 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8494 MGMT_STATUS_REJECTED, &mgmt_cp->type,
8495 sizeof(mgmt_cp->type));
8499 if (hdev->le_discovery.type != mgmt_cp->type) {
8500 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8501 MGMT_STATUS_INVALID_PARAMS,
8502 &mgmt_cp->type, sizeof(mgmt_cp->type));
8506 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_LE_DISCOVERY, hdev, NULL, 0);
8512 hci_req_init(&req, hdev);
8514 if (hdev->le_discovery.state != DISCOVERY_FINDING) {
8515 BT_DBG("unknown le discovery state %u",
8516 hdev->le_discovery.state);
8518 mgmt_pending_remove(cmd);
8519 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_LE_DISCOVERY,
8520 MGMT_STATUS_FAILED, &mgmt_cp->type,
8521 sizeof(mgmt_cp->type));
8525 cancel_delayed_work(&hdev->le_scan_disable);
8526 hci_req_add_le_scan_disable(&req, false);
8528 err = hci_req_run(&req, stop_le_discovery_complete);
8530 mgmt_pending_remove(cmd);
8532 hci_le_discovery_set_state(hdev, DISCOVERY_STOPPING);
8535 hci_dev_unlock(hdev);
8539 /* Separate LE discovery */
8540 void mgmt_le_discovering(struct hci_dev *hdev, u8 discovering)
8542 struct mgmt_ev_discovering ev;
8543 struct mgmt_pending_cmd *cmd;
8545 BT_DBG("%s le discovering %u", hdev->name, discovering);
8548 cmd = pending_find(MGMT_OP_START_LE_DISCOVERY, hdev);
8550 cmd = pending_find(MGMT_OP_STOP_LE_DISCOVERY, hdev);
8553 u8 type = hdev->le_discovery.type;
8555 mgmt_cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
8557 mgmt_pending_remove(cmd);
8560 memset(&ev, 0, sizeof(ev));
8561 ev.type = hdev->le_discovery.type;
8562 ev.discovering = discovering;
8564 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
8567 static int disable_le_auto_connect(struct sock *sk, struct hci_dev *hdev,
8568 void *data, u16 len)
8572 BT_DBG("%s", hdev->name);
8576 err = hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
8578 BT_ERR("HCI_OP_LE_CREATE_CONN_CANCEL is failed");
8580 hci_dev_unlock(hdev);
8585 static inline int check_le_conn_update_param(u16 min, u16 max, u16 latency,
8590 if (min > max || min < 6 || max > 3200)
8593 if (to_multiplier < 10 || to_multiplier > 3200)
8596 if (max >= to_multiplier * 8)
8599 max_latency = (to_multiplier * 8 / max) - 1;
8601 if (latency > 499 || latency > max_latency)
8607 static int le_conn_update(struct sock *sk, struct hci_dev *hdev, void *data,
8610 struct mgmt_cp_le_conn_update *cp = data;
8612 struct hci_conn *conn;
8613 u16 min, max, latency, supervision_timeout;
8616 if (!hdev_is_powered(hdev))
8617 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8618 MGMT_STATUS_NOT_POWERED);
8620 min = __le16_to_cpu(cp->conn_interval_min);
8621 max = __le16_to_cpu(cp->conn_interval_max);
8622 latency = __le16_to_cpu(cp->conn_latency);
8623 supervision_timeout = __le16_to_cpu(cp->supervision_timeout);
8625 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x supervision_timeout: 0x%4.4x",
8626 min, max, latency, supervision_timeout);
8628 err = check_le_conn_update_param(min, max, latency,
8629 supervision_timeout);
8632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8633 MGMT_STATUS_INVALID_PARAMS);
8637 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
8639 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE,
8640 MGMT_STATUS_NOT_CONNECTED);
8641 hci_dev_unlock(hdev);
8645 hci_dev_unlock(hdev);
8647 hci_le_conn_update(conn, min, max, latency, supervision_timeout);
8649 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_CONN_UPDATE, 0,
8653 static void set_manufacturer_data_complete(struct hci_dev *hdev, u8 status,
8656 struct mgmt_cp_set_manufacturer_data *cp;
8657 struct mgmt_pending_cmd *cmd;
8659 BT_DBG("status 0x%02x", status);
8663 cmd = pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev);
8670 mgmt_cmd_status(cmd->sk, hdev->id,
8671 MGMT_OP_SET_MANUFACTURER_DATA,
8672 mgmt_status(status));
8674 mgmt_cmd_complete(cmd->sk, hdev->id,
8675 MGMT_OP_SET_MANUFACTURER_DATA, 0,
8678 mgmt_pending_remove(cmd);
8681 hci_dev_unlock(hdev);
8684 static int set_manufacturer_data(struct sock *sk, struct hci_dev *hdev,
8685 void *data, u16 len)
8687 struct mgmt_pending_cmd *cmd;
8688 struct hci_request req;
8689 struct mgmt_cp_set_manufacturer_data *cp = data;
8690 u8 old_data[HCI_MAX_EIR_LENGTH] = {0, };
8694 BT_DBG("%s", hdev->name);
8696 if (!lmp_bredr_capable(hdev))
8697 return mgmt_cmd_status(sk, hdev->id,
8698 MGMT_OP_SET_MANUFACTURER_DATA,
8699 MGMT_STATUS_NOT_SUPPORTED);
8701 if (cp->data[0] == 0 ||
8702 cp->data[0] - 1 > sizeof(hdev->manufacturer_data))
8703 return mgmt_cmd_status(sk, hdev->id,
8704 MGMT_OP_SET_MANUFACTURER_DATA,
8705 MGMT_STATUS_INVALID_PARAMS);
8707 if (cp->data[1] != 0xFF)
8708 return mgmt_cmd_status(sk, hdev->id,
8709 MGMT_OP_SET_MANUFACTURER_DATA,
8710 MGMT_STATUS_NOT_SUPPORTED);
8714 if (pending_find(MGMT_OP_SET_MANUFACTURER_DATA, hdev)) {
8715 err = mgmt_cmd_status(sk, hdev->id,
8716 MGMT_OP_SET_MANUFACTURER_DATA,
8721 cmd = mgmt_pending_add(sk, MGMT_OP_SET_MANUFACTURER_DATA, hdev, data,
8728 hci_req_init(&req, hdev);
8730 /* if new data is same as previous data then return command
8733 if (hdev->manufacturer_len == cp->data[0] - 1 &&
8734 !memcmp(hdev->manufacturer_data, cp->data + 2, cp->data[0] - 1)) {
8735 mgmt_pending_remove(cmd);
8736 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MANUFACTURER_DATA,
8737 0, cp, sizeof(*cp));
8742 old_len = hdev->manufacturer_len;
8744 memcpy(old_data, hdev->manufacturer_data, old_len);
8746 hdev->manufacturer_len = cp->data[0] - 1;
8747 if (hdev->manufacturer_len > 0)
8748 memcpy(hdev->manufacturer_data, cp->data + 2,
8749 hdev->manufacturer_len);
8751 hci_update_eir_sync(hdev);
8753 err = hci_req_run(&req, set_manufacturer_data_complete);
8755 mgmt_pending_remove(cmd);
8760 hci_dev_unlock(hdev);
8765 memset(hdev->manufacturer_data, 0x00, sizeof(hdev->manufacturer_data));
8766 hdev->manufacturer_len = old_len;
8767 if (hdev->manufacturer_len > 0)
8768 memcpy(hdev->manufacturer_data, old_data,
8769 hdev->manufacturer_len);
8770 hci_dev_unlock(hdev);
8774 static int le_set_scan_params(struct sock *sk, struct hci_dev *hdev,
8775 void *data, u16 len)
8777 struct mgmt_cp_le_set_scan_params *cp = data;
8778 __u16 interval, window;
8781 BT_DBG("%s", hdev->name);
8783 if (!lmp_le_capable(hdev))
8784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8785 MGMT_STATUS_NOT_SUPPORTED);
8787 interval = __le16_to_cpu(cp->interval);
8789 if (interval < 0x0004 || interval > 0x4000)
8790 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8791 MGMT_STATUS_INVALID_PARAMS);
8793 window = __le16_to_cpu(cp->window);
8795 if (window < 0x0004 || window > 0x4000)
8796 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8797 MGMT_STATUS_INVALID_PARAMS);
8799 if (window > interval)
8800 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS,
8801 MGMT_STATUS_INVALID_PARAMS);
8805 hdev->le_scan_type = cp->type;
8806 hdev->le_scan_interval = interval;
8807 hdev->le_scan_window = window;
8809 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_SCAN_PARAMS, 0,
8812 /* If background scan is running, restart it so new parameters are
8815 if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
8816 hdev->discovery.state == DISCOVERY_STOPPED) {
8817 struct hci_request req;
8819 hci_req_init(&req, hdev);
8821 hci_req_add_le_scan_disable(&req, false);
8822 hci_req_add_le_passive_scan(&req);
8824 hci_req_run(&req, NULL);
8827 hci_dev_unlock(hdev);
8832 static int set_voice_setting(struct sock *sk, struct hci_dev *hdev,
8833 void *data, u16 len)
8835 struct mgmt_cp_set_voice_setting *cp = data;
8836 struct hci_conn *conn;
8837 struct hci_conn *sco_conn;
8841 BT_DBG("%s", hdev->name);
8843 if (!lmp_bredr_capable(hdev)) {
8844 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING,
8845 MGMT_STATUS_NOT_SUPPORTED);
8850 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
8852 err = mgmt_cmd_complete(sk, hdev->id,
8853 MGMT_OP_SET_VOICE_SETTING, 0, NULL, 0);
8857 conn->voice_setting = cp->voice_setting;
8858 conn->sco_role = cp->sco_role;
8860 sco_conn = hci_conn_hash_lookup_sco(hdev);
8861 if (sco_conn && bacmp(&sco_conn->dst, &cp->bdaddr) != 0) {
8862 BT_ERR("There is other SCO connection.");
8866 if (conn->sco_role == MGMT_SCO_ROLE_HANDSFREE) {
8867 if (conn->voice_setting == 0x0063)
8868 sco_connect_set_wbc(hdev);
8870 sco_connect_set_nbc(hdev);
8872 if (conn->voice_setting == 0x0063)
8873 sco_connect_set_gw_wbc(hdev);
8875 sco_connect_set_gw_nbc(hdev);
8879 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_VOICE_SETTING, 0,
8883 hci_dev_unlock(hdev);
8887 static int get_adv_tx_power(struct sock *sk, struct hci_dev *hdev,
8888 void *data, u16 len)
8890 struct mgmt_rp_get_adv_tx_power *rp;
8894 BT_DBG("%s", hdev->name);
8898 rp_len = sizeof(*rp);
8899 rp = kmalloc(rp_len, GFP_KERNEL);
8905 rp->adv_tx_power = hdev->adv_tx_power;
8907 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_TX_POWER, 0, rp,
8913 hci_dev_unlock(hdev);
8918 void mgmt_hardware_error(struct hci_dev *hdev, u8 err_code)
8920 struct mgmt_ev_hardware_error ev;
8922 ev.error_code = err_code;
8923 mgmt_event(MGMT_EV_HARDWARE_ERROR, hdev, &ev, sizeof(ev), NULL);
8926 void mgmt_tx_timeout_error(struct hci_dev *hdev)
8928 mgmt_event(MGMT_EV_TX_TIMEOUT_ERROR, hdev, NULL, 0, NULL);
8931 void mgmt_multi_adv_state_change_evt(struct hci_dev *hdev, u8 adv_instance,
8932 u8 state_change_reason, u16 connection_handle)
8934 struct mgmt_ev_vendor_specific_multi_adv_state_changed mgmt_ev;
8936 BT_DBG("Multi adv state changed [%2.2X %2.2X %2.2X]",
8937 adv_instance, state_change_reason, connection_handle);
8939 mgmt_ev.adv_instance = adv_instance;
8940 mgmt_ev.state_change_reason = state_change_reason;
8941 mgmt_ev.connection_handle = connection_handle;
8943 mgmt_event(MGMT_EV_MULTI_ADV_STATE_CHANGED, hdev, &mgmt_ev,
8944 sizeof(struct mgmt_ev_vendor_specific_multi_adv_state_changed),
8948 static int enable_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8949 void *data, u16 len)
8952 struct mgmt_cp_enable_6lowpan *cp = data;
8954 BT_DBG("%s", hdev->name);
8958 if (!hdev_is_powered(hdev)) {
8959 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8960 MGMT_STATUS_NOT_POWERED);
8964 if (!lmp_le_capable(hdev)) {
8965 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8966 MGMT_STATUS_NOT_SUPPORTED);
8970 if (cp->enable_6lowpan)
8971 bt_6lowpan_enable();
8973 bt_6lowpan_disable();
8975 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ENABLE_6LOWPAN,
8976 MGMT_STATUS_SUCCESS, NULL, 0);
8978 hci_dev_unlock(hdev);
8982 static int connect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
8983 void *data, u16 len)
8985 struct mgmt_cp_connect_6lowpan *cp = data;
8986 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
8989 BT_DBG("%s", hdev->name);
8993 if (!lmp_le_capable(hdev)) {
8994 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
8995 MGMT_STATUS_NOT_SUPPORTED);
8999 if (!hdev_is_powered(hdev)) {
9000 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9001 MGMT_STATUS_REJECTED);
9005 if (bdaddr_type_is_le(cp->addr.type)) {
9006 if (cp->addr.type == BDADDR_LE_PUBLIC)
9007 addr_type = ADDR_LE_DEV_PUBLIC;
9009 addr_type = ADDR_LE_DEV_RANDOM;
9011 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9012 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9016 hci_dev_unlock(hdev);
9018 /* 6lowpan Connect */
9019 err = _bt_6lowpan_connect(&cp->addr.bdaddr, cp->addr.type);
9024 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN,
9025 MGMT_STATUS_REJECTED, NULL, 0);
9030 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
9033 hci_dev_unlock(hdev);
9037 static int disconnect_bt_6lowpan(struct sock *sk, struct hci_dev *hdev,
9038 void *data, u16 len)
9040 struct mgmt_cp_disconnect_6lowpan *cp = data;
9041 struct hci_conn *conn = NULL;
9042 __u8 addr_type = ADDR_LE_DEV_PUBLIC;
9045 BT_DBG("%s", hdev->name);
9049 if (!lmp_le_capable(hdev)) {
9050 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
9051 MGMT_STATUS_NOT_SUPPORTED);
9055 if (!hdev_is_powered(hdev)) {
9056 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_DISCONNECT_6LOWPAN,
9057 MGMT_STATUS_REJECTED);
9061 if (bdaddr_type_is_le(cp->addr.type)) {
9062 if (cp->addr.type == BDADDR_LE_PUBLIC)
9063 addr_type = ADDR_LE_DEV_PUBLIC;
9065 addr_type = ADDR_LE_DEV_RANDOM;
9067 err = mgmt_cmd_complete(sk, hdev->id,
9068 MGMT_OP_DISCONNECT_6LOWPAN,
9069 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9073 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9075 err = mgmt_cmd_complete(sk, hdev->id,
9076 MGMT_OP_DISCONNECT_6LOWPAN,
9077 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
9081 if (conn->dst_type != addr_type) {
9082 err = mgmt_cmd_complete(sk, hdev->id,
9083 MGMT_OP_DISCONNECT_6LOWPAN,
9084 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
9088 if (conn->state != BT_CONNECTED) {
9089 err = mgmt_cmd_complete(sk, hdev->id,
9090 MGMT_OP_DISCONNECT_6LOWPAN,
9091 MGMT_STATUS_NOT_CONNECTED, NULL, 0);
9095 /* 6lowpan Disconnect */
9096 err = _bt_6lowpan_disconnect(conn->l2cap_data, cp->addr.type);
9098 err = mgmt_cmd_complete(sk, hdev->id,
9099 MGMT_OP_DISCONNECT_6LOWPAN,
9100 MGMT_STATUS_REJECTED, NULL, 0);
9104 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONNECT_6LOWPAN, 0,
9108 hci_dev_unlock(hdev);
9112 void mgmt_6lowpan_conn_changed(struct hci_dev *hdev, char if_name[16],
9113 bdaddr_t *bdaddr, u8 addr_type, bool connected)
9116 struct mgmt_ev_6lowpan_conn_state_changed *ev = (void *)buf;
9119 memset(buf, 0, sizeof(buf));
9120 bacpy(&ev->addr.bdaddr, bdaddr);
9121 ev->addr.type = addr_type;
9122 ev->connected = connected;
9123 memcpy(ev->ifname, (__u8 *)if_name, 16);
9125 ev_size = sizeof(*ev);
9127 mgmt_event(MGMT_EV_6LOWPAN_CONN_STATE_CHANGED, hdev, ev, ev_size, NULL);
9130 void mgmt_le_read_maximum_data_length_complete(struct hci_dev *hdev, u8 status)
9132 struct mgmt_pending_cmd *cmd;
9133 struct mgmt_rp_le_read_maximum_data_length rp;
9135 BT_DBG("%s status %u", hdev->name, status);
9137 cmd = pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev);
9142 mgmt_cmd_status(cmd->sk, hdev->id,
9143 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9144 mgmt_status(status));
9146 memset(&rp, 0, sizeof(rp));
9148 rp.max_tx_octets = cpu_to_le16(hdev->le_max_tx_len);
9149 rp.max_tx_time = cpu_to_le16(hdev->le_max_tx_time);
9150 rp.max_rx_octets = cpu_to_le16(hdev->le_max_rx_len);
9151 rp.max_rx_time = cpu_to_le16(hdev->le_max_rx_time);
9153 mgmt_cmd_complete(cmd->sk, hdev->id,
9154 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, 0,
9157 mgmt_pending_remove(cmd);
9160 static int read_maximum_le_data_length(struct sock *sk,
9161 struct hci_dev *hdev, void *data, u16 len)
9163 struct mgmt_pending_cmd *cmd;
9166 BT_DBG("read_maximum_le_data_length %s", hdev->name);
9170 if (!hdev_is_powered(hdev)) {
9171 err = mgmt_cmd_status(sk, hdev->id,
9172 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9173 MGMT_STATUS_NOT_POWERED);
9177 if (!lmp_le_capable(hdev)) {
9178 err = mgmt_cmd_status(sk, hdev->id,
9179 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9180 MGMT_STATUS_NOT_SUPPORTED);
9184 if (pending_find(MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH, hdev)) {
9185 err = mgmt_cmd_status(sk, hdev->id,
9186 MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9191 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_MAXIMUM_DATA_LENGTH,
9198 err = hci_send_cmd(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
9200 mgmt_pending_remove(cmd);
9203 hci_dev_unlock(hdev);
9207 void mgmt_le_write_host_suggested_data_length_complete(struct hci_dev *hdev,
9210 struct mgmt_pending_cmd *cmd;
9212 BT_DBG("status 0x%02x", status);
9216 cmd = pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev);
9218 BT_ERR("cmd not found in the pending list");
9223 mgmt_cmd_status(cmd->sk, hdev->id,
9224 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9225 mgmt_status(status));
9227 mgmt_cmd_complete(cmd->sk, hdev->id,
9228 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9231 mgmt_pending_remove(cmd);
9234 hci_dev_unlock(hdev);
9237 static int write_host_suggested_le_data_length(struct sock *sk,
9238 struct hci_dev *hdev, void *data, u16 len)
9240 struct mgmt_pending_cmd *cmd;
9241 struct mgmt_cp_le_write_host_suggested_data_length *cp = data;
9242 struct hci_cp_le_write_def_data_len hci_data;
9245 BT_DBG("Write host suggested data length request for %s", hdev->name);
9249 if (!hdev_is_powered(hdev)) {
9250 err = mgmt_cmd_status(sk, hdev->id,
9251 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9252 MGMT_STATUS_NOT_POWERED);
9256 if (!lmp_le_capable(hdev)) {
9257 err = mgmt_cmd_status(sk, hdev->id,
9258 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9259 MGMT_STATUS_NOT_SUPPORTED);
9263 if (pending_find(MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
9264 err = mgmt_cmd_status(sk, hdev->id,
9265 MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9270 cmd = mgmt_pending_add(sk, MGMT_OP_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH,
9277 hci_data.tx_len = cp->def_tx_octets;
9278 hci_data.tx_time = cp->def_tx_time;
9280 err = hci_send_cmd(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
9281 sizeof(hci_data), &hci_data);
9283 mgmt_pending_remove(cmd);
9286 hci_dev_unlock(hdev);
9291 void mgmt_le_read_host_suggested_data_length_complete(struct hci_dev *hdev,
9294 struct mgmt_pending_cmd *cmd;
9295 struct mgmt_rp_le_read_host_suggested_data_length rp;
9297 BT_DBG("%s status %u", hdev->name, status);
9299 cmd = pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev);
9301 BT_ERR("cmd not found in the pending list");
9306 mgmt_cmd_status(cmd->sk, hdev->id,
9307 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9308 mgmt_status(status));
9310 memset(&rp, 0, sizeof(rp));
9312 rp.def_tx_octets = cpu_to_le16(hdev->le_def_tx_len);
9313 rp.def_tx_time = cpu_to_le16(hdev->le_def_tx_time);
9315 mgmt_cmd_complete(cmd->sk, hdev->id,
9316 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, 0,
9319 mgmt_pending_remove(cmd);
9322 static int read_host_suggested_data_length(struct sock *sk,
9323 struct hci_dev *hdev, void *data, u16 len)
9325 struct mgmt_pending_cmd *cmd;
9328 BT_DBG("read_host_suggested_data_length %s", hdev->name);
9332 if (!hdev_is_powered(hdev)) {
9333 err = mgmt_cmd_status(sk, hdev->id,
9334 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9335 MGMT_STATUS_NOT_POWERED);
9339 if (!lmp_le_capable(hdev)) {
9340 err = mgmt_cmd_status(sk, hdev->id,
9341 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9342 MGMT_STATUS_NOT_SUPPORTED);
9346 if (pending_find(MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH, hdev)) {
9347 err = mgmt_cmd_status(sk, hdev->id,
9348 MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9353 cmd = mgmt_pending_add(sk, MGMT_OP_LE_READ_HOST_SUGGESTED_DATA_LENGTH,
9360 err = hci_send_cmd(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
9362 mgmt_pending_remove(cmd);
9365 hci_dev_unlock(hdev);
9370 static int set_le_data_length_params(struct sock *sk, struct hci_dev *hdev,
9371 void *data, u16 len)
9373 struct mgmt_cp_le_set_data_length *cp = data;
9374 struct mgmt_rp_le_set_data_length *rp;
9375 struct mgmt_pending_cmd *cmd;
9376 struct hci_conn *conn;
9378 u16 max_tx_octets, max_tx_time;
9381 BT_INFO("Set Data length for the device %s", hdev->name);
9385 rp_len = sizeof(*rp);
9386 rp = kmalloc(rp_len, GFP_KERNEL);
9392 if (!hdev_is_powered(hdev)) {
9393 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
9394 MGMT_STATUS_NOT_POWERED);
9398 if (!lmp_le_capable(hdev)) {
9399 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
9400 MGMT_STATUS_NOT_SUPPORTED);
9404 if (pending_find(MGMT_OP_LE_SET_DATA_LENGTH, hdev)) {
9405 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
9410 cmd = mgmt_pending_add(sk, MGMT_OP_LE_SET_DATA_LENGTH, hdev, data, len);
9416 max_tx_octets = __le16_to_cpu(cp->max_tx_octets);
9417 max_tx_time = __le16_to_cpu(cp->max_tx_time);
9419 BT_DBG("max_tx_octets 0x%4.4x max_tx_time 0x%4.4x latency",
9420 max_tx_octets, max_tx_time);
9422 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
9424 mgmt_cmd_status(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH,
9425 MGMT_STATUS_NOT_CONNECTED);
9429 hci_dev_unlock(hdev);
9431 err = hci_le_set_data_length(conn, max_tx_octets, max_tx_time);
9433 mgmt_pending_remove(cmd);
9435 rp->handle = conn->handle;
9439 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LE_SET_DATA_LENGTH, 0,
9443 hci_dev_unlock(hdev);
9448 void mgmt_le_data_length_change_complete(struct hci_dev *hdev,
9449 bdaddr_t *bdaddr, u16 tx_octets, u16 tx_time,
9450 u16 rx_octets, u16 rx_time)
9452 struct mgmt_ev_le_data_length_changed ev;
9454 bacpy(&ev.addr.bdaddr, bdaddr);
9455 ev.max_tx_octets = tx_octets;
9456 ev.max_tx_time = tx_time;
9457 ev.max_rx_octets = rx_octets;
9458 ev.max_rx_time = rx_time;
9460 mgmt_event(MGMT_EV_LE_DATA_LENGTH_CHANGED, hdev, &ev, sizeof(ev), NULL);
9462 #endif /* TIZEN_BT */
9464 static bool ltk_is_valid(struct mgmt_ltk_info *key)
9466 if (key->initiator != 0x00 && key->initiator != 0x01)
9469 switch (key->addr.type) {
9470 case BDADDR_LE_PUBLIC:
9473 case BDADDR_LE_RANDOM:
9474 /* Two most significant bits shall be set */
9475 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
9483 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
9484 void *cp_data, u16 len)
9486 struct mgmt_cp_load_long_term_keys *cp = cp_data;
9487 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
9488 sizeof(struct mgmt_ltk_info));
9489 u16 key_count, expected_len;
9492 bt_dev_dbg(hdev, "sock %p", sk);
9494 if (!lmp_le_capable(hdev))
9495 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9496 MGMT_STATUS_NOT_SUPPORTED);
9498 key_count = __le16_to_cpu(cp->key_count);
9499 if (key_count > max_key_count) {
9500 bt_dev_err(hdev, "load_ltks: too big key_count value %u",
9502 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9503 MGMT_STATUS_INVALID_PARAMS);
9506 expected_len = struct_size(cp, keys, key_count);
9507 if (expected_len != len) {
9508 bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
9510 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
9511 MGMT_STATUS_INVALID_PARAMS);
9514 bt_dev_dbg(hdev, "key_count %u", key_count);
9516 for (i = 0; i < key_count; i++) {
9517 struct mgmt_ltk_info *key = &cp->keys[i];
9519 if (!ltk_is_valid(key))
9520 return mgmt_cmd_status(sk, hdev->id,
9521 MGMT_OP_LOAD_LONG_TERM_KEYS,
9522 MGMT_STATUS_INVALID_PARAMS);
9527 hci_smp_ltks_clear(hdev);
9529 for (i = 0; i < key_count; i++) {
9530 struct mgmt_ltk_info *key = &cp->keys[i];
9531 u8 type, authenticated;
9533 if (hci_is_blocked_key(hdev,
9534 HCI_BLOCKED_KEY_TYPE_LTK,
9536 bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
9541 switch (key->type) {
9542 case MGMT_LTK_UNAUTHENTICATED:
9543 authenticated = 0x00;
9544 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9546 case MGMT_LTK_AUTHENTICATED:
9547 authenticated = 0x01;
9548 type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
9550 case MGMT_LTK_P256_UNAUTH:
9551 authenticated = 0x00;
9552 type = SMP_LTK_P256;
9554 case MGMT_LTK_P256_AUTH:
9555 authenticated = 0x01;
9556 type = SMP_LTK_P256;
9558 case MGMT_LTK_P256_DEBUG:
9559 authenticated = 0x00;
9560 type = SMP_LTK_P256_DEBUG;
9566 hci_add_ltk(hdev, &key->addr.bdaddr,
9567 le_addr_type(key->addr.type), type, authenticated,
9568 key->val, key->enc_size, key->ediv, key->rand);
9571 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
9574 hci_dev_unlock(hdev);
9579 static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
9581 struct mgmt_pending_cmd *cmd = data;
9582 struct hci_conn *conn = cmd->user_data;
9583 struct mgmt_cp_get_conn_info *cp = cmd->param;
9584 struct mgmt_rp_get_conn_info rp;
9587 bt_dev_dbg(hdev, "err %d", err);
9589 memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
9591 status = mgmt_status(err);
9592 if (status == MGMT_STATUS_SUCCESS) {
9593 rp.rssi = conn->rssi;
9594 rp.tx_power = conn->tx_power;
9595 rp.max_tx_power = conn->max_tx_power;
9597 rp.rssi = HCI_RSSI_INVALID;
9598 rp.tx_power = HCI_TX_POWER_INVALID;
9599 rp.max_tx_power = HCI_TX_POWER_INVALID;
9602 mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
9605 mgmt_pending_free(cmd);
9608 static int get_conn_info_sync(struct hci_dev *hdev, void *data)
9610 struct mgmt_pending_cmd *cmd = data;
9611 struct mgmt_cp_get_conn_info *cp = cmd->param;
9612 struct hci_conn *conn;
9616 /* Make sure we are still connected */
9617 if (cp->addr.type == BDADDR_BREDR)
9618 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9621 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9623 if (!conn || conn->state != BT_CONNECTED)
9624 return MGMT_STATUS_NOT_CONNECTED;
9626 cmd->user_data = conn;
9627 handle = cpu_to_le16(conn->handle);
9629 /* Refresh RSSI each time */
9630 err = hci_read_rssi_sync(hdev, handle);
9632 /* For LE links TX power does not change thus we don't need to
9633 * query for it once value is known.
9635 if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
9636 conn->tx_power == HCI_TX_POWER_INVALID))
9637 err = hci_read_tx_power_sync(hdev, handle, 0x00);
9639 /* Max TX power needs to be read only once per connection */
9640 if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
9641 err = hci_read_tx_power_sync(hdev, handle, 0x01);
9646 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
9649 struct mgmt_cp_get_conn_info *cp = data;
9650 struct mgmt_rp_get_conn_info rp;
9651 struct hci_conn *conn;
9652 unsigned long conn_info_age;
9655 bt_dev_dbg(hdev, "sock %p", sk);
9657 memset(&rp, 0, sizeof(rp));
9658 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9659 rp.addr.type = cp->addr.type;
9661 if (!bdaddr_type_is_valid(cp->addr.type))
9662 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9663 MGMT_STATUS_INVALID_PARAMS,
9668 if (!hdev_is_powered(hdev)) {
9669 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9670 MGMT_STATUS_NOT_POWERED, &rp,
9675 if (cp->addr.type == BDADDR_BREDR)
9676 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9679 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
9681 if (!conn || conn->state != BT_CONNECTED) {
9682 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9683 MGMT_STATUS_NOT_CONNECTED, &rp,
9688 /* To avoid client trying to guess when to poll again for information we
9689 * calculate conn info age as random value between min/max set in hdev.
9691 conn_info_age = hdev->conn_info_min_age +
9692 prandom_u32_max(hdev->conn_info_max_age -
9693 hdev->conn_info_min_age);
9695 /* Query controller to refresh cached values if they are too old or were
9698 if (time_after(jiffies, conn->conn_info_timestamp +
9699 msecs_to_jiffies(conn_info_age)) ||
9700 !conn->conn_info_timestamp) {
9701 struct mgmt_pending_cmd *cmd;
9703 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
9708 err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
9709 cmd, get_conn_info_complete);
9713 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9714 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9717 mgmt_pending_free(cmd);
9722 conn->conn_info_timestamp = jiffies;
9724 /* Cache is valid, just reply with values cached in hci_conn */
9725 rp.rssi = conn->rssi;
9726 rp.tx_power = conn->tx_power;
9727 rp.max_tx_power = conn->max_tx_power;
9729 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
9730 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
9734 hci_dev_unlock(hdev);
9738 static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
9740 struct mgmt_pending_cmd *cmd = data;
9741 struct mgmt_cp_get_clock_info *cp = cmd->param;
9742 struct mgmt_rp_get_clock_info rp;
9743 struct hci_conn *conn = cmd->user_data;
9744 u8 status = mgmt_status(err);
9746 bt_dev_dbg(hdev, "err %d", err);
9748 memset(&rp, 0, sizeof(rp));
9749 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9750 rp.addr.type = cp->addr.type;
9755 rp.local_clock = cpu_to_le32(hdev->clock);
9758 rp.piconet_clock = cpu_to_le32(conn->clock);
9759 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
9763 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
9766 mgmt_pending_free(cmd);
9769 static int get_clock_info_sync(struct hci_dev *hdev, void *data)
9771 struct mgmt_pending_cmd *cmd = data;
9772 struct mgmt_cp_get_clock_info *cp = cmd->param;
9773 struct hci_cp_read_clock hci_cp;
9774 struct hci_conn *conn;
9776 memset(&hci_cp, 0, sizeof(hci_cp));
9777 hci_read_clock_sync(hdev, &hci_cp);
9779 /* Make sure connection still exists */
9780 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
9781 if (!conn || conn->state != BT_CONNECTED)
9782 return MGMT_STATUS_NOT_CONNECTED;
9784 cmd->user_data = conn;
9785 hci_cp.handle = cpu_to_le16(conn->handle);
9786 hci_cp.which = 0x01; /* Piconet clock */
9788 return hci_read_clock_sync(hdev, &hci_cp);
9791 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
9794 struct mgmt_cp_get_clock_info *cp = data;
9795 struct mgmt_rp_get_clock_info rp;
9796 struct mgmt_pending_cmd *cmd;
9797 struct hci_conn *conn;
9800 bt_dev_dbg(hdev, "sock %p", sk);
9802 memset(&rp, 0, sizeof(rp));
9803 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
9804 rp.addr.type = cp->addr.type;
9806 if (cp->addr.type != BDADDR_BREDR)
9807 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9808 MGMT_STATUS_INVALID_PARAMS,
9813 if (!hdev_is_powered(hdev)) {
9814 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9815 MGMT_STATUS_NOT_POWERED, &rp,
9820 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
9821 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
9823 if (!conn || conn->state != BT_CONNECTED) {
9824 err = mgmt_cmd_complete(sk, hdev->id,
9825 MGMT_OP_GET_CLOCK_INFO,
9826 MGMT_STATUS_NOT_CONNECTED,
9834 cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
9838 err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
9839 get_clock_info_complete);
9842 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
9843 MGMT_STATUS_FAILED, &rp, sizeof(rp));
9846 mgmt_pending_free(cmd);
9851 hci_dev_unlock(hdev);
9855 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
9857 struct hci_conn *conn;
9859 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
9863 if (conn->dst_type != type)
9866 if (conn->state != BT_CONNECTED)
9872 /* This function requires the caller holds hdev->lock */
/* Create (or reuse) the connection parameters for @addr/@addr_type and
 * move them onto the correct action list for the requested @auto_connect
 * policy.  Returns 0 on success, negative errno otherwise (error paths
 * are elided in this excerpt).
 * NOTE(review): "¶ms" below is mojibake for "&params" — fix encoding at
 * the file level, not here.
 */
9873 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
9874 u8 addr_type, u8 auto_connect)
9876 struct hci_conn_params *params;
/* Adds a new entry or returns the existing one for this address */
9878 params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do if the policy is unchanged */
9882 if (params->auto_connect == auto_connect)
/* Detach from whichever action list the entry is currently on */
9885 list_del_init(¶ms->action);
9887 switch (auto_connect) {
9888 case HCI_AUTO_CONN_DISABLED:
9889 case HCI_AUTO_CONN_LINK_LOSS:
9890 /* If auto connect is being disabled when we're trying to
9891 * connect to device, keep connecting.
9893 if (params->explicit_connect)
9894 list_add(¶ms->action, &hdev->pend_le_conns);
9896 case HCI_AUTO_CONN_REPORT:
/* An in-progress explicit connect takes precedence over reporting */
9897 if (params->explicit_connect)
9898 list_add(¶ms->action, &hdev->pend_le_conns);
9900 list_add(¶ms->action, &hdev->pend_le_reports);
9902 case HCI_AUTO_CONN_DIRECT:
9903 case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if we are not already connected */
9904 if (!is_connected(hdev, addr, addr_type))
9905 list_add(¶ms->action, &hdev->pend_le_conns);
9909 params->auto_connect = auto_connect;
9911 bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
9912 addr, addr_type, auto_connect);
/* Emit the MGMT Device Added event to all management sockets except @sk
 * (the originator already gets the command reply).
 */
9917 static void device_added(struct sock *sk, struct hci_dev *hdev,
9918 bdaddr_t *bdaddr, u8 type, u8 action)
9920 struct mgmt_ev_device_added ev;
9922 bacpy(&ev.addr.bdaddr, bdaddr);
9923 ev.addr.type = type;
9926 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* hci_sync work callback for Add Device: re-evaluate passive scanning so
 * the new accept-list/conn-params entry takes effect.
 */
9929 static int add_device_sync(struct hci_dev *hdev, void *data)
9931 return hci_update_passive_scan_sync(hdev);
/* MGMT_OP_ADD_DEVICE handler.
 *
 * BR/EDR addresses: only action 0x01 (allow incoming connection) is
 * supported; the address is put on the controller accept list.
 * LE addresses: the action selects the auto-connect policy
 * (0x00 report, 0x01 direct, 0x02 always) and connection parameters are
 * created/updated via hci_conn_params_set().
 *
 * Replies with the submitted address on success or the matching
 * MGMT_STATUS_* error.  Several error-path/goto lines are elided in this
 * excerpt.
 */
9934 static int add_device(struct sock *sk, struct hci_dev *hdev,
9935 void *data, u16 len)
9937 struct mgmt_cp_add_device *cp = data;
9938 u8 auto_conn, addr_type;
9939 struct hci_conn_params *params;
9941 u32 current_flags = 0;
9942 u32 supported_flags;
9944 bt_dev_dbg(hdev, "sock %p", sk);
/* Reject invalid address types and the wildcard BDADDR_ANY */
9946 if (!bdaddr_type_is_valid(cp->addr.type) ||
9947 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
9948 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9949 MGMT_STATUS_INVALID_PARAMS,
9950 &cp->addr, sizeof(cp->addr));
/* Only actions 0x00-0x02 are defined by the MGMT API */
9952 if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
9953 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9954 MGMT_STATUS_INVALID_PARAMS,
9955 &cp->addr, sizeof(cp->addr));
9959 if (cp->addr.type == BDADDR_BREDR) {
9960 /* Only incoming connections action is supported for now */
9961 if (cp->action != 0x01) {
9962 err = mgmt_cmd_complete(sk, hdev->id,
9964 MGMT_STATUS_INVALID_PARAMS,
9965 &cp->addr, sizeof(cp->addr));
/* Whitelist the BR/EDR device for incoming connections */
9969 err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
/* Refresh page-scan state to reflect the new accept-list entry */
9975 hci_update_scan(hdev);
/* LE path from here on */
9980 addr_type = le_addr_type(cp->addr.type);
/* Map the MGMT action onto the internal auto-connect policy */
9982 if (cp->action == 0x02)
9983 auto_conn = HCI_AUTO_CONN_ALWAYS;
9984 else if (cp->action == 0x01)
9985 auto_conn = HCI_AUTO_CONN_DIRECT;
9987 auto_conn = HCI_AUTO_CONN_REPORT;
9989 /* Kernel internally uses conn_params with resolvable private
9990 * address, but Add Device allows only identity addresses.
9991 * Make sure it is enforced before calling
9992 * hci_conn_params_lookup.
9994 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
9995 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
9996 MGMT_STATUS_INVALID_PARAMS,
9997 &cp->addr, sizeof(cp->addr));
10001 /* If the connection parameters don't exist for this device,
10002 * they will be created and configured with defaults.
10004 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
10006 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
10007 MGMT_STATUS_FAILED, &cp->addr,
/* Capture the flags of the (possibly just created) params entry */
10011 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
10014 current_flags = params->flags;
/* Kick passive scanning asynchronously; no completion needed */
10017 err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
/* Notify other mgmt sockets about the new device entry */
10022 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
10023 supported_flags = hdev->conn_flags;
10024 device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
10025 supported_flags, current_flags);
10027 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
10028 MGMT_STATUS_SUCCESS, &cp->addr,
10032 hci_dev_unlock(hdev);
/* Emit the MGMT Device Removed event to all management sockets except
 * the originating @sk.
 */
10036 static void device_removed(struct sock *sk, struct hci_dev *hdev,
10037 bdaddr_t *bdaddr, u8 type)
10039 struct mgmt_ev_device_removed ev;
10041 bacpy(&ev.addr.bdaddr, bdaddr);
10042 ev.addr.type = type;
10044 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* hci_sync work callback for Remove Device: re-evaluate passive scanning
 * after accept-list/conn-params entries were dropped.
 */
10047 static int remove_device_sync(struct hci_dev *hdev, void *data)
10049 return hci_update_passive_scan_sync(hdev);
/* MGMT_OP_REMOVE_DEVICE handler.
 *
 * A specific address removes that one accept-list entry (BR/EDR) or
 * conn_params entry (LE).  BDADDR_ANY with address type 0 removes all
 * BR/EDR accept-list entries and all LE connection parameters except
 * disabled/explicit-connect ones.  Entries in HCI_AUTO_CONN_DISABLED or
 * HCI_AUTO_CONN_EXPLICIT state were never added via Add Device, so
 * removing them individually is rejected as invalid.
 * Error-path/goto lines are elided in this excerpt.
 */
10052 static int remove_device(struct sock *sk, struct hci_dev *hdev,
10053 void *data, u16 len)
10055 struct mgmt_cp_remove_device *cp = data;
10058 bt_dev_dbg(hdev, "sock %p", sk);
10060 hci_dev_lock(hdev);
/* Non-wildcard address: remove a single device entry */
10062 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
10063 struct hci_conn_params *params;
10066 if (!bdaddr_type_is_valid(cp->addr.type)) {
10067 err = mgmt_cmd_complete(sk, hdev->id,
10068 MGMT_OP_REMOVE_DEVICE,
10069 MGMT_STATUS_INVALID_PARAMS,
10070 &cp->addr, sizeof(cp->addr));
10074 if (cp->addr.type == BDADDR_BREDR) {
10075 err = hci_bdaddr_list_del(&hdev->accept_list,
10079 err = mgmt_cmd_complete(sk, hdev->id,
10080 MGMT_OP_REMOVE_DEVICE,
10081 MGMT_STATUS_INVALID_PARAMS,
/* Accept list changed; refresh page-scan state */
10087 hci_update_scan(hdev);
10089 device_removed(sk, hdev, &cp->addr.bdaddr,
/* LE path */
10094 addr_type = le_addr_type(cp->addr.type);
10096 /* Kernel internally uses conn_params with resolvable private
10097 * address, but Remove Device allows only identity addresses.
10098 * Make sure it is enforced before calling
10099 * hci_conn_params_lookup.
10101 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
10102 err = mgmt_cmd_complete(sk, hdev->id,
10103 MGMT_OP_REMOVE_DEVICE,
10104 MGMT_STATUS_INVALID_PARAMS,
10105 &cp->addr, sizeof(cp->addr));
10109 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
10112 err = mgmt_cmd_complete(sk, hdev->id,
10113 MGMT_OP_REMOVE_DEVICE,
10114 MGMT_STATUS_INVALID_PARAMS,
10115 &cp->addr, sizeof(cp->addr));
/* Disabled/explicit entries were not added by Add Device */
10119 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
10120 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
10121 err = mgmt_cmd_complete(sk, hdev->id,
10122 MGMT_OP_REMOVE_DEVICE,
10123 MGMT_STATUS_INVALID_PARAMS,
10124 &cp->addr, sizeof(cp->addr));
/* NOTE(review): "¶ms" is mojibake for "&params" */
10128 list_del(¶ms->action);
10129 list_del(¶ms->list);
10132 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* Wildcard BDADDR_ANY: remove everything */
10134 struct hci_conn_params *p, *tmp;
10135 struct bdaddr_list *b, *btmp;
/* Wildcard removal is only defined for address type 0 */
10137 if (cp->addr.type) {
10138 err = mgmt_cmd_complete(sk, hdev->id,
10139 MGMT_OP_REMOVE_DEVICE,
10140 MGMT_STATUS_INVALID_PARAMS,
10141 &cp->addr, sizeof(cp->addr));
10145 list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
10146 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
10147 list_del(&b->list);
10151 hci_update_scan(hdev);
10153 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
10154 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
10156 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep entries with a pending explicit connect, just demote them */
10157 if (p->explicit_connect) {
10158 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
10161 list_del(&p->action);
10162 list_del(&p->list);
10166 bt_dev_dbg(hdev, "All LE connection parameters were removed");
/* Re-evaluate passive scanning now that entries are gone */
10169 hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
10172 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
10173 MGMT_STATUS_SUCCESS, &cp->addr,
10176 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_CONN_PARAM handler.
 *
 * Replaces the stored LE connection parameters with the list supplied
 * by userspace.  The parameter count is bounded so the computed request
 * length cannot overflow u16, the total length must match exactly, and
 * individually invalid entries are logged and skipped rather than
 * failing the whole command.
 */
10180 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
10183 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeping sizeof(*cp) + count * entry size within U16_MAX */
10184 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
10185 sizeof(struct mgmt_conn_param));
10186 u16 param_count, expected_len;
10189 if (!lmp_le_capable(hdev))
10190 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10191 MGMT_STATUS_NOT_SUPPORTED);
10193 param_count = __le16_to_cpu(cp->param_count);
10194 if (param_count > max_param_count) {
10195 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
10197 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10198 MGMT_STATUS_INVALID_PARAMS);
/* The request must be exactly header + param_count entries */
10201 expected_len = struct_size(cp, params, param_count);
10202 if (expected_len != len) {
10203 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
10204 expected_len, len);
10205 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
10206 MGMT_STATUS_INVALID_PARAMS);
10209 bt_dev_dbg(hdev, "param_count %u", param_count);
10211 hci_dev_lock(hdev);
/* Drop stale disabled entries before loading the new set */
10213 hci_conn_params_clear_disabled(hdev);
10215 for (i = 0; i < param_count; i++) {
10216 struct mgmt_conn_param *param = &cp->params[i];
10217 struct hci_conn_params *hci_param;
10218 u16 min, max, latency, timeout;
/* NOTE(review): "¶m" is mojibake for "&param" */
10221 bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr,
/* Only LE public/random identity address types are accepted */
10224 if (param->addr.type == BDADDR_LE_PUBLIC) {
10225 addr_type = ADDR_LE_DEV_PUBLIC;
10226 } else if (param->addr.type == BDADDR_LE_RANDOM) {
10227 addr_type = ADDR_LE_DEV_RANDOM;
10229 bt_dev_err(hdev, "ignoring invalid connection parameters");
10233 min = le16_to_cpu(param->min_interval);
10234 max = le16_to_cpu(param->max_interval);
10235 latency = le16_to_cpu(param->latency);
10236 timeout = le16_to_cpu(param->timeout);
10238 bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
10239 min, max, latency, timeout);
/* Validate against the ranges mandated by the Core spec */
10241 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
10242 bt_dev_err(hdev, "ignoring invalid connection parameters");
10246 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
10249 bt_dev_err(hdev, "failed to add connection parameters");
10253 hci_param->conn_min_interval = min;
10254 hci_param->conn_max_interval = max;
10255 hci_param->conn_latency = latency;
10256 hci_param->supervision_timeout = timeout;
10259 hci_dev_unlock(hdev);
10261 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT_OP_SET_EXTERNAL_CONFIG handler.
 *
 * Toggles the HCI_EXT_CONFIGURED flag on a controller that declares the
 * HCI_QUIRK_EXTERNAL_CONFIG quirk.  Rejected while powered.  If the
 * change flips the controller between configured and unconfigured it is
 * re-registered (index removed/added) so userspace sees it on the right
 * index list.
 */
10265 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
10266 void *data, u16 len)
10268 struct mgmt_cp_set_external_config *cp = data;
10272 bt_dev_dbg(hdev, "sock %p", sk);
/* Configuration changes are only allowed while powered off */
10274 if (hdev_is_powered(hdev))
10275 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10276 MGMT_STATUS_REJECTED);
10278 if (cp->config != 0x00 && cp->config != 0x01)
10279 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10280 MGMT_STATUS_INVALID_PARAMS);
10282 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
10283 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
10284 MGMT_STATUS_NOT_SUPPORTED);
10286 hci_dev_lock(hdev);
/* Track whether the flag actually changed to avoid spurious events */
10289 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
10291 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
10293 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
10300 err = new_options(hdev, sk);
/* Re-register the index if configured-ness flipped */
10302 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
10303 mgmt_index_removed(hdev);
10305 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
10306 hci_dev_set_flag(hdev, HCI_CONFIG);
10307 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
/* Power on to finish setup now that it became configured */
10309 queue_work(hdev->req_workqueue, &hdev->power_on);
10311 set_bit(HCI_RAW, &hdev->flags);
10312 mgmt_index_added(hdev);
10317 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PUBLIC_ADDRESS handler.
 *
 * Stores a public address to be programmed via the driver's set_bdaddr
 * callback on next power-on.  Requires a powered-off controller, a
 * non-wildcard address, and driver support.  If this makes an
 * unconfigured controller configured, it is re-registered and powered
 * on to complete setup.
 */
10321 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
10322 void *data, u16 len)
10324 struct mgmt_cp_set_public_address *cp = data;
10328 bt_dev_dbg(hdev, "sock %p", sk);
10330 if (hdev_is_powered(hdev))
10331 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10332 MGMT_STATUS_REJECTED);
/* BDADDR_ANY is not a valid public address */
10334 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
10335 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10336 MGMT_STATUS_INVALID_PARAMS);
/* The driver must provide a vendor-specific way to set the address */
10338 if (!hdev->set_bdaddr)
10339 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
10340 MGMT_STATUS_NOT_SUPPORTED);
10342 hci_dev_lock(hdev);
10344 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
10345 bacpy(&hdev->public_addr, &cp->bdaddr);
10347 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
10354 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
10355 err = new_options(hdev, sk);
/* If setting the address completed configuration, re-register */
10357 if (is_configured(hdev)) {
10358 mgmt_index_removed(hdev);
10360 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
10362 hci_dev_set_flag(hdev, HCI_CONFIG);
10363 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
10365 queue_work(hdev->req_workqueue, &hdev->power_on);
10369 hci_dev_unlock(hdev);
/* Emit a Device Name Update event carrying the remote name as an
 * EIR_NAME_COMPLETE field.  Address type is fixed to BDADDR_BREDR here.
 * NOTE(review): appears to be a Tizen extension (mgmt_tizen.h is
 * included above) — confirm the event is defined there.
 */
10374 int mgmt_device_name_update(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *name,
10378 struct mgmt_ev_device_name_update *ev = (void *)buf;
10384 bacpy(&ev->addr.bdaddr, bdaddr);
10385 ev->addr.type = BDADDR_BREDR;
/* Append the name as a complete-name EIR field after the fixed header */
10387 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
10390 ev->eir_len = cpu_to_le16(eir_len);
10392 return mgmt_event(MGMT_EV_DEVICE_NAME_UPDATE, hdev, buf,
10393 sizeof(*ev) + eir_len, NULL);
/* Emit a Connection Update Failed event for an LE connection-parameter
 * update attempt, translating the HCI link/address type to the MGMT
 * address type.  NOTE(review): Tizen-specific MGMT event — confirm
 * against mgmt_tizen.h.
 */
10396 int mgmt_le_conn_update_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
10397 u8 link_type, u8 addr_type, u8 status)
10399 struct mgmt_ev_conn_update_failed ev;
10401 bacpy(&ev.addr.bdaddr, bdaddr);
10402 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10403 ev.status = status;
10405 return mgmt_event(MGMT_EV_CONN_UPDATE_FAILED, hdev,
10406 &ev, sizeof(ev), NULL);
/* Emit a Connection Updated event with the new LE connection interval,
 * latency and supervision timeout (converted to little endian on the
 * wire).  NOTE(review): Tizen-specific MGMT event — confirm against
 * mgmt_tizen.h.
 */
10409 int mgmt_le_conn_updated(struct hci_dev *hdev, bdaddr_t *bdaddr,
10410 u8 link_type, u8 addr_type, u16 conn_interval,
10411 u16 conn_latency, u16 supervision_timeout)
10413 struct mgmt_ev_conn_updated ev;
10415 bacpy(&ev.addr.bdaddr, bdaddr);
10416 ev.addr.type = link_to_bdaddr(link_type, addr_type);
10417 ev.conn_interval = cpu_to_le16(conn_interval);
10418 ev.conn_latency = cpu_to_le16(conn_latency);
10419 ev.supervision_timeout = cpu_to_le16(supervision_timeout);
10421 return mgmt_event(MGMT_EV_CONN_UPDATED, hdev,
10422 &ev, sizeof(ev), NULL);
10425 /* le device found event - Pass adv type */
/* Build and emit an LE Device Found event including the raw advertising
 * type.  The EIR data, an optional Class of Device field (5 bytes incl.
 * header), and any scan response are concatenated into one EIR blob.
 * Dropped silently if no (LE) discovery is active or the event would
 * overflow the stack buffer.  NOTE(review): Tizen-specific variant of
 * the standard device-found event.
 */
10426 void mgmt_le_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
10427 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, u8 *eir,
10428 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, u8 adv_type)
10431 struct mgmt_ev_le_device_found *ev = (void *)buf;
10434 if (!hci_discovery_active(hdev) && !hci_le_discovery_active(hdev))
10437 /* Make sure that the buffer is big enough. The 5 extra bytes
10438 * are for the potential CoD field.
10440 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
10443 memset(buf, 0, sizeof(buf));
10445 bacpy(&ev->addr.bdaddr, bdaddr);
10446 ev->addr.type = link_to_bdaddr(link_type, addr_type);
10448 ev->flags = cpu_to_le32(flags);
10449 ev->adv_type = adv_type;
10452 memcpy(ev->eir, eir, eir_len);
/* Append Class of Device only if the EIR does not already carry one */
10454 if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV, NULL))
10455 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
10458 if (scan_rsp_len > 0)
10459 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
10461 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
10462 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
10464 mgmt_event(MGMT_EV_LE_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Completion handler for Read Local OOB Extended Data.
 *
 * Translates the HCI reply (legacy or Secure-Connections variant,
 * depending on bredr_sc_enabled()) into a MGMT reply whose EIR blob
 * carries Class of Device plus the P-192 and/or P-256 hash/randomizer
 * pairs, then broadcasts Local OOB Data Updated to interested sockets.
 * Several early-exit/cleanup lines are elided in this excerpt.
 */
10468 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
10471 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
10472 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
10473 u8 *h192, *r192, *h256, *r256;
10474 struct mgmt_pending_cmd *cmd = data;
10475 struct sk_buff *skb = cmd->skb;
10476 u8 status = mgmt_status(err);
/* Bail out if the pending command was already cancelled/replaced */
10479 if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
10484 status = MGMT_STATUS_FAILED;
10485 else if (IS_ERR(skb))
10486 status = mgmt_status(PTR_ERR(skb));
/* First byte of the HCI reply is the HCI status code */
10488 status = mgmt_status(skb->data[0]);
10491 bt_dev_dbg(hdev, "status %u", status);
10493 mgmt_cp = cmd->param;
10496 status = mgmt_status(status);
/* Legacy (non-SC) controller: only P-192 data is available */
10503 } else if (!bredr_sc_enabled(hdev)) {
10504 struct hci_rp_read_local_oob_data *rp;
10506 if (skb->len != sizeof(*rp)) {
10507 status = MGMT_STATUS_FAILED;
10510 status = MGMT_STATUS_SUCCESS;
10511 rp = (void *)skb->data;
/* 5 bytes CoD field + 18-byte hash + 18-byte randomizer fields */
10513 eir_len = 5 + 18 + 18;
/* Secure Connections capable: extended reply with P-256 data */
10520 struct hci_rp_read_local_oob_ext_data *rp;
10522 if (skb->len != sizeof(*rp)) {
10523 status = MGMT_STATUS_FAILED;
10526 status = MGMT_STATUS_SUCCESS;
10527 rp = (void *)skb->data;
/* SC-only mode omits the legacy P-192 values */
10529 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
10530 eir_len = 5 + 18 + 18;
10534 eir_len = 5 + 18 + 18 + 18 + 18;
10535 h192 = rp->hash192;
10536 r192 = rp->rand192;
10539 h256 = rp->hash256;
10540 r256 = rp->rand256;
10544 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
/* Assemble the EIR payload: CoD first, then hash/randomizer pairs */
10551 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
10552 hdev->dev_class, 3);
10554 if (h192 && r192) {
10555 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10556 EIR_SSP_HASH_C192, h192, 16);
10557 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10558 EIR_SSP_RAND_R192, r192, 16);
10561 if (h256 && r256) {
10562 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10563 EIR_SSP_HASH_C256, h256, 16);
10564 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
10565 EIR_SSP_RAND_R256, r256, 16);
10569 mgmt_rp->type = mgmt_cp->type;
10570 mgmt_rp->eir_len = cpu_to_le16(eir_len);
10572 err = mgmt_cmd_complete(cmd->sk, hdev->id,
10573 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
10574 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
10575 if (err < 0 || status)
/* The requester now wants future OOB data update broadcasts too */
10578 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
10580 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10581 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
10582 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
10584 if (skb && !IS_ERR(skb))
10588 mgmt_pending_remove(cmd);
/* Queue an HCI Read Local OOB Data request for the BR/EDR (SSP) case of
 * Read Local OOB Extended Data; the reply is handled by
 * read_local_oob_ext_data_complete().  Cleans up the pending command if
 * queueing fails.
 */
10591 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
10592 struct mgmt_cp_read_local_oob_ext_data *cp)
10594 struct mgmt_pending_cmd *cmd;
10597 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
10602 err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
10603 read_local_oob_ext_data_complete);
10606 mgmt_pending_remove(cmd);
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * For BDADDR_BREDR with SSP enabled the request is forwarded to the
 * controller (read_local_ssp_oob_req) and completed asynchronously.
 * For the LE type-mask the reply is built synchronously: local address,
 * role, optional SC confirmation/random values and AD flags.  Requests
 * are rejected while privacy (RPA) is in use, since the active RPA
 * cannot be reported reliably.  Error-path/goto lines are elided in
 * this excerpt.
 */
10613 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
10614 void *data, u16 data_len)
10616 struct mgmt_cp_read_local_oob_ext_data *cp = data;
10617 struct mgmt_rp_read_local_oob_ext_data *rp;
10620 u8 status, flags, role, addr[7], hash[16], rand[16];
10623 bt_dev_dbg(hdev, "sock %p", sk);
/* Validate the requested address-type mask and transport support */
10625 if (hdev_is_powered(hdev)) {
10626 switch (cp->type) {
10627 case BIT(BDADDR_BREDR):
10628 status = mgmt_bredr_support(hdev);
10634 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
10635 status = mgmt_le_support(hdev);
/* addr(9) + role(3) + confirm(18) + random(18) + flags(3) */
10639 eir_len = 9 + 3 + 18 + 18 + 3;
10642 status = MGMT_STATUS_INVALID_PARAMS;
10647 status = MGMT_STATUS_NOT_POWERED;
10651 rp_len = sizeof(*rp) + eir_len;
10652 rp = kmalloc(rp_len, GFP_ATOMIC);
10656 if (!status && !lmp_ssp_capable(hdev)) {
10657 status = MGMT_STATUS_NOT_SUPPORTED;
10664 hci_dev_lock(hdev);
10667 switch (cp->type) {
10668 case BIT(BDADDR_BREDR):
/* SSP path completes asynchronously via the HCI request */
10669 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
10670 err = read_local_ssp_oob_req(hdev, sk, cp);
10671 hci_dev_unlock(hdev);
10675 status = MGMT_STATUS_FAILED;
10678 eir_len = eir_append_data(rp->eir, eir_len,
10680 hdev->dev_class, 3);
10683 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
/* Generate SC OOB confirm/random values if SC is enabled */
10684 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
10685 smp_generate_oob(hdev, hash, rand) < 0) {
10686 hci_dev_unlock(hdev);
10687 status = MGMT_STATUS_FAILED;
10691 /* This should return the active RPA, but since the RPA
10692 * is only programmed on demand, it is really hard to fill
10693 * this in at the moment. For now disallow retrieving
10694 * local out-of-band data when privacy is in use.
10696 * Returning the identity address will not help here since
10697 * pairing happens before the identity resolving key is
10698 * known and thus the connection establishment happens
10699 * based on the RPA and not the identity address.
10701 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
10702 hci_dev_unlock(hdev);
10703 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address the same way advertising does */
10707 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
10708 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
10709 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
10710 bacmp(&hdev->static_addr, BDADDR_ANY))) {
10711 memcpy(addr, &hdev->static_addr, 6);
10714 memcpy(addr, &hdev->bdaddr, 6);
10718 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
10719 addr, sizeof(addr));
/* Role byte depends on whether we are currently advertising */
10721 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
10726 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
10727 &role, sizeof(role));
10729 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
10730 eir_len = eir_append_data(rp->eir, eir_len,
10732 hash, sizeof(hash));
10734 eir_len = eir_append_data(rp->eir, eir_len,
10736 rand, sizeof(rand));
10739 flags = mgmt_get_adv_discov_flags(hdev);
10741 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
10742 flags |= LE_AD_NO_BREDR;
10744 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
10745 &flags, sizeof(flags));
10749 hci_dev_unlock(hdev);
/* The requester opts into future OOB data update broadcasts */
10751 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
10753 status = MGMT_STATUS_SUCCESS;
10756 rp->type = cp->type;
10757 rp->eir_len = cpu_to_le16(eir_len);
10759 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
10760 status, rp, sizeof(*rp) + eir_len);
10761 if (err < 0 || status)
10764 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
10765 rp, sizeof(*rp) + eir_len,
10766 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT advertising flags this controller supports.
 * The base set is always available; TX power and the extended (secondary
 * PHY / hardware offload) flags depend on controller capabilities.
 */
10774 static u32 get_supported_adv_flags(struct hci_dev *hdev)
10778 flags |= MGMT_ADV_FLAG_CONNECTABLE;
10779 flags |= MGMT_ADV_FLAG_DISCOV;
10780 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
10781 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
10782 flags |= MGMT_ADV_FLAG_APPEARANCE;
10783 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
10784 flags |= MGMT_ADV_PARAM_DURATION;
10785 flags |= MGMT_ADV_PARAM_TIMEOUT;
10786 flags |= MGMT_ADV_PARAM_INTERVALS;
10787 flags |= MGMT_ADV_PARAM_TX_POWER;
10788 flags |= MGMT_ADV_PARAM_SCAN_RSP;
10790 /* In extended adv TX_POWER returned from Set Adv Param
10791 * will be always valid.
10793 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))
10794 flags |= MGMT_ADV_FLAG_TX_POWER;
10796 if (ext_adv_capable(hdev)) {
10797 flags |= MGMT_ADV_FLAG_SEC_1M;
10798 flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
10799 flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
/* Secondary PHY flags depend on the controller's LE feature bits */
10801 if (hdev->le_features[1] & HCI_LE_PHY_2M)
10802 flags |= MGMT_ADV_FLAG_SEC_2M;
10804 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
10805 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT_OP_READ_ADV_FEATURES handler.
 *
 * Reports supported advertising flags, maximum data lengths, the
 * maximum/current instance counts and the list of externally visible
 * instance numbers.
 */
10811 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
10812 void *data, u16 data_len)
10814 struct mgmt_rp_read_adv_features *rp;
10817 struct adv_info *adv_instance;
10818 u32 supported_flags;
10821 bt_dev_dbg(hdev, "sock %p", sk);
10823 if (!lmp_le_capable(hdev))
10824 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10825 MGMT_STATUS_REJECTED);
10827 hci_dev_lock(hdev);
/* One trailing byte per advertising instance follows the fixed part */
10829 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
10830 rp = kmalloc(rp_len, GFP_ATOMIC);
10832 hci_dev_unlock(hdev);
10836 supported_flags = get_supported_adv_flags(hdev);
10838 rp->supported_flags = cpu_to_le32(supported_flags);
10839 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
10840 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
10841 rp->max_instances = hdev->le_num_of_adv_sets;
10842 rp->num_instances = hdev->adv_instance_cnt;
10844 instance = rp->instance;
10845 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
10846 /* Only instances 1-le_num_of_adv_sets are externally visible */
10847 if (adv_instance->instance <= hdev->adv_instance_cnt) {
10848 *instance = adv_instance->instance;
/* Hidden instances are excluded from the reported count */
10851 rp->num_instances--;
10856 hci_dev_unlock(hdev);
10858 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
10859 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Length in bytes that the local name EIR field would occupy, computed
 * by rendering it into a scratch buffer (short name max + 3-byte EIR
 * header).
 */
10866 static u8 calculate_name_len(struct hci_dev *hdev)
10868 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
10870 return eir_append_local_name(hdev, buf, 0);
/* Maximum user-supplied TLV payload for an advertising instance, after
 * subtracting the space consumed by kernel-managed fields (AD flags,
 * TX power, local name, appearance) selected by @adv_flags.  The exact
 * byte counts subtracted are on lines elided from this excerpt.
 */
10873 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
10876 u8 max_len = HCI_MAX_AD_LENGTH;
10879 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
10880 MGMT_ADV_FLAG_LIMITED_DISCOV |
10881 MGMT_ADV_FLAG_MANAGED_FLAGS))
10884 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
10887 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
10888 max_len -= calculate_name_len(hdev)
10890 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True if the kernel manages the AD Flags field for these adv flags,
 * so user data must not contain its own EIR_FLAGS entry.
 */
10897 static bool flags_managed(u32 adv_flags)
10899 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
10900 MGMT_ADV_FLAG_LIMITED_DISCOV |
10901 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True if the kernel manages the TX power EIR field */
10904 static bool tx_power_managed(u32 adv_flags)
10906 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True if the kernel manages the local name EIR field */
10909 static bool name_managed(u32 adv_flags)
10911 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True if the kernel manages the appearance EIR field */
10914 static bool appearance_managed(u32 adv_flags)
10916 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising or scan-response TLV data:
 * it must fit in the available space, be well-formed length-type-value
 * sequences, and must not include fields the kernel manages itself
 * (flags, TX power, name, appearance) per @adv_flags.
 */
10919 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
10920 u8 len, bool is_adv_data)
10925 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
10930 /* Make sure that the data is correctly formatted. */
10931 for (i = 0; i < len; i += (cur_len + 1)) {
/* Reject user-provided fields that the kernel controls */
10937 if (data[i + 1] == EIR_FLAGS &&
10938 (!is_adv_data || flags_managed(adv_flags)))
10941 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
10944 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
10947 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
10950 if (data[i + 1] == EIR_APPEARANCE &&
10951 appearance_managed(adv_flags))
10954 /* If the current field length would exceed the total data
10955 * length, then it's invalid.
10957 if (i + cur_len >= len)
10964 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
10966 u32 supported_flags, phy_flags;
10968 /* The current implementation only supports a subset of the specified
10969 * flags. Also need to check mutual exclusiveness of sec flags.
10971 supported_flags = get_supported_adv_flags(hdev);
10972 phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
/* Reject unsupported flags; phy_flags ^ (phy_flags & -phy_flags) is
 * non-zero when more than one secondary-PHY bit is set, enforcing that
 * at most one SEC_* flag is chosen.
 */
10973 if (adv_flags & ~supported_flags ||
10974 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
/* True while a conflicting Set LE command is pending, in which case
 * advertising add/remove operations must be rejected as busy.
 */
10980 static bool adv_busy(struct hci_dev *hdev)
10982 return pending_find(MGMT_OP_SET_LE, hdev);
/* Common completion for Add Advertising: on failure, clear the pending
 * flag on (or remove) the affected instance(s), cancel the rotation
 * timer if the current instance is going away, and notify userspace of
 * the removal.  Early-return lines for the success case are elided in
 * this excerpt.
 */
10985 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
10988 struct adv_info *adv, *n;
10990 bt_dev_dbg(hdev, "err %d", err);
10992 hci_dev_lock(hdev);
10994 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
11001 adv->pending = false;
11005 instance = adv->instance;
/* Don't leave the rotation timer running for a removed instance */
11007 if (hdev->cur_adv_instance == instance)
11008 cancel_adv_timeout(hdev);
11010 hci_remove_adv_instance(hdev, instance);
11011 mgmt_advertising_removed(sk, hdev, instance);
11014 hci_dev_unlock(hdev);
/* hci_sync completion for MGMT_OP_ADD_ADVERTISING: send the command
 * reply (status-only on error, instance number on success), run the
 * shared cleanup in add_adv_complete(), and free the pending command.
 */
11017 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
11019 struct mgmt_pending_cmd *cmd = data;
11020 struct mgmt_cp_add_advertising *cp = cmd->param;
11021 struct mgmt_rp_add_advertising rp;
11023 memset(&rp, 0, sizeof(rp));
11025 rp.instance = cp->instance;
11028 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11031 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11032 mgmt_status(err), &rp, sizeof(rp));
11034 add_adv_complete(hdev, cmd->sk, cp->instance, err);
11036 mgmt_pending_free(cmd);
/* hci_sync work callback for Add Advertising: schedule the requested
 * instance (forcing it to become the active one).
 */
11039 static int add_advertising_sync(struct hci_dev *hdev, void *data)
11041 struct mgmt_pending_cmd *cmd = data;
11042 struct mgmt_cp_add_advertising *cp = cmd->param;
11044 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Validates the instance number, request length, flags and TLV payload,
 * registers/updates the advertising instance, and either replies
 * immediately (nothing to program: powered off, HCI_ADVERTISING set, or
 * no schedulable instance) or queues add_advertising_sync() to program
 * the controller.  Error-path/goto lines are elided in this excerpt.
 */
11047 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
11048 void *data, u16 data_len)
11050 struct mgmt_cp_add_advertising *cp = data;
11051 struct mgmt_rp_add_advertising rp;
11054 u16 timeout, duration;
11055 unsigned int prev_instance_cnt;
11056 u8 schedule_instance = 0;
11057 struct adv_info *adv, *next_instance;
11059 struct mgmt_pending_cmd *cmd;
11061 bt_dev_dbg(hdev, "sock %p", sk);
11063 status = mgmt_le_support(hdev);
11065 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Instance numbers are 1-based and bounded by the adv-set count */
11068 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11069 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11070 MGMT_STATUS_INVALID_PARAMS);
/* Total length must equal header + adv data + scan response data */
11072 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
11073 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11074 MGMT_STATUS_INVALID_PARAMS);
11076 flags = __le32_to_cpu(cp->flags);
11077 timeout = __le16_to_cpu(cp->timeout);
11078 duration = __le16_to_cpu(cp->duration);
11080 if (!requested_adv_flags_are_valid(hdev, flags))
11081 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11082 MGMT_STATUS_INVALID_PARAMS);
11084 hci_dev_lock(hdev);
/* A timeout needs a running clock, i.e. a powered controller */
11086 if (timeout && !hdev_is_powered(hdev)) {
11087 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11088 MGMT_STATUS_REJECTED);
11092 if (adv_busy(hdev)) {
11093 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
/* Both adv data and scan response TLVs must validate */
11098 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
11099 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
11100 cp->scan_rsp_len, false)) {
11101 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11102 MGMT_STATUS_INVALID_PARAMS);
11106 prev_instance_cnt = hdev->adv_instance_cnt;
11108 adv = hci_add_adv_instance(hdev, cp->instance, flags,
11109 cp->adv_data_len, cp->data,
11111 cp->data + cp->adv_data_len,
11113 HCI_ADV_TX_POWER_NO_PREFERENCE,
11114 hdev->le_adv_min_interval,
11115 hdev->le_adv_max_interval, 0);
11117 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11118 MGMT_STATUS_FAILED);
11122 /* Only trigger an advertising added event if a new instance was
11125 if (hdev->adv_instance_cnt > prev_instance_cnt)
11126 mgmt_advertising_added(sk, hdev, cp->instance);
11128 if (hdev->cur_adv_instance == cp->instance) {
11129 /* If the currently advertised instance is being changed then
11130 * cancel the current advertising and schedule the next
11131 * instance. If there is only one instance then the overridden
11132 * advertising data will be visible right away.
11134 cancel_adv_timeout(hdev);
11136 next_instance = hci_get_next_instance(hdev, cp->instance);
11138 schedule_instance = next_instance->instance;
11139 } else if (!hdev->adv_instance_timeout) {
11140 /* Immediately advertise the new instance if no other
11141 * instance is currently being advertised.
11143 schedule_instance = cp->instance;
11146 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
11147 * there is no instance to be advertised then we have no HCI
11148 * communication to make. Simply return.
11150 if (!hdev_is_powered(hdev) ||
11151 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
11152 !schedule_instance) {
11153 rp.instance = cp->instance;
11154 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11155 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11159 /* We're good to go, update advertising data, parameters, and start
11162 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
/* The completion handler reports the instance actually scheduled */
11169 cp->instance = schedule_instance;
11171 err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
11172 add_advertising_complete);
11174 mgmt_pending_free(cmd);
11177 hci_dev_unlock(hdev);
/* hci_sync completion for MGMT_OP_ADD_EXT_ADV_PARAMS.
 *
 * On success, replies with the instance, its selected TX power and the
 * remaining adv/scan-response data space given the chosen flags.  On
 * failure, removes the instance (announcing the removal if it had been
 * advertising before) and replies with a status.  Some branch lines are
 * elided in this excerpt.
 */
11182 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
11185 struct mgmt_pending_cmd *cmd = data;
11186 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
11187 struct mgmt_rp_add_ext_adv_params rp;
11188 struct adv_info *adv;
11191 BT_DBG("%s", hdev->name);
11193 hci_dev_lock(hdev);
11195 adv = hci_find_adv_instance(hdev, cp->instance);
11199 rp.instance = cp->instance;
11200 rp.tx_power = adv->tx_power;
11202 /* While we're at it, inform userspace of the available space for this
11203 * advertisement, given the flags that will be used.
11205 flags = __le32_to_cpu(cp->flags);
11206 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11207 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11210 /* If this advertisement was previously advertising and we
11211 * failed to update it, we signal that it has been removed and
11212 * delete its structure
11215 mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
11217 hci_remove_adv_instance(hdev, cp->instance);
11219 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11222 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11223 mgmt_status(err), &rp, sizeof(rp));
11228 mgmt_pending_free(cmd);
11230 hci_dev_unlock(hdev);
/* hci_sync work callback for Add Extended Advertising Parameters:
 * program the extended advertising set for this instance.
 */
11233 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
11235 struct mgmt_pending_cmd *cmd = data;
11236 struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
11238 return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
11241 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
11242 void *data, u16 data_len)
11244 struct mgmt_cp_add_ext_adv_params *cp = data;
11245 struct mgmt_rp_add_ext_adv_params rp;
11246 struct mgmt_pending_cmd *cmd = NULL;
11247 struct adv_info *adv;
11248 u32 flags, min_interval, max_interval;
11249 u16 timeout, duration;
11254 BT_DBG("%s", hdev->name);
11256 status = mgmt_le_support(hdev);
11258 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11261 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11262 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11263 MGMT_STATUS_INVALID_PARAMS);
11265 /* The purpose of breaking add_advertising into two separate MGMT calls
11266 * for params and data is to allow more parameters to be added to this
11267 * structure in the future. For this reason, we verify that we have the
11268 * bare minimum structure we know of when the interface was defined. Any
11269 * extra parameters we don't know about will be ignored in this request.
11271 if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
11272 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11273 MGMT_STATUS_INVALID_PARAMS);
11275 flags = __le32_to_cpu(cp->flags);
11277 if (!requested_adv_flags_are_valid(hdev, flags))
11278 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11279 MGMT_STATUS_INVALID_PARAMS);
11281 hci_dev_lock(hdev);
11283 /* In new interface, we require that we are powered to register */
11284 if (!hdev_is_powered(hdev)) {
11285 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11286 MGMT_STATUS_REJECTED);
11290 if (adv_busy(hdev)) {
11291 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11296 /* Parse defined parameters from request, use defaults otherwise */
11297 timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
11298 __le16_to_cpu(cp->timeout) : 0;
11300 duration = (flags & MGMT_ADV_PARAM_DURATION) ?
11301 __le16_to_cpu(cp->duration) :
11302 hdev->def_multi_adv_rotation_duration;
11304 min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
11305 __le32_to_cpu(cp->min_interval) :
11306 hdev->le_adv_min_interval;
11308 max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
11309 __le32_to_cpu(cp->max_interval) :
11310 hdev->le_adv_max_interval;
11312 tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
11314 HCI_ADV_TX_POWER_NO_PREFERENCE;
11316 /* Create advertising instance with no advertising or response data */
11317 adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,
11318 timeout, duration, tx_power, min_interval,
11322 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
11323 MGMT_STATUS_FAILED);
11327 /* Submit request for advertising params if ext adv available */
11328 if (ext_adv_capable(hdev)) {
11329 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
11333 hci_remove_adv_instance(hdev, cp->instance);
11337 err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
11338 add_ext_adv_params_complete);
11340 mgmt_pending_free(cmd);
11342 rp.instance = cp->instance;
11343 rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
11344 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11345 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11346 err = mgmt_cmd_complete(sk, hdev->id,
11347 MGMT_OP_ADD_EXT_ADV_PARAMS,
11348 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11352 hci_dev_unlock(hdev);
11357 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
11359 struct mgmt_pending_cmd *cmd = data;
11360 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
11361 struct mgmt_rp_add_advertising rp;
11363 add_adv_complete(hdev, cmd->sk, cp->instance, err);
11365 memset(&rp, 0, sizeof(rp));
11367 rp.instance = cp->instance;
11370 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11373 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11374 mgmt_status(err), &rp, sizeof(rp));
11376 mgmt_pending_free(cmd);
11379 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
11381 struct mgmt_pending_cmd *cmd = data;
11382 struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
11385 if (ext_adv_capable(hdev)) {
11386 err = hci_update_adv_data_sync(hdev, cp->instance);
11390 err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
11394 return hci_enable_ext_advertising_sync(hdev, cp->instance);
11397 return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
11400 static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
11403 struct mgmt_cp_add_ext_adv_data *cp = data;
11404 struct mgmt_rp_add_ext_adv_data rp;
11405 u8 schedule_instance = 0;
11406 struct adv_info *next_instance;
11407 struct adv_info *adv_instance;
11409 struct mgmt_pending_cmd *cmd;
11411 BT_DBG("%s", hdev->name);
11413 hci_dev_lock(hdev);
11415 adv_instance = hci_find_adv_instance(hdev, cp->instance);
11417 if (!adv_instance) {
11418 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11419 MGMT_STATUS_INVALID_PARAMS);
11423 /* In new interface, we require that we are powered to register */
11424 if (!hdev_is_powered(hdev)) {
11425 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11426 MGMT_STATUS_REJECTED);
11427 goto clear_new_instance;
11430 if (adv_busy(hdev)) {
11431 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11433 goto clear_new_instance;
11436 /* Validate new data */
11437 if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
11438 cp->adv_data_len, true) ||
11439 !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
11440 cp->adv_data_len, cp->scan_rsp_len, false)) {
11441 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11442 MGMT_STATUS_INVALID_PARAMS);
11443 goto clear_new_instance;
11446 /* Set the data in the advertising instance */
11447 hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
11448 cp->data, cp->scan_rsp_len,
11449 cp->data + cp->adv_data_len);
11451 /* If using software rotation, determine next instance to use */
11452 if (hdev->cur_adv_instance == cp->instance) {
11453 /* If the currently advertised instance is being changed
11454 * then cancel the current advertising and schedule the
11455 * next instance. If there is only one instance then the
11456 * overridden advertising data will be visible right
11459 cancel_adv_timeout(hdev);
11461 next_instance = hci_get_next_instance(hdev, cp->instance);
11463 schedule_instance = next_instance->instance;
11464 } else if (!hdev->adv_instance_timeout) {
11465 /* Immediately advertise the new instance if no other
11466 * instance is currently being advertised.
11468 schedule_instance = cp->instance;
11471 /* If the HCI_ADVERTISING flag is set or there is no instance to
11472 * be advertised then we have no HCI communication to make.
11475 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
11476 if (adv_instance->pending) {
11477 mgmt_advertising_added(sk, hdev, cp->instance);
11478 adv_instance->pending = false;
11480 rp.instance = cp->instance;
11481 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
11482 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11486 cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
11490 goto clear_new_instance;
11493 err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
11494 add_ext_adv_data_complete);
11496 mgmt_pending_free(cmd);
11497 goto clear_new_instance;
11500 /* We were successful in updating data, so trigger advertising_added
11501 * event if this is an instance that wasn't previously advertising. If
11502 * a failure occurs in the requests we initiated, we will remove the
11503 * instance again in add_advertising_complete
11505 if (adv_instance->pending)
11506 mgmt_advertising_added(sk, hdev, cp->instance);
11510 clear_new_instance:
11511 hci_remove_adv_instance(hdev, cp->instance);
11514 hci_dev_unlock(hdev);
11519 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
11522 struct mgmt_pending_cmd *cmd = data;
11523 struct mgmt_cp_remove_advertising *cp = cmd->param;
11524 struct mgmt_rp_remove_advertising rp;
11526 bt_dev_dbg(hdev, "err %d", err);
11528 memset(&rp, 0, sizeof(rp));
11529 rp.instance = cp->instance;
11532 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
11535 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
11536 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
11538 mgmt_pending_free(cmd);
11541 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
11543 struct mgmt_pending_cmd *cmd = data;
11544 struct mgmt_cp_remove_advertising *cp = cmd->param;
11547 err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
11551 if (list_empty(&hdev->adv_instances))
11552 err = hci_disable_advertising_sync(hdev);
11557 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
11558 void *data, u16 data_len)
11560 struct mgmt_cp_remove_advertising *cp = data;
11561 struct mgmt_pending_cmd *cmd;
11564 bt_dev_dbg(hdev, "sock %p", sk);
11566 hci_dev_lock(hdev);
11568 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
11569 err = mgmt_cmd_status(sk, hdev->id,
11570 MGMT_OP_REMOVE_ADVERTISING,
11571 MGMT_STATUS_INVALID_PARAMS);
11575 if (pending_find(MGMT_OP_SET_LE, hdev)) {
11576 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11581 if (list_empty(&hdev->adv_instances)) {
11582 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
11583 MGMT_STATUS_INVALID_PARAMS);
11587 cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
11594 err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
11595 remove_advertising_complete);
11597 mgmt_pending_free(cmd);
11600 hci_dev_unlock(hdev);
11605 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
11606 void *data, u16 data_len)
11608 struct mgmt_cp_get_adv_size_info *cp = data;
11609 struct mgmt_rp_get_adv_size_info rp;
11610 u32 flags, supported_flags;
11612 bt_dev_dbg(hdev, "sock %p", sk);
11614 if (!lmp_le_capable(hdev))
11615 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11616 MGMT_STATUS_REJECTED);
11618 if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11619 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11620 MGMT_STATUS_INVALID_PARAMS);
11622 flags = __le32_to_cpu(cp->flags);
11624 /* The current implementation only supports a subset of the specified
11627 supported_flags = get_supported_adv_flags(hdev);
11628 if (flags & ~supported_flags)
11629 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11630 MGMT_STATUS_INVALID_PARAMS);
11632 rp.instance = cp->instance;
11633 rp.flags = cp->flags;
11634 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
11635 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
11637 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
11638 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for standard MGMT opcodes: entry index == opcode value, so
 * the { NULL } placeholder keeps opcode 0x0000 unused. Each entry carries the
 * handler, its minimum parameter size, and optional HCI_MGMT_* flags
 * (VAR_LEN, UNTRUSTED, UNCONFIGURED, HDEV_OPTIONAL).
 * NOTE(review): this chunk is a garbled extract — several flag continuation
 * lines between the visible ones were dropped; restore from the original
 * tree before building.
 */
11641 static const struct hci_mgmt_handler mgmt_handlers[] = {
11642 { NULL }, /* 0x0000 (no command) */
11643 { read_version, MGMT_READ_VERSION_SIZE,
11645 HCI_MGMT_UNTRUSTED },
11646 { read_commands, MGMT_READ_COMMANDS_SIZE,
11648 HCI_MGMT_UNTRUSTED },
11649 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
11651 HCI_MGMT_UNTRUSTED },
11652 { read_controller_info, MGMT_READ_INFO_SIZE,
11653 HCI_MGMT_UNTRUSTED },
11654 { set_powered, MGMT_SETTING_SIZE },
11655 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
11656 { set_connectable, MGMT_SETTING_SIZE },
11657 { set_fast_connectable, MGMT_SETTING_SIZE },
11658 { set_bondable, MGMT_SETTING_SIZE },
11659 { set_link_security, MGMT_SETTING_SIZE },
11660 { set_ssp, MGMT_SETTING_SIZE },
11661 { set_hs, MGMT_SETTING_SIZE },
11662 { set_le, MGMT_SETTING_SIZE },
11663 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
11664 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
11665 { add_uuid, MGMT_ADD_UUID_SIZE },
11666 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
11667 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
11668 HCI_MGMT_VAR_LEN },
11669 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
11670 HCI_MGMT_VAR_LEN },
11671 { disconnect, MGMT_DISCONNECT_SIZE },
11672 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
11673 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
11674 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
11675 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
11676 { pair_device, MGMT_PAIR_DEVICE_SIZE },
11677 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
11678 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
11679 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
11680 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
11681 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
11682 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
11683 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
11684 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
11685 HCI_MGMT_VAR_LEN },
11686 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
11687 { start_discovery, MGMT_START_DISCOVERY_SIZE },
11688 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
11689 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
11690 { block_device, MGMT_BLOCK_DEVICE_SIZE },
11691 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
11692 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
11693 { set_advertising, MGMT_SETTING_SIZE },
11694 { set_bredr, MGMT_SETTING_SIZE },
11695 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
11696 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
11697 { set_secure_conn, MGMT_SETTING_SIZE },
11698 { set_debug_keys, MGMT_SETTING_SIZE },
11699 { set_privacy, MGMT_SET_PRIVACY_SIZE },
11700 { load_irks, MGMT_LOAD_IRKS_SIZE,
11701 HCI_MGMT_VAR_LEN },
11702 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
11703 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
11704 { add_device, MGMT_ADD_DEVICE_SIZE },
11705 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
11706 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
11707 HCI_MGMT_VAR_LEN },
11708 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
11710 HCI_MGMT_UNTRUSTED },
11711 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
11712 HCI_MGMT_UNCONFIGURED |
11713 HCI_MGMT_UNTRUSTED },
11714 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
11715 HCI_MGMT_UNCONFIGURED },
11716 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
11717 HCI_MGMT_UNCONFIGURED },
11718 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
11719 HCI_MGMT_VAR_LEN },
11720 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
11721 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
11723 HCI_MGMT_UNTRUSTED },
11724 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
11725 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
11726 HCI_MGMT_VAR_LEN },
11727 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
11728 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
11729 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
11730 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
11731 HCI_MGMT_UNTRUSTED },
11732 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
11733 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
11734 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
11735 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
11736 HCI_MGMT_VAR_LEN },
11737 { set_wideband_speech, MGMT_SETTING_SIZE },
11738 { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE,
11739 HCI_MGMT_UNTRUSTED },
11740 { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE,
11741 HCI_MGMT_UNTRUSTED |
11742 HCI_MGMT_HDEV_OPTIONAL },
11743 { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE,
11745 HCI_MGMT_HDEV_OPTIONAL },
11746 { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
11747 HCI_MGMT_UNTRUSTED },
11748 { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
11749 HCI_MGMT_VAR_LEN },
11750 { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
11751 HCI_MGMT_UNTRUSTED },
11752 { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
11753 HCI_MGMT_VAR_LEN },
11754 { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE },
11755 { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE },
11756 { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
11757 { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
11758 HCI_MGMT_VAR_LEN },
11759 { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE },
11760 { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
11761 HCI_MGMT_VAR_LEN },
11762 { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE,
11763 HCI_MGMT_VAR_LEN },
11764 { add_adv_patterns_monitor_rssi,
11765 MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
11766 HCI_MGMT_VAR_LEN },
11767 { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE,
11768 HCI_MGMT_VAR_LEN },
11769 { mesh_features, MGMT_MESH_READ_FEATURES_SIZE },
11770 { mesh_send, MGMT_MESH_SEND_SIZE,
11771 HCI_MGMT_VAR_LEN },
11772 { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE },
/* Tizen-specific MGMT opcode dispatch table (opcodes from mgmt_tizen.h),
 * indexed the same way as mgmt_handlers[]: entry position == opcode, with a
 * { NULL } placeholder for 0x0000. Covers vendor extensions such as RSSI
 * monitoring, LE discovery, 6LoWPAN and white-list management.
 * NOTE(review): garbled extract — line-number residue and the closing
 * brace were dropped; restore from the original tree before building.
 */
11776 static const struct hci_mgmt_handler tizen_mgmt_handlers[] = {
11777 { NULL }, /* 0x0000 (no command) */
11778 { set_advertising_params, MGMT_SET_ADVERTISING_PARAMS_SIZE },
11779 { set_advertising_data, MGMT_SET_ADV_MIN_APP_DATA_SIZE,
11780 HCI_MGMT_VAR_LEN },
11781 { set_scan_rsp_data, MGMT_SET_SCAN_RSP_MIN_APP_DATA_SIZE,
11782 HCI_MGMT_VAR_LEN },
11783 { add_white_list, MGMT_ADD_DEV_WHITE_LIST_SIZE },
11784 { remove_from_white_list, MGMT_REMOVE_DEV_FROM_WHITE_LIST_SIZE },
11785 { clear_white_list, MGMT_OP_CLEAR_DEV_WHITE_LIST_SIZE },
11786 { set_enable_rssi, MGMT_SET_RSSI_ENABLE_SIZE },
11787 { get_raw_rssi, MGMT_GET_RAW_RSSI_SIZE },
11788 { set_disable_threshold, MGMT_SET_RSSI_DISABLE_SIZE },
11789 { start_le_discovery, MGMT_START_LE_DISCOVERY_SIZE },
11790 { stop_le_discovery, MGMT_STOP_LE_DISCOVERY_SIZE },
11791 { disable_le_auto_connect, MGMT_DISABLE_LE_AUTO_CONNECT_SIZE },
11792 { le_conn_update, MGMT_LE_CONN_UPDATE_SIZE },
11793 { set_manufacturer_data, MGMT_SET_MANUFACTURER_DATA_SIZE },
11794 { le_set_scan_params, MGMT_LE_SET_SCAN_PARAMS_SIZE },
11795 { set_voice_setting, MGMT_SET_VOICE_SETTING_SIZE },
11796 { get_adv_tx_power, MGMT_GET_ADV_TX_POWER_SIZE },
11797 { enable_bt_6lowpan, MGMT_ENABLE_BT_6LOWPAN_SIZE },
11798 { connect_bt_6lowpan, MGMT_CONNECT_6LOWPAN_SIZE },
11799 { disconnect_bt_6lowpan, MGMT_DISCONNECT_6LOWPAN_SIZE },
11800 { read_maximum_le_data_length,
11801 MGMT_LE_READ_MAXIMUM_DATA_LENGTH_SIZE },
11802 { write_host_suggested_le_data_length,
11803 MGMT_LE_WRITE_HOST_SUGGESTED_DATA_LENGTH_SIZE },
11804 { read_host_suggested_data_length,
11805 MGMT_LE_READ_HOST_SUGGESTED_DATA_LENGTH_SIZE },
11806 { set_le_data_length_params,
11807 MGMT_LE_SET_DATA_LENGTH_SIZE },
11811 void mgmt_index_added(struct hci_dev *hdev)
11813 struct mgmt_ev_ext_index ev;
11815 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11818 switch (hdev->dev_type) {
11820 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11821 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
11822 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11825 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
11826 HCI_MGMT_INDEX_EVENTS);
11837 ev.bus = hdev->bus;
11839 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
11840 HCI_MGMT_EXT_INDEX_EVENTS);
11843 void mgmt_index_removed(struct hci_dev *hdev)
11845 struct mgmt_ev_ext_index ev;
11846 u8 status = MGMT_STATUS_INVALID_INDEX;
11848 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
11851 switch (hdev->dev_type) {
11853 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11855 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
11856 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
11857 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
11860 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
11861 HCI_MGMT_INDEX_EVENTS);
11872 ev.bus = hdev->bus;
11874 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
11875 HCI_MGMT_EXT_INDEX_EVENTS);
11877 /* Cancel any remaining timed work */
11878 if (!hci_dev_test_flag(hdev, HCI_MGMT))
11880 cancel_delayed_work_sync(&hdev->discov_off);
11881 cancel_delayed_work_sync(&hdev->service_cache);
11882 cancel_delayed_work_sync(&hdev->rpa_expired);
11885 void mgmt_power_on(struct hci_dev *hdev, int err)
11887 struct cmd_lookup match = { NULL, hdev };
11889 bt_dev_dbg(hdev, "err %d", err);
11891 hci_dev_lock(hdev);
11894 restart_le_actions(hdev);
11895 hci_update_passive_scan(hdev);
11898 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11900 new_settings(hdev, match.sk);
11903 sock_put(match.sk);
11905 hci_dev_unlock(hdev);
11908 void __mgmt_power_off(struct hci_dev *hdev)
11910 struct cmd_lookup match = { NULL, hdev };
11911 u8 status, zero_cod[] = { 0, 0, 0 };
11913 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
11915 /* If the power off is because of hdev unregistration let
11916 * use the appropriate INVALID_INDEX status. Otherwise use
11917 * NOT_POWERED. We cover both scenarios here since later in
11918 * mgmt_index_removed() any hci_conn callbacks will have already
11919 * been triggered, potentially causing misleading DISCONNECTED
11920 * status responses.
11922 if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
11923 status = MGMT_STATUS_INVALID_INDEX;
11925 status = MGMT_STATUS_NOT_POWERED;
11927 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
11929 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
11930 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
11931 zero_cod, sizeof(zero_cod),
11932 HCI_MGMT_DEV_CLASS_EVENTS, NULL);
11933 ext_info_changed(hdev, NULL);
11936 new_settings(hdev, match.sk);
11939 sock_put(match.sk);
11942 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
11944 struct mgmt_pending_cmd *cmd;
11947 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
11951 if (err == -ERFKILL)
11952 status = MGMT_STATUS_RFKILLED;
11954 status = MGMT_STATUS_FAILED;
11956 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
11958 mgmt_pending_remove(cmd);
11961 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
11964 struct mgmt_ev_new_link_key ev;
11966 memset(&ev, 0, sizeof(ev));
11968 ev.store_hint = persistent;
11969 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
11970 ev.key.addr.type = BDADDR_BREDR;
11971 ev.key.type = key->type;
11972 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
11973 ev.key.pin_len = key->pin_len;
11975 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
11978 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
11980 switch (ltk->type) {
11982 case SMP_LTK_RESPONDER:
11983 if (ltk->authenticated)
11984 return MGMT_LTK_AUTHENTICATED;
11985 return MGMT_LTK_UNAUTHENTICATED;
11987 if (ltk->authenticated)
11988 return MGMT_LTK_P256_AUTH;
11989 return MGMT_LTK_P256_UNAUTH;
11990 case SMP_LTK_P256_DEBUG:
11991 return MGMT_LTK_P256_DEBUG;
11994 return MGMT_LTK_UNAUTHENTICATED;
11997 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
11999 struct mgmt_ev_new_long_term_key ev;
12001 memset(&ev, 0, sizeof(ev));
12003 /* Devices using resolvable or non-resolvable random addresses
12004 * without providing an identity resolving key don't require
12005 * to store long term keys. Their addresses will change the
12006 * next time around.
12008 * Only when a remote device provides an identity address
12009 * make sure the long term key is stored. If the remote
12010 * identity is known, the long term keys are internally
12011 * mapped to the identity address. So allow static random
12012 * and public addresses here.
12014 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
12015 (key->bdaddr.b[5] & 0xc0) != 0xc0)
12016 ev.store_hint = 0x00;
12018 ev.store_hint = persistent;
12020 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
12021 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
12022 ev.key.type = mgmt_ltk_type(key);
12023 ev.key.enc_size = key->enc_size;
12024 ev.key.ediv = key->ediv;
12025 ev.key.rand = key->rand;
12027 if (key->type == SMP_LTK)
12028 ev.key.initiator = 1;
12030 /* Make sure we copy only the significant bytes based on the
12031 * encryption key size, and set the rest of the value to zeroes.
12033 memcpy(ev.key.val, key->val, key->enc_size);
12034 memset(ev.key.val + key->enc_size, 0,
12035 sizeof(ev.key.val) - key->enc_size);
12037 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
12040 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
12042 struct mgmt_ev_new_irk ev;
12044 memset(&ev, 0, sizeof(ev));
12046 ev.store_hint = persistent;
12048 bacpy(&ev.rpa, &irk->rpa);
12049 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
12050 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
12051 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
12053 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
12056 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
12059 struct mgmt_ev_new_csrk ev;
12061 memset(&ev, 0, sizeof(ev));
12063 /* Devices using resolvable or non-resolvable random addresses
12064 * without providing an identity resolving key don't require
12065 * to store signature resolving keys. Their addresses will change
12066 * the next time around.
12068 * Only when a remote device provides an identity address
12069 * make sure the signature resolving key is stored. So allow
12070 * static random and public addresses here.
12072 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
12073 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
12074 ev.store_hint = 0x00;
12076 ev.store_hint = persistent;
12078 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
12079 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
12080 ev.key.type = csrk->type;
12081 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
12083 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
12086 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
12087 u8 bdaddr_type, u8 store_hint, u16 min_interval,
12088 u16 max_interval, u16 latency, u16 timeout)
12090 struct mgmt_ev_new_conn_param ev;
12092 if (!hci_is_identity_address(bdaddr, bdaddr_type))
12095 memset(&ev, 0, sizeof(ev));
12096 bacpy(&ev.addr.bdaddr, bdaddr);
12097 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
12098 ev.store_hint = store_hint;
12099 ev.min_interval = cpu_to_le16(min_interval);
12100 ev.max_interval = cpu_to_le16(max_interval);
12101 ev.latency = cpu_to_le16(latency);
12102 ev.timeout = cpu_to_le16(timeout);
12104 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
12107 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
12108 u8 *name, u8 name_len)
12110 struct sk_buff *skb;
12111 struct mgmt_ev_device_connected *ev;
12115 /* allocate buff for LE or BR/EDR adv */
12116 if (conn->le_adv_data_len > 0)
12117 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
12118 sizeof(*ev) + conn->le_adv_data_len);
12120 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
12121 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
12122 eir_precalc_len(sizeof(conn->dev_class)));
12124 ev = skb_put(skb, sizeof(*ev));
12125 bacpy(&ev->addr.bdaddr, &conn->dst);
12126 ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
12129 flags |= MGMT_DEV_FOUND_INITIATED_CONN;
12131 ev->flags = __cpu_to_le32(flags);
12133 /* We must ensure that the EIR Data fields are ordered and
12134 * unique. Keep it simple for now and avoid the problem by not
12135 * adding any BR/EDR data to the LE adv.
12137 if (conn->le_adv_data_len > 0) {
12138 skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
12139 eir_len = conn->le_adv_data_len;
12142 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12144 if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
12145 eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
12146 conn->dev_class, sizeof(conn->dev_class));
12149 ev->eir_len = cpu_to_le16(eir_len);
12151 mgmt_event_skb(skb, NULL);
12154 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
12156 struct sock **sk = data;
12158 cmd->cmd_complete(cmd, 0);
12163 mgmt_pending_remove(cmd);
12166 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
12168 struct hci_dev *hdev = data;
12169 struct mgmt_cp_unpair_device *cp = cmd->param;
12171 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
12173 cmd->cmd_complete(cmd, 0);
12174 mgmt_pending_remove(cmd);
12177 bool mgmt_powering_down(struct hci_dev *hdev)
12179 struct mgmt_pending_cmd *cmd;
12180 struct mgmt_mode *cp;
12182 cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
12193 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
12194 u8 link_type, u8 addr_type, u8 reason,
12195 bool mgmt_connected)
12197 struct mgmt_ev_device_disconnected ev;
12198 struct sock *sk = NULL;
12200 /* The connection is still in hci_conn_hash so test for 1
12201 * instead of 0 to know if this is the last one.
12203 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
12204 cancel_delayed_work(&hdev->power_off);
12205 queue_work(hdev->req_workqueue, &hdev->power_off.work);
12208 if (!mgmt_connected)
12211 if (link_type != ACL_LINK && link_type != LE_LINK)
12214 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
12216 bacpy(&ev.addr.bdaddr, bdaddr);
12217 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12218 ev.reason = reason;
12220 /* Report disconnects due to suspend */
12221 if (hdev->suspended)
12222 ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
12224 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
12229 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
12233 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
12234 u8 link_type, u8 addr_type, u8 status)
12236 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
12237 struct mgmt_cp_disconnect *cp;
12238 struct mgmt_pending_cmd *cmd;
12240 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
12243 cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
12249 if (bacmp(bdaddr, &cp->addr.bdaddr))
12252 if (cp->addr.type != bdaddr_type)
12255 cmd->cmd_complete(cmd, mgmt_status(status));
12256 mgmt_pending_remove(cmd);
12259 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12260 u8 addr_type, u8 status)
12262 struct mgmt_ev_connect_failed ev;
12264 /* The connection is still in hci_conn_hash so test for 1
12265 * instead of 0 to know if this is the last one.
12267 if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
12268 cancel_delayed_work(&hdev->power_off);
12269 queue_work(hdev->req_workqueue, &hdev->power_off.work);
12272 bacpy(&ev.addr.bdaddr, bdaddr);
12273 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12274 ev.status = mgmt_status(status);
12276 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
12279 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
12281 struct mgmt_ev_pin_code_request ev;
12283 bacpy(&ev.addr.bdaddr, bdaddr);
12284 ev.addr.type = BDADDR_BREDR;
12285 ev.secure = secure;
12287 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
12290 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12293 struct mgmt_pending_cmd *cmd;
12295 cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
12299 cmd->cmd_complete(cmd, mgmt_status(status));
12300 mgmt_pending_remove(cmd);
12303 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12306 struct mgmt_pending_cmd *cmd;
12308 cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
12312 cmd->cmd_complete(cmd, mgmt_status(status));
12313 mgmt_pending_remove(cmd);
12316 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
12317 u8 link_type, u8 addr_type, u32 value,
12320 struct mgmt_ev_user_confirm_request ev;
12322 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12324 bacpy(&ev.addr.bdaddr, bdaddr);
12325 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12326 ev.confirm_hint = confirm_hint;
12327 ev.value = cpu_to_le32(value);
12329 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
12333 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
12334 u8 link_type, u8 addr_type)
12336 struct mgmt_ev_user_passkey_request ev;
12338 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12340 bacpy(&ev.addr.bdaddr, bdaddr);
12341 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12343 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
12347 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12348 u8 link_type, u8 addr_type, u8 status,
12351 struct mgmt_pending_cmd *cmd;
12353 cmd = pending_find(opcode, hdev);
12357 cmd->cmd_complete(cmd, mgmt_status(status));
12358 mgmt_pending_remove(cmd);
12363 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12364 u8 link_type, u8 addr_type, u8 status)
12366 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12367 status, MGMT_OP_USER_CONFIRM_REPLY);
12370 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12371 u8 link_type, u8 addr_type, u8 status)
12373 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12375 MGMT_OP_USER_CONFIRM_NEG_REPLY);
12378 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12379 u8 link_type, u8 addr_type, u8 status)
12381 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12382 status, MGMT_OP_USER_PASSKEY_REPLY);
12385 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
12386 u8 link_type, u8 addr_type, u8 status)
12388 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
12390 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey being
 * entered on the remote side; ev.passkey is little-endian on the wire.
 *
 * NOTE(review): original line numbers jump; the parameter line declaring
 * "entered" and the braces were dropped in extraction.
 */
12393 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
12394 u8 link_type, u8 addr_type, u32 passkey,
12397 struct mgmt_ev_passkey_notify ev;
12399 bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
12401 bacpy(&ev.addr.bdaddr, bdaddr);
12402 ev.addr.type = link_to_bdaddr(link_type, addr_type);
12403 ev.passkey = __cpu_to_le32(passkey);
/* Number of digits the remote has typed so far. */
12404 ev.entered = entered;
12406 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Report an authentication failure for @conn: broadcast
 * MGMT_EV_AUTH_FAILED (skipping the socket of any pending pairing
 * command so it is not notified twice) and, when a pairing command is
 * pending, complete and remove it with the same status.
 *
 * NOTE(review): original line numbers jump; the "if (cmd)" guard that
 * upstream places around the cmd_complete()/remove pair (and the braces)
 * were dropped in extraction -- cmd may be NULL here, restore the guard
 * from upstream.
 */
12409 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
12411 struct mgmt_ev_auth_failed ev;
12412 struct mgmt_pending_cmd *cmd;
12413 u8 status = mgmt_status(hci_status);
12415 bacpy(&ev.addr.bdaddr, &conn->dst);
12416 ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
12417 ev.status = status;
12419 cmd = find_pairing(conn);
/* Skip the pairing initiator's socket; it gets cmd_complete instead. */
12421 mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
12422 cmd ? cmd->sk : NULL);
12425 cmd->cmd_complete(cmd, status);
12426 mgmt_pending_remove(cmd);
/* Complete pending MGMT_OP_SET_LINK_SECURITY commands once the
 * controller has processed the authentication-enable change. On error,
 * all pending commands receive the translated error status; otherwise
 * the HCI_LINK_SECURITY dev flag is synced with the HCI_AUTH state, the
 * commands get a settings response, and New Settings is emitted when the
 * flag actually changed.
 *
 * NOTE(review): original line numbers jump; the "if (status)" / else
 * split, the "bool changed" declaration, the early return on error, the
 * "if (changed)" and "if (match.sk)" guards and all braces were dropped
 * in extraction -- restore from upstream.
 */
12430 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
12432 struct cmd_lookup match = { NULL, hdev };
12436 u8 mgmt_err = mgmt_status(status);
12437 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
12438 cmd_status_rsp, &mgmt_err);
12442 if (test_bit(HCI_AUTH, &hdev->flags))
12443 changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
12445 changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
12447 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
12451 new_settings(hdev, match.sk);
/* settings_rsp took a socket reference via the cmd_lookup; drop it. */
12454 sock_put(match.sk);
12457 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
12459 struct cmd_lookup *match = data;
12461 if (match->sk == NULL) {
12462 match->sk = cmd->sk;
12463 sock_hold(match->sk);
/* Complete pending commands that can change the Class of Device
 * (Set Dev Class, Add/Remove UUID): capture the first requester's socket
 * via sk_lookup(), then broadcast the new 3-byte CoD with
 * MGMT_EV_CLASS_OF_DEV_CHANGED and refresh extended info.
 *
 * NOTE(review): original line numbers jump; the "if (!status)" guard
 * before the broadcast, the "if (match.sk)" guard before sock_put() and
 * the braces were dropped in extraction -- restore from upstream.
 */
12467 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
12470 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
12472 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
12473 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
12474 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
12477 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
12478 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
12479 ext_info_changed(hdev, NULL);
/* sk_lookup() took a reference on the first socket it saw. */
12483 sock_put(match.sk);
/* Handle completion of a local-name change: build a Local Name Changed
 * event (name plus current short name), cache the new name in
 * hdev->dev_name, and broadcast it unless the write was part of the
 * power-on sequence.
 *
 * NOTE(review): original line numbers jump; the early return on status,
 * the "if (!cmd)" guard before caching the name, the "return" after the
 * SET_POWERED check and the braces were dropped in extraction --
 * restore from upstream.
 */
12486 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
12488 struct mgmt_cp_set_local_name ev;
12489 struct mgmt_pending_cmd *cmd;
12494 memset(&ev, 0, sizeof(ev));
12495 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
12496 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
12498 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
12500 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
12502 /* If this is a HCI command related to powering on the
12503 * HCI dev don't send any mgmt signals.
12505 if (pending_find(MGMT_OP_SET_POWERED, hdev))
/* Skip the requester's socket; it already knows the new name. */
12509 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
12510 HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
12511 ext_info_changed(hdev, cmd ? cmd->sk : NULL);
12514 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
12518 for (i = 0; i < uuid_count; i++) {
12519 if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data in @eir and return true if any service
 * UUID it lists (16-, 32- or 128-bit, complete or incomplete list) is in
 * @uuids. 16/32-bit UUIDs are expanded to 128 bits against the Bluetooth
 * base UUID before comparison; EIR values are little endian, the
 * expanded UUID is big endian, hence the reversed byte placement.
 *
 * NOTE(review): original line numbers jump; local declarations (parsed,
 * i, the uuid[16] scratch buffer), the switch statement opener, the
 * "return true"/"return false" lines, break statements and braces were
 * dropped in extraction -- restore from upstream.
 */
12526 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
12530 while (parsed < eir_len) {
12531 u8 field_len = eir[0];
/* A zero-length field terminates the EIR data. */
12535 if (field_len == 0)
/* Reject truncated fields that would run past the buffer. */
12538 if (eir_len - parsed < field_len + 1)
12542 case EIR_UUID16_ALL:
12543 case EIR_UUID16_SOME:
12544 for (i = 0; i + 3 <= field_len; i += 2) {
12545 memcpy(uuid, bluetooth_base_uuid, 16);
12546 uuid[13] = eir[i + 3];
12547 uuid[12] = eir[i + 2];
12548 if (has_uuid(uuid, uuid_count, uuids))
12552 case EIR_UUID32_ALL:
12553 case EIR_UUID32_SOME:
12554 for (i = 0; i + 5 <= field_len; i += 4) {
12555 memcpy(uuid, bluetooth_base_uuid, 16);
12556 uuid[15] = eir[i + 5];
12557 uuid[14] = eir[i + 4];
12558 uuid[13] = eir[i + 3];
12559 uuid[12] = eir[i + 2];
12560 if (has_uuid(uuid, uuid_count, uuids))
12564 case EIR_UUID128_ALL:
12565 case EIR_UUID128_SOME:
12566 for (i = 0; i + 17 <= field_len; i += 16) {
12567 memcpy(uuid, eir + i + 2, 16);
12568 if (has_uuid(uuid, uuid_count, uuids))
/* Advance past this field: length byte plus field_len payload. */
12574 parsed += field_len + 1;
12575 eir += field_len + 1;
/* Schedule a delayed restart of LE scanning (used with controllers that
 * have HCI_QUIRK_STRICT_DUPLICATE_FILTER so updated RSSI values are
 * reported). No-op when not scanning, or when the scan is due to end
 * before the restart delay would elapse.
 *
 * NOTE(review): original line numbers jump; the "return" bodies of both
 * guard conditions and the braces were dropped in extraction.
 */
12581 static void restart_le_scan(struct hci_dev *hdev)
12583 /* If controller is not scanning we are done. */
12584 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
/* Skip the restart when the scheduled scan end is closer than the
 * restart delay.
 */
12587 if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
12588 hdev->discovery.scan_start +
12589 hdev->discovery.scan_duration))
12592 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
12593 DISCOV_LE_RESTART_DELAY);
/* Apply the Start Service Discovery filters (RSSI threshold and UUID
 * list) to one discovery result; returns whether it should be reported.
 *
 * NOTE(review): original line numbers jump; the "return false" /
 * "return true" statements that terminate each branch and the braces
 * were dropped in extraction -- restore from upstream.
 */
12596 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
12597 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
12599 /* If a RSSI threshold has been specified, and
12600 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
12601 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
12602 * is set, let it through for further processing, as we might need to
12603 * restart the scan.
12605 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
12606 * the results are also dropped.
12608 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12609 (rssi == HCI_RSSI_INVALID ||
12610 (rssi < hdev->discovery.rssi &&
12611 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
12614 if (hdev->discovery.uuid_count != 0) {
12615 /* If a list of UUIDs is provided in filter, results with no
12616 * matching UUID should be dropped.
/* A UUID in either the advertising data or the scan response counts. */
12618 if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
12619 hdev->discovery.uuids) &&
12620 !eir_has_uuids(scan_rsp, scan_rsp_len,
12621 hdev->discovery.uuid_count,
12622 hdev->discovery.uuids))
12626 /* If duplicate filtering does not report RSSI changes, then restart
12627 * scanning to ensure updated result with updated RSSI values.
12629 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
12630 restart_le_scan(hdev);
12632 /* Validate RSSI value against the RSSI threshold once more. */
12633 if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
12634 rssi < hdev->discovery.rssi)
/* Emit MGMT_EV_ADV_MONITOR_DEVICE_LOST: a device previously matched by
 * the Advertisement Monitor identified by @handle is no longer seen.
 *
 * NOTE(review): original line numbers jump; the trailing argument line
 * of the mgmt_event() call and the braces were dropped in extraction.
 */
12641 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
12642 bdaddr_t *bdaddr, u8 addr_type)
12644 struct mgmt_ev_adv_monitor_device_lost ev;
12646 ev.monitor_handle = cpu_to_le16(handle);
12647 bacpy(&ev.addr.bdaddr, bdaddr);
12648 ev.addr.type = addr_type;
12650 mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
/* Re-emit the contents of a DEVICE_FOUND event skb as an
 * ADV_MONITOR_DEVICE_FOUND event: allocate a larger skb, prepend the
 * little-endian monitor handle, copy the original payload, and send it
 * to all management sockets except @skip_sk.
 *
 * NOTE(review): original line numbers jump; the "u16 handle" parameter
 * line, the NULL check after mgmt_alloc_skb() and the braces were
 * dropped in extraction -- restore from upstream.
 */
12654 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
12655 struct sk_buff *skb,
12656 struct sock *skip_sk,
12659 struct sk_buff *advmon_skb;
12660 size_t advmon_skb_len;
12661 __le16 *monitor_handle;
/* Size delta between the two event headers, plus the original payload. */
12666 advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
12667 sizeof(struct mgmt_ev_device_found)) + skb->len;
12668 advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
12673 /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
12674 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
12675 * store monitor_handle of the matched monitor.
12677 monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
12678 *monitor_handle = cpu_to_le16(handle);
12679 skb_put_data(advmon_skb, skb->data, skb->len);
12681 mgmt_event_skb(advmon_skb, skip_sk);
/* Decide how an advertisement report should be surfaced to userspace:
 * as a plain DEVICE_FOUND event, as ADV_MONITOR_DEVICE_FOUND for each
 * newly matched monitored device, or both. Consumes @skb.
 *
 * NOTE(review): original line numbers jump; several statements
 * (matched = true, the skb kfree/return paths, loop braces) were
 * dropped in extraction -- restore from upstream before relying on the
 * exact control flow.
 */
12684 static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
12685 bdaddr_t *bdaddr, bool report_device,
12686 struct sk_buff *skb,
12687 struct sock *skip_sk)
12689 struct monitored_device *dev, *tmp;
12690 bool matched = false;
12691 bool notified = false;
12693 /* We have received the Advertisement Report because:
12694 * 1. the kernel has initiated active discovery
12695 * 2. if not, we have pend_le_reports > 0 in which case we are doing
12697 * 3. if none of the above is true, we have one or more active
12698 * Advertisement Monitor
12700 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
12701 * and report ONLY one advertisement per device for the matched Monitor
12702 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12704 * For case 3, since we are not active scanning and all advertisements
12705 * received are due to a matched Advertisement Monitor, report all
12706 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
12708 if (report_device && !hdev->advmon_pend_notify) {
12709 mgmt_event_skb(skb, skip_sk);
/* Recomputed below while walking the monitored-device list. */
12713 hdev->advmon_pend_notify = false;
12715 list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
12716 if (!bacmp(&dev->bdaddr, bdaddr)) {
12719 if (!dev->notified) {
12720 mgmt_send_adv_monitor_device_found(hdev, skb,
12724 dev->notified = true;
/* Any still-unnotified device keeps the pending-notify flag set. */
12728 if (!dev->notified)
12729 hdev->advmon_pend_notify = true;
12732 if (!report_device &&
12733 ((matched && !notified) || !msft_monitor_supported(hdev))) {
12734 /* Handle 0 indicates that we are not active scanning and this
12735 * is a subsequent advertisement report for an already matched
12736 * Advertisement Monitor or the controller offloading support
12737 * is not available.
12739 mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
12743 mgmt_event_skb(skb, skip_sk);
/* Report a device to mesh userspace via MGMT_EV_MESH_DEVICE_FOUND, but
 * only when the advertising data (or scan response) contains at least
 * one AD type the mesh profile registered in hdev->mesh_ad_types.
 *
 * NOTE(review): original line numbers jump; the "instant" parameter
 * line, local declarations, the "goto found"/early-return flow between
 * the scan loops and the allocation NULL check were dropped in
 * extraction -- restore from upstream.
 */
12748 static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr,
12749 u8 addr_type, s8 rssi, u32 flags, u8 *eir,
12750 u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12753 struct sk_buff *skb;
12754 struct mgmt_ev_mesh_device_found *ev;
/* No registered AD types means mesh is not interested in reports. */
12757 if (!hdev->mesh_ad_types[0])
12760 /* Scan for requested AD types */
12762 for (i = 0; i + 1 < eir_len; i += eir[i] + 1) {
12763 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12764 if (!hdev->mesh_ad_types[j])
12767 if (hdev->mesh_ad_types[j] == eir[i + 1])
12773 if (scan_rsp_len > 0) {
12774 for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) {
12775 for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) {
12776 if (!hdev->mesh_ad_types[j])
12779 if (hdev->mesh_ad_types[j] == scan_rsp[i + 1])
12788 skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND,
12789 sizeof(*ev) + eir_len + scan_rsp_len);
12793 ev = skb_put(skb, sizeof(*ev));
12795 bacpy(&ev->addr.bdaddr, bdaddr);
12796 ev->addr.type = link_to_bdaddr(LE_LINK, addr_type);
12798 ev->flags = cpu_to_le32(flags);
12799 ev->instant = cpu_to_le64(instant);
12802 /* Copy EIR or advertising data into event */
12803 skb_put_data(skb, eir, eir_len);
12805 if (scan_rsp_len > 0)
12806 /* Append scan response data to event */
12807 skb_put_data(skb, scan_rsp, scan_rsp_len);
12809 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
12811 mgmt_event_skb(skb, NULL);
/* Central entry point for discovery results: optionally forwards LE
 * reports to mesh, applies discovery/service-discovery filters, builds
 * a MGMT_EV_DEVICE_FOUND skb (EIR + optional synthesized Class of
 * Device field + scan response) and hands it to the Advertisement
 * Monitor routing logic.
 *
 * NOTE(review): original line numbers jump; numerous lines were dropped
 * in extraction (early returns in the filter paths, "rssi = 0;" for the
 * pre-1.2 BR/EDR compatibility case, the eir_cod[5] declaration, the
 * allocation NULL check and various braces) -- restore from upstream
 * before relying on the exact control flow.
 */
12814 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12815 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
12816 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len,
12819 struct sk_buff *skb;
12820 struct mgmt_ev_device_found *ev;
12821 bool report_device = hci_discovery_active(hdev);
/* Mesh gets its own copy of every LE report. */
12823 if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK)
12824 mesh_device_found(hdev, bdaddr, addr_type, rssi, flags,
12825 eir, eir_len, scan_rsp, scan_rsp_len,
12828 /* Don't send events for a non-kernel initiated discovery. With
12829 * LE one exception is if we have pend_le_reports > 0 in which
12830 * case we're doing passive scanning and want these events.
12832 if (!hci_discovery_active(hdev)) {
12833 if (link_type == ACL_LINK)
12835 if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
12836 report_device = true;
12837 else if (!hci_is_adv_monitoring(hdev))
12841 if (hdev->discovery.result_filtering) {
12842 /* We are using service discovery */
12843 if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
12848 if (hdev->discovery.limited) {
12849 /* Check for limited discoverable bit */
/* BR/EDR: bit 5 of the CoD minor/major byte; LE: AD Flags field. */
12851 if (!(dev_class[1] & 0x20))
12854 u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
12855 if (!flags || !(flags[0] & LE_AD_LIMITED))
12860 /* Allocate skb. The 5 extra bytes are for the potential CoD field */
12861 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12862 sizeof(*ev) + eir_len + scan_rsp_len + 5);
12866 ev = skb_put(skb, sizeof(*ev));
12868 /* In case of device discovery with BR/EDR devices (pre 1.2), the
12869 * RSSI value was reported as 0 when not available. This behavior
12870 * is kept when using device discovery. This is required for full
12871 * backwards compatibility with the API.
12873 * However when using service discovery, the value 127 will be
12874 * returned when the RSSI is not available.
12876 if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
12877 link_type == ACL_LINK)
12880 bacpy(&ev->addr.bdaddr, bdaddr);
12881 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12883 ev->flags = cpu_to_le32(flags);
12886 /* Copy EIR or advertising data into event */
12887 skb_put_data(skb, eir, eir_len);
/* Synthesize an EIR Class of Device field when the EIR lacks one. */
12889 if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
12892 eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
12894 skb_put_data(skb, eir_cod, sizeof(eir_cod));
12897 if (scan_rsp_len > 0)
12898 /* Append scan response data to event */
12899 skb_put_data(skb, scan_rsp, scan_rsp_len);
12901 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
/* Routing decides DEVICE_FOUND vs ADV_MONITOR_DEVICE_FOUND; owns skb. */
12903 mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
/* Report the result of a remote-name request as MGMT_EV_DEVICE_FOUND:
 * on success the name is appended as an EIR_NAME_COMPLETE field; when
 * @name is NULL the MGMT_DEV_FOUND_NAME_REQUEST_FAILED flag is set.
 *
 * NOTE(review): original line numbers jump; locals (eir_len, flags),
 * the allocation NULL check, the "if (name)" / else split and braces
 * were dropped in extraction -- restore from upstream.
 */
12906 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
12907 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
12909 struct sk_buff *skb;
12910 struct mgmt_ev_device_found *ev;
12914 skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
12915 sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
12917 ev = skb_put(skb, sizeof(*ev));
12918 bacpy(&ev->addr.bdaddr, bdaddr);
12919 ev->addr.type = link_to_bdaddr(link_type, addr_type);
12923 eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
12925 flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
12927 ev->eir_len = cpu_to_le16(eir_len);
12928 ev->flags = cpu_to_le32(flags);
12930 mgmt_event_skb(skb, NULL);
12933 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
12935 struct mgmt_ev_discovering ev;
12937 bt_dev_dbg(hdev, "discovering %u", discovering);
12939 memset(&ev, 0, sizeof(ev));
12940 ev.type = hdev->discovery.type;
12941 ev.discovering = discovering;
12943 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
12946 void mgmt_suspending(struct hci_dev *hdev, u8 state)
12948 struct mgmt_ev_controller_suspend ev;
12950 ev.suspend_state = state;
12951 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
/* Notify management sockets that the controller resumed, with the wake
 * reason and, when available, the address of the device that woke us
 * (zeroed otherwise).
 *
 * NOTE(review): original line numbers jump; the "if (bdaddr)" / else
 * split around the address handling and the braces were dropped in
 * extraction -- as written the memset would clobber the copied address,
 * restore the conditional from upstream.
 */
12954 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
12957 struct mgmt_ev_controller_resume ev;
12959 ev.wake_reason = reason;
12961 bacpy(&ev.addr.bdaddr, bdaddr);
12962 ev.addr.type = addr_type;
12964 memset(&ev.addr, 0, sizeof(ev.addr));
12967 mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
/* Descriptor for the HCI_CHANNEL_CONTROL management channel: command
 * handler table plus the per-hdev init hook, registered in mgmt_init().
 *
 * NOTE(review): the tizen_handler* members are Tizen-specific (see the
 * mgmt_tizen.h include); the #ifdef guard lines around them appear to
 * have been dropped in extraction.
 */
12970 static struct hci_mgmt_chan chan = {
12971 .channel = HCI_CHANNEL_CONTROL,
12972 .handler_count = ARRAY_SIZE(mgmt_handlers),
12973 .handlers = mgmt_handlers,
12975 .tizen_handler_count = ARRAY_SIZE(tizen_mgmt_handlers),
12976 .tizen_handlers = tizen_mgmt_handlers,
12978 .hdev_init = mgmt_init_hdev,
12981 int mgmt_init(void)
12983 return hci_mgmt_chan_register(&chan);
12986 void mgmt_exit(void)
12988 hci_mgmt_chan_unregister(&chan);
/* Called when a management socket closes: walk every registered hdev
 * and complete (as cancelled) all mesh TX entries that belong to @sk.
 *
 * NOTE(review): original line numbers jump; the per-hdev locking, the
 * loop that drains mgmt_mesh_next() until it returns NULL, the
 * "if (mesh_tx)" guard and the braces were dropped in extraction --
 * restore from upstream.
 */
12991 void mgmt_cleanup(struct sock *sk)
12993 struct mgmt_mesh_tx *mesh_tx;
12994 struct hci_dev *hdev;
/* hci_dev_list is only read here; writers take the write lock. */
12996 read_lock(&hci_dev_list_lock);
12998 list_for_each_entry(hdev, &hci_dev_list, list) {
13000 mesh_tx = mgmt_mesh_next(hdev, sk);
13003 mesh_send_complete(hdev, mesh_tx, true);
13007 read_unlock(&hci_dev_list_lock);